distributable:
  url: https://github.com/ggerganov/llama.cpp/archive/refs/tags/b{{version.raw}}.tar.gz
  strip-components: 1

versions:
  github: ggerganov/llama.cpp/tags
  strip: /^b/
display-name: LLaMA.cpp

provides:
  - bin/llama.cpp
  # NOTE! we do not “provide” convert.py. ∵ it’s too generic
  # do `tea +github.com∕ggerganov∕llama.cpp convert.py`
platforms:
  - linux
  - darwin/aarch64
  # Illegal instruction: 4 on darwin/x86-64
dependencies:
  pkgx.sh: ^1
  linux:
    gnu.org/gcc: '*'  # clang doesn't provide omp.h, and we need libstdc++
build:
  dependencies:
    gnu.org/coreutils: '*'
    git-scm.org: '*'
    curl.se: '*'
    python.org: ~3.11
  env:
    VIRTUAL_ENV: ${{prefix}}/venv
  script:
    # segfaults on some GHA runners
    - run: sed -i -e's/\(MK_.* -march=native -mtune=native\)/#\1/g' Makefile
      if: linux/x86-64
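    # ^^ comments out the Makefile lines that pass -march=native -mtune=native,
    # so the binary is not tuned to CPU features the build runner may not have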

    # this commit breaks linux/aarch64 - fixed in 1732
    - run: curl -LSs 'https://github.com/ggerganov/llama.cpp/pull/4630/commits/42f5246effafddcf87d67656b58e95030f4bc454.patch' | patch -p1 -R
      if: '>=1705<1732'
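    # ^^ `patch -R` applies that commit in reverse, backing it out for the
    # affected range of releases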

    - make --jobs {{hw.concurrency}}

    - |
      if test -f main; then
        install -D main {{prefix}}/bin/llama.cpp
      elif test -f llama-cli; then
        install -D llama-cli {{prefix}}/bin/llama.cpp
      else
        echo "No binary found"
        exit 1
      fi
      install -D props/entrypoint.sh {{prefix}}/entrypoint.sh
      if test -f ggml-metal.metal; then
        install -D ggml-metal.metal {{prefix}}/bin/ggml-metal.metal
      elif test -f ggml/src/ggml-metal.metal; then
        install -D ggml/src/ggml-metal.metal {{prefix}}/bin/ggml-metal.metal
      else
        echo "No ggml-metal.metal found"
        exit 1
      fi
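    # ^^ newer releases renamed the `main` binary to `llama-cli` and moved
    # ggml-metal.metal under ggml/src/, hence the fallbacks above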

    - |
      mkdir -p {{prefix}}/share
      mv prompts {{prefix}}/share

    - |
      if test -f convert.py; then
        install -D convert.py $VIRTUAL_ENV/bin/convert.py
      elif test -f examples/convert-legacy-llama.py; then
        install -D examples/convert-legacy-llama.py $VIRTUAL_ENV/bin/convert.py
      else
        echo "No convert.py found"
        false
      fi
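    # ^^ later releases dropped the top-level convert.py in favour of
    # examples/convert-legacy-llama.py, hence the fallback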

    - |
      bkpyvenv stage {{prefix}} {{version}}
      $VIRTUAL_ENV/bin/pip install -r requirements.txt
      bkpyvenv seal {{prefix}} convert.py
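    # ^^ stages a virtualenv at $VIRTUAL_ENV, installs the python requirements
    # convert.py needs, then seals convert.py so it runs against that venv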

test:
  llama.cpp --version
  # broke in 2453, difficult to filter due to calver tweaks
  # llama.cpp --help
  # ^^ testing more than this requires downloading the models 😬
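  # e.g. an inference smoke test along the lines of
  # `llama.cpp -m model.gguf -p "hi"` would need a GGUF model fetched first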

entrypoint: ./entrypoint.sh