pantry/projects/github.com/ggerganov/llama.cpp/package.yml


distributable:
  url: https://github.com/ggerganov/llama.cpp/archive/refs/tags/b{{version.raw}}.tar.gz
  strip-components: 1

versions:
  github: ggerganov/llama.cpp/tags
  strip: /^b/
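  # e.g. upstream tag `b1732` → version `1732`; the distributable URL
  # above re-adds the prefix via `b{{version.raw}}`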

display-name: LLaMA.cpp

provides:
  - bin/llama.cpp
  # NOTE! we do not “provide” convert.py ∵ it's too generic
  # do `tea +github.com/ggerganov/llama.cpp convert.py`

platforms:
  - linux
  - darwin/aarch64
  # Illegal instruction: 4 on darwin/x86-64

dependencies:
  pkgx.sh: ^1
  linux:
    gnu.org/gcc: '*'  # clang doesn't provide omp.h, and we need libstdc++

build:
  dependencies:
    gnu.org/coreutils: '*'
    git-scm.org: '*'
    curl.se: '*'
    python.org: ~3.11
  env:
    VIRTUAL_ENV: ${{prefix}}/venv
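    # ^^ where convert.py and its pip deps get installed
    # (used by the install and bkpyvenv steps below)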
  script:
    # segfaults on some GHA runners
    - run: sed -i -e's/\(MK_.* -march=native -mtune=native\)/#\1/g' Makefile
      if: linux/x86-64
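    # ^^ the sed comments out the `MK_* … -march=native -mtune=native` flag
    # lines so the build targets a generic x86-64 baseline rather than the
    # build machine's own CPU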

    # this commit breaks linux/aarch64 - fixed in 1732
    - run: curl -LSs 'https://github.com/ggerganov/llama.cpp/pull/4630/commits/42f5246effafddcf87d67656b58e95030f4bc454.patch' | patch -p1 -R
      if: '>=1705<1732'
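    # ^^ `patch -R` applies the commit's diff in reverse, i.e. reverts it
    # for the affected versions (>=1705, <1732)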

    - make --jobs {{hw.concurrency}}

    - |
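      # newer releases renamed the `main` binary to `llama-cli`;
      # install whichever one exists as bin/llama.cpp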
      if test -f main; then
        install -D main {{prefix}}/bin/llama.cpp
      elif test -f llama-cli; then
        install -D llama-cli {{prefix}}/bin/llama.cpp
      else
        echo "No binary found"
        exit 1
      fi

      install -D props/entrypoint.sh {{prefix}}/entrypoint.sh
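
      # the Metal shader source moved from the repo root into ggml/src/ in
      # later releases; the darwin build loads it at runtime, so ship it
      # next to the binary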
      if test -f ggml-metal.metal; then
        install -D ggml-metal.metal {{prefix}}/bin/ggml-metal.metal
      elif test -f ggml/src/ggml-metal.metal; then
        install -D ggml/src/ggml-metal.metal {{prefix}}/bin/ggml-metal.metal
      else
        echo "No ggml-metal.metal found"
        exit 1
      fi

    - |
      mkdir -p {{prefix}}/share
      mv prompts {{prefix}}/share

    - |
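      # upstream renamed convert.py to examples/convert-legacy-llama.py;
      # install whichever one exists into the venv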
      if test -f convert.py; then
        install -D convert.py $VIRTUAL_ENV/bin/convert.py
      elif test -f examples/convert-legacy-llama.py; then
        install -D examples/convert-legacy-llama.py $VIRTUAL_ENV/bin/convert.py
      else
        echo "No convert.py found"
        exit 1
      fi
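
    # bkpyvenv is pkgx's build helper for Python venvs: `stage` creates the
    # venv under the prefix and `seal` pins the named script(s) to it
    # (a best-effort description; exact semantics live in pkgx's build tooling)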
    - |
      bkpyvenv stage {{prefix}} {{version}}
      $VIRTUAL_ENV/bin/pip install -r requirements.txt
      bkpyvenv seal {{prefix}} convert.py

test:
  llama.cpp --version
  # llama.cpp --help
  # ^^ broke in 2453; difficult to gate by version due to calver tweaks
  # testing more than this requires downloading the models 😬

entrypoint: ./entrypoint.sh