# pantry/projects/github.com/ggerganov/llama.cpp/package.yml
# Jacob Heider a8574519fc — fix(llama.cpp): 3240, closes #6526
# 2024-06-26 18:26:06 -04:00

# Source tarball for the tagged release; tags are prefixed with `b` (e.g. b3240).
distributable:
  url: https://github.com/ggerganov/llama.cpp/archive/refs/tags/b{{version.raw}}.tar.gz
  strip-components: 1
# Track upstream tags, stripping the leading `b` so versions are plain build numbers.
versions:
  github: ggerganov/llama.cpp/tags
  strip: /^b/
display-name: LLaMA.cpp

provides:
  - bin/llama.cpp
# NOTE! we do not "provide" convert.py, because it's too generic
# do `tea +github.com/ggerganov/llama.cpp convert.py`
platforms:
  - linux
  - darwin/aarch64
  # Illegal instruction: 4 on darwin/x86-64
dependencies:
  pkgx.sh: ^1
  linux:
    gnu.org/gcc: '*' # clang doesn't provide omp.h, and we need libstdc++
build:
  dependencies:
    gnu.org/coreutils: '*'
    git-scm.org: '*'
    curl.se: '*'
    python.org: ~3.11
  env:
    # venv that bkpyvenv stages/seals below for convert.py's requirements
    VIRTUAL_ENV: ${{prefix}}/venv
  script:
    # segfaults on some GHA runners
    - run: sed -i -e's/\(MK_.* -march=native -mtune=native\)/#\1/g' Makefile
      if: linux/x86-64
    # this commit breaks linux/aarch64 - fixed in 1732
    - run: curl -LSs 'https://github.com/ggerganov/llama.cpp/pull/4630/commits/42f5246effafddcf87d67656b58e95030f4bc454.patch' | patch -p1 -R
      if: '>=1705<1732'
    - make --jobs {{hw.concurrency}}
    # upstream renamed the binary `main` -> `llama-cli` (and moved
    # ggml-metal.metal) across versions; probe both locations.
    - |
      if test -f main; then
        install -D main {{prefix}}/bin/llama.cpp
      elif test -f llama-cli; then
        install -D llama-cli {{prefix}}/bin/llama.cpp
      else
        echo "No binary found"
        exit 1
      fi
      install -D props/entrypoint.sh {{prefix}}/entrypoint.sh
      if test -f ggml-metal.metal; then
        install -D ggml-metal.metal {{prefix}}/bin/ggml-metal.metal
      elif test -f ggml/src/ggml-metal.metal; then
        install -D ggml/src/ggml-metal.metal {{prefix}}/bin/ggml-metal.metal
      else
        echo "No ggml-metal.metal found"
        exit 1
      fi
    - |
      mkdir -p {{prefix}}/share
      mv prompts {{prefix}}/share
    # convert.py was moved to examples/convert-legacy-llama.py upstream
    - |
      if test -f convert.py; then
        install -D convert.py $VIRTUAL_ENV/bin/convert.py
      elif test -f examples/convert-legacy-llama.py; then
        install -D examples/convert-legacy-llama.py $VIRTUAL_ENV/bin/convert.py
      else
        echo "No convert.py found"
        false
      fi
    - |
      bkpyvenv stage {{prefix}} {{version}}
      $VIRTUAL_ENV/bin/pip install -r requirements.txt
      bkpyvenv seal {{prefix}} convert.py
test:
  llama.cpp --version
  # broke in 2453, difficult to filter due to calver tweaks
  # llama.cpp --help
  # ^^ testing more than this requires downloading the models 😬

entrypoint: ./entrypoint.sh