# pantry/projects/github.com/ggerganov/llama.cpp/package.yml
distributable:
  url: https://github.com/ggerganov/llama.cpp/archive/refs/tags/master-fff0e0e.tar.gz
  strip-components: 1

versions:
  - 2023.07.20

display-name: LLaMA.cpp

provides:
  - bin/llama.cpp
  # NOTE! we do not "provide" convert.py because it's too generic
  # do `tea +github.com/ggerganov/llama.cpp convert.py`

platforms:
  - linux
  - darwin/aarch64
  # Illegal instruction: 4 on darwin/x86-64

dependencies:
  python.org: ^3.11
  tea.xyz: ^0  # the scripts use tea/cli

build:
  dependencies:
    tea.xyz/gx/cc: c99
    tea.xyz/gx/make: '*'
    gnu.org/coreutils: '*'
  env:
    VIRTUAL_ENV: ${{prefix}}/venv
  script:
    # build the binary, then install it under libexec with a launcher in bin
    - |
      make --jobs {{hw.concurrency}}
      install -D main {{prefix}}/libexec/llama.cpp
      install -D props/llama.cpp {{prefix}}/bin/llama.cpp
    # ship the sample prompts and the fetch helper
    - |
      mkdir -p {{prefix}}/share
      mv prompts {{prefix}}/share
      mv props/llama-fetch {{prefix}}/libexec
    # stub convert.py into the venv's bin dir
    # NOTE(review): the venv itself is created by the next step; `install -D`
    # pre-creates $VIRTUAL_ENV/bin and `python -m venv` tolerates an existing
    # directory — confirm before reordering
    - |
      install -D convert.py $VIRTUAL_ENV/bin/convert.py
      python-venv-stubber.sh convert.py
    - python -m venv $VIRTUAL_ENV
    # install the conversion scripts' python deps into the venv
    - |
      source $VIRTUAL_ENV/bin/activate
      pip install -r requirements.txt
      deactivate

test: |
  {{prefix}}/libexec/llama.cpp --help
  # testing more than this requires downloading the models 😬

entrypoint: llama.cpp