mirror of
https://github.com/ivabus/pantry
synced 2024-11-29 11:45:07 +03:00
2b06942c62
* llama.cpp, github version instead of hardcoded version
* llama.cpp, check if model is specified; if yes, run it; if not, then download model
* Use entrypoint for custom llama.cpp invocation
* `llama.cpp` is just raw executable. This I think is our new pattern.
* To run chat use the entrypoint: `pkgx +brewkit -- run llama.cpp`

Co-authored-by: James Reynolds <magnsuviri@me.com>
Co-authored-by: Max Howell <mxcl@me.com>
57 lines
1.3 KiB
YAML
distributable:
  url: https://github.com/ggerganov/llama.cpp/archive/refs/tags/b{{version.raw}}.tar.gz
  strip-components: 1

versions:
  github: ggerganov/llama.cpp/tags
  strip: /^b/

display-name: LLaMA.cpp

provides:
  - bin/llama.cpp
  # NOTE! we do not “provide” convert.py. ∵ it’s too generic
  # do `tea +github.com∕ggerganov∕llama.cpp convert.py`

platforms:
  - linux
  - darwin/aarch64
  # Illegal instruction: 4 on darwin/x86-64

dependencies:
  python.org: ~3.11
  pkgx.sh: ^1

build:
  dependencies:
    gnu.org/coreutils: '*'
  env:
    VIRTUAL_ENV: ${{prefix}}/venv
  script:
    - make --jobs {{hw.concurrency}}

    - |
      install -D main {{prefix}}/bin/llama.cpp
      install -D props/entrypoint.sh {{prefix}}/entrypoint.sh
      install -D ggml-metal.metal {{prefix}}/bin/ggml-metal.metal

    - |
      mkdir -p {{prefix}}/share
      mv prompts {{prefix}}/share

    - |
      install -D convert.py $VIRTUAL_ENV/bin/convert.py
      python-venv-stubber.sh convert.py

    - |
      python -m venv $VIRTUAL_ENV
      source $VIRTUAL_ENV/bin/activate
      pip install -r requirements.txt
      deactivate

test:
  '{{prefix}}/bin/llama.cpp --help'
  # ^^ testing more than this requires downloading the models 😬

entrypoint: ./entrypoint.sh