# tea/pkgx pantry manifest for llama.cpp (ggerganov/llama.cpp).
# Builds the `main` binary, installs helper scripts, and provisions a
# Python venv for the model-conversion tooling.

distributable:
  url: https://github.com/ggerganov/llama.cpp/archive/refs/tags/master-fff0e0e.tar.gz
  strip-components: 1

versions:
  - 2023.07.20

provides:
  - bin/llama.cpp
  # NOTE! we do not “provide” convert.py. ∵ it’s too generic
  # do `tea +github.com∕ggerganov∕llama.cpp convert.py`

platforms:
  - linux
  - darwin/aarch64
  # Illegal instruction: 4 on darwin/x86-64

dependencies:
  python.org: ^3.11
  tea.xyz: ^0  # the scripts use tea/cli

build:
  dependencies:
    tea.xyz/gx/cc: c99
    tea.xyz/gx/make: '*'
    gnu.org/coreutils: '*'
  env:
    # venv lives inside the package prefix so it relocates with the install
    VIRTUAL_ENV: ${{prefix}}/venv
  script:
    # compile and install the main binary plus the wrapper entry point
    - |
      make --jobs {{hw.concurrency}}
      install -D main {{prefix}}/libexec/llama.cpp
      install -D props/llama.cpp {{prefix}}/bin/llama.cpp
    # ship the bundled prompts and the model-fetch helper
    - |
      mkdir -p {{prefix}}/share
      mv prompts {{prefix}}/share
      mv props/llama-fetch {{prefix}}/libexec
    # stage convert.py into the venv bin dir (install -D creates the path)
    - |
      install -D convert.py $VIRTUAL_ENV/bin/convert.py
      python-venv-stubber.sh convert.py
    - python -m venv $VIRTUAL_ENV
    # install convert.py's Python dependencies into the venv
    - |
      source $VIRTUAL_ENV/bin/activate
      pip install -r requirements.txt
      deactivate

test: |
  {{prefix}}/libexec/llama.cpp --help
  # testing more than this requires downloading the models 😬

entrypoint: llama.cpp