distributable:
  url: https://github.com/ggerganov/llama.cpp/archive/refs/tags/b{{version.raw}}.tar.gz
  strip-components: 1

versions:
  github: ggerganov/llama.cpp/tags
  strip: /^b/

display-name: LLaMA.cpp

provides:
  - bin/llama.cpp
# NOTE we do not “provide” convert.py because it is too generic
# do `tea +github.com/ggerganov/llama.cpp convert.py`

platforms:
  - linux
  - darwin/aarch64  # Illegal instruction: 4 on darwin/x86-64

dependencies:
  python.org: ~3.11
  pkgx.sh: ^1

build:
  dependencies:
    gnu.org/coreutils: '*'
    git-scm.org: '*'
  env:
    VIRTUAL_ENV: ${{prefix}}/venv
    CC: clang
    CXX: clang++
    LD: clang
  script:
    # building with -march=native -mtune=native segfaults on some GHA runners,
    # so comment those flags out of the Makefile
    - run: |
        sed -i.bak -e's/\(MK_.* -march=native -mtune=native\)/#\1/g' Makefile
        rm Makefile.bak
      if: linux/x86-64

    - make --jobs {{hw.concurrency}}

    - |
      install -D main {{prefix}}/bin/llama.cpp
      install -D props/entrypoint.sh {{prefix}}/entrypoint.sh
      install -D ggml-metal.metal {{prefix}}/bin/ggml-metal.metal

    - |
      mkdir -p {{prefix}}/share
      mv prompts {{prefix}}/share

    # create the venv before installing convert.py into it
    - |
      python -m venv $VIRTUAL_ENV
      source $VIRTUAL_ENV/bin/activate
      pip install -r requirements.txt
      deactivate

    - |
      install -D convert.py $VIRTUAL_ENV/bin/convert.py
      python-venv-stubber.sh convert.py

test: '{{prefix}}/bin/llama.cpp --help'
# ^^ testing more than this requires downloading the models 😬

entrypoint: ./entrypoint.sh
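
# A usage sketch (comments only, not part of the spec): the `llama.cpp`
# invocation mirrors the test above, and convert.py is reached through the
# venv stub that python-venv-stubber.sh creates, per the note under
# `provides`. `--help` is the only flag the spec itself confirms:
#
#   tea +github.com/ggerganov/llama.cpp llama.cpp --help
#   tea +github.com/ggerganov/llama.cpp convert.py --help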