+llama.cpp

This commit is contained in:
Max Howell 2023-03-28 08:41:23 -04:00
parent bde31f1fff
commit 96857e732b
5 changed files with 98 additions and 4 deletions

View file

@@ -0,0 +1,17 @@
#!/bin/sh
# Launcher for alpaca.cpp: makes sure the quantized model is on disk,
# then replaces this process with the chat binary, forwarding all args.
set -e
if test -n "$VERBOSE"; then
  set -x
fi

# Installation prefix: this script lives in <prefix>/bin, so go one up.
prefix="$(cd "$(dirname "$0")"/.. && pwd)"
# The prefix directory is named after the package version.
version="$(basename "$prefix")"
# XDG-compliant location for the downloaded model weights.
model_dir="${XDG_DATA_HOME:-$HOME/.local/share}/models/alpaca-LoRA"

# Expose the bundled tool-bin (fetch helper + chat binary) on PATH.
export PATH="$prefix/tbin:$PATH"

# Idempotent: no-op when the model is already present at this version.
alpaca.cpp-fetch-model "$model_dir" "$version"

exec "$prefix"/tbin/alpaca.cpp --color --model "$model_dir"/ggml-alpaca-7b-q4.bin "$@"

View file

@@ -0,0 +1,52 @@
#!/bin/sh
# Fetch the 4-bit quantized alpaca model via BitTorrent.
#   $1 - destination model directory
#   $2 - version stamp to check/record (a VERSION file makes reruns no-ops)
set -e
test -n "$VERBOSE" && set -x

# Already fetched at this exact version? Then there is nothing to do.
# Quote both sides: an empty/whitespace VERSION file must not break `test`.
if test -f "$1"/VERSION && test "$(cat "$1"/VERSION)" = "$2"; then
  exit
fi

# Prefer the system git; only fall back to the tea-provisioned one when
# git is absent. (Original had this inverted: it used `tea git` when git
# WAS installed and bare `git` — guaranteed to fail — when it was not.)
if command -v git >/dev/null; then
  GIT=git
else
  GIT="tea git"
fi

mkdir -p "$1"
cd "$1"

tea gum format "# preparing for model fetch"
echo # spacer

# The upstream README embeds the magnet link for the model torrent;
# scrape it out of the backtick-quoted code span.
URL=$(curl -Ssf \
  https://raw.githubusercontent.com/ItsPi3141/alpaca.cpp/master/README.md | \
  grep -o 'magnet:[^`]*')

# Maintain a local clone of the public tracker list for aria2c.
# NB: $GIT is intentionally unquoted — it may be the two words "tea git".
if test -d trackers; then
  $GIT -C trackers fetch origin
  $GIT -C trackers reset --hard origin/master
else
  $GIT clone "https://github.com/ngosang/trackerslist" trackers
fi
# Strip comment lines; aria2c takes a comma-separated tracker list.
TRACKERS=$(grep -v '^#' "trackers/trackers_all.txt" | tr '\n' ',')

tea gum format <<EoMD
# downloading 4b quantized LLaMA (7B) model
models will be placed: \`$PWD\`
> this may take a few minutes…
EoMD

tea aria2c \
  --dir=. \
  --seed-time=0 \
  --bt-tracker="$TRACKERS" \
  --summary-interval=0 \
  --check-integrity \
  "$URL"

# Stamp the fetched version so the check at the top short-circuits next time.
echo "$2" > VERSION
tea gum format "# All done!"
echo # spacer

View file

@@ -0,0 +1,28 @@
# tea/pkgx pantry definition for alpaca.cpp (4-bit quantized LLaMA chat).
# NOTE(review): indentation was lost in extraction; nesting restored to the
# pantry's conventional package.yml layout.
distributable:
  url: https://github.com/antimatter15/alpaca.cpp/archive/refs/tags/81bd894.tar.gz
  strip-components: 1

versions:
  - 2023.03.21

provides:
  - bin/alpaca.cpp

build:
  dependencies:
    tea.xyz/gx/cc: c99
    tea.xyz/gx/make: '*'
    freedesktop.org/pkg-config: ~0.29
    gnu.org/wget: '*'
  script: |
    mkdir -p {{prefix}}/bin {{prefix}}/tbin {{prefix}}/share
    make chat
    # Install the raw binary under tbin; the bin/ entry is a wrapper script.
    mv chat {{prefix}}/tbin/alpaca.cpp
    mv props/alpaca.cpp {{prefix}}/bin
    mv props/alpaca.cpp-fetch-model {{prefix}}/tbin

test: |
  {{prefix}}/tbin/alpaca.cpp --help
  # testing more than this requires downloading the models 😬

View file

@@ -5,7 +5,7 @@ test -n "$VERBOSE" && set -x
D="$(cd "$(dirname "$0")"/.. && pwd)" D="$(cd "$(dirname "$0")"/.. && pwd)"
VERSION="$(basename "$D")" VERSION="$(basename "$D")"
MODEL_DIR="${XDG_DATA_HOME:-$HOME/.local/share}/LLaMA" MODEL_DIR="${XDG_DATA_HOME:-$HOME/.local/share}/models/LLaMA"
export PATH="$D/tbin:$PATH" export PATH="$D/tbin:$PATH"

View file

@@ -5,9 +5,6 @@ distributable:
versions: versions:
- 2023.03.23 - 2023.03.23
dependencies:
python.org: ^3.11
provides: provides:
- bin/llama.cpp - bin/llama.cpp