fix(text-generation-webui) (#3579)

* fix(text-generation-webui)

closes #3576

* let's see if this works
This commit is contained in:
Jacob Heider 2023-10-08 21:35:21 -04:00 committed by GitHub
parent a544281f78
commit fe4553718c
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
2 changed files with 35 additions and 22 deletions

View file

@ -1,10 +1,11 @@
#!/usr/bin/env -S tea bash #!/usr/bin/env -S pkgx bash
# shellcheck shell=bash
PORT=$(tea get-port) PORT=$(pkgx get-port)
set -emfo pipefail set -emfo pipefail
tea gum format <<EoMD pkgx gum format <<EoMD
# loading llama.cpp model… # loading llama.cpp model…
this may take a while this may take a while
@ -13,14 +14,14 @@ EoMD
echo # spacer echo # spacer
tea llama.cpp --fetch pkgx llama.cpp --fetch
d="$(cd "$(dirname $0)" && pwd)" d="$(cd "$(dirname "$0")" && pwd)"
XDG="${XDG_DATA_HOME:-$HOME/.local/share}" XDG="${XDG_DATA_HOME:-$HOME/.local/share}"
"$d"/bin/text-generation-webui \ "$d"/bin/text-generation-webui \
--listen-port $PORT \ --listen-port "$PORT" \
--model-dir "$XDG/models" \ --model-dir "$XDG/models" \
--model OpenLLaMA \ --model OpenLLaMA \
& &
@ -28,7 +29,7 @@ XDG="${XDG_DATA_HOME:-$HOME/.local/share}"
PID=$! PID=$!
# poll until a HEAD request succeeds # poll until a HEAD request succeeds
while ! curl -Is http://127.0.0.1:$PORT | grep -q "HTTP/1.1 200 OK"; do while ! curl -Is http://127.0.0.1:"$PORT" | grep -q "HTTP/1.1 200 OK"; do
if ! kill -0 $PID; then if ! kill -0 $PID; then
echo "webui process died!" echo "webui process died!"
exit 1 exit 1
@ -37,20 +38,21 @@ while ! curl -Is http://127.0.0.1:$PORT | grep -q "HTTP/1.1 200 OK"; do
done done
# open the URL once the HEAD request succeeds # open the URL once the HEAD request succeeds
if test -n "$TEA_GUI"; then # shellcheck disable=SC2154
echo "{\"xyz.tea\":{\"gui\":\"http://127.0.0.1:$PORT\"}}" >&2 if test -n "$pkgx_GUI"; then
echo "{\"xyz.pkgx\":{\"gui\":\"http://127.0.0.1:$PORT\"}}" >&2
else else
open "http://127.0.0.1:$PORT" open "http://127.0.0.1:$PORT"
fi fi
tea gum format <<EoMD pkgx gum format <<EoMD
# text generation web UI # text generation web UI
this package has been modified for your convenience: this package has been modified for your convenience:
* download additional models to \`$XDG/models\` * download additional models to \`$XDG/models\`
> bug reports to our [tracker](https://github.com/teaxyz/pantry/issues). thanks! > bug reports to our [tracker](https://github.com/pkgxxyz/pantry/issues). thanks!
enjoy! enjoy!
EoMD EoMD

View file

@ -9,30 +9,27 @@ versions:
dependencies: dependencies:
python.org: ~3.10 python.org: ~3.10
tea.xyz: ^0 pkgx.sh: ^1
entrypoint: tea ./entrypoint.sh entrypoint: pkgx ./entrypoint.sh
display-name: text generation web UI display-name: text generation web UI
platforms: platforms: darwin
darwin
# TODO https://github.com/oobabooga/text-generation-webui/blob/385229313fd728f6e7573895564253d98b9826da/docs/llama.cpp.md?plain=1#L4 # TODO https://github.com/oobabooga/text-generation-webui/blob/385229313fd728f6e7573895564253d98b9826da/docs/llama.cpp.md?plain=1#L4
# TODO entry # TODO entry
build: build:
dependencies: dependencies:
gnu.org/coreutils: '*' gnu.org/coreutils: '*'
working-directory: working-directory: text-generation-webui-{{version.raw}}
text-generation-webui-{{version.raw}}
script: script:
# pkg expects all the files from its checkout # pkg expects all the files from its checkout
- | - |
mkdir -p {{prefix}}/venv/bin mkdir -p {{prefix}}/venv/bin
cp -R . {{prefix}}/venv/bin cp -R . {{prefix}}/venv/bin
- working-directory: '{{prefix}}/venv/bin' - working-directory: '{{prefix}}/venv/bin'
run: run: rm -rf docker .github docs .gitignore *.md
rm -rf docker .github docs .gitignore *.md
- | - |
python -m venv {{prefix}}/venv python -m venv {{prefix}}/venv
@ -41,7 +38,13 @@ build:
# these requirements are separate as they vary by platform # these requirements are separate as they vary by platform
- pip install torch torchvision torchaudio - pip install torch torchvision torchaudio
- pip install -r requirements.txt # requirements_apple_intel.txt links to a missing version
# of llama.cpp
- run: |
sed -i.bak -e's/llama_cpp_python-0.2.11-cp310-cp310-macosx_13_0_x86_64.whl/llama_cpp_python-0.2.11-cp310-cp310-macosx_12_0_x86_64.whl/' requirements_apple_intel.txt
rm requirements_apple_intel.txt.bak
if: darwin #/x86-64
- pip install -r $REQS
# pkg expects to be run with CWD set to its checkout # pkg expects to be run with CWD set to its checkout
- working-directory: '{{prefix}}/venv/bin' - working-directory: '{{prefix}}/venv/bin'
@ -56,6 +59,14 @@ build:
- python-venv-stubber.sh text-generation-webui - python-venv-stubber.sh text-generation-webui
- cp ../props/entrypoint.sh {{prefix}} - cp ../props/entrypoint.sh {{prefix}}
env:
linux:
REQS: requirements.txt
darwin/aarch64:
REQS: requirements_apple_silicon.txt
darwin/x86-64:
REQS: requirements_apple_intel.txt
test: test:
text-generation-webui --help qa-required: true
script: text-generation-webui --help