fix(text-generation-webui) (#3579)

* fix(text-generation-webui)

closes #3576

* let's see if this works
This commit is contained in:
Jacob Heider 2023-10-08 21:35:21 -04:00 committed by GitHub
parent a544281f78
commit fe4553718c
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
2 changed files with 35 additions and 22 deletions

View file

@ -1,10 +1,11 @@
#!/usr/bin/env -S tea bash
#!/usr/bin/env -S pkgx bash
# shellcheck shell=bash
PORT=$(tea get-port)
PORT=$(pkgx get-port)
set -emfo pipefail
tea gum format <<EoMD
pkgx gum format <<EoMD
# loading llama.cpp model…
this may take a while
@ -13,14 +14,14 @@ EoMD
echo # spacer
tea llama.cpp --fetch
pkgx llama.cpp --fetch
d="$(cd "$(dirname $0)" && pwd)"
d="$(cd "$(dirname "$0")" && pwd)"
XDG="${XDG_DATA_HOME:-$HOME/.local/share}"
"$d"/bin/text-generation-webui \
--listen-port $PORT \
--listen-port "$PORT" \
--model-dir "$XDG/models" \
--model OpenLLaMA \
&
@ -28,7 +29,7 @@ XDG="${XDG_DATA_HOME:-$HOME/.local/share}"
PID=$!
# poll until a HEAD request succeeds
while ! curl -Is http://127.0.0.1:$PORT | grep -q "HTTP/1.1 200 OK"; do
while ! curl -Is http://127.0.0.1:"$PORT" | grep -q "HTTP/1.1 200 OK"; do
if ! kill -0 $PID; then
echo "webui process died!"
exit 1
@ -37,20 +38,21 @@ while ! curl -Is http://127.0.0.1:$PORT | grep -q "HTTP/1.1 200 OK"; do
done
# open the URL once the HEAD request succeeds
if test -n "$TEA_GUI"; then
echo "{\"xyz.tea\":{\"gui\":\"http://127.0.0.1:$PORT\"}}" >&2
# shellcheck disable=SC2154
if test -n "$pkgx_GUI"; then
echo "{\"xyz.pkgx\":{\"gui\":\"http://127.0.0.1:$PORT\"}}" >&2
else
open "http://127.0.0.1:$PORT"
fi
tea gum format <<EoMD
pkgx gum format <<EoMD
# text generation web UI
this package has been modified for your convenience:
* download additional models to \`$XDG/models\`
> bugs reports to our [tracker](https://github.com/teaxyz/pantry/issues). thanks!
> bug reports to our [tracker](https://github.com/pkgxxyz/pantry/issues). thanks!
enjoy!
EoMD

View file

@ -9,30 +9,27 @@ versions:
dependencies:
python.org: ~3.10
tea.xyz: ^0
pkgx.sh: ^1
entrypoint: tea ./entrypoint.sh
entrypoint: pkgx ./entrypoint.sh
display-name: text generation web UI
platforms:
darwin
platforms: darwin
# TODO https://github.com/oobabooga/text-generation-webui/blob/385229313fd728f6e7573895564253d98b9826da/docs/llama.cpp.md?plain=1#L4
# TODO entry
build:
dependencies:
gnu.org/coreutils: '*'
working-directory:
text-generation-webui-{{version.raw}}
working-directory: text-generation-webui-{{version.raw}}
script:
# pkg expects all the files from its checkout
- |
mkdir -p {{prefix}}/venv/bin
cp -R . {{prefix}}/venv/bin
- working-directory: '{{prefix}}/venv/bin'
run:
rm -rf docker .github docs .gitignore *.md
run: rm -rf docker .github docs .gitignore *.md
- |
python -m venv {{prefix}}/venv
@ -41,7 +38,13 @@ build:
# these requirements are separate as they vary by platform
- pip install torch torchvision torchaudio
- pip install -r requirements.txt
# requirements_apple_intel.txt links to a missing version
# of llama.cpp
- run: |
sed -i.bak -e's/llama_cpp_python-0.2.11-cp310-cp310-macosx_13_0_x86_64.whl/llama_cpp_python-0.2.11-cp310-cp310-macosx_12_0_x86_64.whl/' requirements_apple_intel.txt
rm requirements_apple_intel.txt.bak
if: darwin #/x86-64
- pip install -r $REQS
# pkg expects to be run with CWD set to its checkout
- working-directory: '{{prefix}}/venv/bin'
@ -56,6 +59,14 @@ build:
- python-venv-stubber.sh text-generation-webui
- cp ../props/entrypoint.sh {{prefix}}
env:
linux:
REQS: requirements.txt
darwin/aarch64:
REQS: requirements_apple_silicon.txt
darwin/x86-64:
REQS: requirements_apple_intel.txt
test:
text-generation-webui --help
qa-required: true
script: text-generation-webui --help