#!/bin/bash
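#
# Start or stop a local koboldcpp server, using settings from
# ~/.config/llama/llama-koboldcpp.conf (created with defaults on first run).
#
# Usage: run with no arguments to start the server, or pass the single
# argument "stop" to shut down a running instance.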
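# Write a default config file on first run.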
if [ ! -f "$HOME/.config/llama/llama-koboldcpp.conf" ]; then
    mkdir -p "$HOME/.config/llama"
    cat <<EOF > "$HOME/.config/llama/llama-koboldcpp.conf"
LLAMA_MODEL_NAME=$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
LLAMA_CONTEXT_SIZE=8192

KOBOLD_PORT=5000
KOBOLD_LOG=$HOME/.local/var/llama-koboldcpp.log
PYTHON_EXEC=$HOME/.virtualenvs/koboldcpp/bin/python
EOF
fi
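# Load the (possibly just-created) configuration.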
source "$HOME/.config/llama/llama-koboldcpp.conf"
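# "stop" subcommand: kill any running koboldcpp server and exit.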
if [[ $# -eq 1 ]] && [[ $1 == "stop" ]]; then
    echo "Stopping koboldcpp server..."
    pkill -f "koboldcpp.py"
    echo "ok"
    exit 0
fi
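# Start the server in the background, logging to $KOBOLD_LOG.
# Make sure the log directory exists first (the default config points under
# ~/.local/var, which may not exist yet).
mkdir -p "$(dirname "$KOBOLD_LOG")"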
cd ~/Work/koboldcpp || exit 1
"$PYTHON_EXEC" koboldcpp.py \
    --gpulayers 1 \
    --model "$LLAMA_MODEL_NAME" \
    --contextsize "$LLAMA_CONTEXT_SIZE" \
    --threads 6 \
    --skiplauncher \
    --smartcontext \
    --noblas \
    --host "0.0.0.0" \
    --port "$KOBOLD_PORT" \
    > "$KOBOLD_LOG" 2>&1 &
echo "Started koboldcpp server on port $KOBOLD_PORT"