#!/usr/bin/env bash
# if the conf file does not exist, create it
if [ ! -f "$HOME/.config/llama/llama-server.conf" ]; then
  mkdir -p "$HOME/.config/llama"
  cat <<EOF > "$HOME/.config/llama/llama-server.conf"
LLAMA_MODEL_NAME=$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
LLAMA_CONTEXT_SIZE=8192
LLAMA_PORT=8000
LLAMA_LOG=$HOME/.local/var/llama-server.log
EOF
fi
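# load the configuration (either the defaults written above or a user-edited conf)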
source "$HOME/.config/llama/llama-server.conf"
# if the first argument is "stop", kill the running llama-server process
if [[ $# -eq 1 ]] && [[ $1 == "stop" ]]; then
  echo "Stopping llama server"
  pkill -f "llama-server"
  echo "ok"
  exit
fi
# start server in the background and pipe stdout+stderr to the log file
# make sure the log directory exists before redirecting into it
mkdir -p "$(dirname "$LLAMA_LOG")"
llama-server \
  --model "$LLAMA_MODEL_NAME" \
  --ctx-size "$LLAMA_CONTEXT_SIZE" \
  --n-gpu-layers 1 \
  --port "$LLAMA_PORT" \
  > "$LLAMA_LOG" 2>&1 &
| echo "Started llama.cpp server" | |