#!/bin/bash
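#
# Interactive wrapper around the `llama` CLI (llama.cpp-style flags).
# On first use it writes a default config to ~/.config/llama/llama-main.conf;
# edit that file (or pass four arguments) to change the model, prompt template,
# context size, and sampling settings. Prompt files are read from
# LLAMA_PROMPT_PATH and prompt caches are written to LLAMA_CACHE_PATH.
#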

function llama_init_environment {
  # On first run, write a default config with model, template, and sampling
  # settings. The EOF delimiter is unquoted, so $HOME is expanded now and the
  # config file ends up containing absolute paths.
  if [ ! -f "$HOME/.config/llama/llama-main.conf" ]; then
    mkdir -p "$HOME/.config/llama"
    cat <<EOF > "$HOME/.config/llama/llama-main.conf"
LLAMA_TEMPERATURE=0.1
LLAMA_CONTEXT_SIZE=4096
LLAMA_REPETITION_PENALTY=1.15
LLAMA_TOP_P=0.9
LLAMA_TOP_K=20
LLAMA_TEMPLATE=chatml
LLAMA_MODEL_NAME=teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
LLAMA_MODELS_PATH=$HOME/.ai/models/llama
LLAMA_PROMPT_PATH=$HOME/.local/share/llama/prompts
LLAMA_CACHE_PATH=$HOME/.cache/llama
EOF
  fi

  source "$HOME/.config/llama/llama-main.conf"

  if [ ! -d "$LLAMA_CACHE_PATH" ]; then
    mkdir -p "$LLAMA_CACHE_PATH"
  fi
}
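
# llama_interactive [<model_name> <template> <context_size> <temperature>]
# With no arguments the values from llama-main.conf are used; with exactly
# four arguments those values are overridden for this session.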
function llama_interactive {
  if [[ $# -eq 4 ]]; then
    LLAMA_MODEL_NAME=$1
    LLAMA_TEMPLATE=$2
    LLAMA_CONTEXT_SIZE=$3
    LLAMA_TEMPERATURE=$4
  fi

  if [[ $# -lt 4 ]] && [[ $# -gt 0 ]]; then
    echo "Error: 4 arguments are required."
    echo "Usage: llama <model_name> <template> <context_size> <temperature>"
    echo "Example: llama teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf chatml 4096 0.1"
    return 1
  fi

  # The prompt-cache name is the model name with '/' and '.' replaced by '-',
  # so each model/context-size pair gets its own cache file.
  llama \
    --n-gpu-layers 1 \
    --model "$LLAMA_MODELS_PATH/$LLAMA_MODEL_NAME" \
    --prompt-cache "$LLAMA_CACHE_PATH/${LLAMA_MODEL_NAME//[\/\.]/-}-${LLAMA_CONTEXT_SIZE}.cache" \
    --file "$(get_model_prompt "$LLAMA_MODEL_NAME")" \
    --in-prefix "$(get_model_prefix "$LLAMA_TEMPLATE")" \
    --in-suffix "$(get_model_suffix "$LLAMA_TEMPLATE")" \
    --reverse-prompt "$(get_model_prefix "$LLAMA_TEMPLATE")" \
    --reverse-prompt "<|im_end|>" \
    --threads "6" \
    --temp "$LLAMA_TEMPERATURE" \
    --top-p "$LLAMA_TOP_P" \
    --top-k "$LLAMA_TOP_K" \
    --repeat-penalty "$LLAMA_REPETITION_PENALTY" \
    --ctx-size "$LLAMA_CONTEXT_SIZE" \
    --batch-size 1024 \
    --n-predict -1 \
    --keep -1 \
    --instruct \
    --no-mmap \
    --color \
    --escape \
    --log-disable
}
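
# get_model_prefix <template-or-model-name>
# Prints the user-turn prefix for the given template. The chatml case emits a
# literal "\n" rather than a real newline; the --escape flag passed to llama
# above is presumably what expands it.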
function get_model_prefix {
  case $1 in
    *guanaco*)
      printf "### Human: "
      ;;
    *alpaca*|*upstage*|*airoboros*|*hermes*)
      printf "### Instruction: "
      ;;
    *vicuna*|*wizardlm*|*samantha*|*scarlett*)
      printf "USER: "
      ;;
    *based*|*yi*)
      printf "Human: "
      ;;
    *orca*)
      printf "### User: "
      ;;
    chatml)
      printf "<|im_start|>user\\\n"
      ;;
    *)
      printf "Input: "
      ;;
  esac
}
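
# get_model_suffix <template-or-model-name>
# Prints the assistant-turn suffix that introduces the model's reply.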
function get_model_suffix {
  case $1 in
    *guanaco*)
      printf "### Assistant:"
      ;;
    *alpaca*|*upstage*|*airoboros*|*hermes*)
      printf "### Response:"
      ;;
    *vicuna*|*wizardlm*|*samantha*|*scarlett*)
      printf "ASSISTANT:"
      ;;
    *based*|*yi*)
      printf "Assistant:"
      ;;
    *orca*)
      printf "### Response:"
      ;;
    chatml)
      printf "<|im_end|>\n<|im_start|>assistant\\\n"
      ;;
    *)
      printf "Output:"
      ;;
  esac
}
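
# get_model_prompt <model-name>
# Prints the path of the system-prompt file for the model, falling back to
# idm-gpt-lite.txt when no pattern matches.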
function get_model_prompt {
  case $1 in
    *guanaco*)
      echo "$LLAMA_PROMPT_PATH/guanaco.txt"
      ;;
    *alpaca*|*upstage*|*airoboros*|*hermes*)
      echo "$LLAMA_PROMPT_PATH/alpaca.txt"
      ;;
    *vicuna*|*wizardlm*)
      echo "$LLAMA_PROMPT_PATH/vicuna-v11.txt"
      ;;
    *scarlett*)
      echo "$LLAMA_PROMPT_PATH/scarlett.txt"
      ;;
    *samantha*)
      echo "$LLAMA_PROMPT_PATH/samantha.txt"
      ;;
    *based*|*yi*)
      echo "$LLAMA_PROMPT_PATH/based.txt"
      ;;
    *orca*)
      echo "$LLAMA_PROMPT_PATH/orca.txt"
      ;;
    *)
      echo "$LLAMA_PROMPT_PATH/idm-gpt-lite.txt"
      ;;
  esac
}
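
# Entry point: load (or create) the config, then start an interactive session.
# Example override, mirroring the usage message above (script name is whatever
# you saved this file as):
#   <script> teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf chatml 4096 0.1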
llama_init_environment
llama_interactive "$@"