Commit 538ca27
Parent: d6ab723

llama general-runner

llama gguf menu picker
- .gitignore +1 -0
- bin/llama-menu.sh +11 -0
- bin/llama.sh +161 -0
- install.sh +2 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+/**/main*.log
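
The pattern matches a main*.log at any depth under the repo, which covers the chat logs the llama.cpp binary drops into the working directory. A hypothetical check (the exact log filename depends on the llama.cpp build):

$ git check-ignore -v bin/main.2023-11-05.log
.gitignore:1:/**/main*.log	bin/main.2023-11-05.log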
bin/llama-menu.sh
ADDED
@@ -0,0 +1,11 @@
+clear
+
+CHOICES=$(cd ~/.ai/models/llama && find . -name "*.gguf")
+SELECTED=$(echo "$CHOICES" | fzf --reverse --border --ansi)
+
+if [[ -z "$SELECTED" ]]; then
+  exit 0
+fi
+
+echo "Selected: $SELECTED"
+llama.sh "$SELECTED" chatml 2048 0.8
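
For reference, a sketch of a session with the picker, assuming the default model from llama-main.conf has been downloaded under ~/.ai/models/llama (the leading ./ in the selection comes from find):

$ llama-menu.sh
Selected: ./teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf

which hands off to the runner as:

llama.sh "./teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf" chatml 2048 0.8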
bin/llama.sh
ADDED
@@ -0,0 +1,161 @@
+#!/bin/bash
+
+# require all four arguments
+if [[ $# -lt 4 ]]; then
+  echo "Usage: llama.sh <model> <template> <context-size> <temperature>"
+  exit 1
+fi
+
+# if the config does not exist, create it with defaults
+if [ ! -f "$HOME/.config/llama/llama-main.conf" ]; then
+  mkdir -p "$HOME/.config/llama"
+  cat <<EOF > "$HOME/.config/llama/llama-main.conf"
+LLAMA_TEMPERATURE=0.1
+LLAMA_CONTEXT_SIZE=4096
+LLAMA_REPETITION_PENALTY=1.15
+LLAMA_TOP_P=0.9
+LLAMA_TOP_K=20
+LLAMA_TEMPLATE=chatml
+LLAMA_MODEL_NAME=teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
+EOF
+fi
+
+source "$HOME/.config/llama/llama-main.conf"
+
+function llama_interactive {
+  MODEL_NAME=$1
+  TEMPLATE=$2
+  CONTEXT_SIZE=$3
+  TEMPERATURE=$4
+
+  CMD=$HOME/.ai/bin/llama
+
+  $CMD \
+    --n-gpu-layers 1 \
+    --model "$(model_path "$MODEL_NAME")" \
+    --prompt-cache "$(cache_path "$MODEL_NAME")" \
+    --file "$(get_model_prompt "$MODEL_NAME")" \
+    --in-prefix "$(get_model_prefix "$TEMPLATE")" \
+    --in-suffix "$(get_model_suffix "$TEMPLATE")" \
+    --reverse-prompt "$(get_model_prefix "$TEMPLATE")" \
+    --reverse-prompt "<|im_end|>" \
+    --threads "7" \
+    --temp "$TEMPERATURE" \
+    --top-p "$LLAMA_TOP_P" \
+    --top-k "$LLAMA_TOP_K" \
+    --repeat-penalty "$LLAMA_REPETITION_PENALTY" \
+    --ctx-size "$CONTEXT_SIZE" \
+    --batch-size 1024 \
+    --n-predict -1 \
+    --keep -1 \
+    --instruct \
+    --no-mmap \
+    --color \
+    --escape
+}
+
+function model_path {
+  MODEL_NAME=$1
+  echo "$HOME/.ai/models/llama/${MODEL_NAME}"
+}
+
+function cache_path {
+  MODEL_NAME=$1
+  echo "$HOME/.ai/cache/menu-${MODEL_NAME//\//_}.cache"
+}
+
+function get_model_prefix {
+  TEMPLATE_NAME=$1
+
+  # map the template name to its user-turn prefix
+  if [[ $TEMPLATE_NAME == *"guanaco"* ]]; then
+    printf "### Human: "
+  elif [[ $TEMPLATE_NAME == *"alpaca"* ]]; then
+    printf "### Instruction: "
+  elif [[ $TEMPLATE_NAME == *"upstage"* ]]; then
+    printf "### Instruction: "
+  elif [[ $TEMPLATE_NAME == *"airoboros"* ]]; then
+    printf "### Instruction: "
+  elif [[ $TEMPLATE_NAME == *"hermes"* ]]; then
+    printf "### Instruction:"
+  elif [[ $TEMPLATE_NAME == *"vicuna"* ]]; then
+    printf "USER: "
+  elif [[ $TEMPLATE_NAME == *"based"* ]]; then
+    printf "Human: "
+  elif [[ $TEMPLATE_NAME == *"wizardlm"* ]]; then
+    printf "USER: "
+  elif [[ $TEMPLATE_NAME == *"orca"* ]]; then
+    printf "### User: "
+  elif [[ $TEMPLATE_NAME == *"samantha"* ]]; then
+    printf "USER: "
+  elif [[ $TEMPLATE_NAME == "chatml" ]]; then
+    printf "<|im_start|>user\\\n"
+  else
+    printf "Input: "
+  fi
+}
+
+# vicuna-style turns for reference: USER: hello, who are you? ASSISTANT:
+
+function get_model_suffix {
+  TEMPLATE_NAME=$1
+
+  # map the template name to its assistant-turn suffix
+  if [[ $TEMPLATE_NAME == *"guanaco"* ]]; then
+    printf "### Assistant: "
+  elif [[ $TEMPLATE_NAME == *"alpaca"* ]]; then
+    printf "### Response: "
+  elif [[ $TEMPLATE_NAME == *"airoboros"* ]]; then
+    printf "### Response: "
+  elif [[ $TEMPLATE_NAME == *"upstage"* ]]; then
+    printf "### Response: "
+  elif [[ $TEMPLATE_NAME == *"hermes"* ]]; then
+    printf "### Response: "
+  elif [[ $TEMPLATE_NAME == *"vicuna"* ]]; then
+    printf "ASSISTANT: "
+  elif [[ $TEMPLATE_NAME == *"samantha"* ]]; then
+    printf "ASSISTANT: "
+  elif [[ $TEMPLATE_NAME == *"based"* ]]; then
+    printf "Assistant: "
+  elif [[ $TEMPLATE_NAME == *"wizardlm"* ]]; then
+    printf "ASSISTANT: "
+  elif [[ $TEMPLATE_NAME == *"orca"* ]]; then
+    printf "### Response: "
+  elif [[ $TEMPLATE_NAME == "chatml" ]]; then
+    printf "<|im_end|>\n<|im_start|>assistant\\\n"
+  else
+    printf "Output: "
+  fi
+}
+
+function get_model_prompt {
+  MODEL_NAME=$1
+
+  if [[ $MODEL_NAME == *"guanaco"* ]]; then
+    echo "$HOME/.local/share/ai/prompts/guanaco.txt"
+  elif [[ $MODEL_NAME == *"samantha"* ]]; then
+    echo "$HOME/.local/share/ai/prompts/samantha.txt"
+  elif [[ $MODEL_NAME == *"openhermes-2-mistral-7b"* ]]; then
+    echo "$HOME/.local/share/ai/prompts/hermes-mistral.txt"
+  elif [[ $MODEL_NAME == *"alpaca"* ]]; then
+    echo "$HOME/.local/share/ai/prompts/alpaca.txt"
+  elif [[ $MODEL_NAME == *"upstage"* ]]; then
+    echo "$HOME/.local/share/ai/prompts/alpaca.txt"
+  elif [[ $MODEL_NAME == *"airoboros"* ]]; then
+    echo "$HOME/.local/share/ai/prompts/alpaca.txt"
+  elif [[ $MODEL_NAME == *"hermes"* ]]; then
+    echo "$HOME/.local/share/ai/prompts/alpaca.txt"
+  elif [[ $MODEL_NAME == *"vicuna"* ]]; then
+    echo "$HOME/.local/share/ai/prompts/vicuna-v11.txt"
+  elif [[ $MODEL_NAME == *"based"* ]]; then
+    echo "$HOME/.local/share/ai/prompts/based.txt"
+  elif [[ $MODEL_NAME == *"wizardlm"* ]]; then
+    echo "$HOME/.local/share/ai/prompts/wizardlm-30b.txt"
+  elif [[ $MODEL_NAME == *"orca"* ]]; then
+    echo "$HOME/.local/share/ai/prompts/orca.txt"
+  else
+    echo "$HOME/.local/share/ai/prompts/idm-gpt-lite.txt"
+  fi
+}
+
+llama_interactive "$1" "$2" "$3" "$4"
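
The runner can also be invoked directly, skipping the menu. The model argument is a path relative to ~/.ai/models/llama (resolved by model_path), followed by template, context size, and temperature. A sketch using the same model the generated config defaults to:

$ llama.sh teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf chatml 4096 0.1

Only the template, context size, and temperature come from the command line; top-p, top-k, and the repetition penalty are read from ~/.config/llama/llama-main.conf.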
install.sh
CHANGED
@@ -7,5 +7,7 @@ mkdir -p ~/.local/bin
 install -C -v ./bin/llama-hf-to-q6_k.sh ~/.local/bin
 install -C -v ./bin/llama-update.sh ~/.local/bin
 install -C -v ./bin/llama-finetune.sh ~/.local/bin
+install -C -v ./bin/llama.sh ~/.local/bin
+install -C -v ./bin/llama-menu.sh ~/.local/bin
 
 echo "done"
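
install -C copies only when the destination file differs, so re-running ./install.sh after a pull is cheap. Assuming ~/.local/bin is on PATH, the picker is then available as a command:

$ llama-menu.sh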