Commit 8261e17 (parent: c0d7fe6): Update app.py

app.py CHANGED
@@ -12,12 +12,11 @@
 %cd GPTQ-for-LLaMa
 !python setup_cuda.py install
 
-
-
-
-
-
-
-
+hf-download https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/config.json --output-dir /content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ
+hf-download https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/generation_config.json --output-dir /content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ
+hf-download https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/special_tokens_map.json --output-dir /content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ
+hf-download https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/resolve/main/tokenizer.model --output-dir /content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ
+hf-download https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/tokenizer_config.json --output-dir /content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ
+hf-download https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/resolve/main/gptq_model-4bit-128g.safetensors --output-dir /content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ
 %cd /content/text-generation-webui
 !python server.py --share --chat --wbits 4 --groupsize 128 --model_type llama
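
For reference, the added downloads could also be expressed with the huggingface_hub Python API; the sketch below is a minimal equivalent, assuming the huggingface_hub package is installed (the repo ID and target directory are taken from the commit above, everything else is illustrative). Note the URL pattern in the diff: /raw/main/ serves small text files exactly as stored in git, while /resolve/main/ follows Git LFS pointers, which the large tokenizer.model and .safetensors weight file require. hf_hub_download handles that distinction automatically.

from huggingface_hub import hf_hub_download

REPO_ID = "4bit/Llama-2-7b-Chat-GPTQ"
TARGET_DIR = "/content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ"

# The same six files fetched by the hf-download calls in the diff above.
for filename in [
    "config.json",
    "generation_config.json",
    "special_tokens_map.json",
    "tokenizer.model",
    "tokenizer_config.json",
    "gptq_model-4bit-128g.safetensors",
]:
    # local_dir places each file under TARGET_DIR instead of the HF cache.
    hf_hub_download(repo_id=REPO_ID, filename=filename, local_dir=TARGET_DIR)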
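
The unchanged final two lines re-enter the text-generation-webui checkout and launch it. As a reading of the flags (based on the GPTQ options that 2023-era text-generation-webui builds accepted, not verified against this Space): --wbits 4 and --groupsize 128 match how gptq_model-4bit-128g.safetensors was quantized (4-bit, group size 128), --model_type llama tells the GPTQ loader which architecture to expect, --chat starts the chat UI, and --share requests a public Gradio link, which is needed when the Space or notebook has no directly reachable port.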