Spaces:
Runtime error
Runtime error
gmerrill
committed on
Commit
·
20165cd
1
Parent(s):
18340a0
update
Browse files
main.py
CHANGED
@@ -27,18 +27,18 @@ def get_prompt(user_query: str, functions: list = []) -> str:
|
|
27 |
return f"USER: <<question>> {user_query} <<function>> {functions_string}\nASSISTANT: "
|
28 |
|
29 |
device : str = "cuda:0" if torch.cuda.is_available() else "cpu"
|
|
|
30 |
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
31 |
|
32 |
result = subprocess.run('pwd && ls -lH && find /.cache/huggingface/hub && find /.cache/gorilla', shell=True, capture_output=True, text=True)
|
33 |
log(result.stdout)
|
34 |
|
35 |
-
model_id : str = "gorilla-llm/gorilla-openfunctions-v1"
|
|
|
36 |
log('AutoTokenizer.from_pretrained ...')
|
37 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
38 |
log('AutoModelForCausalLM.from_pretrained ...')
|
39 |
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True)
|
40 |
-
log('AutoModelForCausalLM.save_pretrained ...')
|
41 |
-
model.save_pretrained('/.cache/gorilla')
|
42 |
|
43 |
result = subprocess.run('pwd && ls -lH && find /.cache/huggingface/hub && find /.cache/gorilla', shell=True, capture_output=True, text=True)
|
44 |
log(result.stdout)
|
|
|
27 |
return f"USER: <<question>> {user_query} <<function>> {functions_string}\nASSISTANT: "
|
28 |
|
29 |
device : str = "cuda:0" if torch.cuda.is_available() else "cpu"
|
30 |
+
log('Device: ' + device)
|
31 |
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
32 |
|
33 |
result = subprocess.run('pwd && ls -lH && find /.cache/huggingface/hub && find /.cache/gorilla', shell=True, capture_output=True, text=True)
|
34 |
log(result.stdout)
|
35 |
|
36 |
+
#model_id : str = "gorilla-llm/gorilla-openfunctions-v1"
|
37 |
+
model_id : str = "M-FAC/bert-tiny-finetuned-mrpc"
|
38 |
log('AutoTokenizer.from_pretrained ...')
|
39 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
40 |
log('AutoModelForCausalLM.from_pretrained ...')
|
41 |
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True)
|
|
|
|
|
42 |
|
43 |
result = subprocess.run('pwd && ls -lH && find /.cache/huggingface/hub && find /.cache/gorilla', shell=True, capture_output=True, text=True)
|
44 |
log(result.stdout)
|