Spaces:
Running
on
Zero
Running
on
Zero
qinghuazhou
committed on
Commit
·
b08f216
1
Parent(s):
a28e277
updated demo
Browse files — util/utils.py +3 -3
util/utils.py
CHANGED
@@ -35,7 +35,7 @@ def load_tok(model_name="gpt2-xl"):
|
|
35 |
|
36 |
elif model_name == 'llama-3-8b':
|
37 |
|
38 |
-
model = "meta-llama/Meta-Llama-3-8B"
|
39 |
tok = AutoTokenizer.from_pretrained(model)
|
40 |
tok.pad_token = tok.eos_token
|
41 |
|
@@ -75,7 +75,7 @@ def load_model_tok(model_name="gpt2-xl"):
|
|
75 |
|
76 |
elif model_name == 'llama-3-8b':
|
77 |
|
78 |
-
model = "meta-llama/Meta-Llama-3-8B"
|
79 |
tok = AutoTokenizer.from_pretrained(model)
|
80 |
model = AutoModelForCausalLM.from_pretrained(
|
81 |
model,
|
@@ -463,4 +463,4 @@ def print_request(rs):
|
|
463 |
print_single_request(rs)
|
464 |
else:
|
465 |
for r in rs:
|
466 |
-
print_single_request(r)
|
|
|
35 |
|
36 |
elif model_name == 'llama-3-8b':
|
37 |
|
38 |
+
model = "/data/meta-llama/Meta-Llama-3-8B"
|
39 |
tok = AutoTokenizer.from_pretrained(model)
|
40 |
tok.pad_token = tok.eos_token
|
41 |
|
|
|
75 |
|
76 |
elif model_name == 'llama-3-8b':
|
77 |
|
78 |
+
model = "/data/meta-llama/Meta-Llama-3-8B"
|
79 |
tok = AutoTokenizer.from_pretrained(model)
|
80 |
model = AutoModelForCausalLM.from_pretrained(
|
81 |
model,
|
|
|
463 |
print_single_request(rs)
|
464 |
else:
|
465 |
for r in rs:
|
466 |
+
print_single_request(r)
|