Upload app.py
app.py
CHANGED
@@ -91,15 +91,17 @@ def instruct_generate(
     encoded = tokenizer.encode(prompt, bos=True, eos=False, device=model.device)
     # prompt_length = encoded.size(0)
 
-    y = generate(
-        model,
-        idx=encoded,
-        max_seq_length=max_new_tokens,
-        max_new_tokens=max_new_tokens,
-        temperature=temperature,
-        top_k=top_k,
-        eos_id=tokenizer.eos_id
-    )
+    # y = generate(
+    #     model,
+    #     idx=encoded,
+    #     max_seq_length=max_new_tokens,
+    #     max_new_tokens=max_new_tokens,
+    #     temperature=temperature,
+    #     top_k=top_k,
+    #     eos_id=tokenizer.eos_id
+    # )
+
+    y = generate(model, encoded, max_new_tokens, temperature=temperature, top_k=top_k, eos_id=tokenizer.eos_id)
 
     output = tokenizer.decode(y)
     output = output.split("### Response:")[1].strip()
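
The substance of the change: the old keyword-argument call to generate (with idx= and max_seq_length=) is commented out and replaced by a positional call, generate(model, encoded, max_new_tokens, ...), which drops max_seq_length entirely. The sketch below is only an assumption about what the new signature might look like, inferred from the argument names in the updated call; the sampling loop in the body and the assumption that model(...) returns raw logits of shape (batch, seq, vocab) are illustrative, not the project's actual implementation.

import torch

# Hypothetical sketch of the generate() signature implied by the new call in the diff:
#     generate(model, encoded, max_new_tokens, temperature=..., top_k=..., eos_id=...)
# The body below is illustrative only; the real implementation lives in the project's
# generation module.
@torch.no_grad()
def generate(model, idx, max_new_tokens, temperature=1.0, top_k=None, eos_id=None):
    # idx: 1-D tensor of prompt token ids; returns prompt plus sampled continuation.
    for _ in range(max_new_tokens):
        # Assumes the model returns logits of shape (batch, seq_len, vocab_size).
        logits = model(idx.unsqueeze(0))[0, -1] / temperature
        if top_k is not None:
            v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
            logits[logits < v[-1]] = float("-inf")  # keep only the top-k candidates
        probs = torch.softmax(logits, dim=-1)
        next_tok = torch.multinomial(probs, num_samples=1)
        idx = torch.cat([idx, next_tok])
        if eos_id is not None and next_tok.item() == eos_id:  # stop at end-of-sequence
            break
    return idx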