Update app.py
app.py CHANGED
@@ -298,7 +298,7 @@ def expand_prompt(prompt):
     outputs = model.generate(
         input_ids=input_ids,
         attention_mask=attention_mask,
-        max_new_tokens=
+        max_new_tokens=256,
         temperature=0.2,
         top_p=0.9,
         do_sample=True,
@@ -312,7 +312,7 @@ def expand_prompt(prompt):
     outputs_2 = model.generate(
         input_ids=input_ids_2,
         attention_mask=attention_mask_2,
-        max_new_tokens=
+        max_new_tokens=384,
         temperature=0.2,
         top_p=0.9,
         do_sample=True,
@@ -427,11 +427,6 @@ def generate_30(
     expanded_1 = expanded[0]
     expanded_2 = expanded[1]

-    expanded_1 = flatten_and_stringify(expanded_1)
-    expanded_1 = " ".join(expanded_1)
-    expanded_2 = flatten_and_stringify(expanded_2)
-    expanded_2 = " ".join(expanded_2)
-
     prompt = flatten_and_stringify(prompt+expanded_1+expanded_2)
     prompt = " ".join(prompt)

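For context, the commit caps generation length explicitly in both generate() calls inside expand_prompt (256 new tokens for the first pass, 384 for the second) and, in generate_30, drops the per-string flatten_and_stringify/join steps, since the combined flatten of prompt+expanded_1+expanded_2 already covers them. Below is a minimal, self-contained sketch of what the first generation call looks like after the change. The checkpoint name, tokenizer setup, and decode step are assumptions for illustration only; just the generate() keyword arguments mirror the diff.

# Minimal sketch, assuming a standard transformers causal LM; the Space's
# actual model, tokenizer, and surrounding app.py logic are not shown here.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # placeholder checkpoint, not the Space's actual model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

def expand_prompt(prompt):
    # Tokenize the incoming prompt and build the attention mask.
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=256,  # explicit cap added in the first hunk
        temperature=0.2,
        top_p=0.9,
        do_sample=True,
    )
    # Return only the newly generated tokens as text.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

The second hunk is the same pattern with input_ids_2/attention_mask_2 and a cap of 384 new tokens.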