Update app.py
app.py CHANGED
@@ -30,7 +30,7 @@ Dialogue:
 
 What was going on?
 """
-generation_config = GenerationConfig(max_new_tokens=80, do_sample=True, temperature=
+generation_config = GenerationConfig(max_new_tokens=80, do_sample=True, temperature=0.5)
 
 inputs = tokenizer(prompt, return_tensors='pt')
 output = tokenizer.decode(
@@ -73,7 +73,7 @@ What was going on?
 
 # this is for one_shot
 def one_shot(example_indices_full,my_example):
-    generation_config = GenerationConfig(max_new_tokens=80, do_sample=True, temperature=
+    generation_config = GenerationConfig(max_new_tokens=80, do_sample=True, temperature=0.5)
 
     inputs = tokenizer(my_prompt(example_indices_full,my_example), return_tensors='pt')
     output = tokenizer.decode(
@@ -87,7 +87,7 @@ def one_shot(example_indices_full,my_example):
 
 # few_shot
 def few_shot(example_indices_full_few_shot,my_example):
-    generation_config = GenerationConfig(max_new_tokens=80, do_sample=True, temperature=
+    generation_config = GenerationConfig(max_new_tokens=80, do_sample=True, temperature=0.5)
     inputs = tokenizer(my_prompt(example_indices_full_few_shot,my_example), return_tensors='pt')
     output = tokenizer.decode(
     model.generate(
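For readers following the change: the diff only shows fragments of app.py, so below is a minimal self-contained sketch of how the updated generation_config plausibly feeds into the tokenizer(...), model.generate(...), and tokenizer.decode(...) calls the diff truncates. The checkpoint name google/flan-t5-base and the sample prompt are assumptions for illustration, not taken from app.py.

# Sketch of the inference path the diff touches, under assumed names.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, GenerationConfig

model_name = "google/flan-t5-base"  # assumed checkpoint, not shown in the diff
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Assumed prompt; app.py only shows its tail ("What was going on?" and closing quotes).
prompt = """Summarize the following conversation.

Dialogue:
#Person1#: Have you finished the report yet?
#Person2#: Not yet, I'll send it over tonight.

What was going on?
"""

# The change under review: sampling temperature is now pinned to 0.5.
generation_config = GenerationConfig(max_new_tokens=80, do_sample=True, temperature=0.5)

inputs = tokenizer(prompt, return_tensors='pt')
output = tokenizer.decode(
    model.generate(
        inputs['input_ids'],
        generation_config=generation_config,
    )[0],
    skip_special_tokens=True,
)
print(output)

With do_sample=True, a lower temperature such as 0.5 concentrates the sampling distribution on high-probability tokens, so completions vary less from run to run than at temperature 1.0; the same config object is reused in the zero-shot code, one_shot, and few_shot.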