Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -25,12 +25,12 @@ def response(user_question, table_data):
     # Experiment with generation parameters
     outputs = model.generate(
         **encoding,
-        num_beams=5,  # Beam search to generate more diverse responses
-        top_k=50,  # Top-k sampling for diversity
-        top_p=0.95,  # Nucleus sampling
-        temperature=0.7,  # Temperature scaling (if supported by the model)
-        max_length=50,  # Limit the length of the generated response
-        early_stopping=True  # Stop generation when an end token is generated
+        #num_beams=5,  # Beam search to generate more diverse responses
+        #top_k=50,  # Top-k sampling for diversity
+        #top_p=0.95,  # Nucleus sampling
+        #temperature=0.7,  # Temperature scaling (if supported by the model)
+        #max_length=50,  # Limit the length of the generated response
+        #early_stopping=True  # Stop generation when an end token is generated
     )

     ans = tokenizer.batch_decode(outputs, skip_special_tokens=True)