Update app.py
Browse files
app.py
CHANGED
@@ -51,8 +51,8 @@ model_pegasus = PegasusForConditionalGeneration.from_pretrained(model_name).to(t
|
|
51 |
def get_max_str(lst):
    """Return the longest element of *lst* (first one wins on length ties)."""
    longest = max(lst, key=len)
    return longest
|
53 |
def get_response(input_text,num_return_sequences=10,num_beams=10):
|
54 |
-
batch = tokenizer.prepare_seq2seq_batch([input_text],truncation=True,padding='longest',max_length=
|
55 |
-
translated = model_pegasus.generate(**batch,max_length=
|
56 |
tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
|
57 |
try:
|
58 |
adequacy_filtered_phrases = adequacy_score.filter(input_text,tgt_text, adequacy_threshold, device)
|
|
|
51 |
def get_max_str(lst):
    """Pick out and return the longest string in *lst*.

    Ties are resolved in favor of the earliest element, matching the
    semantics of the built-in ``max`` with a ``key`` function.
    """
    return max(lst, key=lambda s: len(s))
|
53 |
def get_response(input_text,num_return_sequences=10,num_beams=10):
|
54 |
+
batch = tokenizer.prepare_seq2seq_batch([input_text],truncation=True,padding='longest',max_length=10,return_tensors='pt').to(torch_device)
|
55 |
+
translated = model_pegasus.generate(**batch,max_length=10,num_beams=num_beams, num_return_sequences=num_return_sequences, temperature=1.5)
|
56 |
tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
|
57 |
try:
|
58 |
adequacy_filtered_phrases = adequacy_score.filter(input_text,tgt_text, adequacy_threshold, device)
|