Vaishakhh committed on
Commit
c32dc43
·
1 Parent(s): a644223

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -51,8 +51,8 @@ model_pegasus = PegasusForConditionalGeneration.from_pretrained(model_name).to(t
51
  def get_max_str(lst):
52
  return max(lst, key=len)
53
  def get_response(input_text,num_return_sequences=10,num_beams=10):
54
- batch = tokenizer.prepare_seq2seq_batch([input_text],truncation=True,padding='longest',max_length=30,return_tensors='pt').to(torch_device)
55
- translated = model_pegasus.generate(**batch,max_length=30,num_beams=num_beams, num_return_sequences=num_return_sequences, temperature=1.5)
56
  tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
57
  try:
58
  adequacy_filtered_phrases = adequacy_score.filter(input_text,tgt_text, adequacy_threshold, device)
 
51
  def get_max_str(lst):
52
  return max(lst, key=len)
53
  def get_response(input_text,num_return_sequences=10,num_beams=10):
54
+ batch = tokenizer.prepare_seq2seq_batch([input_text],truncation=True,padding='longest',max_length=10,return_tensors='pt').to(torch_device)
55
+ translated = model_pegasus.generate(**batch,max_length=10,num_beams=num_beams, num_return_sequences=num_return_sequences, temperature=1.5)
56
  tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
57
  try:
58
  adequacy_filtered_phrases = adequacy_score.filter(input_text,tgt_text, adequacy_threshold, device)