Hann99 committed
Commit 3ff465b · 1 Parent(s): e56cbde

Update app.py

Files changed (1):
  app.py +11 -9
app.py CHANGED
@@ -1,11 +1,13 @@
- from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
+ from transformers import PegasusForConditionalGeneration, PegasusTokenizer
  import gradio as grad
- import ast
- mdl_name = "distilbert-base-cased-distilled-squad"
- my_pipeline = pipeline('question-answering', model=mdl_name, tokenizer=mdl_name)
- def answer_question(question,context):
-     text= "{"+"'question': '"+question+"','context': '"+context+"'}"
-     di=ast.literal_eval(text)
-     response = my_pipeline(di)
+ mdl_name = "google/pegasus-xsum"
+ pegasus_tkn = PegasusTokenizer.from_pretrained(mdl_name)
+ mdl = PegasusForConditionalGeneration.from_pretrained(mdl_name)
+ def summarize(text):
+     tokens = pegasus_tkn(text, truncation=True, padding="longest", return_tensors="pt")
+     txt_summary = mdl.generate(**tokens)
+     response = pegasus_tkn.batch_decode(txt_summary, skip_special_tokens=True)
      return response
- grad.Interface(answer_question, inputs=["text","text"], outputs="text").launch()
+ txt=grad.Textbox(lines=10, label="English", placeholder="English Text here")
+ out=grad.Textbox(lines=10, label="Summary")
+ grad.Interface(summarize, inputs=txt, outputs=out).launch()
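
For a quick check of the new Pegasus summarization path outside the Gradio UI, a minimal sketch along these lines could be run locally. It mirrors the summarize function added in this commit; the sample input text and the print call are illustrative assumptions, not part of the commit:

    # Standalone smoke test for the summarization code added in this commit
    # (illustrative sketch; the sample text below is a hypothetical input).
    from transformers import PegasusForConditionalGeneration, PegasusTokenizer

    mdl_name = "google/pegasus-xsum"
    pegasus_tkn = PegasusTokenizer.from_pretrained(mdl_name)
    mdl = PegasusForConditionalGeneration.from_pretrained(mdl_name)

    def summarize(text):
        # Tokenize, generate a summary, and decode it back to text,
        # matching the function defined in the updated app.py.
        tokens = pegasus_tkn(text, truncation=True, padding="longest", return_tensors="pt")
        txt_summary = mdl.generate(**tokens)
        return pegasus_tkn.batch_decode(txt_summary, skip_special_tokens=True)

    sample = ("Gradio lets you wrap a Python function in a simple web UI. "
              "This commit swaps the question-answering demo for a Pegasus-based "
              "text summarizer served through such an interface.")
    print(summarize(sample))  # prints a list with one generated summary string

Note that summarize returns a list of decoded strings (one per input sequence), which Gradio renders in the output Textbox.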