Spaces:
Runtime error
import gradio as gr
import wikipedia
from transformers import pipeline
import os

# Pin inference to the first GPU.  This must run before any CUDA context is
# created (i.e. before the transformers pipeline is built) to take effect.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Lazily-created, module-level summarization pipeline.  Constructing the
# pipeline downloads/loads the full model, so do it once, not on every call.
_SUMMARIZER = None


def summarize(text):
    """Condense *text* with the default transformers summarization model.

    Args:
        text: The passage to summarize.

    Returns:
        The generated summary string (between 5 and 100 tokens, greedy
        decoding since ``do_sample=False``).
    """
    global _SUMMARIZER
    if _SUMMARIZER is None:
        # Default checkpoint is the bart-large-cnn family.  To use T5 instead:
        # pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf")
        _SUMMARIZER = pipeline("summarization")
    summary_text = _SUMMARIZER(
        text, max_length=100, min_length=5, do_sample=False
    )[0]['summary_text']
    print(f'Length of initial text: {len(text)}')
    print(f'Length of summary: {len(summary_text)}')
    print(summary_text)
    return summary_text
def greet(name):
    """Return a playful greeting for *name*.

    The original read ``name.orig_name``, which raises ``AttributeError``
    for the plain ``str`` a Gradio "text" input delivers — that attribute
    belongs to Gradio file-upload objects.  Use the string directly.
    """
    return "Hello " + name + "!!"
def get_ocr():
    """Placeholder OCR hook; no OCR is wired up yet, so yield an empty string."""
    return ''
def search_wiki(text):
    """Return the list of Wikipedia page titles matching the query *text*."""
    matches = wikipedia.search(text)
    return matches
def get_wiki(search_term):
    """Fetch the Wikipedia summary for *search_term* and condense it.

    Returns:
        A three-element list: the condensed summary text, the character
        length of the original Wikipedia summary, and the character length
        of the condensed text.
    """
    original = wikipedia.summary(search_term)
    condensed = summarize(original)
    return [condensed, len(original), len(condensed)]
| # def inference(file): | |
| # get_ocr() | |
| # model = AutoModelForSeq2SeqLM.from_pretrained("sgugger/my-awesome-model") | |
# Wire get_wiki into a Gradio UI.  NOTE(review): the original passed
# outputs=["text", 'orig_length', 'sum_length'] — but each outputs entry must
# be a Gradio component shortcut ("text", "number", ...), not an arbitrary
# label; the unknown component names are what crashed the Space at startup.
# get_wiki returns [summary_str, original_length, summary_length], so the
# matching components are one text box and two numbers.
iface = gr.Interface(
    fn=get_wiki,
    inputs="text",
    outputs=["text", "number", "number"],
)
iface.launch()