import os

import gradio as gr
import wikipedia
from transformers import AutoModelForSeq2SeqLM, pipeline

## Setting to use the 0th GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "0"


def summarize(text):
    ## Setting to use the bart-large-cnn model for summarization
    summarizer = pipeline("summarization")
    ## To use the t5-base model for summarization:
    ## summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf")
    summary_text = summarizer(text, max_length=100, min_length=5, do_sample=False)[0]["summary_text"]
    print(f"Length of initial text: {len(text)}")
    print(f"Length of summary: {len(summary_text)}")
    print(summary_text)
    ## Return the summary so Gradio can display it in the output textbox
    return summary_text


def greet(name):
    ## Unused helper; expects a Gradio file object, hence the .orig_name attribute
    return "Hello " + name.orig_name + "!!"


def get_ocr():
    ## Placeholder for an OCR step; currently returns an empty string
    return ""


def search_wiki(text):
    ## Return a list of Wikipedia page titles matching the query
    return wikipedia.search(text)


def get_wiki(search_term):
    ## Return the summary section of the best-matching Wikipedia page
    return wikipedia.summary(search_term)


def inference(file):
    ## Unused helper; runs the OCR placeholder and loads a seq2seq model
    get_ocr()
    model = AutoModelForSeq2SeqLM.from_pretrained("sgugger/my-awesome-model")


iface = gr.Interface(fn=summarize, inputs="text", outputs="text")
iface.launch()
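

## A minimal sketch (not part of the original script) of how the unused Wikipedia
## helpers could feed the summarizer: the user types a search term, get_wiki()
## fetches the article summary, and summarize() condenses it further. The name
## wiki_summarize is a hypothetical helper introduced here for illustration.
def wiki_summarize(search_term):
    article_text = get_wiki(search_term)
    return summarize(article_text)

## To try it, place these lines above iface.launch() (launch() blocks the script):
## wiki_iface = gr.Interface(fn=wiki_summarize, inputs="text", outputs="text")
## wiki_iface.launch()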