import gradio as gr
import wikipedia
from transformers import pipeline
import os

## Restrict CUDA to the 0th GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

def summarize(text):
    ## Uses the default summarization model; to pin the bart-large-cnn checkpoint explicitly:
    ## summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
    ## Note: building the pipeline here reloads the model on every call;
    ## hoist it to module level to load it only once.
    summarizer = pipeline("summarization")

    ## To use the t5-base model for summarization instead:
    ## summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf")

    summary_text = summarizer(text, max_length=100, min_length=5, do_sample=False)[0]['summary_text']
    print(f'Length of initial text: {len(text)}')
    print(f'Length of summary: {len(summary_text)}')
    print(summary_text)
    return summary_text

def greet(name):
    ## Unused demo handler; expects a plain string name, not a file object.
    return "Hello " + name + "!!"


def get_ocr():
    ## OCR placeholder; not implemented or wired into the interface yet.
    return ''


def search_wiki(text):
    ## Return a list of Wikipedia page titles matching the query.
    return wikipedia.search(text)


def get_wiki(search_term):
    ## Fetch the Wikipedia summary for the search term and condense it further.
    text = summarize(wikipedia.summary(search_term))
    return text


# def inference(file):
    # get_ocr()
    # model = AutoModelForSeq2SeqLM.from_pretrained("sgugger/my-awesome-model")


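# Build a simple text-in/text-out Gradio interface around get_wiki and launch it.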
iface = gr.Interface(fn=get_wiki, inputs="text", outputs="text")
iface.launch()