from transformers import pipeline
import gradio as gr
from diffusers import DiffusionPipeline

# 1. Text Summarization
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
def get_summary(text):
    output = summarizer(text)
    return output[0]["summary_text"]
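# Optional tweak (not used in this app): the summarization pipeline also accepts
# length controls, e.g. summarizer(text, min_length=30, max_length=130), if the
# default summary length is not a good fit for your inputs.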

# 2. Named Entity Recognition
ner_model = pipeline("ner", model="dslim/bert-large-NER")
def get_ner(text):
    output = ner_model(text)
    return {"text":text, "entities":output}

# 3. Image Captioning
caption_model = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
def get_caption(img):
    output = caption_model(img)
    return output[0]["generated_text"]

# 4. Image Generation
# img_model = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
img_model = DiffusionPipeline.from_pretrained("Linaqruf/anything-v3.0")
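# Note (assumption about your hardware, not part of the original app): diffusion on
# CPU is very slow. If a CUDA GPU is available, something like
#   img_model = img_model.to("cuda")
# (optionally loading with torch_dtype=torch.float16) speeds up generation considerably.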
def get_img(prompt):
    output = img_model(prompt)
    return output.images[0]
    

with gr.Blocks() as demo:
    gr.Markdown("# Try out some cool tasks!")
    with gr.Tab("Text Summarization"):
        sum_input = [gr.Textbox(label="Text to Summarize", placeholder="Enter text to summarize...", lines=4)]
        sum_btn = gr.Button("Summarize text")
        sum_output = [gr.Textbox(label="Summarized Text")]
        sum_btn.click(get_summary, sum_input, sum_output)
    with gr.Tab("Named Entity Recognition"):
        ner_input = [gr.Textbox(label="Text to find Entities", placeholder="Enter text...", lines=4)]
        ner_btn = gr.Button("Generate entities")
        ner_output = [gr.HighlightedText(label="Text with entities")]
        ner_btn.click(get_ner, ner_input, ner_output)
    with gr.Tab("Image Captioning"):
        cap_input = [gr.Image(label="Upload Image", type="pil")]
        cap_btn = gr.Button("Generate Caption")
        cap_output = [gr.Textbox(label="Caption")]
        cap_btn.click(get_caption, cap_input, cap_output)
    with gr.Tab("Image Generation"):
        img_input = [gr.Textbox(label="Your Text")]
        img_btn = gr.Button("Generate Image")
        img_output = [gr.Image(label="Generated Image")]
        img_btn.click(get_img, img_input, img_output)

demo.launch()
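
# Note: launch() with no arguments uses Gradio's defaults (local URL only). If you
# need a shareable public link when running locally, demo.launch(share=True) is an
# option; this app as written does not rely on it.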