import gradio as gr

# Text-to-speech pipeline loaded from the Hugging Face Hub.
fastspeech = gr.Interface.load("huggingface/facebook/fastspeech2-en-ljspeech")

def text2speech(text):
    return fastspeech(text)
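
# Quick sanity check of the wrapper (illustrative, not part of the app). The
# return value is whatever gradio materializes for the pipeline's audio
# output; given the Audio(type="file") component below, a filepath to the
# generated audio is the likely shape, but that is an assumption:
#print(text2speech("Hello world"))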
    
def engine(text_input):
    # Named-entity recognition via a Flair model hosted on the Hub.
    ner = gr.Interface.load("huggingface/flair/ner-english-ontonotes-large")
    entities = ner(text_input)
    # Drop malformed tuples that contain a None field.
    entities = [tupl for tupl in entities if None not in tupl]

    # Text-to-image via the multimodalart/latentdiffusion Space. The positional
    # arguments are (prompt, steps, width, height, images, scale); the first
    # element of the result is the generated image.
    img = gr.Interface.load("spaces/multimodalart/latentdiffusion")(
        text_input, '50', '256', '256', '1', 10)[0]

    speech = text2speech(text_input)
    return img, entities, speech
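
# Example call (illustrative). For this OntoNotes model the surviving entity
# tuples typically pair a span with its tag, e.g. ("George", "PERSON"),
# which is consistent with the None-filter above, though the exact shape
# depends on how gradio wraps the model's output:
#img, entities, speech = engine("George visited Canada in April.")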
    
app = gr.Interface(engine,
                   gr.inputs.Textbox(lines=5, label="Input Text"),
                   [gr.outputs.Image(type="auto", label="Output"),
                    gr.outputs.Textbox(type="auto", label="Text"),
                    gr.outputs.Audio(type="file", label="Speech Answer")],
                   examples=["Apple",
                             "On April 17th Sunday George celebrated Easter. He is staying at Empire State building with his parents. He is a citizen of Canada and speaks English and French fluently. His role model is former president Obama. He got 1000 dollar from his mother to visit Disney World and to buy new iPhone mobile. George likes watching Game of Thrones."],
                   description="Extracts named entities from the input text, generates an image from it, and reads it out loud.")
app.launch(debug=True)  # add enable_queue=True if the Space calls run long
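
# When run locally (e.g. `python app.py`) gradio serves the UI on
# http://localhost:7860 by default; on a Hugging Face Space this file is
# executed automatically as the app entry point.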
                   
 
# A stub for a possible extension: a button that regenerates only the audio.
#get_audio = gr.Button("generate audio")
#get_audio.click(text2speech, inputs=text, outputs=speech)
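
# A minimal sketch of that button-driven variant using gr.Blocks (assumes a
# Gradio version with Blocks support; the component names are illustrative,
# not part of the original app). Kept commented out so it does not compete
# with the Interface launched above.
#with gr.Blocks() as demo:
#    text = gr.Textbox(lines=5, label="Input Text")
#    speech = gr.Audio(label="Speech Answer")
#    get_audio = gr.Button("generate audio")
#    get_audio.click(text2speech, inputs=text, outputs=speech)
#demo.launch()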
 