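# Gradio Space: generates an image from an input text prompt via the
# multimodalart/latentdiffusion Space; NER and text-to-speech steps are
# sketched in the commented-out code below.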
import gradio as gr

# Text-to-speech model; the speech path is currently commented out in engine().
fastspeech = gr.Interface.load("huggingface/facebook/fastspeech2-en-ljspeech")

def text2speech(text):
    return fastspeech(text)
# Text-to-image backend: the multimodalart/latentdiffusion Space, loaded once
# at startup so engine() only has to call it per request.
latentdiffusion = gr.Interface.load("spaces/multimodalart/latentdiffusion")

def engine(text_input):
    # Optional NER pass over the input text (currently disabled).
    #ner = gr.Interface.load("huggingface/flair/ner-english-ontonotes-large")
    #entities = ner(text_input)
    #entities = [tupl for tupl in entities if None not in tupl]
    #entities_num = len(entities)

    # Call the Space with (prompt, steps, width, height, images, scale);
    # it returns (output image, carousel of individual images, error text).
    results = latentdiffusion(text_input, '50', '256', '256', '1', 10)
    img = results[0]

    # Optional speech output for the same text (currently disabled).
    #speech = text2speech(text_input)
    return img  #entities, speech, img
image = gr.outputs.Image(type="pil", label="output image")

app = gr.Interface(
    fn=engine,
    inputs=gr.inputs.Textbox(lines=5, label="Input Text"),
    #live=True,
    description="Generates an image from the input text.",
    # Earlier multi-output layout (text, speech answer, carousel, error) kept for reference:
    #outputs=[gr.outputs.Textbox(type="auto", label="Text"), gr.outputs.Audio(type="file", label="Speech Answer"),
    #         gr.outputs.Carousel(label="Individual images", components=["image"]), gr.outputs.Textbox(label="Error")],
    outputs=image,
    examples=['Apple'],
    #examples=["On April 17th Sunday George celebrated Easter. He is staying at Empire State building with his parents. He is a citizen of Canada and speaks English and French fluently. His role model is former president Obama. He got 1000 dollar from his mother to visit Disney World and to buy new iPhone mobile. George likes watching Game of Thrones."]
).launch(debug=True)  #(enable_queue=True)
#get_audio = gr.Button("generate audio")
#get_audio.click(text2speech, inputs=text, outputs=speech)
#def greet(name):
# return "Hello " + name + "!!"
#iface = gr.Interface(fn=greet, inputs="text", outputs="text")
#iface.launch()