import requests
import tensorflow as tf
from numpy import asarray
import gradio as gr
from transformers import pipeline

# Image classifier and human-readable ImageNet labels
# (MobileNetV2 is an assumed choice for the otherwise-undefined inception_net)
inception_net = tf.keras.applications.MobileNetV2()
answer = requests.get("https://git.io/JJkYN")
labels = answer.text.split("\n")

# Pipelines backing the transcription and sentiment tabs
# (model names are illustrative; any Spanish ASR / multilingual sentiment model works)
transcribe = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-xlsr-53-spanish")
classifier = pipeline("text-classification", model="nlptown/bert-base-multilingual-uncased-sentiment")

def classify_image(inp):
    inp = asarray(inp.resize((224, 224)))
    inp = inp.reshape((-1,) + inp.shape)
    inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)
    prediction = inception_net.predict(inp).flatten()
    confidences = {labels[k]: float(prediction[k]) for k in range(1000)}
    return confidences

def audio_to_text(audio):
    text = transcribe(audio)["text"]
    return text


def text_to_sentiment(text):
    return classifier(text)[0]["label"]

demo = gr.Blocks()

with demo:
    gr.Markdown("Example with Gradio Blocks")
    with gr.Tabs():
        with gr.TabItem("Transcribe audio in Spanish"):
            with gr.Row():
                audio = gr.Audio(sources="microphone", type="filepath")
                transcription = gr.Textbox()
            transcribeButton = gr.Button("Transcribe")
        with gr.TabItem("Sentiment analysis in English and Spanish"):
            with gr.Row():
                text = gr.Textbox()
                label = gr.Label()
            sentimentButton = gr.Button("Calculate sentiment")
        with gr.TabItem("Image Classification"):
            with gr.Row():
                # type="pil" so classify_image can call PIL's .resize()
                image = gr.Image(label="Upload an image here", type="pil")
                label_image = gr.Label(num_top_classes=3)
            classifyButton = gr.Button("Classify image")
    transcribeButton.click(audio_to_text, inputs=audio, outputs=transcription)
    sentimentButton.click(text_to_sentiment, inputs=text, outputs=label)
    classifyButton.click(classify_image, inputs=image, outputs=label_image)

demo.launch()