Commit 7e82c91
Parent(s): 3ae5bc7
Update app.py

app.py CHANGED
@@ -131,7 +131,7 @@ with gr.Blocks(css = css) as demo:
 """)
 gr.HTML('''
 <p style="margin-bottom: 10px; font-size: 94%">
-Whisper is a general-purpose speech recognition model released by OpenAI that can perform multilingual speech recognition as well as speech translation and language identification. Combined with a emotion detection model,this allows for detecting emotion directly from speech in multiple languages and can potentially be used to analyze
+Whisper is a general-purpose speech recognition model released by OpenAI that can perform multilingual speech recognition as well as speech translation and language identification. Combined with a emotion detection model,this allows for detecting emotion directly from speech in multiple languages and can potentially be used to analyze sentiment from customer calls.
 </p>
 ''')
 
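Note: the paragraph changed above describes combining Whisper with an emotion detection model, but translate_and_classify itself is not part of this hunk. A minimal sketch of how such a function could look, assuming the openai-whisper package and a Hugging Face text-classification pipeline; the emotion model named below is illustrative, not necessarily the one this Space uses:

import whisper
from transformers import pipeline

# Load the models once at startup rather than per request.
whisper_model = whisper.load_model("base")
emotion_classifier = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",  # illustrative model choice
)

def translate_and_classify(audio_path):
    # Whisper's "translate" task transcribes speech in any supported language
    # and renders it as English text.
    result = whisper_model.transcribe(audio_path, task="translate")
    transcript = result["text"]
    # Classify the English transcript to get a single emotion label.
    prediction = emotion_classifier(transcript)[0]
    return transcript, prediction["label"]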
@@ -150,7 +150,7 @@ with gr.Blocks(css = css) as demo:
 emotion_output = gr.Textbox(label = "Detected Emotion")
 
 transcribe_audio_r.click(translate_and_classify, inputs = audio_input_r, outputs = [transcript_output,emotion_output])
-transcribe_audio_u.click(translate_and_classify, inputs = audio_input_u, outputs = [transcript_output,emotion_output])
+transcribe_audio_u.click(translate_and_classify, inputs = audio_input_u, outputs = [transcript_output,emotion_output])
 gr.HTML('''
 <div class="footer">
 <p>Whisper Model by <a href="https://github.com/openai/whisper" style="text-decoration: underline;" target="_blank">OpenAI</a> -
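Note: the two .click calls wire both audio inputs (recorded and uploaded) to the same translate_and_classify handler, each updating the same transcript and emotion textboxes. A minimal, self-contained sketch of that wiring, reusing the component names from the diff; the component definitions themselves are assumptions, since they are not shown in this hunk:

import gradio as gr

def translate_and_classify(audio_path):
    # Placeholder handler: the real function transcribes/translates the audio with
    # Whisper and classifies the emotion of the transcript.
    return "transcript goes here", "neutral"

with gr.Blocks() as demo:
    # In the actual app one Audio component presumably records from the microphone
    # and the other accepts uploads; the exact parameters depend on the Gradio version.
    audio_input_r = gr.Audio(type="filepath", label="Record audio")
    audio_input_u = gr.Audio(type="filepath", label="Upload audio")
    transcribe_audio_r = gr.Button("Transcribe recorded audio")
    transcribe_audio_u = gr.Button("Transcribe uploaded audio")
    transcript_output = gr.Textbox(label="Transcript")
    emotion_output = gr.Textbox(label="Detected Emotion")

    transcribe_audio_r.click(translate_and_classify, inputs=audio_input_r, outputs=[transcript_output, emotion_output])
    transcribe_audio_u.click(translate_and_classify, inputs=audio_input_u, outputs=[transcript_output, emotion_output])

demo.launch()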
@@ -158,6 +158,7 @@ with gr.Blocks(css = css) as demo:
 </p>
 </div>
 ''')
+#gr.Markdown("")
 
 
 demo.launch()