RamAnanth1 committed on
Commit 24cbf9c · 1 Parent(s): 673aba3
Files changed (1)
  1. app.py +9 -4
app.py CHANGED
@@ -137,15 +137,20 @@ with gr.Blocks(css = css) as demo:
 
     with gr.Column():
         #gr.Markdown(""" ### Record audio """)
-        audio_input = gr.Audio(label = 'Record Audio Input',source="microphone",type="filepath")
-        transcribe_audio = gr.Button('Transcribe')
+        with gr.Tab("Record Audio"):
+            audio_input_r = gr.Audio(label = 'Record Audio Input',source="microphone",type="filepath")
+            transcribe_audio_r = gr.Button('Transcribe')
+
+        with gr.Tab("Upload Audio as File"):
+            audio_input_u = gr.Audio(label = 'Upload Audio',source="upload",type="filepath")
+            transcribe_audio_u = gr.Button('Transcribe')
 
     with gr.Row():
         transcript_output = gr.Textbox(label="Transcription in the language you spoke", lines = 3)
         emotion_output = gr.Textbox(label = "Detected Emotion")
 
-    transcribe_audio.click(translate_and_classify, inputs = audio_input, outputs = [transcript_output,emotion_output])
-
+    transcribe_audio_r.click(translate_and_classify, inputs = audio_input_r, outputs = [transcript_output,emotion_output])
+    transcribe_audio_u.click(translate_and_classify, inputs = audio_input_u, outputs = [transcript_output,emotion_output])
     gr.HTML('''
     <div class="footer">
         <p>Whisper Model by <a href="https://github.com/openai/whisper" style="text-decoration: underline;" target="_blank">OpenAI</a> -
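For context, here is a minimal, self-contained sketch of the layout this commit produces: two tabs (microphone recording and file upload) whose Transcribe buttons share the same handler and output components. The `translate_and_classify` body below is a stub standing in for the app's real Whisper + emotion-classification pipeline, and the `gr.Audio(source=...)` keyword follows the Gradio 3.x signature used in this file (Gradio 4+ renamed it to `sources=[...]`).

```python
import gradio as gr

# Stub in place of the app's actual Whisper transcription +
# emotion classification pipeline; returns (transcript, emotion).
def translate_and_classify(audio_path):
    return f"transcript of {audio_path}", "neutral"

with gr.Blocks() as demo:
    with gr.Column():
        # Two tabs feeding the same handler: one records from the
        # microphone, the other accepts an uploaded audio file.
        with gr.Tab("Record Audio"):
            audio_input_r = gr.Audio(label='Record Audio Input', source="microphone", type="filepath")
            transcribe_audio_r = gr.Button('Transcribe')

        with gr.Tab("Upload Audio as File"):
            audio_input_u = gr.Audio(label='Upload Audio', source="upload", type="filepath")
            transcribe_audio_u = gr.Button('Transcribe')

    with gr.Row():
        transcript_output = gr.Textbox(label="Transcription in the language you spoke", lines=3)
        emotion_output = gr.Textbox(label="Detected Emotion")

    # Both buttons write to the same output textboxes; only the
    # input component differs between the two click events.
    transcribe_audio_r.click(translate_and_classify, inputs=audio_input_r, outputs=[transcript_output, emotion_output])
    transcribe_audio_u.click(translate_and_classify, inputs=audio_input_u, outputs=[transcript_output, emotion_output])

demo.launch()
```

Wiring two events to the same function and outputs avoids duplicating the pipeline code, which is why only the input components needed distinct `_r`/`_u` names.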