RamAnanth1 committed
Commit c6c6f6f
1 Parent(s): ddab9eb

Update app.py

Files changed (1)
app.py +5 -3
app.py CHANGED
@@ -15,7 +15,7 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 emotion_classifier = pipeline("text-classification",model='bhadresh-savani/distilbert-base-uncased-emotion')
 
-def translate(audio):
+def translate_and_classify(audio):
 
     print("""
 
@@ -38,7 +38,9 @@ def translate(audio):
     print("Language Spoken: " + transcription.language)
     print("Transcript: " + transcription.text)
     print("Translated: " + translation.text)
-
+
+    sentiment = emotion_classifier(translation.text)
+
     return transcription.text
 
 with gr.Blocks() as demo:
@@ -48,7 +50,7 @@ with gr.Blocks() as demo:
 
     with gr.Row():
         with gr.Column():
-            gr.Markdown(""" ### Record audio """)
+            #gr.Markdown(""" ### Record audio """)
             audio_input = gr.Audio(label = 'Record Audio Input',source="microphone",type="filepath")
             with gr.Row():
                 transcribe_audio = gr.Button('Transcribe')
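
For reference, a minimal standalone sketch of the classification step this commit adds, using the same transformers pipeline that app.py already loads. Here translated_text is a hypothetical stand-in for translation.text inside translate_and_classify, and the printed score is illustrative only.

# Sketch of the new sentiment step, run outside the app for illustration.
from transformers import pipeline

# Same model app.py loads at startup.
emotion_classifier = pipeline(
    "text-classification",
    model="bhadresh-savani/distilbert-base-uncased-emotion",
)

# Hypothetical stand-in for translation.text in the app.
translated_text = "I am really excited to try this demo"
sentiment = emotion_classifier(translated_text)
print(sentiment)  # e.g. [{'label': 'joy', 'score': 0.98}]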