immelstorun committed
Commit 9bc71fd · 1 Parent(s): 73bfdef

Update app.py

Files changed (1)
  1. app.py +17 -23
app.py CHANGED
@@ -1,6 +1,8 @@
-import gradio as gr
 from speechbrain.pretrained.interfaces import foreign_class
+import gradio as gr
 import os
+import warnings
+warnings.filterwarnings("ignore")
 
 # Function to get the list of audio files in the 'rec/' directory
 def get_audio_files_list(directory="rec"):
@@ -13,13 +15,13 @@ def get_audio_files_list(directory="rec"):
 # Loading the speechbrain emotion detection model
 learner = foreign_class(
     source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
-    pymodule_file="custom_interface.py",
+    pymodule_file="custom_interface.py",
     classname="CustomEncoderWav2vec2Classifier"
 )
 
 # Building prediction function for Gradio
 emotion_dict = {
-    'sad': 'Sad',
+    'sad': 'Sad',
     'hap': 'Happy',
     'ang': 'Anger',
     'fea': 'Fear',
@@ -27,29 +29,21 @@ emotion_dict = {
     'neu': 'Neutral'
 }
 
-def selected_audio(audio_file):
-    if audio_file is None:
-        return None, "Please select an audio file."
-    file_path = os.path.join("rec", audio_file)
-    audio_data = gr.Audio(file=file_path)
+def predict_emotion(selected_audio):
+    file_path = os.path.join("rec", selected_audio)
     out_prob, score, index, text_lab = learner.classify_file(file_path)
     emotion = emotion_dict[text_lab[0]]
-    return audio_data, emotion
+    return emotion, file_path  # Return both emotion and file path
 
 # Get the list of audio files for the dropdown
 audio_files_list = get_audio_files_list()
 
-# Define Gradio blocks
-with gr.Blocks() as blocks:
-    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>" +
-                "Audio Emotion Detection" +
-                "</h1>")
-    with gr.Column():
-        input_audio_dropdown = gr.Dropdown(label="Select Audio", choices=audio_files_list)
-        audio_ui = gr.Audio()
-        output_text = gr.Textbox(label="Detected Emotion!")
-        detect_btn = gr.Button("Detect Emotion")
-        detect_btn.click(selected_audio, inputs=input_audio_dropdown, outputs=[audio_ui, output_text])
-
-# Launch the Gradio blocks interface
-blocks.launch()
+# Loading Gradio interface
+inputs = gr.Dropdown(label="Select Audio", choices=audio_files_list)
+outputs = [gr.outputs.Textbox(label="Predicted Emotion"), gr.outputs.Audio(label="Play Audio")]
+
+title = "ML Speech Emotion Detection3"
+description = "Speechbrain powered wav2vec 2.0 pretrained model on IEMOCAP dataset using Gradio."
+
+interface = gr.Interface(fn=predict_emotion, inputs=inputs, outputs=outputs, title=title, description=description)
+interface.launch()
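
The body of get_audio_files_list sits outside the diff context, so only its signature appears above. A minimal sketch of what such a helper could look like, assuming it simply lists the audio files found in rec/; the extension filter and the error handling are illustrative, not taken from the commit:

import os

def get_audio_files_list(directory="rec"):
    # List audio files in the directory; the extension filter is an assumption.
    try:
        return [f for f in os.listdir(directory)
                if f.lower().endswith((".wav", ".mp3", ".flac"))]
    except FileNotFoundError:
        # No 'rec/' directory yet: return an empty dropdown rather than crashing.
        return []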
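The updated app.py wires the dropdown, textbox, and audio player together through gr.Interface using the legacy gr.outputs component namespace. For reference, a minimal sketch of the same wiring written against the plain component classes (gr.Textbox, gr.Audio) that current Gradio releases expose; the placeholder prediction body and dropdown choices are illustrative, not part of the commit:

import gradio as gr

def predict_emotion(selected_audio):
    # Placeholder body for illustration; the real app classifies the file with SpeechBrain.
    file_path = f"rec/{selected_audio}"
    return "Neutral", file_path

demo = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Dropdown(label="Select Audio", choices=["sample.wav"]),  # hypothetical choices
    outputs=[
        gr.Textbox(label="Predicted Emotion"),
        gr.Audio(label="Play Audio", type="filepath"),  # plays the returned file path
    ],
    title="ML Speech Emotion Detection3",
)

if __name__ == "__main__":
    demo.launch()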