import gradio as gr
import wave
import matplotlib.pyplot as plt
import numpy as np
from extract_features import *  # provides extract_feature() used below
import pickle
import soundfile  # audio I/O backend used by extract_features
import librosa    # feature-computation backend used by extract_features

# Pre-trained random forest classifier for speech emotion recognition
classifier = pickle.load(open('finalized_rf.sav', 'rb'))
def emotion_predict(input):
    # Extract MFCC, chroma, mel, spectral-contrast and tonnetz features,
    # then classify the feature vector with the random forest.
    input_features = extract_feature(input, mfcc=True, chroma=True, mel=True, contrast=True, tonnetz=True)
    rf_prediction = classifier.predict(input_features.reshape(1, -1))[0]
    if rf_prediction == 'happy':
        return 'Happy 😄'
    elif rf_prediction == 'neutral':
        return 'Neutral 😐'
    elif rf_prediction == 'sad':
        return 'Sad 😢'
    else:
        return 'Angry 😤'
def plot_fig(input):
    # Read the raw 16-bit PCM samples from the WAV file and plot the waveform.
    wav = wave.open(input, 'r')
    raw = wav.readframes(-1)
    raw = np.frombuffer(raw, "int16")
    sampleRate = wav.getframerate()
    Time = np.linspace(0, len(raw) / sampleRate, num=len(raw))
    # Set figsize when the figure is created; assigning rcParams after
    # plt.figure() would not resize the already-created figure.
    fig = plt.figure(figsize=(100, 30))
    plt.title("Waveform Of the Audio", fontsize=100)
    plt.xticks(fontsize=50)
    plt.yticks(fontsize=50)
    plt.ylabel("Amplitude", fontsize=100)
    plt.plot(Time, raw, color='red')
    return fig
with gr.Blocks() as app:
    gr.Markdown(
        """
        # Speech Emotion Detector 🎵🎤
        This application classifies the emotion conveyed in input audio 🔊 into four categories:
        1. Happy 😄
        2. Neutral 😐
        3. Sad 😢
        4. Angry 😤
        """
    )
with gr.Tab("Record Audio"):
record_input = gr.Audio(source="microphone", type="filepath")
with gr.Accordion("Audio Visualization"):
plot_record = gr.Button("Display Audio Signal")
plot_record_c = gr.Plot(label='Waveform Of the Audio')
record_button = gr.Button("Detect Emotion")
record_output = gr.Text(label = 'Emotion Detected')
with gr.Tab("Upload Audio File"):
gr.Markdown(
"""
## Uploaded Audio should be of .wav format
"""
)
upload_input = gr.Audio(type="filepath")
with gr.Accordion("Audio Visualization"):
plot_upload = gr.Button("Display Audio Signal")
plot_upload_c = gr.Plot(label='Waveform Of the Audio')
upload_button = gr.Button("Detect Emotion")
upload_output = gr.Text(label = 'Emotion Detected')
    # Wire the buttons to the prediction and plotting callbacks.
    record_button.click(emotion_predict, inputs=record_input, outputs=record_output)
    upload_button.click(emotion_predict, inputs=upload_input, outputs=upload_output)
    plot_record.click(plot_fig, inputs=record_input, outputs=plot_record_c)
    plot_upload.click(plot_fig, inputs=upload_input, outputs=plot_upload_c)

app.launch()
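
# For reference: `extract_feature` comes from extract_features.py, which is not
# shown on this page. Below is a minimal sketch of what such a helper typically
# looks like -- an assumption inferred from the keyword arguments used above and
# the soundfile/librosa imports, not the Space's actual implementation. The
# hypothetical name `extract_feature_sketch` marks it as illustrative only.
def extract_feature_sketch(file_name, mfcc, chroma, mel, contrast, tonnetz):
    # Load the audio as float32 samples along with its sample rate.
    with soundfile.SoundFile(file_name) as sound_file:
        X = sound_file.read(dtype="float32")
        sample_rate = sound_file.samplerate
    result = np.array([])
    if chroma or contrast:
        # Magnitude spectrogram shared by the chroma and contrast features.
        stft = np.abs(librosa.stft(X))
    if mfcc:
        # 40 MFCCs averaged over time into a fixed-length vector.
        result = np.hstack((result, np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)))
    if chroma:
        result = np.hstack((result, np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)))
    if mel:
        result = np.hstack((result, np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T, axis=0)))
    if contrast:
        result = np.hstack((result, np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T, axis=0)))
    if tonnetz:
        # Tonnetz is computed on the harmonic component of the signal.
        result = np.hstack((result, np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T, axis=0)))
    # Concatenated 1-D feature vector, reshaped to (1, -1) by the caller
    # before being passed to classifier.predict().
    return result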