Update src/emotion_detector.py
src/emotion_detector.py · +21 -15
@@ -1,18 +1,24 @@
-
-
+# emotion_detector.py
+import torchaudio
+from speechbrain.inference.classifiers import AudioClassifier

-
-
+# Load the pretrained model once at import time
+classifier = AudioClassifier.from_hparams(
+    source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
+    savedir="pretrained_models/emotion-recognition"
+)

-
-
-"""
+EMOTION_EMOJIS = {
+    "angry": "😠",
+    "happy": "😊",
+    "neutral": "😐",
+    "sad": "😢",
+    "fearful": "😨"
+}

-
-
-
-
-
-
-emotion, emoji = detect_emotion(uploaded_file)
-st.markdown(f"## {emoji} Emotion: **{emotion.title()}**")
+def detect_emotion(audio_file):
+    signal, fs = torchaudio.load(audio_file)  # classify_file reads the file itself, so this load is redundant but harmless
+    prediction = classifier.classify_file(audio_file)
+    emotion = prediction[3][0]  # classify_file returns (out_prob, score, index, text_lab); text_lab is a one-element list
+    emoji = EMOTION_EMOJIS.get(emotion.lower(), "❓")
+    return emotion, emoji
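For anyone who wants to sanity-check the new module, a minimal smoke test might look like the sketch below. It is not part of the change: the import path assumes `src` is importable as a package, and `sample.wav` is a hypothetical local recording.

```python
# smoke_test.py — minimal sketch; "sample.wav" is a placeholder path
from src.emotion_detector import detect_emotion

emotion, emoji = detect_emotion("sample.wav")
print(f"{emoji} Detected emotion: {emotion}")
```

The removed lines suggest the Streamlit UI (`st.markdown(...)`) previously lived in this file; after this change the app module would presumably call `detect_emotion` in the same way.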
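One thing worth verifying before merging: the Hugging Face model card for `speechbrain/emotion-recognition-wav2vec2-IEMOCAP` loads this checkpoint through SpeechBrain's `foreign_class` helper, because the checkpoint ships a custom interface class rather than a stock classifier. If `AudioClassifier.from_hparams` cannot resolve the model, the model-card route is the fallback:

```python
# Loading path from the checkpoint's model card
from speechbrain.inference.interfaces import foreign_class

classifier = foreign_class(
    source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
    pymodule_file="custom_interface.py",  # shipped with the checkpoint
    classname="CustomEncoderWav2vec2Classifier",
)
out_prob, score, index, text_lab = classifier.classify_file("sample.wav")
print(text_lab[0])
```

Relatedly, this checkpoint is commonly reported to emit abbreviated labels such as `neu`, `ang`, `hap`, and `sad`; if that holds, none of them match the full-word keys in `EMOTION_EMOJIS` and every prediction would fall back to ❓, so the mapping keys are worth checking against real output.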