Update src/emotion_detector.py
Browse files- src/emotion_detector.py +11 -6
src/emotion_detector.py
CHANGED
@@ -1,11 +1,16 @@
|
|
1 |
# emotion_detector.py
|
2 |
import torchaudio
|
3 |
from speechbrain.inference.classifiers import AudioClassifier
|
|
|
|
|
4 |
|
5 |
-
#
|
|
|
|
|
|
|
6 |
classifier = AudioClassifier.from_hparams(
|
7 |
source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
|
8 |
-
savedir=
|
9 |
)
|
10 |
|
11 |
EMOTION_EMOJIS = {
|
@@ -16,9 +21,9 @@ EMOTION_EMOJIS = {
|
|
16 |
"fearful": "π¨"
|
17 |
}
|
18 |
|
19 |
-
def detect_emotion(
|
20 |
-
signal, fs = torchaudio.load(
|
21 |
-
prediction = classifier.classify_file(
|
22 |
-
emotion = prediction[3] #
|
23 |
emoji = EMOTION_EMOJIS.get(emotion.lower(), "β")
|
24 |
return emotion, emoji
|
|
|
1 |
# emotion_detector.py
# Speech-emotion recognition helper: loads a pretrained SpeechBrain
# wav2vec2 classifier (fine-tuned on IEMOCAP) once, at import time.
import torchaudio
from speechbrain.inference.classifiers import AudioClassifier
import os
import tempfile

# Use a temp directory that's guaranteed to be writable
# (avoids permission errors when the working directory is read-only,
# e.g. in containers or hosted notebook environments).
temp_dir = os.path.join(tempfile.gettempdir(), "emotion_model")

# Load pretrained model into temp directory.
# NOTE(review): the download/load happens at import time, so first import
# may be slow and requires network access — confirm that is acceptable
# for all callers of this module.
classifier = AudioClassifier.from_hparams(
    source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
    savedir=temp_dir
)
|
15 |
|
16 |
EMOTION_EMOJIS = {
|
|
|
21 |
"fearful": "π¨"
|
22 |
}
|
23 |
|
24 |
+
def detect_emotion(audio_path):
    """Classify the emotion expressed in an audio file.

    Parameters
    ----------
    audio_path : str
        Path to the audio file to analyse.

    Returns
    -------
    tuple[str, str]
        ``(emotion, emoji)`` — the predicted emotion label and its emoji
        from ``EMOTION_EMOJIS`` (a fallback symbol is used when the label
        has no mapping).
    """
    # classify_file loads and decodes the audio itself, so the previous
    # redundant torchaudio.load() call (whose result was never used) is gone.
    prediction = classifier.classify_file(audio_path)
    # SpeechBrain classifiers return (out_prob, score, index, text_lab);
    # text_lab is typically a *list* of label strings, so unwrap the first
    # element — calling .lower() directly on a list would raise AttributeError.
    label = prediction[3]
    emotion = label[0] if isinstance(label, (list, tuple)) else label
    emoji = EMOTION_EMOJIS.get(emotion.lower(), "β")
    return emotion, emoji
|