jc047 committed on
Commit
a063388
·
verified ·
1 Parent(s): 9f3ee4b

Update src/emotion_detector.py

Browse files
Files changed (1) hide show
  1. src/emotion_detector.py +21 -15
src/emotion_detector.py CHANGED
@@ -1,18 +1,24 @@
1
- import streamlit as st
2
- from emotion_detector import detect_emotion
 
3
 
4
- st.set_page_config(page_title="Real-Time Emotion to Emoji", page_icon="🎧")
5
- st.title("🎧 Real-Time Emotion to Emoji")
 
 
 
6
 
7
- st.markdown("""
8
- Upload a 5-second **WAV** audio clip of your voice, and the AI will detect your emotion and show an emoji!
9
- """)
 
 
 
 
10
 
11
- uploaded_file = st.file_uploader("πŸ“€ Upload your 5-second WAV audio", type=["wav"])
12
-
13
- if uploaded_file is not None:
14
- st.audio(uploaded_file, format='audio/wav')
15
-
16
- with st.spinner("Detecting emotion..."):
17
- emotion, emoji = detect_emotion(uploaded_file)
18
- st.markdown(f"## {emoji} Emotion: **{emotion.title()}**")
 
1
# emotion_detector.py
#
# Speech-emotion recognition backend: loads a pretrained SpeechBrain
# wav2vec2 model (IEMOCAP) once at import time and exposes
# `detect_emotion()` for callers (e.g. the Streamlit front-end).
import torchaudio
from speechbrain.inference.classifiers import AudioClassifier

# Load pretrained model.
# Downloaded from the Hugging Face Hub on first run and cached under
# `savedir`; later imports reuse the cached weights.
# NOTE(review): the emotion-recognition-wav2vec2-IEMOCAP model card
# loads this model via `foreign_class` with a custom interface —
# confirm `AudioClassifier.from_hparams` actually works for it.
classifier = AudioClassifier.from_hparams(
    source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
    savedir="pretrained_models/emotion-recognition"
)
10
 
11
# Map predicted emotion labels (lowercased) to a display emoji.
# The original values were mojibake — UTF-8 emoji bytes decoded as
# cp1252 (e.g. "angry" mapped to the four characters of 0xF0 0x9F 0x98
# 0xA0 instead of U+1F620) — restored here to the intended emoji.
# Short-code keys are included as well because IEMOCAP-trained
# SpeechBrain models typically emit abbreviated labels ("ang", "hap",
# "neu", "sad") — TODO(review): confirm against actual model output.
EMOTION_EMOJIS = {
    "angry": "\U0001F620",    # 😠
    "happy": "\U0001F604",    # 😄
    "neutral": "\U0001F610",  # 😐
    "sad": "\U0001F622",      # 😢
    "fearful": "\U0001F628",  # 😨
    # IEMOCAP-style short codes (aliases of the full names above)
    "ang": "\U0001F620",
    "hap": "\U0001F604",
    "neu": "\U0001F610",
    "fea": "\U0001F628",
}
18
 
19
def detect_emotion(audio_file):
    """Classify the emotion in an audio clip and pick a matching emoji.

    Args:
        audio_file: Audio input passed straight to
            ``classifier.classify_file``.
            NOTE(review): SpeechBrain's ``classify_file`` expects a
            filesystem path — confirm callers pass a path rather than an
            in-memory upload object.

    Returns:
        tuple[str, str]: ``(emotion_label, emoji)``, where ``emoji`` is
        ``"\u2753"`` (❓) when the label has no entry in
        ``EMOTION_EMOJIS``.
    """
    # Fixes vs. original:
    # - dropped the unused `torchaudio.load(audio_file)` call — its
    #   (signal, fs) result was never used and `classify_file` reads the
    #   audio itself;
    # - `classify_file` returns (out_prob, score, index, text_lab) and
    #   text_lab is a *list* of label strings, so calling `.lower()` on
    #   prediction[3] directly raised AttributeError — unwrap it first;
    # - the "unknown" fallback emoji was mojibake; restored to ❓.
    prediction = classifier.classify_file(audio_file)
    label = prediction[3]
    emotion = label[0] if isinstance(label, (list, tuple)) else label
    emoji = EMOTION_EMOJIS.get(emotion.lower(), "\u2753")
    return emotion, emoji