import streamlit as st
import os
import tempfile

from backend import predict_emotion
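# predict_emotion(path) is assumed to accept the path to a .wav file and return one of the
# emotion labels handled below ("Neutral", "Happy", "Sad", "Angry", "Fearful", "Disgusted",
# "Surprised"); see the backend module for the actual implementation.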

st.set_page_config(
    page_title="Audio Emotion Analyzer",
    page_icon="🎵",
    layout="centered"
)

st.title("🎵 Audio Emotion Analyzer")
st.markdown("Upload a .wav file or select an existing audio file to analyze the emotion in the speech.")


def get_audio_files():
    """Collect .wav files from the current directory and its immediate subdirectories."""
    audio_files = []
    for root, dirs, files in os.walk('.', topdown=True):
        # Only look at the current directory and one level of subdirectories.
        if root.count(os.sep) <= 1:
            for file in files:
                if file.lower().endswith('.wav'):
                    rel_path = os.path.join(root, file)
                    # Drop the leading "./" (or ".\" on Windows) for cleaner display.
                    if rel_path.startswith('./') or rel_path.startswith('.\\'):
                        rel_path = rel_path[2:]
                    audio_files.append(rel_path)
    return sorted(audio_files)


audio_files = get_audio_files()

col1, col2 = st.columns(2)

with col1:
    st.subheader("Upload your audio")
    uploaded_file = st.file_uploader("Choose a .wav file", type=["wav"])

with col2:
    st.subheader("Or select an existing file")
    selected_file = None
    if audio_files:
        selected_file = st.selectbox("Choose an audio file", ["None"] + audio_files)
    else:
        st.info("No .wav files found in the current directory or immediate subdirectories.")

audio_file = None
file_path = None

if uploaded_file is not None:
    # Persist the upload to a temporary file so the backend can read it from disk.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        tmp_file.write(uploaded_file.getvalue())
        file_path = tmp_file.name
    audio_file = uploaded_file.name
    st.audio(uploaded_file, format="audio/wav")
elif selected_file is not None and selected_file != "None":
    file_path = selected_file
    audio_file = selected_file
    st.audio(file_path, format="audio/wav")

if st.button("Analyze Emotion", disabled=(file_path is None)):
    if file_path:
        with st.spinner("Analyzing audio..."):
            emotion = predict_emotion(file_path)

        st.success("Analysis complete!")
        st.markdown(f"## Predicted Emotion: **{emotion}**")

        # Show a matching emoji for the predicted emotion.
        emoji_map = {
            "Neutral": "😐",
            "Happy": "😊",
            "Sad": "😢",
            "Angry": "😠",
            "Fearful": "😨",
            "Disgusted": "🤢",
            "Surprised": "😲"
        }
        emoji = emoji_map.get(emotion, "🤔")
        st.markdown(f"# {emoji}")

        # Remove the temporary file that was created for an uploaded clip.
        if uploaded_file is not None:
            os.unlink(file_path)
    else:
        st.warning("Please upload a file or select an existing file first.")

st.markdown("---")
st.markdown("""
### About this app
This application uses a pre-trained Wav2Vec2 model to analyze the emotional tone in speech audio.
The model can detect 7 different emotions: Neutral, Happy, Sad, Angry, Fearful, Disgusted, and Surprised.

### How to use
1. Upload a .wav file or select an existing audio file
2. Click the "Analyze Emotion" button
3. View the predicted emotion result
""")