import streamlit as st
import os
import tempfile
from backend import predict_emotion
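# predict_emotion (defined in backend.py, not shown in this file) is assumed to take
# the path to a .wav file and return a single emotion label string, e.g. "Happy".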
# Set page configuration
st.set_page_config(
    page_title="Audio Emotion Analyzer",
    page_icon="🎡",
    layout="centered"
)
# Title and description
st.title("🎡 Audio Emotion Analyzer")
st.markdown("Upload a .wav file or select an existing audio file to analyze the emotion in the speech.")
# Function to load audio files from current directory and subdirectories
def get_audio_files():
    audio_files = []
    # Scan current directory and immediate subdirectories
    for root, dirs, files in os.walk('.', topdown=True):
        # Limit depth to current directory and immediate subdirectories
        if root.count(os.sep) <= 1:  # Only include current dir and immediate subdirs
            for file in files:
                if file.lower().endswith('.wav'):
                    rel_path = os.path.join(root, file)
                    # Remove leading ./ or .\ from path
                    if rel_path.startswith('./') or rel_path.startswith('.\\'):
                        rel_path = rel_path[2:]
                    audio_files.append(rel_path)
    return sorted(audio_files)
# Get audio files
audio_files = get_audio_files()
# Create two columns for upload and file selection
col1, col2 = st.columns(2)
with col1:
    st.subheader("Upload your audio")
    uploaded_file = st.file_uploader("Choose a .wav file", type=["wav"])
with col2:
    st.subheader("Or select an existing file")
    selected_file = None
    if audio_files:
        selected_file = st.selectbox("Choose an audio file", ["None"] + audio_files)
    else:
        st.info("No .wav files found in the current directory or immediate subdirectories.")
# Determine which file to use
audio_file = None
file_path = None
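# Note: an uploaded file takes priority over a selected one; uploads are written to a
# temporary .wav file on disk so the backend can read them by path.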
if uploaded_file is not None:
    # Create a temporary file to save the uploaded file
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        tmp_file.write(uploaded_file.getvalue())
        file_path = tmp_file.name
    audio_file = uploaded_file.name
    st.audio(uploaded_file, format="audio/wav")
elif selected_file is not None and selected_file != "None":
    file_path = selected_file
    audio_file = selected_file
    st.audio(file_path, format="audio/wav")
# Submit button
if st.button("Analyze Emotion", disabled=(file_path is None)):
    if file_path:
        with st.spinner("Analyzing audio..."):
            # Call the backend function to predict emotion
            emotion = predict_emotion(file_path)
        # Display the result
        st.success("Analysis complete!")
        st.markdown(f"## Predicted Emotion: **{emotion}**")
        # Display emoji based on emotion
        emoji_map = {
            "Neutral": "😐",
            "Happy": "😊",
            "Sad": "😒",
            "Angry": "😠",
            "Fearful": "😨",
            "Disgusted": "🤢",
            "Surprised": "😲"
        }
        emoji = emoji_map.get(emotion, "🤔")
        st.markdown(f"# {emoji}")
        # Clean up temporary file if it was created
        if uploaded_file is not None:
            os.unlink(file_path)
    else:
        st.warning("Please upload a file or select an existing file first.")
# Add some information about the app
st.markdown("---")
st.markdown("""
### About this app
This application uses a pre-trained Wav2Vec2 model to analyze the emotional tone in speech audio.
The model can detect 7 different emotions: Neutral, Happy, Sad, Angry, Fearful, Disgusted, and Surprised.
### How to use
1. Upload a .wav file or select an existing audio file
2. Click the "Analyze Emotion" button
3. View the predicted emotion result
""")