shukdevdatta123 committed
Commit aaba9d0 · verified · Parent(s): 69464df

Update app.py

Files changed (1):
  app.py  +0 −31
app.py CHANGED

```diff
@@ -6,8 +6,6 @@ import tempfile
 import os
 import io
 from transformers import pipeline
-import numpy as np
-import wave
 import matplotlib.pyplot as plt

 # Function to convert video to audio
@@ -67,27 +65,6 @@ def detect_emotion(text):
     emotions = {emotion['label']: emotion['score'] for emotion in result[0]}
     return emotions

-# Function to visualize audio waveform
-def plot_waveform(audio_file):
-    with wave.open(audio_file, 'r') as w:
-        # Extract the signal data and sampling rate
-        signal = np.frombuffer(w.readframes(w.getnframes()), dtype=np.int16)
-        framerate = w.getframerate()
-
-        # Calculate the duration of the audio file
-        duration = len(signal) / framerate
-
-        # Create time axis for the waveform using correct duration and framerate
-        time = np.arange(0, len(signal)) / framerate
-
-        # Plot the waveform
-        plt.figure(figsize=(10, 4))
-        plt.plot(time, signal)
-        plt.title("Audio Waveform")
-        plt.xlabel("Duration (seconds)")
-        plt.ylabel("Amplitude")
-        st.pyplot(plt)
-
 # Streamlit app layout
 st.title("Video and Audio to Text Transcription with Emotion Detection and Visualization")
 st.write("Upload a video or audio file to convert it to transcription, detect emotions, and visualize the audio waveform.")
```
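For reference, the removed visualization can be reproduced outside Streamlit. A minimal sketch, assuming a 16-bit mono PCM WAV (stereo samples would be interleaved), with `plt.show()` standing in for the app's `st.pyplot(plt)`; the original also computed a `duration` variable it never used, dropped here:

```python
# Standalone sketch of the removed plot_waveform helper (not the app's code).
import wave

import matplotlib.pyplot as plt
import numpy as np


def plot_waveform(audio_file):
    with wave.open(audio_file, "rb") as w:
        # Read every frame and reinterpret the raw PCM bytes as 16-bit samples.
        signal = np.frombuffer(w.readframes(w.getnframes()), dtype=np.int16)
        framerate = w.getframerate()  # samples per second

    # Time axis in seconds: sample index divided by the sampling rate.
    time = np.arange(len(signal)) / framerate

    plt.figure(figsize=(10, 4))
    plt.plot(time, signal)
    plt.title("Audio Waveform")
    plt.xlabel("Duration (seconds)")
    plt.ylabel("Amplitude")
    plt.show()  # the app called st.pyplot(plt) here instead


plot_waveform("example.wav")  # hypothetical input file
```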
```diff
@@ -124,10 +101,6 @@ if tab == "Video":
         # Emotion detection
         emotions = detect_emotion(transcription)
         st.write(f"Detected Emotions: {emotions}")
-
-        # Plot the audio waveform
-        st.subheader("Audio Waveform Visualization")
-        plot_waveform(wav_audio_file)

         # Store transcription and audio file in session state
         st.session_state.transcription = transcription
```
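The surviving `detect_emotion` path relies on a Hugging Face `text-classification` pipeline. A minimal sketch of how `result[0]` comes to hold one `{'label': ..., 'score': ...}` dict per emotion; the model name here is an assumption, not necessarily the one app.py loads:

```python
# Sketch of detect_emotion; the model choice is hypothetical.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",  # assumed model
    top_k=None,  # score every label instead of only the best one
)


def detect_emotion(text):
    result = classifier([text])
    # For a one-element input list, result[0] is a list of
    # {'label': ..., 'score': ...} dicts, one per emotion label.
    return {emotion["label"]: emotion["score"] for emotion in result[0]}


print(detect_emotion("I can't believe this finally works!"))
```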
```diff
@@ -192,10 +165,6 @@ elif tab == "Audio":
         # Emotion detection
         emotions = detect_emotion(transcription)
         st.write(f"Detected Emotions: {emotions}")
-
-        # Plot the audio waveform
-        st.subheader("Audio Waveform Visualization")
-        plot_waveform(wav_audio_file)

         # Store transcription in session state
         st.session_state.transcription_audio = transcription
```
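Both tabs end by stashing the transcription in `st.session_state`. That matters because Streamlit re-executes the whole script on every widget interaction, so plain locals vanish between runs. A minimal sketch of the pattern, with a hypothetical `transcribe` helper standing in for the app's speech-to-text step:

```python
# Sketch of the session-state pattern; transcribe() is a placeholder.
import streamlit as st


def transcribe(file) -> str:
    return "(transcription placeholder)"  # hypothetical speech-to-text step


uploaded = st.file_uploader("Upload audio", type=["wav"])
if uploaded is not None:
    # Persist the result so later reruns (e.g. other widget clicks)
    # can reuse it without re-transcribing.
    st.session_state.transcription = transcribe(uploaded)

if "transcription" in st.session_state:
    st.write(st.session_state.transcription)
```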