Update app.py
app.py CHANGED
```diff
@@ -70,11 +70,17 @@ def detect_emotion(text):
 # Function to visualize audio waveform
 def plot_waveform(audio_file):
     with wave.open(audio_file, 'r') as w:
+        # Extract the signal data and sampling rate
         signal = np.frombuffer(w.readframes(w.getnframes()), dtype=np.int16)
         framerate = w.getframerate()
+
+        # Calculate the correct duration of the audio file
         duration = len(signal) / framerate
+
+        # Create time axis for the waveform
         time = np.linspace(0., duration, len(signal))
 
+        # Plot the waveform
         plt.figure(figsize=(10, 4))
         plt.plot(time, signal)
         plt.title("Audio Waveform")
```
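For reference, here is a minimal, self-contained sketch of the updated function with the imports it relies on (`wave`, `numpy`, `matplotlib`) added so it can run on its own. The diff hunk ends at the `plt.title` call, so how app.py actually renders the figure is not shown; the `plt.show()` call and the example filename below are assumptions for standalone use only.

```python
import wave

import matplotlib.pyplot as plt
import numpy as np


# Function to visualize audio waveform
def plot_waveform(audio_file):
    with wave.open(audio_file, 'r') as w:
        # Extract the signal data and sampling rate (assumes 16-bit PCM samples)
        signal = np.frombuffer(w.readframes(w.getnframes()), dtype=np.int16)
        framerate = w.getframerate()

        # Calculate the correct duration of the audio file
        duration = len(signal) / framerate

        # Create time axis for the waveform
        time = np.linspace(0., duration, len(signal))

        # Plot the waveform
        plt.figure(figsize=(10, 4))
        plt.plot(time, signal)
        plt.title("Audio Waveform")
        # Rendering step assumed here for standalone use; the real app may
        # hand the figure to a UI component instead of calling show()
        plt.show()


# Hypothetical usage with an example WAV file path
plot_waveform("speech_sample.wav")
```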