Update app.py
app.py CHANGED
@@ -5,6 +5,10 @@ from pydub import AudioSegment
 import tempfile
 import os
 import io
+from textblob import TextBlob
+import numpy as np
+import wave
+import matplotlib.pyplot as plt
 
 # Function to convert video to audio
 def video_to_audio(video_file):
@@ -51,9 +55,26 @@ def transcribe_audio(audio_file):
     except sr.RequestError:
         return "Could not request results from Google Speech Recognition service."
 
+# Function for sentiment analysis using TextBlob
+def analyze_sentiment(text):
+    blob = TextBlob(text)
+    sentiment = blob.sentiment
+    return sentiment
+
+# Function to visualize audio waveform
+def plot_waveform(audio_file):
+    with wave.open(audio_file, 'r') as w:
+        signal = np.frombuffer(w.readframes(w.getnframes()), dtype=np.int16)
+    plt.figure(figsize=(10, 4))
+    plt.plot(signal)
+    plt.title("Audio Waveform")
+    plt.xlabel("Sample")
+    plt.ylabel("Amplitude")
+    st.pyplot(plt)
+
 # Streamlit app layout
-st.title("Video and Audio to Text Transcription")
-st.write("Upload a video or audio file to convert it to transcription.")
+st.title("Video and Audio to Text Transcription with Sentiment and Visualization")
+st.write("Upload a video or audio file to convert it to transcription, analyze sentiment, and visualize the audio waveform.")
 
 # Create tabs to separate video and audio uploads
 tab = st.selectbox("Select the type of file to upload", ["Video", "Audio"])
@@ -71,6 +92,7 @@ if tab == "Video":
     # Add an "Analyze Video" button
     if st.button("Analyze Video"):
         with st.spinner("Processing video... Please wait."):
+
             # Convert video to audio
             audio_file = video_to_audio(tmp_video_path)
 
@@ -83,6 +105,14 @@ if tab == "Video":
             # Show the transcription
             st.text_area("Transcription", transcription, height=300)
 
+            # Sentiment analysis
+            sentiment = analyze_sentiment(transcription)
+            st.write(f"Sentiment: {sentiment}")
+
+            # Plot the audio waveform
+            st.subheader("Audio Waveform Visualization")
+            plot_waveform(wav_audio_file)
+
             # Store transcription and audio file in session state
             st.session_state.transcription = transcription
 
@@ -130,6 +160,7 @@ elif tab == "Audio":
     # Add an "Analyze Audio" button
     if st.button("Analyze Audio"):
         with st.spinner("Processing audio... Please wait."):
+
             # Convert audio to WAV if it's in MP3 format
             if uploaded_audio.type == "audio/mpeg":
                 wav_audio_file = convert_mp3_to_wav(tmp_audio_path)
@@ -142,6 +173,14 @@ elif tab == "Audio":
             # Show the transcription
             st.text_area("Transcription", transcription, height=300)
 
+            # Sentiment analysis
+            sentiment = analyze_sentiment(transcription)
+            st.write(f"Sentiment: {sentiment}")
+
+            # Plot the audio waveform
+            st.subheader("Audio Waveform Visualization")
+            plot_waveform(wav_audio_file)
+
             # Store transcription in session state
             st.session_state.transcription_audio = transcription
 
@@ -173,4 +212,4 @@ elif tab == "Audio":
         data=st.session_state.wav_audio_file_audio,
         file_name="converted_audio_audio.wav",
         mime="audio/wav"
-)
+)
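
Note on the new analyze_sentiment helper: TextBlob's default analyzer returns a Sentiment(polarity, subjectivity) namedtuple, so st.write(f"Sentiment: {sentiment}") prints the raw tuple. A minimal sketch of what the helper yields, runnable outside Streamlit (the sample text is illustrative only):

    from textblob import TextBlob

    text = "I love this transcription app, it works great."  # illustrative sample
    sentiment = TextBlob(text).sentiment

    # polarity ranges from -1.0 (negative) to 1.0 (positive);
    # subjectivity ranges from 0.0 (objective) to 1.0 (subjective).
    print(sentiment)                      # Sentiment(polarity=..., subjectivity=...)
    print(round(sentiment.polarity, 2))   # just the polarity score

Formatting the two fields separately (for example, f"Polarity: {sentiment.polarity:.2f}") would read better in the UI than the raw tuple, though the tuple output is what the committed code displays.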
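Note on the new plot_waveform helper: np.frombuffer(..., dtype=np.int16) assumes 16-bit PCM WAV data, and for stereo files the samples of both channels arrive interleaved in one array. Below is a sketch of a more defensive variant, under the assumption that inputs are standard 8-, 16-, or 32-bit PCM (24-bit WAV has no matching numpy dtype and is not handled here); the name plot_waveform_safe is hypothetical, not part of the committed code:

    import wave
    import numpy as np
    import matplotlib.pyplot as plt

    def plot_waveform_safe(audio_path):
        with wave.open(audio_path, "rb") as w:
            n_channels = w.getnchannels()
            sample_width = w.getsampwidth()  # bytes per sample
            frames = w.readframes(w.getnframes())

        # Map the WAV sample width to a numpy dtype (8-bit WAV is unsigned).
        dtype = {1: np.uint8, 2: np.int16, 4: np.int32}[sample_width]
        signal = np.frombuffer(frames, dtype=dtype)

        # Stereo frames interleave channels; keep only the first channel.
        if n_channels > 1:
            signal = signal[::n_channels]

        fig = plt.figure(figsize=(10, 4))
        plt.plot(signal)
        plt.title("Audio Waveform")
        plt.xlabel("Sample")
        plt.ylabel("Amplitude")
        return fig

Returning the figure and passing it to st.pyplot(fig) also avoids relying on matplotlib's implicit global figure, which Streamlit has deprecated for st.pyplot calls made without an explicit figure argument.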