import streamlit as st
import moviepy.editor as mp
import speech_recognition as sr
from pydub import AudioSegment
import tempfile
import os
import io
from transformers import pipeline
import matplotlib.pyplot as plt
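# Assumed environment (the original listing pins nothing): streamlit,
# SpeechRecognition, pydub, transformers (with a torch backend), matplotlib,
# and moviepy 1.x -- the moviepy.editor module was removed in moviepy 2.0.
# pydub and moviepy also expect an ffmpeg binary on the PATH.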

def video_to_audio(video_file):
    # Pull the audio track out of the video and write it to a temporary MP3.
    video = mp.VideoFileClip(video_file)
    audio = video.audio
    # NamedTemporaryFile replaces the deprecated, race-prone tempfile.mktemp.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp:
        temp_audio_path = tmp.name
    audio.write_audiofile(temp_audio_path)
    video.close()  # release the file handle held by moviepy
    return temp_audio_path

def convert_mp3_to_wav(mp3_file):
    # speech_recognition expects WAV/AIFF/FLAC input, so convert via pydub.
    audio = AudioSegment.from_mp3(mp3_file)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
        temp_wav_path = tmp.name
    audio.export(temp_wav_path, format="wav")
    return temp_wav_path

def transcribe_audio(audio_file):
    # Transcribe a WAV file with the free Google Web Speech API
    # (network access required; may fail on long recordings).
    recognizer = sr.Recognizer()
    audio = sr.AudioFile(audio_file)
    with audio as source:
        audio_data = recognizer.record(source)
    try:
        return recognizer.recognize_google(audio_data)
    except sr.UnknownValueError:
        return "Audio could not be understood."
    except sr.RequestError:
        return "Could not request results from Google Speech Recognition service."

def detect_emotion(text):
    # NOTE: the model is reloaded on every call; caching it (e.g. with
    # @st.cache_resource) would speed up repeated Streamlit reruns.
    emotion_pipeline = pipeline("text-classification",
                                model="j-hartmann/emotion-english-distilroberta-base",
                                return_all_scores=True)
    result = emotion_pipeline(text)
    # result[0] holds one {label, score} dict per emotion class.
    return {emotion["label"]: emotion["score"] for emotion in result[0]}
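
# The app's title promises a waveform visualization, yet the original listing
# never uses its matplotlib import. A minimal sketch of that missing step,
# assuming WAV data that pydub can decode; plot_waveform and its wav_source
# parameter are illustrative names, not part of the original code.
def plot_waveform(wav_source):
    # wav_source may be a file path or a file-like object such as io.BytesIO.
    audio = AudioSegment.from_wav(wav_source)
    samples = audio.get_array_of_samples()  # interleaved if multi-channel
    fig, ax = plt.subplots(figsize=(10, 3))
    ax.plot(samples, linewidth=0.5)  # downsampling would help for long files
    ax.set_xlabel("Sample index")
    ax.set_ylabel("Amplitude")
    st.pyplot(fig)
    plt.close(fig)  # free the figure once Streamlit has rendered it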

st.title("Video and Audio to Text Transcription with Emotion Detection and Visualization")
st.write("Upload a video or audio file to transcribe it, detect emotions in the text, and visualize the audio waveform.")

tab = st.selectbox("Select the type of file to upload", ["Video", "Audio"])

if tab == "Video":
    uploaded_video = st.file_uploader("Upload Video", type=["mp4", "mov", "avi"])

    if uploaded_video is not None:
        # Persist the upload to disk so moviepy can open it by path; keep the
        # original extension so ffmpeg recognizes the container.
        suffix = os.path.splitext(uploaded_video.name)[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_video:
            tmp_video.write(uploaded_video.read())
            tmp_video_path = tmp_video.name

        if st.button("Analyze Video"):
            with st.spinner("Processing video... Please wait."):
                audio_file = video_to_audio(tmp_video_path)
                wav_audio_file = convert_mp3_to_wav(audio_file)
                transcription = transcribe_audio(wav_audio_file)

                st.text_area("Transcription", transcription, height=300)

                emotions = detect_emotion(transcription)
                st.write(f"Detected Emotions: {emotions}")

                st.session_state.transcription = transcription

                # Keep the converted audio in memory so it survives reruns.
                with open(wav_audio_file, "rb") as f:
                    st.session_state.wav_audio_file = io.BytesIO(f.read())

                # Remove every temporary file, including the converted WAV.
                os.remove(tmp_video_path)
                os.remove(audio_file)
                os.remove(wav_audio_file)

    # Results live in session_state, so they persist across Streamlit reruns.
    if 'transcription' in st.session_state and 'wav_audio_file' in st.session_state:
        st.audio(st.session_state.wav_audio_file, format='audio/wav')
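
        # Sketch: render the waveform of the audio just analyzed, using the
        # hypothetical plot_waveform helper defined above.
        st.session_state.wav_audio_file.seek(0)
        plot_waveform(st.session_state.wav_audio_file)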

        st.download_button(
            label="Download Transcription",
            data=st.session_state.transcription,
            file_name="transcription.txt",
            mime="text/plain"
        )

        st.download_button(
            label="Download Audio",
            data=st.session_state.wav_audio_file,
            file_name="converted_audio.wav",
            mime="audio/wav"
        )

elif tab == "Audio":
    uploaded_audio = st.file_uploader("Upload Audio", type=["wav", "mp3"])

    if uploaded_audio is not None:
        suffix = os.path.splitext(uploaded_audio.name)[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_audio:
            tmp_audio.write(uploaded_audio.read())
            tmp_audio_path = tmp_audio.name

        if st.button("Analyze Audio"):
            with st.spinner("Processing audio... Please wait."):
                # Browsers may report MP3 uploads as audio/mpeg or audio/mp3.
                if uploaded_audio.type in ("audio/mpeg", "audio/mp3"):
                    wav_audio_file = convert_mp3_to_wav(tmp_audio_path)
                else:
                    wav_audio_file = tmp_audio_path

                transcription = transcribe_audio(wav_audio_file)

                st.text_area("Transcription", transcription, height=300)

                emotions = detect_emotion(transcription)
                st.write(f"Detected Emotions: {emotions}")

                st.session_state.transcription_audio = transcription

                with open(wav_audio_file, "rb") as f:
                    st.session_state.wav_audio_file_audio = io.BytesIO(f.read())

                # Remove the upload and, if a conversion happened, the WAV too.
                if wav_audio_file != tmp_audio_path:
                    os.remove(wav_audio_file)
                os.remove(tmp_audio_path)

    if 'transcription_audio' in st.session_state and 'wav_audio_file_audio' in st.session_state:
        st.audio(st.session_state.wav_audio_file_audio, format='audio/wav')
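
        # Same waveform sketch for the audio-only path.
        st.session_state.wav_audio_file_audio.seek(0)
        plot_waveform(st.session_state.wav_audio_file_audio)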

        st.download_button(
            label="Download Transcription",
            data=st.session_state.transcription_audio,
            file_name="transcription_audio.txt",
            mime="text/plain"
        )

        st.download_button(
            label="Download Audio",
            data=st.session_state.wav_audio_file_audio,
            file_name="converted_audio_audio.wav",
            mime="audio/wav"
        )