Spaces (status: Runtime error)
Commit d6d8b90 · Parent(s): a3e8d69
Added files:
- app.py +174 -0
- requirements.txt +11 -0
app.py
ADDED
@@ -0,0 +1,174 @@
import os
import subprocess
import gradio as gr
import whisper
import yt_dlp
import torch
import numpy as np
from moviepy.editor import VideoFileClip
from transformers import AutoModelForAudioClassification, AutoFeatureExtractor
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import BlipProcessor, BlipForConditionalGeneration
import cv2

# Define the necessary functions

def download_youtube_video(video_url, output_path):
    ydl_opts = {
        'format': 'bestvideo+bestaudio',
        'outtmpl': os.path.join(output_path, '%(title)s.%(ext)s'),
    }
    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        # Download the video and reuse the returned metadata for the filename.
        video_info = ydl.extract_info(video_url, download=True)
        video_title = video_info.get('title', 'video')
    # Assumes the merged download is saved as '<title>.webm'; titles with
    # special characters or a different merge container can break this.
    return os.path.join(output_path, f"{video_title}.webm")

def convert_to_mp4(input_path, output_path):
    # Remux into an mp4 container without re-encoding (stream copy).
    output_file = os.path.join(output_path, 'video.mp4')
    command = ['ffmpeg', '-i', input_path, '-c', 'copy', output_file]
    subprocess.run(command, check=True)
    return output_file

def extract_audio_from_video(video_path, output_path):
    # Takes output_path explicitly rather than reading a module-level global.
    video_clip = VideoFileClip(video_path)
    audio_output = os.path.join(output_path, 'audio.mp3')
    audio_clip = video_clip.audio
    audio_clip.write_audiofile(audio_output)
    return audio_output

def convert_mp3_to_wav(mp3_path, output_path):
    from pydub import AudioSegment
    audio = AudioSegment.from_mp3(mp3_path)
    wav_output = os.path.join(output_path, 'audio.wav')
    audio.export(wav_output, format="wav")
    return wav_output

def process_text(text):
    # Text-emotion model (TweetEval); class names listed in label-id order.
    model_name = "cardiffnlp/twitter-roberta-base-emotion"
    emotion_labels = ['anger', 'joy', 'optimism', 'sadness']

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)

    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits

    emotion_probs = torch.softmax(logits, dim=-1).squeeze()
    predicted_emotion = emotion_labels[torch.argmax(emotion_probs)]

    emotion_dict = {emotion_labels[i]: emotion_probs[i].item() for i in range(len(emotion_labels))}

    return emotion_dict, predicted_emotion

def preprocess_frame(frame):
    # OpenCV frames are BGR; convert to RGB before handing them to BLIP.
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame = cv2.resize(frame, (224, 224))
    pixel_values = caption_processor(images=frame, return_tensors="pt").pixel_values
    return pixel_values

def generate_caption(pixel_values):
    caption_ids = caption_model.generate(pixel_values)
    caption = caption_processor.batch_decode(caption_ids, skip_special_tokens=True)[0]
    return caption

def predict_emotions(caption):
    inputs = emotion_tokenizer(caption, return_tensors='pt', truncation=True, padding=True)
    with torch.no_grad():
        outputs = emotion_model(**inputs)

    emotion_probs = torch.softmax(outputs.logits, dim=1)

    # Label names come from the model config (id2label), in class-id order.
    labels = [emotion_model.config.id2label[i] for i in range(emotion_probs.shape[1])]
    predicted_emotions = {label: prob.item() for label, prob in zip(labels, emotion_probs[0])}

    return predicted_emotions

# Load models and processors once at the start
caption_model_name = "Salesforce/blip-image-captioning-base"
caption_processor = BlipProcessor.from_pretrained(caption_model_name)
caption_model = BlipForConditionalGeneration.from_pretrained(caption_model_name)

emotion_model_name = "j-hartmann/emotion-english-distilroberta-base"
emotion_tokenizer = AutoTokenizer.from_pretrained(emotion_model_name)
emotion_model = AutoModelForSequenceClassification.from_pretrained(emotion_model_name)

# Gradio Interface Function
def analyze_video(video_url):
    # Set output path for downloads
    output_path = './'

    # Download the video
    video_path = download_youtube_video(video_url, output_path)

    # Convert to mp4 format
    mp4_path = convert_to_mp4(video_path, output_path)

    # Extract audio from the video
    audio_path = extract_audio_from_video(mp4_path, output_path)

    # Convert audio to wav format for processing
    audio_wav_path = convert_mp3_to_wav(audio_path, output_path)

    # Process the audio using Whisper for transcription
    model_whisper = whisper.load_model("base")
    result_whisper = model_whisper.transcribe(audio_wav_path)
    transcript = result_whisper['text']

    # Process text to get emotions
    emotion_dict_text, predicted_emotion_text = process_text(transcript)

    # Process the video using image captioning and emotion recognition
    n_frame_interval = 60  # Process every 60th frame
    emotion_vectors_video = []

    # Process the video frames for emotions via BLIP captions
    video_capture = cv2.VideoCapture(mp4_path)
    total_frames_video = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))

    frame_count_video = 0

    while video_capture.isOpened():
        ret_video, frame_video = video_capture.read()

        if not ret_video or frame_count_video > total_frames_video:
            break

        if frame_count_video % n_frame_interval == 0:
            pixel_values_video = preprocess_frame(frame_video)
            caption_video = generate_caption(pixel_values_video)
            predicted_emotions_video = predict_emotions(caption_video)

            # Collect emotion vectors from frames
            emotion_vectors_video.append(np.array(list(predicted_emotions_video.values())))

        frame_count_video += 1

    video_capture.release()

    # Aggregate results from video frames
    average_emotion_vector_video = np.mean(emotion_vectors_video, axis=0)

    # Combine text and video emotion results, keeping the label order used to
    # build each probability vector so the argmax maps back to an emotion name
    video_emotion_labels = [emotion_model.config.id2label[i] for i in range(len(average_emotion_vector_video))]
    combined_labels = list(emotion_dict_text.keys()) + video_emotion_labels
    combined_emotion_vector_final = np.concatenate((np.array(list(emotion_dict_text.values())), average_emotion_vector_video))

    final_most_predicted_index = np.argmax(combined_emotion_vector_final)
    final_most_predicted_emotion = combined_labels[final_most_predicted_index]

    return transcript, predicted_emotion_text, final_most_predicted_emotion

# Create Gradio interface
iface = gr.Interface(fn=analyze_video,
                     inputs=gr.Textbox(label="YouTube Video URL"),
                     outputs=["text", "text", "text"],
                     title="Multimodal Emotion Recognition",
                     description="Enter a YouTube Video URL to analyze emotions from both audio and visual content.")

# Launch the app
if __name__ == "__main__":
    iface.launch()
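
For a quick local check outside the Gradio UI, the pipeline can be exercised by importing analyze_video directly. A minimal sketch, assuming the snippet is saved next to app.py; the URL is a placeholder and the models are downloaded on first use:

    # smoke_test.py -- hypothetical helper, not part of this commit
    from app import analyze_video

    # Placeholder URL; substitute any short, public YouTube video.
    url = "https://www.youtube.com/watch?v=VIDEO_ID"

    transcript, text_emotion, overall_emotion = analyze_video(url)
    print("Transcript:", transcript[:200])
    print("Text emotion:", text_emotion)
    print("Overall emotion:", overall_emotion)

Importing app loads the BLIP and emotion models at module level but does not start the web server, since iface.launch() is guarded by the __main__ check.
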
requirements.txt
ADDED
@@ -0,0 +1,11 @@
gradio==3.0.0
pytube==10.0.0
pydub==0.25.1
transformers==4.39.0
torchaudio==0.12.1
librosa==0.9.2
moviepy==1.0.3
openai-whisper
yt-dlp==2023.03.01
torch==1.12.1
opencv-python
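
Note that ffmpeg is a system binary rather than a pip package: convert_to_mp4 shells out to it, and moviepy, pydub, and Whisper rely on it as well. On Hugging Face Spaces the usual way to provide it is an apt package list in packages.txt; a one-line sketch, assuming the default Debian-based Space image:

    # packages.txt
    ffmpeg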