Garvitj committed on
Commit a22f055 · verified · 1 Parent(s): 2ffd189

Update app.py

Files changed (1)
  1. app.py +101 -114
app.py CHANGED
@@ -1,140 +1,127 @@
  import gradio as gr
  import numpy as np
- import librosa
  import cv2
- import json
- import ffmpeg
  import speech_recognition as sr
- from transformers import AutoModelForCausalLM, AutoTokenizer
- from tensorflow.keras.preprocessing.text import tokenizer_from_json
  from tensorflow.keras.models import load_model
  from tensorflow.keras.preprocessing.sequence import pad_sequences
- from tensorflow.keras.preprocessing.image import img_to_array
  from collections import Counter
- import os

- # Load necessary models and files
- text_model = load_model('model_for_text_emotion_updated(1).keras')  # Text emotion model
  with open('tokenizer.json') as json_file:
-     tokenizer = tokenizer_from_json(json.load(json_file))  # Tokenizer for text emotion
- audio_model = load_model('my_model.h5')  # Audio emotion model
- image_model = load_model('model_emotion.h5')  # Image emotion model
-
- # Load LLM model from Hugging Face
- llama_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")  # Small OPT model
- llama_tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
-
- # Emotion mapping
- emotion_mapping = {0: "anger", 1: "disgust", 2: "fear", 3: "joy", 4: "neutral", 5: "sadness", 6: "surprise"}

  # Preprocess text for emotion prediction
  def preprocess_text(text):
-     tokens = [word for word in text.lower().split() if word.isalnum()]
-     return ' '.join(tokens)
-
- # Predict emotion from text
- def predict_text_emotion(text):
-     preprocessed_text = preprocess_text(text)
-     seq = tokenizer.texts_to_sequences([preprocessed_text])
-     padded_seq = pad_sequences(seq, maxlen=35)
-     prediction = text_model.predict(padded_seq)
-     emotion_index = np.argmax(prediction)
-     return emotion_mapping[emotion_index]

  # Extract audio features and predict emotion
- def extract_audio_features(audio_data, sample_rate):
-     if not isinstance(audio_data, np.ndarray):
-         audio_data = np.array(audio_data, dtype=np.float32)  # Ensure it's a NumPy array with float type
-     else:
-         audio_data = audio_data.astype(np.float32)  # Convert to float32
-
-     mfcc = librosa.feature.mfcc(y=audio_data, sr=sample_rate, n_mfcc=704)
-     mfcc = np.mean(mfcc.T, axis=0)  # Compute mean across time
-     features = np.expand_dims(mfcc, axis=0)  # Add batch dimension
-     return features
-
-
- def predict_audio_emotion(audio_data, sample_rate):
-     features = extract_audio_features(audio_data, sample_rate)
-     features = np.reshape(features, (1, 40))  # Match model expected input
-     prediction = audio_model.predict(features)
      emotion_index = np.argmax(prediction)
-     return emotion_mapping[emotion_index]
-
- # Process video and predict emotions from frames
- def process_video(video_path):
-     cap = cv2.VideoCapture(video_path)
-     frame_rate = cap.get(cv2.CAP_PROP_FPS)
-     predictions = []
-
-     while cap.isOpened():
-         ret, frame = cap.read()
-         if not ret:
-             break
-         if int(cap.get(cv2.CAP_PROP_POS_FRAMES)) % int(frame_rate) == 0:
-             frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-             frame = cv2.resize(frame, (48, 48))
-             frame = img_to_array(frame) / 255.0
-             frame = np.expand_dims(frame, axis=0)
-             prediction = image_model.predict(frame)
-             predictions.append(np.argmax(prediction))
-
-     cap.release()
-     most_common_emotion = Counter(predictions).most_common(1)[0][0]
-     return emotion_mapping[most_common_emotion]
-
- # Extract audio from video using ffmpeg-python
- def extract_audio_from_video(video_path):
-     audio_file = 'audio.wav'
-     (ffmpeg
-      .input(video_path)
-      .output(audio_file, format='wav', acodec='pcm_s16le')
-      .run(overwrite_output=True))
-     return audio_file

- def transcribe_audio(audio_file):
      recognizer = sr.Recognizer()
-     with sr.AudioFile(audio_file) as source:
-         audio_record = recognizer.record(source)
-     return recognizer.recognize_google(audio_record)

- # Integrating with LLM to adjust responses based on detected emotion
- def interact_with_llm(emotion, user_input):
-     prompt = f"The user is feeling {emotion}. Respond to their question in an empathetic and appropriate manner: {user_input}"

-     inputs = llama_tokenizer(prompt, return_tensors="pt")
-     outputs = llama_model.generate(**inputs, max_length=200)
-     response = llama_tokenizer.decode(outputs[0], skip_special_tokens=True)

-     return response
-
- # Main function to process video and predict emotions
- def transcribe_and_predict_video(video_path):
-     # Extract audio from video and predict text-based emotion
-     audio_file = extract_audio_from_video(video_path)
-     text = transcribe_audio(audio_file)
-     text_emotion = predict_text_emotion(text)
-
-     # Predict emotion from video frames (image-based)
-     image_emotion = process_video(video_path)
-
-     # Predict emotion from audio (sound-based)
-     audio_data, sample_rate = librosa.load(audio_file, sr=None)
-     audio_emotion = predict_audio_emotion(audio_data, sample_rate)
-
-     # Combine detected emotions for final output (majority voting can be implemented)
-     final_emotion = image_emotion  # Using image emotion as primary

-     # Get response from LLM
-     llm_response = interact_with_llm(final_emotion, text)

-     return f"Emotion Detected: {final_emotion}\nLLM Response: {llm_response}"

- # Create Gradio interface
- iface = gr.Interface(fn=transcribe_and_predict_video,
-                      inputs=gr.Video(),
                       outputs="text",
-                      title="Emotion-Responsive LLM for Video",
-                      description="Upload a video to get emotion predictions and LLM responses based on detected emotions.")

  iface.launch()
 
  import gradio as gr
  import numpy as np
  import cv2
+ import librosa
+ import tempfile
+ import wave
+ import os
  import speech_recognition as sr
+ import pickle
+ import json
  from tensorflow.keras.models import load_model
+ from tensorflow.keras.preprocessing.text import tokenizer_from_json
  from tensorflow.keras.preprocessing.sequence import pad_sequences
+ import nltk
  from collections import Counter
+ from transformers import LlamaTokenizer, LlamaForCausalLM
+ import moviepy.editor as mp  # required for mp.VideoFileClip in process_video; missing from the original import list
 
+ # Initialize necessary models and tools
+ # Load the tokenizer and model for text-based emotion prediction
  with open('tokenizer.json') as json_file:
+     tokenizer_json = json.load(json_file)
+     tokenizer = tokenizer_from_json(tokenizer_json)
+ text_model = load_model('model_for_text_emotion_updated(1).keras')
+
+ # Load the audio emotion model and scaler
+ with open('encoder.pkl', 'rb') as file:
+     encoder = pickle.load(file)
+ with open('scaler.pkl', 'rb') as file:
+     scaler = pickle.load(file)
+ audio_model = load_model('my_model.h5')
+
+ # Load the LLaMA model for question answering
+ llama_tokenizer = LlamaTokenizer.from_pretrained('huggingface/llama-7b')
+ llama_model = LlamaForCausalLM.from_pretrained('huggingface/llama-7b')
+
+ # Initialize NLTK tools
+ nltk.download('punkt')
+ nltk.download('wordnet')
+ nltk.download('stopwords')
+ lemmatizer = nltk.WordNetLemmatizer()
+ stop_words = set(nltk.corpus.stopwords.words('english'))

  # Preprocess text for emotion prediction
  def preprocess_text(text):
+     tokens = nltk.word_tokenize(text.lower())
+     tokens = [word for word in tokens if word.isalnum() and word not in stop_words]
+     lemmatized_tokens = [lemmatizer.lemmatize(word) for word in tokens]
+     return ' '.join(lemmatized_tokens)

  # Extract audio features and predict emotion
+ def extract_audio_features(data, sample_rate):
+     result = np.array([])
+     zcr = np.mean(librosa.feature.zero_crossing_rate(y=data).T, axis=0)
+     result = np.hstack((result, zcr))
+     mfcc = np.mean(librosa.feature.mfcc(y=data, sr=sample_rate).T, axis=0)
+     result = np.hstack((result, mfcc))
+     return result
+
+ def predict_emotion_from_audio(audio_data):
+     sample_rate, data = audio_data
+     features = extract_audio_features(data, sample_rate)
+     features = np.expand_dims(features, axis=0)
+     scaled_features = scaler.transform(features)
+     prediction = audio_model.predict(scaled_features)
      emotion_index = np.argmax(prediction)
+     emotion_array = np.zeros((1, len(encoder.categories_[0])))
+     emotion_array[0, emotion_index] = 1
+     emotion_label = encoder.inverse_transform(emotion_array)[0]
+     return emotion_label

+ # Extract text from audio (speech recognition)
+ def extract_text_from_audio(audio_path):
      recognizer = sr.Recognizer()
+     with sr.AudioFile(audio_path) as source:
+         audio_data = recognizer.record(source)
+     text = recognizer.recognize_google(audio_data)
+     return text
+
+ # Use LLaMA to answer questions based on the text
+ def ask_llama(question, context):
+     inputs = llama_tokenizer(question, context, return_tensors="pt")
+     outputs = llama_model.generate(inputs['input_ids'], max_length=150)
+     answer = llama_tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return answer
+
+ # Process the video and extract text, emotion, and context for LLaMA
+ def process_video(video_path):
+     # Extract audio from the video
+     video = mp.VideoFileClip(video_path)
+     if video.audio is None:
+         raise ValueError("No audio found in the video.")
+
+     audio = video.audio
+     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio_file:
+         temp_audio_path = temp_audio_file.name
+     audio.write_audiofile(temp_audio_path)

+     # Extract text from the audio
+     video_text = extract_text_from_audio(temp_audio_path)

+     # Predict emotions from the text and audio
+     preprocessed_text = preprocess_text(video_text)
+     title_seq = tokenizer.texts_to_sequences([preprocessed_text])
+     padded_title_seq = pad_sequences(title_seq, maxlen=35, padding='post', truncating='post')
+     text_emotion_prediction = text_model.predict(np.array(padded_title_seq))
+     text_emotion = ['anger', 'disgust', 'fear', 'joy', 'neutral', 'sadness', 'surprise'][np.argmax(text_emotion_prediction)]

+     audio_data = audio.to_soundarray(fps=audio.fps)
+     audio_emotion = predict_emotion_from_audio((audio.fps, audio_data))

+     # Answer user queries based on the video text
+     context = video_text
+     return context, text_emotion, audio_emotion

+ # Define Gradio Interface
+ def video_query_interface(video, question):
+     context, text_emotion, audio_emotion = process_video(video)
+     answer = ask_llama(question, context)
+     return f"Text Emotion: {text_emotion}, Audio Emotion: {audio_emotion}\nAnswer: {answer}"

+ iface = gr.Interface(fn=video_query_interface,
+                      inputs=[gr.Video(), gr.Textbox()],
                       outputs="text",
+                      title="Video Emotion and Q&A",
+                      description="Upload a video and ask a question based on the audio content.")

  iface.launch()