"""Detect a user's emotion from webcam frames using OpenCV face detection and a pretrained Keras model."""

import cv2
import numpy as np
from keras.models import model_from_json
from collections import Counter
import time

# Map model output indices to emotion labels
emotion_dict = {0: "Happy", 1: "Neutral/Sad", 2: "Sad"}

# Emotion labels collected across processed frames
detected_emotions = []


def reset_detected_emotions():
    """Clear the emotions collected so far."""
    global detected_emotions
    detected_emotions = []


def process_frame(cap2, emotion_model):
    """Read one frame, detect faces, and record the predicted emotion for each face."""
    global detected_emotions

    ret, frame = cap2.read()
    if not ret:
        # The camera returned no frame; skip this iteration
        return
    frame = cv2.resize(frame, (1280, 720))

    face_detector = cv2.CascadeClassifier('emotion/haarcascades/haarcascade_frontalface_default.xml')
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces in the grayscale frame
    num_faces = face_detector.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)

    for (x, y, w, h) in num_faces:
        # Crop the face region and resize it to the 48x48 grayscale input the model expects
        roi_gray_frame = gray_frame[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, (48, 48)), -1), 0)

        # Predict the emotion and record the label with the highest score
        emotion_prediction = emotion_model.predict(cropped_img)
        maxindex = int(np.argmax(emotion_prediction))
        detected_emotions.append(emotion_dict[maxindex])


def get_most_common_emotion():
    """Return the most frequently detected emotion, or None if nothing was detected."""
    global detected_emotions
    if detected_emotions:
        counter = Counter(detected_emotions)
        most_common_emotion = counter.most_common(1)[0][0]
        return most_common_emotion
    else:
        return None


def call_me():
    """Capture webcam frames for a few seconds and return the dominant detected emotion."""
    # Rebuild the model architecture from JSON and load the trained weights
    with open('emotion/model/emotion_model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    emotion_model = model_from_json(loaded_model_json)
    emotion_model.load_weights("emotion/model/emotion_model.h5")
    print("Loaded model from disk")

    cap2 = cv2.VideoCapture(0)

    # Process webcam frames for a fixed duration (in seconds)
    duration = 5
    end_time = time.time() + duration
    while time.time() < end_time:
        process_frame(cap2, emotion_model)

    cap2.release()
    cv2.destroyAllWindows()

    most_common_emotion = get_most_common_emotion()
    return most_common_emotion
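

# Usage sketch (not part of the original module; the __main__ guard, variable name, and
# printed messages below are assumptions for illustration): running this file directly
# would capture for a few seconds and print the dominant emotion returned by call_me().
if __name__ == "__main__":
    dominant_emotion = call_me()
    if dominant_emotion is not None:
        print(f"Most common emotion: {dominant_emotion}")
    else:
        print("No face detected during the capture window.")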