Spaces:
Runtime error
Runtime error
Upload emotion.py
Browse files- emotion.py +57 -0
emotion.py
ADDED
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
from deepface import DeepFace

# Real-time webcam emotion detection: detect faces with a Haar cascade,
# classify each face with DeepFace's pre-trained "Emotion" model, and
# overlay the predicted label on the live video feed. Press 'q' to quit.

# Load the pre-trained emotion detection model once, outside the capture loop.
# NOTE(review): DeepFace.build_model's return type varies across deepface
# versions; confirm the returned object exposes .predict() on the pinned version.
model = DeepFace.build_model("Emotion")

# Define emotion labels, in the order the Emotion model emits its 7 scores.
emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']

# Load face cascade classifier (the XML ships with OpenCV).
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Start capturing video from the default camera (device 0).
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    # Fail fast with a clear message instead of crashing on a None frame below.
    raise RuntimeError("Could not open video capture device 0")

try:
    while True:
        # Capture frame-by-frame.
        ret, frame = cap.read()
        # Bug fix: the original ignored `ret`; a failed grab leaves `frame`
        # as None and cv2.cvtColor raises. Exit the loop cleanly instead.
        if not ret:
            break

        # Convert frame to grayscale for both detection and classification.
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Detect faces in the frame.
        faces = face_cascade.detectMultiScale(
            gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

        for (x, y, w, h) in faces:
            # Extract the face ROI (Region of Interest) from the gray frame.
            face_roi = gray_frame[y:y + h, x:x + w]

            # Resize to 48x48, the input resolution the Emotion model expects.
            resized_face = cv2.resize(face_roi, (48, 48), interpolation=cv2.INTER_AREA)

            # Scale pixel values to [0, 1].
            normalized_face = resized_face / 255.0

            # Add batch and channel dimensions: shape (1, 48, 48, 1).
            reshaped_face = normalized_face.reshape(1, 48, 48, 1)

            # Predict emotions; preds is the 7-way score vector for this face.
            preds = model.predict(reshaped_face)[0]
            emotion_idx = preds.argmax()
            emotion = emotion_labels[emotion_idx]

            # Draw rectangle around face and label with predicted emotion.
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
            cv2.putText(frame, emotion, (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)

        # Display the resulting frame.
        cv2.imshow('Real-time Emotion Detection', frame)

        # Press 'q' to exit.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    # Release the capture and close all windows even if the loop raised.
    cap.release()
    cv2.destroyAllWindows()
|