File size: 2,845 Bytes
46274ff
c096457
bb9fde7
c096457
46274ff
c096457
 
 
 
 
 
 
 
af88cea
 
 
 
 
 
 
 
 
c096457
1effd41
 
 
 
 
e36cb90
1effd41
 
c096457
 
1effd41
 
 
c096457
1effd41
 
 
 
 
 
 
af88cea
 
 
 
 
 
 
 
 
 
 
1effd41
af88cea
 
 
 
 
1effd41
af88cea
aa1d293
1effd41
af88cea
1effd41
 
af88cea
aa1d293
1effd41
 
 
 
 
 
 
 
 
af88cea
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
import streamlit as st
import cv2
import numpy as np
from PIL import Image

# Configure the Streamlit page before any other st.* call (required ordering).
st.set_page_config(page_title="Emotion Recognition App", layout="centered")

st.title("Emotion Recognition App")

# File-upload widget; returns None until the user picks a file.
# Only common image extensions are accepted.
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

# Face detector: Haar cascade bundled with the OpenCV install
# (cv2.data.haarcascades is the package's data directory).
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

# Emotion classifier loaded from a local ONNX file.
# NOTE(review): readNetFromONNX raises if the file is missing — the app
# assumes the model sits next to the script; confirm deployment layout.
emotion_model_path = "emotion_recognition.onnx"  # Replace with your model path
emotion_net = cv2.dnn.readNetFromONNX(emotion_model_path)

# Class labels, indexed by the network's output channel.
# NOTE(review): order must match the model's training labels — verify
# against the model's documentation before trusting predictions.
emotion_labels = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]

# Resize image to reduce memory usage
def resize_image(image, max_size=(800, 800)):
    """
    Return a copy of *image* shrunk to fit within *max_size*.

    Aspect ratio is preserved, and images already within the bounds are
    returned at their original size (``Image.thumbnail`` never upscales).

    Fix: ``Image.thumbnail`` resizes in place, so the original version
    silently mutated the caller's image object. Working on a copy keeps
    the function side-effect free; the returned value is unchanged.
    """
    resized = image.copy()
    resized.thumbnail(max_size, Image.Resampling.LANCZOS)
    return resized

# Process the uploaded image
if uploaded_file is not None:
    # Reject very large uploads before decoding to keep memory bounded.
    if uploaded_file.size > 10 * 1024 * 1024:  # 10 MB limit
        st.error("File too large. Please upload an image smaller than 10 MB.")
    else:
        # Open and normalize the image. PNG uploads may be RGBA,
        # palette, or single-channel; cv2.cvtColor(..., COLOR_RGB2GRAY)
        # below requires a 3-channel RGB array, so force RGB here
        # (fixes a crash on e.g. transparent PNGs).
        image = Image.open(uploaded_file).convert("RGB")
        image = resize_image(image)

        # PIL image -> HxWx3 uint8 numpy array in RGB channel order.
        image_np = np.array(image)

        # The Haar cascade operates on grayscale input.
        gray_image = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)

        # Detected faces come back as (x, y, w, h) pixel rectangles.
        faces = face_cascade.detectMultiScale(
            gray_image, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
        )

        if len(faces) > 0:
            for (x, y, w, h) in faces:
                # Crop the face region and build the network input blob.
                # NOTE(review): the 64x64 size and (104, 117, 123) mean
                # are face-detector-style preprocessing — confirm they
                # match what this emotion ONNX model was trained with.
                face_roi = image_np[y:y + h, x:x + w]
                face_blob = cv2.dnn.blobFromImage(
                    face_roi, 1.0, (64, 64), (104, 117, 123), swapRB=True
                )

                # Run the classifier and pick the top-scoring class label.
                emotion_net.setInput(face_blob)
                predictions = emotion_net.forward()
                emotion = emotion_labels[int(np.argmax(predictions))]

                # Annotate in place: green box around the face, label above.
                cv2.rectangle(image_np, (x, y), (x + w, y + h), (0, 255, 0), 2)
                cv2.putText(
                    image_np,
                    emotion,
                    (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.9,
                    (255, 0, 0),
                    2,
                )

            # Display the processed image
            st.image(image_np, caption="Processed Image", use_column_width=True)
        else:
            st.warning("No faces detected in the image.")