import streamlit as st
import face_recognition
import numpy as np
import cv2
from PIL import Image

# Set the page config
st.set_page_config(page_title="Emotion Recognition App", layout="centered")

st.title("Emotion Recognition App")

# Upload an image
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

# Define simple emotion mapping based on facial features (for demonstration purposes)
def detect_emotion(face_landmarks):
    """
    A simple mock-up function for detecting emotions based on landmarks.
    Replace with a real classifier as needed; a rough landmark-based
    alternative is sketched in detect_smile below.
    """
    if face_landmarks:
        # Placeholder: any face with detected landmarks is labeled "Happy"
        return "Happy"
    return "Neutral"

# Resize image to reduce memory usage
def resize_image(image, max_size=(800, 800)):
    """
    Resizes the image to the specified maximum size while maintaining aspect ratio.
    """
    image.thumbnail(max_size, Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10
    return image

# Process the uploaded image
if uploaded_file is not None:
    # Check file size to prevent loading large images
    if uploaded_file.size > 10 * 1024 * 1024:  # 10 MB limit
        st.error("File too large. Please upload an image smaller than 10 MB.")
    else:
        # Open and resize the image
        image = Image.open(uploaded_file)
        image = resize_image(image)

        # Convert image to numpy array
        image_np = np.array(image)

        # PIL arrays are already RGB (or RGBA); face_recognition expects RGB,
        # so only the alpha channel (or a grayscale image) needs converting
        if len(image_np.shape) == 3 and image_np.shape[2] == 4:  # RGBA to RGB
            image_np = cv2.cvtColor(image_np, cv2.COLOR_RGBA2RGB)
        elif len(image_np.shape) == 2:  # Grayscale to RGB
            image_np = cv2.cvtColor(image_np, cv2.COLOR_GRAY2RGB)

        # Detect faces, then compute landmarks for those same locations so the
        # two lists stay aligned and detection runs only once
        face_locations = face_recognition.face_locations(image_np)
        face_landmarks_list = face_recognition.face_landmarks(image_np, face_locations)

        if face_locations:
            for face_location, face_landmarks in zip(face_locations, face_landmarks_list):
                # Draw a rectangle around the face
                top, right, bottom, left = face_location
                cv2.rectangle(image_np, (left, top), (right, bottom), (0, 255, 0), 2)

                # Detect emotion based on landmarks
                emotion = detect_emotion(face_landmarks)

                # Display emotion above the face
                cv2.putText(
                    image_np,
                    emotion,
                    (left, top - 10),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.9,
                    (255, 0, 0),
                    2,
                )

            # Display the processed image
            st.image(image_np, caption="Processed Image", use_container_width=True)  # use_column_width is deprecated
        else:
            st.warning("No faces detected in the image.")
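
# To run the app locally (assuming this file is saved as app.py):
#   streamlit run app.py
# Note: face_recognition depends on dlib, which must be installed separately.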