# Emotion Recognition App — Streamlit demo (Hugging Face Spaces)
import streamlit as st | |
import cv2 | |
import numpy as np | |
from PIL import Image | |
# --- Module-level setup: page config, upload widget, and models ---

# Streamlit page setup; set_page_config must be the first Streamlit call.
st.set_page_config(page_title="Emotion Recognition App", layout="centered")
st.title("Emotion Recognition App")

# File-upload widget; returns None until the user provides an image.
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

# Haar-cascade face detector bundled with the OpenCV installation.
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
)

# ONNX emotion classifier loaded through OpenCV's DNN module.
emotion_model_path = "emotion_recognition.onnx"  # Replace with your model path
emotion_net = cv2.dnn.readNetFromONNX(emotion_model_path)

# Class labels ordered to match the model's output vector
# (taken from the model documentation — verify against the actual model).
emotion_labels = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
# Resize image to reduce memory usage
def resize_image(image, max_size=(800, 800)):
    """Shrink *image* in place so it fits within *max_size*, then return it.

    Uses PIL's thumbnail(), which preserves aspect ratio and only ever
    downsizes. NOTE: the image object is mutated in place; it is also
    returned for call-site convenience.
    """
    resample_filter = Image.Resampling.LANCZOS  # high-quality downsampling
    image.thumbnail(max_size, resample_filter)
    return image
# --- Per-upload processing pipeline ---
if uploaded_file is not None:
    # Reject very large uploads before decoding to bound memory use.
    if uploaded_file.size > 10 * 1024 * 1024:  # 10 MB limit
        st.error("File too large. Please upload an image smaller than 10 MB.")
    else:
        # Decode and normalize to 3-channel RGB. PNG uploads are often
        # RGBA, palette ("P"), or grayscale ("L"); without convert("RGB")
        # the COLOR_RGB2GRAY conversion below raises on those modes.
        image = Image.open(uploaded_file).convert("RGB")
        image = resize_image(image)

        # HxWx3 uint8 RGB array for OpenCV processing and drawing.
        image_np = np.array(image)

        # Haar cascades operate on single-channel grayscale input.
        gray_image = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)

        # Detect faces; minSize filters out tiny false positives.
        faces = face_cascade.detectMultiScale(
            gray_image, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
        )

        if len(faces) > 0:
            for (x, y, w, h) in faces:
                # Crop the face region from the RGB frame.
                face_roi = image_np[y:y + h, x:x + w]

                # NOTE(review): this preprocessing (64x64, ImageNet-style
                # means (104, 117, 123), swapRB) must match the ONNX
                # model's training recipe — many FER models instead expect
                # 1-channel 64x64 input scaled to [0, 1]; confirm against
                # the model's documentation.
                face_blob = cv2.dnn.blobFromImage(
                    face_roi, 1.0, (64, 64), (104, 117, 123), swapRB=True
                )

                # Forward pass; pick the highest-scoring class.
                emotion_net.setInput(face_blob)
                predictions = emotion_net.forward()
                emotion_idx = int(np.argmax(predictions))
                emotion = emotion_labels[emotion_idx]

                # Annotate in place: green box around the face, red label.
                cv2.rectangle(image_np, (x, y), (x + w, y + h), (0, 255, 0), 2)
                cv2.putText(
                    image_np,
                    emotion,
                    (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.9,
                    (255, 0, 0),
                    2,
                )

            # Show the annotated image once every face is labeled.
            st.image(image_np, caption="Processed Image", use_column_width=True)
        else:
            st.warning("No faces detected in the image.")