import os

import cv2
import numpy as np
import onnxruntime as ort
import streamlit as st
from PIL import Image


# Preprocess image to match model input requirements
def preprocess_image(image):
    """Convert a PIL image to a normalized 1x1x48x48 float32 tensor."""
    image = cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2GRAY)  # Force RGB first (handles RGBA PNGs), then grayscale
    image_resized = cv2.resize(image, (48, 48))  # Resize image to 48x48
    image_input = np.expand_dims(image_resized, axis=0)  # Add channel dimension (grayscale)
    image_input = np.expand_dims(image_input, axis=0)  # Add batch dimension -> (1, 1, 48, 48)
    image_input = image_input.astype(np.float32) / 255.0  # Normalize pixel values to [0, 1]
    return image_input


# Check if a smile is present in the facial landmarks
def check_for_smile(face_landmarks):
    """Simple rule to check for a smile based on mouth landmarks."""
    mouth = face_landmarks['bottom_lip'] + face_landmarks['top_lip']
    mouth_distance = np.linalg.norm(np.array(mouth[0]) - np.array(mouth[-1]))
    return mouth_distance > 30  # Threshold in pixels; may need adjustment


# Map predicted emotion index to a label
def display_emotion(emotion):
    """Map the predicted emotion index to a human-readable label."""
    emotion_map = {
        0: "Anger",
        1: "Disgust",
        2: "Fear",
        3: "Happiness",
        4: "Sadness",
        5: "Surprise",
        6: "Neutral",
    }
    return emotion_map.get(emotion, "Unknown")


# Display emotion with smile-based post-processing
def display_emotion_with_smile(emotion, face_landmarks=None):
    if emotion == 6 and face_landmarks:  # 6 is "Neutral" in emotion_map
        if check_for_smile(face_landmarks):
            return "Happiness"  # Override "Neutral" when a smile is detected
    return display_emotion(emotion)  # Otherwise return the predicted emotion


# Predict emotion, optionally refined by smile detection
def predict_emotion_with_smile(image_input, face_landmarks=None):
    """Run inference and predict the emotion, considering smile detection."""
    emotion = predict_emotion(image_input)  # Plain emotion prediction
    return display_emotion_with_smile(emotion, face_landmarks)


# Load the ONNX model
def load_model():
    model_path = 'onnx_model.onnx'  # Make sure this is the correct path
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"Model file {model_path} not found!")
    return ort.InferenceSession(model_path)


# Predict emotion using the ONNX model
def predict_emotion(image_input):
    emotion_model = load_model()
    input_name = emotion_model.get_inputs()[0].name
    output_name = emotion_model.get_outputs()[0].name
    prediction = emotion_model.run([output_name], {input_name: image_input})
    return int(np.argmax(prediction[0]))


# Streamlit app code
st.title("Emotion Recognition App")

# Upload an image
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

# If an image was uploaded
if uploaded_file is not None:
    # Open and display the uploaded image
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_column_width=True)

    # Preprocess the image
    image_input = preprocess_image(image)

    # Predict the emotion (no landmarks are passed here, so the smile check is skipped)
    emotion_label = predict_emotion_with_smile(image_input)

    # Display the predicted emotion
    st.write(f"Detected Emotion: {emotion_label}")
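
# --- Optional sketch: supplying landmarks for the smile check ---
# The app above never passes face_landmarks, so check_for_smile() is never
# exercised. One way to obtain landmarks in the format that function expects
# (a dict with 'top_lip' / 'bottom_lip' point lists) is the third-party
# face_recognition package. This is only a sketch and not part of the original
# app; the helper name get_face_landmarks and the face_recognition dependency
# are assumptions.
def get_face_landmarks(image):
    """Return the landmark dict for the first detected face, or None."""
    import face_recognition  # optional dependency; assumed to be installed

    rgb = np.array(image.convert("RGB"))
    landmarks_list = face_recognition.face_landmarks(rgb)  # one dict per detected face
    return landmarks_list[0] if landmarks_list else None

# Possible usage inside the app, replacing the plain prediction call above:
#     face_landmarks = get_face_landmarks(image)
#     emotion_label = predict_emotion_with_smile(image_input, face_landmarks)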