import cv2
import numpy as np
import onnxruntime as ort
import streamlit as st
from PIL import Image
# Load the ONNX model
def load_model(model_path='onnx_model.onnx'):
    model = ort.InferenceSession(model_path)
    return model
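# Note (assumption): preprocess_image below assumes the model expects a single
# 48x48 grayscale image in NCHW layout, i.e. an input of shape (1, 1, 48, 48).
# This can be confirmed at runtime from onnxruntime's session metadata, e.g.:
#   session = load_model()
#   print(session.get_inputs()[0].shape)  # e.g. [1, 1, 48, 48]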
# Preprocess the image to match the model's expected input
def preprocess_image(image):
    image = image.convert('RGB')  # Ensure 3 channels (uploaded PNGs may be RGBA or grayscale)
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)  # Convert to BGR (OpenCV format)
    # Resize to the model's expected input size of 48x48
    image_resized = cv2.resize(image, (48, 48))
    # Convert to grayscale, since the model expects a single channel
    image_gray = cv2.cvtColor(image_resized, cv2.COLOR_BGR2GRAY)
    # If the model expects 3 channels instead, keep the image in RGB:
    # image_resized = cv2.cvtColor(image_resized, cv2.COLOR_BGR2RGB)
    image_input = np.expand_dims(image_gray, axis=0)    # Add batch dimension
    image_input = np.expand_dims(image_input, axis=0)   # Add channel dimension (grayscale)
    image_input = image_input.astype(np.float32) / 255.0  # Scale pixel values to [0, 1]
    return image_input
# Map the raw model output to an emotion label
def get_emotion_from_output(output):
    emotion_labels = ['Anger', 'Disgust', 'Fear', 'Happiness', 'Sadness', 'Surprise', 'Neutral']
    # Index of the highest value in the output (i.e., the predicted emotion)
    emotion_index = np.argmax(output)
    confidence = output[0][emotion_index]    # Confidence of the prediction
    emotion = emotion_labels[emotion_index]  # Corresponding emotion label
    return emotion, confidence
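# Note (assumption): if the model's final layer does not already apply softmax, the
# values above are raw logits rather than probabilities. A softmax can be applied
# before reading the confidence, e.g. with `output` of shape (1, 7):
#   probs = np.exp(output) / np.sum(np.exp(output), axis=1, keepdims=True)
#   confidence = probs[0][emotion_index]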
# Predict emotion using the ONNX model
def predict_emotion_onnx(model, image_input):
    # Get the input and output names for the ONNX model
    input_name = model.get_inputs()[0].name
    output_name = model.get_outputs()[0].name
    # Run the model
    prediction = model.run([output_name], {input_name: image_input})
    return prediction[0]
# Streamlit UI
st.title("Emotion Detection")

# Upload an image
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    # Open and display the uploaded image
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_column_width=True)

    # Load the model
    onnx_model = load_model()

    # Preprocess the image
    image_input = preprocess_image(image)

    # Get the emotion prediction
    emotion_prediction = predict_emotion_onnx(onnx_model, image_input)

    # Get the emotion label and confidence
    emotion_label, confidence = get_emotion_from_output(emotion_prediction)

    # Display the predicted emotion and confidence
    st.write(f"Predicted Emotion: {emotion_label}")
    st.write(f"Confidence: {confidence:.2f}")