import onnxruntime as ort
import numpy as np
import cv2
from PIL import Image
import streamlit as st

# Class labels assumed to follow the FER-2013 convention, which matches the
# 48x48 grayscale input this model expects — TODO confirm against the
# model's actual training label order.
EMOTION_LABELS = [
    "Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral",
]


@st.cache_resource
def load_model():
    """Create the ONNX inference session once and reuse it across reruns.

    Without caching, Streamlit re-executes the whole script on every UI
    interaction and would reload the model each time.
    """
    return ort.InferenceSession("onnx_model.onnx")


# Module-level session kept for backward compatibility with any code that
# references `onnx_model` directly.
onnx_model = load_model()


def preprocess_image(image):
    """Preprocess a PIL image to match the model's input requirements.

    Returns a float32 array of shape (1, 48, 48, 1) with values in [0, 1].
    """
    # Force 3-channel RGB first: PIL may hand back RGBA, palette ("P"), or
    # grayscale ("L") images, and COLOR_RGB2GRAY requires exactly 3 channels.
    rgb = np.array(image.convert("RGB"))
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)        # Convert to grayscale
    resized = cv2.resize(gray, (48, 48))                # Resize to 48x48
    batched = np.expand_dims(resized, axis=0)           # Add batch dimension
    batched = np.expand_dims(batched, axis=3)           # Add channel dimension
    return batched.astype(np.float32) / 255.0           # Normalize to [0, 1]


def predict_emotion_onnx(onnx_model, image_input):
    """Run the ONNX session on a preprocessed input.

    Returns the raw list of output arrays from ``InferenceSession.run``
    (first element is the class-score array for the batch).
    """
    input_name = onnx_model.get_inputs()[0].name
    output_name = onnx_model.get_outputs()[0].name
    return onnx_model.run([output_name], {input_name: image_input})


# ---------------------------------------------------------------------------
# Streamlit interface
# ---------------------------------------------------------------------------
st.title("Emotion Recognition with ONNX")

uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_column_width=True)

    # Preprocess the image
    image_input = preprocess_image(image)

    # Predict the emotion
    emotion_prediction = predict_emotion_onnx(onnx_model, image_input)

    # The model returns raw class scores; map the argmax to a human-readable
    # label instead of dumping the score array at the user.
    scores = np.asarray(emotion_prediction[0]).reshape(-1)
    best = int(np.argmax(scores))
    if best < len(EMOTION_LABELS):
        st.write(f"Predicted Emotion: {EMOTION_LABELS[best]}")
    else:
        # Model emitted more classes than we have labels for — surface the
        # index rather than crashing, and flag the mismatch.
        st.write(f"Predicted Emotion: class #{best} (label list out of date)")