Update app.py
app.py CHANGED
@@ -1,89 +1,43 @@
-import cv2
-import numpy as np
 import onnxruntime as ort
+import numpy as np
+import cv2
 from PIL import Image
 import streamlit as st
 
-# Step 1: Preprocess Image for ONNX Model
-def preprocess_for_onnx(image):
-    """Preprocess image for the ONNX emotion model."""
-    image_resized = cv2.resize(image, (224, 224))  # Resize to match ONNX input
-    image_normalized = image_resized.astype(np.float32) / 255.0  # Normalize image
-    image_input = np.transpose(image_normalized, (2, 0, 1))  # Change dimension to (C, H, W)
-    image_input = np.expand_dims(image_input, axis=0)  # Add batch dimension
-    return image_input
+# Load ONNX model
+onnx_model = ort.InferenceSession("onnx_model.onnx")
 
-# Step 2: Preprocess Image for AffectNet Model
-def preprocess_for_affectnet(image):
-    """Preprocess image for the AffectNet model."""
-
-    image_resized = cv2.resize(image, (48, 48))  # Resize
+# Preprocess image function
+def preprocess_image(image):
+    """Preprocess image to match model input requirements"""
+    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)  # Convert to grayscale
+    image_resized = cv2.resize(image, (48, 48))  # Resize to 48x48
     image_input = np.expand_dims(image_resized, axis=0)  # Add batch dimension
-    image_input = np.expand_dims(image_input, axis=-1)  # Add channel dimension
-    image_input = image_input.astype(np.float32) / 255.0  # Normalize
+    image_input = np.expand_dims(image_input, axis=3)  # Add channel dimension
+    image_input = image_input.astype(np.float32) / 255.0  # Normalize
     return image_input
 
-# Step 3: Load ONNX Model
-def load_onnx_model(model_path):
-    """Load the ONNX emotion detection model."""
-    return ort.InferenceSession(model_path)
-
-# Step 4: Predict Emotion with ONNX Model
+# Predict emotion using the ONNX model
 def predict_emotion_onnx(onnx_model, image_input):
-    """Predict emotion using the ONNX model."""
     input_name = onnx_model.get_inputs()[0].name
     output_name = onnx_model.get_outputs()[0].name
     prediction = onnx_model.run([output_name], {input_name: image_input})
     return prediction
 
-# Step 5: Predict Emotion with AffectNet Model
-def predict_emotion_affectnet(image_input):
-    """Predict emotion using AffectNet (Placeholder function)."""
-    # Placeholder for the actual AffectNet prediction
-    # This would involve loading a model trained on AffectNet and making predictions.
-    emotion = "happy"  # Example: This would be the predicted emotion
-    return emotion
+# Streamlit interface
+st.title("Emotion Recognition with ONNX")
 
-
-def combine_predictions(affectnet_emotion, onnx_emotion):
-    """Combine predictions from AffectNet and ONNX models."""
-    if affectnet_emotion == onnx_emotion:
-        return affectnet_emotion  # If both models agree, return the same emotion
-    else:
-        return "Uncertain"  # If they disagree, return "Uncertain"
+uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
 
-# Step 7: Main Application Logic
-# Load the ONNX model from the application directory
-onnx_model_path = 'onnx_model.onnx'  # Path to your ONNX model
-onnx_model = load_onnx_model(onnx_model_path)
-
-# Step 8: Streamlit App Logic (User Interaction)
-st.title('Emotion Detection App')
-
-# Upload image via Streamlit interface
-uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
-
-# If an image is uploaded, proceed with emotion prediction
 if uploaded_file is not None:
-    # Open and display the uploaded image
     image = Image.open(uploaded_file)
     st.image(image, caption="Uploaded Image", use_column_width=True)
 
-    # Preprocess image for AffectNet model
-    image_input_affectnet = preprocess_for_affectnet(np.array(image))
-
-    # Preprocess image for ONNX model (RGB)
-    image_input_onnx = preprocess_for_onnx(np.array(image))
-
-    # Predict emotion using AffectNet model
-    affectnet_emotion = predict_emotion_affectnet(image_input_affectnet)
-
-    # Predict emotion using ONNX model
-    onnx_prediction = predict_emotion_onnx(onnx_model, image_input_onnx)
-    onnx_emotion = onnx_prediction[0][0]  # Assuming the model outputs a single emotion label
+    # Preprocess the image
+    image_input = preprocess_image(image)
 
-    # Step 6: Combine Predictions
-    final_emotion = combine_predictions(affectnet_emotion, onnx_emotion)
+    # Predict the emotion
+    emotion_prediction = predict_emotion_onnx(onnx_model, image_input)
 
-    # Display the final emotion
-    st.write(f"Predicted Emotion: {final_emotion}")
+    # Display the prediction
+    st.write(f"Predicted Emotion: {emotion_prediction[0]}")
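The updated script still has one rough edge: `onnx_model.run` returns a list of output arrays, so `emotion_prediction[0]` is typically a NumPy array of class scores rather than an emotion name, and that raw array is what gets written to the page. A minimal decoding sketch follows; the seven FER-style labels and their order are assumptions that must be checked against the actual model:

import numpy as np

# Hypothetical label set -- replace with the class order the model was trained on
EMOTION_LABELS = ["angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"]

def decode_prediction(prediction):
    """Turn the raw run() output (a list with one [1, num_classes] array) into a label."""
    scores = np.asarray(prediction[0]).squeeze()  # -> shape (num_classes,)
    probs = np.exp(scores - scores.max())         # softmax, in case the model emits logits
    probs /= probs.sum()
    idx = int(np.argmax(probs))
    return EMOTION_LABELS[idx], float(probs[idx])

# The display line in the app would then become:
# label, confidence = decode_prediction(emotion_prediction)
# st.write(f"Predicted Emotion: {label} ({confidence:.1%})")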
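It is also worth confirming the input layout: preprocess_image produces a (1, 48, 48, 1) tensor (NHWC), which is only correct if the model was exported channels-last. onnxruntime can report what the graph actually expects; a quick check, assuming the onnx_model.onnx file from the script (the reported shape may contain symbolic dimension names rather than integers):

import onnxruntime as ort

session = ort.InferenceSession("onnx_model.onnx")
inp = session.get_inputs()[0]
print(inp.name, inp.shape, inp.type)  # e.g. input [1, 48, 48, 1] tensor(float)

# If the model instead reports an NCHW shape such as [1, 1, 48, 48], the channel
# axis in preprocess_image should be added with np.expand_dims(image_input, axis=1).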