Update app.py
app.py CHANGED
@@ -1,31 +1,23 @@
 import streamlit as st
 from transformers import pipeline
 import torch
+import time
 
-# Function to load the model with error handling and
+# Function to load the model with error handling and retries
 @st.cache_resource
 def load_model():
     try:
-        # Attempt to load a model
         st.write("Attempting to load the emotion analysis model...")
-
+        # Using a smaller model for quick load times
+        emotion_analyzer = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta", device=0 if torch.cuda.is_available() else -1)
         st.write("Model loaded successfully!")
         return emotion_analyzer
     except Exception as e:
         st.write(f"Error loading the model: {e}")
         return None
 
-# Initialize the model (with caching to prevent reloads)
-emotion_analyzer = load_model()
-
-# Check if the model is loaded
-if emotion_analyzer is None:
-    st.warning("The emotion analysis model could not be loaded. Please try again.")
-else:
-    st.success("Emotion model is ready for predictions!")
-
 # Function to predict emotion for a single response
-def predict_emotion_single(response):
+def predict_emotion_single(response, emotion_analyzer):
     if emotion_analyzer is None:
         st.error("Model not loaded. Please try reloading the app.")
         return {"Error": "Emotion analyzer model not initialized. Please check model loading."}
@@ -54,11 +46,27 @@ questions = [
 # Initialize a dictionary to store responses
 responses = {}
 
-#
+# Initialize the emotion analysis model with retries
+emotion_analyzer = None
+max_retries = 3
+retry_delay = 5  # seconds
+
+# Try loading the model with retries
+for attempt in range(max_retries):
+    emotion_analyzer = load_model()
+    if emotion_analyzer:
+        break
+    if attempt < max_retries - 1:
+        st.warning(f"Retrying model load... Attempt {attempt + 2}/{max_retries}")
+        time.sleep(retry_delay)
+    else:
+        st.error("Model failed to load after multiple attempts. Please try again later.")
+
+# Function to handle responses and emotion analysis
 for i, question in enumerate(questions, start=1):
     user_response = st.text_input(f"Question {i}: {question}")
     if user_response:
-        analysis = predict_emotion_single(user_response)
+        analysis = predict_emotion_single(user_response, emotion_analyzer)
         responses[question] = (user_response, analysis)
         st.write(f"**Your Response**: {user_response}")
         st.write(f"**Emotion Analysis**: {analysis}")
@@ -71,9 +79,4 @@ if st.button("Clear Responses"):
 if st.button("Submit Responses"):
     if responses:
         st.write("-- Emotion Analysis Results ---")
-        for i, (question, (response
-            st.write(f"\n**Question {i}:** {question}")
-            st.write(f"Your Response: {response}")
-            st.write(f"Emotion Analysis: {analysis}")
-    else:
-        st.write("Please answer all the questions before submitting.")
+        for i, (question, (response
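
Note that the results loop on the final removed and final added lines is cut off in this diff view, so the commit's actual wording of that line is not recoverable here. A minimal sketch of how the block could read, assuming (as the surrounding code suggests) that responses maps each question to a (response, analysis) tuple; the restored loop header and body are an assumption, not what the commit ships:

# Hypothetical reconstruction of the truncated block; in app.py it would sit
# inside the "if responses:" branch of the submit handler.
# Assumes responses maps question -> (user_response, analysis), as populated above.
for i, (question, (response, analysis)) in enumerate(responses.items(), start=1):
    st.write(f"\n**Question {i}:** {question}")
    st.write(f"Your Response: {response}")
    st.write(f"Emotion Analysis: {analysis}")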
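
For context, a minimal standalone sketch of the classifier that load_model() wraps, runnable outside Streamlit. One caveat: the Hub checkpoint I am aware of is j-hartmann/emotion-english-distilroberta-base, while the diff uses the ID without the -base suffix, so the load failures the retries guard against may simply be a bad model ID; the model name below is my assumption, not the commit's.

# Standalone sketch (not part of the commit): load an emotion classifier and
# score one string. The "-base" model ID is an assumption; adjust if the Space
# intends a different checkpoint.
from transformers import pipeline
import torch

emotion_analyzer = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    device=0 if torch.cuda.is_available() else -1,
)
print(emotion_analyzer("I am thrilled with how this turned out!"))
# Result shape: [{"label": "joy", "score": ...}]

Separately, @st.cache_resource caches whatever load_model() returns, including the None returned after a swallowed exception, so the retry loop will likely keep receiving the cached None rather than re-running the load; letting the failure raise instead of returning None would make the retries effective.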