Update app.py
app.py
CHANGED
@@ -1,227 +1,220 @@
import gradio as gr
import speech_recognition as sr
import cv2  # needed by capture_webcam_frame() below
from groq import Groq
import os
import time
import base64
import tempfile  # used by text_to_speech() to hand Gradio a playable file path
from io import BytesIO
from gtts import gTTS

# Groq API client with API key (stored as an environment variable for security)
GROQ_API_KEY = os.getenv("GROQ_API_KEY", "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")  # Replace with your key or use an env variable
try:
    client = Groq(api_key=GROQ_API_KEY)
    print("Groq client initialized successfully")
except Exception as e:
    print(f"Error initializing Groq client: {str(e)}")
    raise

# Functions
def predict_text_emotion(text):
    prompt = f"The user has entered the text '{text}'. Classify the user's emotion as happy, sad, anxious, or angry. Respond in only one word."
    try:
        completion = client.chat.completions.create(
            model="llama-3.2-90b-vision-preview",
            messages=[{"role": "user", "content": prompt}],
            temperature=1,
            max_completion_tokens=64,
            top_p=1,
            stream=False,
            stop=None,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"Error with Groq API: {str(e)}"

def transcribe_audio(audio_path):
    r = sr.Recognizer()
    with sr.AudioFile(audio_path) as source:
        audio_text = r.listen(source)
    try:
        text = r.recognize_google(audio_text)
        return text
    except sr.UnknownValueError:
        return "I didn’t catch that—could you try again?"
    except sr.RequestError:
        return "Speech recognition unavailable—try typing instead."

def capture_webcam_frame():
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        return None
    start_time = time.time()
    while time.time() - start_time < 2:
        ret, frame = cap.read()
        if ret:
            _, buffer = cv2.imencode('.jpg', frame)
            img_base64 = base64.b64encode(buffer).decode('utf-8')
            img_url = f"data:image/jpeg;base64,{img_base64}"
            cap.release()
            return img_url
    cap.release()
    return None

def detect_facial_emotion():
    img_url = capture_webcam_frame()
    if not img_url:
        return "neutral"
    try:
        completion = client.chat.completions.create(
            model="llama-3.2-90b-vision-preview",
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "Classify the user's facial emotion as happy, sad, anxious, or angry. Respond in one word only."},
                        {"type": "image_url", "image_url": {"url": img_url}}
                    ]
                }
            ],
            temperature=1,
            max_completion_tokens=20,
            top_p=1,
            stream=False,
            stop=None,
        )
        emotion = completion.choices[0].message.content.strip().lower()
        if emotion not in ["happy", "sad", "anxious", "angry"]:
            return "neutral"
        return emotion
    except Exception as e:
        print(f"Error with Groq facial detection: {str(e)}")
        return "neutral"

def generate_response(user_input, emotion):
    prompt = f"The user is feeling {emotion}. They said: '{user_input}'. Respond in a friendly, caring manner so the user feels loved."
    try:
        completion = client.chat.completions.create(
            model="llama-3.2-90b-vision-preview",
            messages=[{"role": "user", "content": prompt}],
            temperature=1,
            max_completion_tokens=64,
            top_p=1,
            stream=False,
            stop=None,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"Error with Groq API: {str(e)}"

def text_to_speech(text):
    try:
        tts = gTTS(text=text, lang='en', slow=False)
        # Write the speech to a temporary mp3 file so the filepath-typed
        # gr.Audio output component can play it
        audio_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
        tts.write_to_fp(audio_file)
        audio_file.close()
        return audio_file.name
    except Exception as e:
        print(f"Error generating speech: {str(e)}")
        return None

# Chat function for Gradio with voice output
def chat_function(input_type, text_input, audio_input, chat_history):
    if input_type == "text" and text_input:
        user_input = text_input
    elif input_type == "voice" and audio_input:
        user_input = transcribe_audio(audio_input)
    else:
        return chat_history, "Please provide text or voice input.", gr.update(value=text_input), None

    text_emotion = predict_text_emotion(user_input)
    if not chat_history:
        gr.Info("Please look at the camera for emotion detection...")
        facial_emotion = detect_facial_emotion()
    else:
        facial_emotion = "neutral"

    emotions = [e for e in [text_emotion, facial_emotion] if e and e != "neutral"]
    combined_emotion = emotions[0] if emotions else "neutral"

    response = generate_response(user_input, combined_emotion)
    chat_history.append({"role": "user", "content": user_input})
    chat_history.append({"role": "assistant", "content": response})

    audio_output = text_to_speech(response)
    return chat_history, f"Detected Emotion: {combined_emotion}", "", audio_output

# Custom CSS for better styling (Gradio's css= parameter expects plain CSS, without <style> tags)
css = """
.chatbot .message-user {
    background-color: #e3f2fd;
    border-radius: 10px;
    padding: 10px;
    margin: 5px 0;
}
.chatbot .message-assistant {
    background-color: #c8e6c9;
    border-radius: 10px;
    padding: 10px;
    margin: 5px 0;
}
.input-container {
    padding: 10px;
    background-color: #f9f9f9;
    border-radius: 10px;
    margin-top: 10px;
}
"""

# Build the Gradio interface
with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
    gr.Markdown(
        """
        # Multimodal Mental Health AI Agent
        Chat with our empathetic AI designed to support you by understanding your emotions through text and facial expressions.
        """
    )

    with gr.Row():
        with gr.Column(scale=1):
            emotion_display = gr.Textbox(label="Emotion", interactive=False, placeholder="Detected emotion will appear here")

        with gr.Column(scale=3):
            chatbot = gr.Chatbot(label="Conversation History", height=500, type="messages", elem_classes="chatbot")

    with gr.Row(elem_classes="input-container"):
        input_type = gr.Radio(["text", "voice"], label="Input Method", value="text")
        text_input = gr.Textbox(label="Type Your Message", placeholder="How are you feeling today?", visible=True)
        audio_input = gr.Audio(type="filepath", label="Record Your Message", visible=False)
        submit_btn = gr.Button("Send", variant="primary")
        clear_btn = gr.Button("Clear Chat", variant="secondary")
        audio_output = gr.Audio(label="Assistant Response", type="filepath", interactive=False, autoplay=True)

    # Dynamic visibility based on input type
    def update_visibility(input_type):
        return gr.update(visible=input_type == "text"), gr.update(visible=input_type == "voice")

    input_type.change(fn=update_visibility, inputs=input_type, outputs=[text_input, audio_input])

    # Submit action with voice output
    submit_btn.click(
        fn=chat_function,
        inputs=[input_type, text_input, audio_input, chatbot],
        outputs=[chatbot, emotion_display, text_input, audio_output]
    )

    # Clear chat and audio
    clear_btn.click(
        lambda: ([], "", "", None),
        inputs=None,
        outputs=[chatbot, emotion_display, text_input, audio_output]
    )

# Launch the app (for local testing; deployment will handle this differently)
if __name__ == "__main__":
    app.launch(server_name="0.0.0.0", server_port=7860)