import gradio as gr
import torch
from groq import Groq
import os
import tempfile
from gtts import gTTS
import logging

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Set device (CPU only for Hugging Face Spaces free tier)
device = torch.device("cpu")
logger.info(f"Using device: {device}")
# Groq API client with API key from Hugging Face Secrets
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
if not GROQ_API_KEY:
    logger.error("GROQ_API_KEY environment variable not set")
    raise ValueError("GROQ_API_KEY environment variable not set")

try:
    client = Groq(api_key=GROQ_API_KEY)
    logger.info("Groq client initialized successfully")
except Exception as e:
    logger.error(f"Error initializing Groq client: {str(e)}")
    raise
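# Both model calls below send a single prompt and read one completion. A
# minimal retry wrapper is sketched here for illustration only; it is not
# wired into the functions that follow, and the retry count and backoff
# values are assumptions, not part of the original app.
import time

def groq_chat(prompt, retries=2, backoff=1.0):
    """Send a single-turn prompt to Groq, retrying transient failures."""
    for attempt in range(retries + 1):
        try:
            completion = client.chat.completions.create(
                model="llama3-70b-8192",
                messages=[{"role": "user", "content": prompt}],
                temperature=1,
                max_tokens=64,
                top_p=1,
                stream=False,
            )
            return completion.choices[0].message.content
        except Exception as e:
            logger.warning(f"Groq call failed (attempt {attempt + 1}): {e}")
            if attempt == retries:
                raise
            time.sleep(backoff * (attempt + 1))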
# Functions
def predict_text_emotion(text):
    prompt = (
        f"The user has entered the text '{text}'. Classify the user's emotion "
        "as happy, sad, anxious, or angry. Respond with only one word."
    )
    try:
        completion = client.chat.completions.create(
            model="llama3-70b-8192",
            messages=[{"role": "user", "content": prompt}],
            temperature=1,
            max_tokens=64,
            top_p=1,
            stream=False,
        )
        return completion.choices[0].message.content.strip().lower()
    except Exception as e:
        logger.error(f"Error with Groq API (text emotion): {str(e)}")
        return "neutral"
def generate_response(user_input, emotion):
    prompt = (
        f"The user is feeling {emotion}. They said: '{user_input}'. "
        "Respond in a friendly, caring manner so the user feels loved and supported."
    )
    try:
        completion = client.chat.completions.create(
            model="llama3-70b-8192",
            messages=[{"role": "user", "content": prompt}],
            temperature=1,
            max_tokens=64,
            top_p=1,
            stream=False,
        )
        return completion.choices[0].message.content
    except Exception as e:
        logger.error(f"Error with Groq API (response generation): {str(e)}")
        return "I'm here for you, but something went wrong. How can I help?"
def text_to_speech(text):
    try:
        tts = gTTS(text=text, lang='en', slow=False)
        # delete=False keeps the file on disk after the handle closes so
        # Gradio can serve it back to the browser.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_audio:
            tts.save(temp_audio.name)
        return temp_audio.name
    except Exception as e:
        logger.error(f"Error generating speech: {str(e)}")
        return None
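# Because delete=False is used, each reply leaves an .mp3 behind in the
# system temp directory. A cleanup helper is sketched below as a possible
# mitigation; it is hypothetical (not called anywhere in this app), and the
# one-hour age threshold is an assumption.
import glob

def prune_old_audio(max_age_seconds=3600):
    """Remove gTTS output files older than max_age_seconds from temp storage."""
    cutoff = time.time() - max_age_seconds
    for path in glob.glob(os.path.join(tempfile.gettempdir(), "*.mp3")):
        try:
            if os.path.getmtime(path) < cutoff:
                os.remove(path)
        except OSError:
            pass  # file may already be gone or still in use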
# Chat function for Gradio: text input only, with spoken (TTS) output
def chat_function(input_type, text_input, audio_input, chat_history):
    if input_type == "text" and text_input:
        user_input = text_input
    else:
        return chat_history, "Please provide text input. Voice input is not supported.", gr.update(value=text_input), None
    emotion = predict_text_emotion(user_input)
    response = generate_response(user_input, emotion)
    chat_history = chat_history or []
    chat_history.append({"role": "user", "content": user_input})
    chat_history.append({"role": "assistant", "content": response})
    audio_output = text_to_speech(response)
    return chat_history, f"Detected Emotion: {emotion}", "", audio_output
# Custom CSS for styling
css = """
.chatbot .message-user {
    background-color: #e3f2fd;
    border-radius: 10px;
    padding: 10px;
    margin: 5px 0;
}
.chatbot .message-assistant {
    background-color: #c8e6c9;
    border-radius: 10px;
    padding: 10px;
    margin: 5px 0;
}
.input-container {
    padding: 10px;
    background-color: #f9f9f9;
    border-radius: 10px;
    margin-top: 10px;
}
"""
# Build the Gradio interface
try:
    with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
        gr.Markdown(
            """
            # Multimodal Mental Health AI Agent
            Chat with our empathetic AI designed to support you by understanding your emotions through text.
            """
        )
        with gr.Row():
            with gr.Column(scale=1):
                emotion_display = gr.Textbox(label="Emotion", interactive=False, placeholder="Detected emotion will appear here")
            with gr.Column(scale=3):
                chatbot = gr.Chatbot(label="Conversation History", height=500, type="messages", elem_classes="chatbot")
        with gr.Row(elem_classes="input-container"):
            input_type = gr.Radio(["text", "voice"], label="Input Method", value="text")
            text_input = gr.Textbox(label="Type Your Message", placeholder="How are you feeling today?", visible=True)
            audio_input = gr.Audio(sources=["microphone"], type="filepath", label="Record Your Message", visible=False)
        submit_btn = gr.Button("Send", variant="primary")
        clear_btn = gr.Button("Clear Chat", variant="secondary")
        audio_output = gr.Audio(label="Assistant Response", type="filepath", interactive=False, autoplay=True)

        # Dynamic visibility based on input type
        def update_visibility(input_type):
            return gr.update(visible=input_type == "text"), gr.update(visible=input_type == "voice")

        input_type.change(fn=update_visibility, inputs=input_type, outputs=[text_input, audio_input])

        # Submit action with voice output
        submit_btn.click(
            fn=chat_function,
            inputs=[input_type, text_input, audio_input, chatbot],
            outputs=[chatbot, emotion_display, text_input, audio_output],
        )

        # Clear chat and audio
        clear_btn.click(
            lambda: ([], "", "", None),
            inputs=None,
            outputs=[chatbot, emotion_display, text_input, audio_output],
        )
except Exception as e:
    logger.error(f"Error initializing Gradio interface: {str(e)}")
    raise
# Launch the app (left commented out for Hugging Face Spaces; uncomment to run locally)
# if __name__ == "__main__":
#     app.launch(server_name="0.0.0.0", server_port=7860)
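# Local test sketch (assumptions: the launch block above is uncommented and
# a Groq API key is available in the shell):
#   export GROQ_API_KEY=your_key_here
#   python app.py
# The UI is then served at http://localhost:7860.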