import streamlit as st
#from transformers import AutoTokenizer
#from llama_cpp import Llama  # not used below, so left commented out
from transformers import pipeline
#from peft import PeftModel, PeftConfig
#from transformers import AutoModelForCausalLM
from datasets import load_dataset

# Replace with the direct image URL
flower_image_url = "https://i.postimg.cc/hG2FG85D/2.png"

# Inject custom CSS for the background with a centered and blurred image
# (the style values below are illustrative)
st.markdown(
    f"""
    <style>
    .blurred-background {{
        position: fixed;
        top: 0;
        left: 0;
        width: 100%;
        height: 100%;
        background: url("{flower_image_url}") no-repeat center center;
        background-size: cover;
        filter: blur(8px);
        z-index: -1;
    }}
    </style>
    """,
    unsafe_allow_html=True
)

# Add the blurred background div
st.markdown('<div class="blurred-background"></div>', unsafe_allow_html=True)

# """"""""""""""""""""""""" Application Code Starts Here """""""""""""""""""""""""

# Load the text generation model pipeline
@st.cache_resource
def load_text_generation_model():
    try:
        return pipeline("text-generation", model="thrishala/mental_health_chatbot")
    except Exception as e:
        st.error(f"Error loading model: {e}")
        return None

text_generator = load_text_generation_model()

# Load the counseling dataset
@st.cache_resource
def load_counseling_dataset():
    return load_dataset("Amod/mental_health_counseling_conversations")

dataset = load_counseling_dataset()

# Streamlit App
st.title("Mental Health Counseling Chat")
st.markdown("""
Welcome to the **Mental Health Counseling Chat Application**.
This platform is designed to provide **supportive, positive, and encouraging responses**
based on mental health counseling expertise.
""")

# Check if the model loaded correctly
if text_generator is None:
    st.error("The text generation model could not be loaded. Please check your Hugging Face configuration.")
else:
    # Explore the dataset for additional context or resources (optional)
    if st.checkbox("Show Example Questions and Answers from Dataset"):
        sample = dataset["train"].shuffle(seed=42).select(range(3))  # Display 3 random samples
        for example in sample:
            # The dataset's columns are capitalized ("Context"/"Response");
            # fall back to lowercase keys just in case.
            st.markdown(f"**Question:** {example.get('Context', example.get('context', 'N/A'))}")
            st.markdown(f"**Answer:** {example.get('Response', example.get('response', 'N/A'))}")
            st.markdown("---")

    # User input for mental health concerns
    user_input = st.text_area("Your question or concern:", placeholder="Type your question here...")

    if st.button("Get Supportive Response"):
        if user_input.strip():
            try:
                # Generate a response using the text generation pipeline
                prompt = f"User: {user_input}\nCounselor:"
                # max_new_tokens limits only the generated continuation,
                # so a long user prompt does not eat into the output budget.
                response = text_generator(prompt, max_new_tokens=200, num_return_sequences=1)

                # Extract and display the counselor's reply
                counselor_reply = response[0]["generated_text"].split("Counselor:")[-1].strip()
                st.subheader("Counselor's Response:")
                st.write(counselor_reply)
            except Exception as e:
                st.error(f"An error occurred while generating the response: {e}")
        else:
            st.error("Please enter a question or concern to receive a response.")

# Sidebar resources
st.sidebar.header("Additional Mental Health Resources")
st.sidebar.markdown("""
- [Mental Health Foundation](https://www.mentalhealth.org)
- [Mind](https://www.mind.org.uk)
- [National Suicide Prevention Lifeline](https://suicidepreventionlifeline.org)
""")
st.sidebar.info("This application is not a replacement for professional counseling. If you are in crisis, please seek professional help immediately.")
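
# Usage sketch (assumes this script is saved as app.py -- the filename is not given in the source --
# and that the packages below are installed in the environment):
#   pip install streamlit transformers datasets torch
#   streamlit run app.py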