# wellbeing_GenAI / app.py
# (Removed Hugging Face file-viewer residue that was pasted into the source:
#  author avatar caption, "Update app.py", commit d02fed4, raw/history/blame
#  links, and the file-size line — none of it is Python code.)
import streamlit as st
from transformers import AutoTokenizer
from transformers import pipeline
#from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM
from datasets import load_dataset
# Direct URL of the decorative background image (replace to change the theme).
flower_image_url = "https://i.postimg.cc/hG2FG85D/2.png"
# Inject custom CSS for the background with a centered and blurred image.
# A fixed-position .blurred-background div (added just below) is pushed to
# z-index -1, while Streamlit's app container is raised to z-index 1 so all
# widgets render on top of the blurred image.
st.markdown(
f"""
<style>
/* Container for background */
html, body {{
margin: 0;
padding: 0;
overflow: hidden;
}}
[data-testid="stAppViewContainer"] {{
position: relative;
z-index: 1; /* Ensure UI elements are above the background */
}}
/* Blurred background image */
.blurred-background {{
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
z-index: -1; /* Send background image behind all UI elements */
background-image: url("{flower_image_url}");
background-size: cover;
background-position: center;
filter: blur(10px); /* Adjust blur ratio here */
opacity: 0.8; /* Optional: Add slight transparency for a subtle effect */
}}
</style>
""",
unsafe_allow_html=True
)
# Add the div the CSS above styles; it carries the blurred image itself.
st.markdown('<div class="blurred-background"></div>', unsafe_allow_html=True)
# Counseling Q&A dataset used for the optional example display below.
@st.cache_resource
def load_counseling_dataset():
    """Download (once) and cache the mental-health counseling conversations dataset."""
    counseling_data = load_dataset("Amod/mental_health_counseling_conversations")
    return counseling_data

dataset = load_counseling_dataset()
# Chat model backing the counselor responses.
@st.cache_resource
def load_text_generation_model():
    """Build (once) and cache the Hugging Face text-generation pipeline."""
    generator = pipeline("text-generation", model="meta-llama/Llama-2-7b-chat-hf")
    return generator

text_generator = load_text_generation_model()
# Page title and introductory blurb shown at the top of the app.
st.title("Mental Health Counseling Chat")
st.markdown("""
Welcome to the Mental Health Counseling Chat application.
This platform is designed to provide supportive, positive, and encouraging responses based on mental health counseling expertise.
""")
# Optional: let the user peek at a few real Q&A pairs from the dataset.
if st.checkbox("Show Example Questions and Answers from Dataset"):
    # Fixed seed keeps the same three samples across reruns of the script.
    examples = dataset["train"].shuffle(seed=42).select(range(3))
    for row in examples:
        st.markdown(f"**Question:** {row['context']}")
        st.markdown(f"**Answer:** {row['response']}")
        st.markdown("---")
# User input for mental health concerns.
user_input = st.text_area("Your question or concern:", placeholder="Type here...")

if st.button("Get Supportive Response"):
    if user_input.strip():
        # Frame the question as a two-speaker dialogue so the model answers
        # in the counselor role.
        prompt = f"User: {user_input}\nCounselor:"
        # Use max_new_tokens instead of max_length: max_length counts the
        # prompt tokens toward the limit, so a long question could leave
        # little or no budget for the reply. max_new_tokens bounds only the
        # generated continuation.
        response = text_generator(prompt, max_new_tokens=200, num_return_sequences=1)
        # The pipeline echoes the prompt; keep only the text after the last
        # "Counselor:" marker.
        counselor_reply = response[0]["generated_text"].split("Counselor:")[-1].strip()
        st.subheader("Counselor's Response:")
        st.write(counselor_reply)
    else:
        st.error("Please enter a question or concern to receive a response.")
# Sidebar: static links to external mental-health resources plus a disclaimer.
st.sidebar.header("Additional Mental Health Resources")
st.sidebar.markdown("""
- [Mental Health Foundation](https://www.mentalhealth.org)
- [Mind](https://www.mind.org.uk)
- [National Suicide Prevention Lifeline](https://suicidepreventionlifeline.org)
""")
st.sidebar.info("This application is not a replacement for professional counseling. If you are in crisis, please seek professional help immediately.")