# wellbeing_GenAI / app.py
import streamlit as st
from llama_cpp import Llama
from datasets import load_dataset
# Direct URL of the background image
flower_image_url = "https://i.postimg.cc/hG2FG85D/2.png"
# Inject custom CSS for the background with a centered and blurred image
st.markdown(
f"""
<style>
/* Container for background */
html, body {{
margin: 0;
padding: 0;
overflow: hidden;
}}
[data-testid="stAppViewContainer"] {{
position: relative;
z-index: 1; /* Ensure UI elements are above the background */
}}
/* Blurred background image */
.blurred-background {{
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
z-index: -1; /* Send background image behind all UI elements */
background-image: url("{flower_image_url}");
background-size: cover;
background-position: center;
        filter: blur(10px); /* Adjust the blur radius here */
opacity: 0.8; /* Optional: Add slight transparency for a subtle effect */
}}
</style>
""",
unsafe_allow_html=True
)
# Add the blurred background div
st.markdown('<div class="blurred-background"></div>', unsafe_allow_html=True)
#""""""""""""""""""""""""" Application Code Starts here """""""""""""""""""""""""""""""""""""""""""""
# Path to the GGUF model file. NOTE: `Llama(model_path=...)` expects a *local*
# file path; the Hub-style path below only works if that file actually exists
# on disk at this relative location.
MODEL_PATH = "QuantFactory/Mental-Health-FineTuned-Mistral-7B-Instruct-v0.2-GGUF/model.gguf"
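# A minimal, hedged sketch for fetching the GGUF file from the Hub instead
# (the exact `filename` below is an assumption -- check the repo's file list
# for the quantization you want). llama-cpp-python also offers the one-step
# `Llama.from_pretrained(repo_id=..., filename=...)` as an alternative.
#
#   from huggingface_hub import hf_hub_download
#   MODEL_PATH = hf_hub_download(
#       repo_id="QuantFactory/Mental-Health-FineTuned-Mistral-7B-Instruct-v0.2-GGUF",
#       filename="Mental-Health-FineTuned-Mistral-7B-Instruct-v0.2.Q4_K_M.gguf",  # assumed filename
#   )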
# Load the Llama model; st.cache_resource keeps one instance alive across
# Streamlit reruns, so the multi-GB GGUF file is only read once per process.
@st.cache_resource
def load_llama_model():
try:
return Llama(model_path=MODEL_PATH, n_threads=8) # Adjust `n_threads` based on your system
except Exception as e:
st.error(f"Error loading model: {e}")
return None
llama_model = load_llama_model()
# Load counseling dataset
@st.cache_resource
def load_counseling_dataset():
return load_dataset("Amod/mental_health_counseling_conversations")
dataset = load_counseling_dataset()
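# Hedged schema note: the Hub copy of this dataset exposes capitalized
# "Context"/"Response" columns, so it is worth a quick sanity check before
# relying on key names further down:
#
#   print(dataset["train"].column_names)
#   print(dataset["train"][0])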
# Streamlit App
st.title("Mental Health Counseling Chat")
st.markdown("""
Welcome to the **Mental Health Counseling Chat Application**.
This platform is designed to provide **supportive, positive, and encouraging responses** based on mental health counseling expertise.
""")
# Check if the model loaded correctly
if llama_model is None:
st.error("The text generation model could not be loaded. Please check your configuration.")
else:
# Explore dataset for additional context or resources (optional)
if st.checkbox("Show Example Questions and Answers from Dataset"):
sample = dataset["train"].shuffle(seed=42).select(range(3)) # Display 3 random samples
        for example in sample:
            # Column names differ across dataset versions (the Hub copy uses
            # capitalized "Context"/"Response"), so fall back across both spellings.
            question = example.get("Context") or example.get("context", "N/A")
            answer = example.get("Response") or example.get("response", "N/A")
            st.markdown(f"**Question:** {question}")
            st.markdown(f"**Answer:** {answer}")
            st.markdown("---")
# User input for mental health concerns
user_input = st.text_area("Your question or concern:", placeholder="Type your question here...")
if st.button("Get Supportive Response"):
if user_input.strip():
try:
                # Generate the response with llama.cpp. Note that stopping on
                # "\n" truncates the reply to a single line; remove it from
                # `stop` if multi-line answers are desired.
                prompt = f"User: {user_input}\nCounselor:"
                response = llama_model(prompt, max_tokens=200, stop=["\n", "User:"])
# Extract and display the response
st.subheader("Counselor's Response:")
st.write(response["choices"][0]["text"].strip())
except Exception as e:
st.error(f"An error occurred while generating the response: {e}")
else:
st.error("Please enter a question or concern to receive a response.")
# Sidebar resources
st.sidebar.header("Additional Mental Health Resources")
st.sidebar.markdown("""
- [Mental Health Foundation](https://www.mentalhealth.org)
- [Mind](https://www.mind.org.uk)
- [National Suicide Prevention Lifeline](https://suicidepreventionlifeline.org)
""")
st.sidebar.info("This application is not a replacement for professional counseling. If you are in crisis, please seek professional help immediately.")
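# To run the app locally (requires streamlit, llama-cpp-python, and datasets):
#
#   streamlit run app.py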