File size: 4,313 Bytes
10e4cb6
de3564a
ab6d6cd
adf24b1
7ffd344
de3564a
3a64fb1
10e4cb6
34a4f65
a4cce9a
34a4f65
9903fee
72d0e7a
34a4f65
72d0e7a
9903fee
 
 
 
 
 
 
 
 
34a4f65
9903fee
 
 
 
 
be8b77d
 
9903fee
 
 
 
 
 
61d9513
72d0e7a
 
 
 
 
9903fee
 
 
adf24b1
de3564a
fe6ceec
ab6d6cd
fe6ceec
ab6d6cd
fe6ceec
ab6d6cd
 
 
 
fe6ceec
ab6d6cd
fe6ceec
3a64fb1
d02fed4
 
61d9513
d02fed4
10e4cb6
de3564a
d02fed4
3a64fb1
de3564a
 
3a64fb1
 
ab6d6cd
fe6ceec
 
ab6d6cd
 
 
 
 
 
 
 
87a519d
ab6d6cd
de3564a
3a64fb1
ab6d6cd
 
 
fe6ceec
ab6d6cd
fe6ceec
ab6d6cd
adf24b1
fe6ceec
ab6d6cd
fe6ceec
ab6d6cd
 
 
 
3a64fb1
ab6d6cd
 
 
 
 
 
 
10e4cb6
de3564a
51a37a0
fe6ceec
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
import streamlit as st
#from transformers import AutoTokenizer
from llama_cpp import Llama
from transformers import pipeline
#from peft import PeftModel, PeftConfig
#from transformers import AutoModelForCausalLM
from datasets import load_dataset

# Direct URL of the background image.
flower_image_url = "https://i.postimg.cc/hG2FG85D/2.png"

# Custom CSS: pin a blurred, centered copy of the image behind every
# Streamlit UI element (the app container is raised above it via z-index).
_background_css = f"""
    <style>
    /* Container for background */
    html, body {{
        margin: 0;
        padding: 0;
        overflow: hidden;
    }}
    [data-testid="stAppViewContainer"] {{
        position: relative;
        z-index: 1; /* Ensure UI elements are above the background */
    }}
    /* Blurred background image */
    .blurred-background {{
        position: fixed;
        top: 0;
        left: 0;
        width: 100%;
        height: 100%;
        z-index: -1; /* Send background image behind all UI elements */
        background-image: url("{flower_image_url}");
        background-size: cover;
        background-position: center;
        filter: blur(10px); /* Adjust blur ratio here */
        opacity: 0.8; /* Optional: Add slight transparency for a subtle effect */
    }}
    </style>
    """

# Inject the style sheet, then the div that actually carries the image.
st.markdown(_background_css, unsafe_allow_html=True)
st.markdown('<div class="blurred-background"></div>', unsafe_allow_html=True)

#"""""""""""""""""""""""""   Application Code Starts here   """""""""""""""""""""""""""""""""""""""""""""

# Load the text generation model pipeline (cached across Streamlit reruns).
@st.cache_resource
def load_text_generation_model():
    """Build the mental-health chatbot text-generation pipeline.

    Returns the Hugging Face pipeline on success, or None after
    surfacing the load failure in the Streamlit UI.
    """
    generator = None
    try:
        generator = pipeline("text-generation", model="thrishala/mental_health_chatbot")
    except Exception as e:
        st.error(f"Error loading model: {e}")
    return generator

text_generator = load_text_generation_model()

# Load the counseling dataset once; st.cache_resource memoizes the download.
@st.cache_resource
def load_counseling_dataset():
    """Download and cache the Amod mental-health counseling conversations dataset."""
    counseling_data = load_dataset("Amod/mental_health_counseling_conversations")
    return counseling_data

dataset = load_counseling_dataset()

# Streamlit App
st.title("Mental Health Counseling Chat")
st.markdown("""
Welcome to the **Mental Health Counseling Chat Application**.  
This platform is designed to provide **supportive, positive, and encouraging responses** based on mental health counseling expertise.
""")


def _extract_counselor_reply(generated_text):
    """Return the text following the last 'Counselor:' marker, stripped.

    The text-generation pipeline echoes the prompt, so everything up to
    the final 'Counselor:' marker is prompt text. If the marker is
    absent, split()[-1] simply yields the whole string unchanged.
    """
    return generated_text.split("Counselor:")[-1].strip()


# Check if the model loaded correctly
if text_generator is None:
    st.error("The text generation model could not be loaded. Please check your Hugging Face configuration.")
else:
    # Explore dataset for additional context or resources (optional)
    if st.checkbox("Show Example Questions and Answers from Dataset"):
        # Fixed seed keeps the three showcased examples stable across reruns.
        sample = dataset["train"].shuffle(seed=42).select(range(3))  # Display 3 random samples
        for example in sample:
            st.markdown(f"**Question:** {example.get('context', 'N/A')}")
            st.markdown(f"**Answer:** {example.get('response', 'N/A')}")
            st.markdown("---")

    # User input for mental health concerns
    user_input = st.text_area("Your question or concern:", placeholder="Type your question here...")

    if st.button("Get Supportive Response"):
        if user_input.strip():
            try:
                # Frame the input as a dialogue turn so the model completes
                # the counselor's side of the conversation.
                prompt = f"User: {user_input}\nCounselor:"
                # max_new_tokens bounds only the generated continuation.
                # The previous max_length=200 counted prompt tokens too, so
                # a long user question could leave no room for the reply.
                response = text_generator(prompt, max_new_tokens=200, num_return_sequences=1)

                # Extract and display the response
                counselor_reply = _extract_counselor_reply(response[0]["generated_text"])
                st.subheader("Counselor's Response:")
                st.write(counselor_reply)
            except Exception as e:
                st.error(f"An error occurred while generating the response: {e}")
        else:
            st.error("Please enter a question or concern to receive a response.")

    # Sidebar resources
    st.sidebar.header("Additional Mental Health Resources")
    st.sidebar.markdown("""
    - [Mental Health Foundation](https://www.mentalhealth.org)
    - [Mind](https://www.mind.org.uk)
    - [National Suicide Prevention Lifeline](https://suicidepreventionlifeline.org)
    """)

    st.sidebar.info("This application is not a replacement for professional counseling. If you are in crisis, please seek professional help immediately.")