Update app.py
app.py CHANGED
@@ -47,18 +47,21 @@ st.markdown('<div class="blurred-background"></div>', unsafe_allow_html=True)
 
 #""""""""""""""""""""""""" Application Code Starts here """""""""""""""""""""""""""""""""""""""""""""
 
-# …
+# Path to the GGUF model file
+MODEL_PATH = "QuantFactory/Mental-Health-FineTuned-Mistral-7B-Instruct-v0.2-GGUF/model.gguf"
+
+# Load Llama model
 @st.cache_resource
-def …
+def load_llama_model():
     try:
-        return …
+        return Llama(model_path=MODEL_PATH, n_threads=8)  # Adjust `n_threads` based on your system
     except Exception as e:
         st.error(f"Error loading model: {e}")
         return None
 
-…
+llama_model = load_llama_model()
 
-# Load …
+# Load counseling dataset
 @st.cache_resource
 def load_counseling_dataset():
     return load_dataset("Amod/mental_health_counseling_conversations")
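Worth flagging: in llama-cpp-python, `Llama(model_path=...)` expects a local file on disk, while the new `MODEL_PATH` reads like a Hub repo path. A minimal sketch of one way to resolve it first, assuming `huggingface_hub` is installed; the `filename` below is a placeholder, since the actual `.gguf` filename in the QuantFactory repo may differ:

from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Download (or reuse from the local cache) the quantized model file.
# "model.gguf" is a placeholder -- substitute the real .gguf filename
# listed in the QuantFactory repo.
local_path = hf_hub_download(
    repo_id="QuantFactory/Mental-Health-FineTuned-Mistral-7B-Instruct-v0.2-GGUF",
    filename="model.gguf",
)

# Construct the model from the resolved local path.
llm = Llama(model_path=local_path, n_threads=8)

Recent llama-cpp-python releases also provide `Llama.from_pretrained(repo_id=..., filename=...)`, which folds the download step into construction.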
@@ -73,8 +76,8 @@ This platform is designed to provide **supportive, positive, and encouraging res
 """)
 
 # Check if the model loaded correctly
-if …
-    st.error("The text generation model could not be loaded. Please check your …
+if llama_model is None:
+    st.error("The text generation model could not be loaded. Please check your configuration.")
 else:
     # Explore dataset for additional context or resources (optional)
     if st.checkbox("Show Example Questions and Answers from Dataset"):
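For the checkbox path above, a hedged sketch of how a few examples might be pulled from the cached dataset; the `Context` and `Response` column names are an assumption, so verify them against the dataset card:

import streamlit as st
from datasets import load_dataset

# Column names are assumed from the dataset card; adjust if they differ.
ds = load_dataset("Amod/mental_health_counseling_conversations")
for row in ds["train"].select(range(3)):  # show the first three examples
    st.write(f"**Question:** {row['Context']}")
    st.write(f"**Answer:** {row['Response']}")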
@@ -90,14 +93,13 @@ else:
     if st.button("Get Supportive Response"):
         if user_input.strip():
             try:
-                # Generate response using …
+                # Generate response using Llama
                 prompt = f"User: {user_input}\nCounselor:"
-                response = …
+                response = llama_model(prompt, max_tokens=200, stop=["\n", "User:"])
 
                 # Extract and display the response
-                counselor_reply = response[0]["generated_text"].split("Counselor:")[-1].strip()
                 st.subheader("Counselor's Response:")
-                st.write(counselor_reply)
+                st.write(response["choices"][0]["text"].strip())
             except Exception as e:
                 st.error(f"An error occurred while generating the response: {e}")
         else:
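For reference, calling a `Llama` instance as above runs a completion and returns an OpenAI-style dict; a standalone sketch of the same pattern, assuming a loaded instance named `llm`:

# Hypothetical example prompt in the User/Counselor format used by the app.
prompt = "User: I have been feeling overwhelmed lately.\nCounselor:"
response = llm(
    prompt,
    max_tokens=200,        # cap the length of the generated reply
    stop=["\n", "User:"],  # stop before the model starts a new dialogue turn
)
# The generated text lives under choices[0]["text"], mirroring the OpenAI schema.
print(response["choices"][0]["text"].strip())

Note the `stop` list: cutting at the first newline keeps the counselor's reply to a single line, and stopping at "User:" prevents the model from continuing the dialogue on the user's behalf.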
@@ -112,3 +114,4 @@ else:
     """)
 
 st.sidebar.info("This application is not a replacement for professional counseling. If you are in crisis, please seek professional help immediately.")
+