tahirsher committed on
Commit
fe6ceec
·
verified ·
1 Parent(s): 51a37a0

Update app.py

Browse files
Files changed (1)
  1. app.py +12 -13
app.py CHANGED
@@ -47,21 +47,18 @@ st.markdown('<div class="blurred-background"></div>', unsafe_allow_html=True)
47
 
48
  #""""""""""""""""""""""""" Application Code Starts here """""""""""""""""""""""""""""""""""""""""""""
49
 
50
- # Path to the GGUF model file
51
- MODEL_PATH = "QuantFactory/Mental-Health-FineTuned-Mistral-7B-Instruct-v0.2-GGUF/model.gguf"
52
-
53
- # Load Llama model
54
  @st.cache_resource
55
- def load_llama_model():
56
  try:
57
- return Llama(model_path=MODEL_PATH, n_threads=8) # Adjust `n_threads` based on your system
58
  except Exception as e:
59
  st.error(f"Error loading model: {e}")
60
  return None
61
 
62
- llama_model = load_llama_model()
63
 
64
- # Load counseling dataset
65
  @st.cache_resource
66
  def load_counseling_dataset():
67
  return load_dataset("Amod/mental_health_counseling_conversations")
@@ -76,8 +73,8 @@ This platform is designed to provide **supportive, positive, and encouraging res
76
  """)
77
 
78
  # Check if the model loaded correctly
79
- if llama_model is None:
80
- st.error("The text generation model could not be loaded. Please check your configuration.")
81
  else:
82
  # Explore dataset for additional context or resources (optional)
83
  if st.checkbox("Show Example Questions and Answers from Dataset"):
@@ -93,13 +90,14 @@ else:
93
  if st.button("Get Supportive Response"):
94
  if user_input.strip():
95
  try:
96
- # Generate response using Llama
97
  prompt = f"User: {user_input}\nCounselor:"
98
- response = llama_model(prompt, max_tokens=200, stop=["\n", "User:"])
99
 
100
  # Extract and display the response
 
101
  st.subheader("Counselor's Response:")
102
- st.write(response["choices"][0]["text"].strip())
103
  except Exception as e:
104
  st.error(f"An error occurred while generating the response: {e}")
105
  else:
@@ -115,3 +113,4 @@ else:
115
 
116
  st.sidebar.info("This application is not a replacement for professional counseling. If you are in crisis, please seek professional help immediately.")
117
 
 
 
47
 
48
  #""""""""""""""""""""""""" Application Code Starts here """""""""""""""""""""""""""""""""""""""""""""
49
 
50
+ # Load the text generation model pipeline
 
 
 
51
  @st.cache_resource
52
+ def load_text_generation_model():
53
  try:
54
+ return pipeline("text-generation", model="thrishala/mental_health_chatbot")
55
  except Exception as e:
56
  st.error(f"Error loading model: {e}")
57
  return None
58
 
59
+ text_generator = load_text_generation_model()
60
 
61
+ # Load the counseling dataset
62
  @st.cache_resource
63
  def load_counseling_dataset():
64
  return load_dataset("Amod/mental_health_counseling_conversations")
 
73
  """)
74
 
75
  # Check if the model loaded correctly
76
+ if text_generator is None:
77
+ st.error("The text generation model could not be loaded. Please check your Hugging Face configuration.")
78
  else:
79
  # Explore dataset for additional context or resources (optional)
80
  if st.checkbox("Show Example Questions and Answers from Dataset"):
 
90
  if st.button("Get Supportive Response"):
91
  if user_input.strip():
92
  try:
93
+ # Generate response using the text generation pipeline
94
  prompt = f"User: {user_input}\nCounselor:"
95
+ response = text_generator(prompt, max_length=200, num_return_sequences=1)
96
 
97
  # Extract and display the response
98
+ counselor_reply = response[0]["generated_text"].split("Counselor:")[-1].strip()
99
  st.subheader("Counselor's Response:")
100
+ st.write(counselor_reply)
101
  except Exception as e:
102
  st.error(f"An error occurred while generating the response: {e}")
103
  else:
 
113
 
114
  st.sidebar.info("This application is not a replacement for professional counseling. If you are in crisis, please seek professional help immediately.")
115
 
116
+