Update app.py
app.py CHANGED

@@ -43,16 +43,11 @@ st.markdown(
 # Add the blurred background div
 st.markdown('<div class="blurred-background"></div>', unsafe_allow_html=True)
 
-# Load the fine-tuned model and tokenizer
 @st.cache_resource
-def
-
-    base_model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
-    model = PeftModel.from_pretrained(base_model, "zementalist/llama-3-8B-chat-psychotherapist")
-    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
-    return model, tokenizer
-
+def load_pipeline():
+    return pipeline("text-generation", model="QuantFactory/Mental-Health-FineTuned-Mistral-7B-Instruct-v0.2-GGUF")
 
-
+text_generation_pipe = load_pipeline()
 
 # Load dataset for reference (optional)
 @st.cache_resource