Daemontatox committed
Commit 77f6cf9 · verified · 1 Parent(s): 55c56d4

Update app.py

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -11,7 +11,7 @@ from transformers import (
     StoppingCriteriaList
 )
 
-MODEL_ID ="Daemontatox/PathfinderAI5.0"
+MODEL_ID ="cognitivecomputations/Dolphin3.0-R1-Mistral-24B"
 
 DEFAULT_SYSTEM_PROMPT = """
 
@@ -129,10 +129,10 @@ class StopOnTokens(StoppingCriteria):
 
 def initialize_model():
     quantization_config = BitsAndBytesConfig(
-        load_in_4bit=True,
-        bnb_4bit_compute_dtype=torch.bfloat16,
-        bnb_4bit_quant_type="nf4",
-        bnb_4bit_use_double_quant=True,
+        load_in_8bit=True,
+        bnb_8bit_compute_dtype=torch.bfloat16,
+        bnb_8bit_quant_type="nf4",
+        bnb_8bit_use_double_quant=True,
     )
 
     tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
@@ -205,7 +205,7 @@ model, tokenizer = initialize_model()
 with gr.Blocks(css=CSS, theme=gr.themes.Soft()) as demo:
     gr.Markdown("""
     <h1 align="center">🧠 AI Reasoning Assistant</h1>
-    <p align="center">Ask me Hatd questions</p>
+    <p align="center">Ask me Hard questions</p>
     """)
 
     chatbot = gr.Chatbot(label="Conversation", elem_id="chatbot")
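
Note on the new quantization settings: as far as I can tell, BitsAndBytesConfig in transformers only defines the bnb_4bit_* options (NF4 and double quantization are 4-bit-specific features), so bnb_8bit_compute_dtype, bnb_8bit_quant_type, and bnb_8bit_use_double_quant are not recognized parameters and would likely be ignored with an "unused kwargs" warning. Below is a minimal sketch of an 8-bit configuration using only options I know the library documents; the llm_int8_threshold value, device_map, dtype, and the surrounding loading code are assumptions, since the rest of initialize_model() is not shown in this diff.

# Hypothetical sketch, not the committed code: 8-bit quantization using only
# the options BitsAndBytesConfig documents for the int8 path.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

MODEL_ID = "cognitivecomputations/Dolphin3.0-R1-Mistral-24B"

quantization_config = BitsAndBytesConfig(
    load_in_8bit=True,           # int8 weight quantization via bitsandbytes
    llm_int8_threshold=6.0,      # outlier threshold; 6.0 is the library default
)

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    quantization_config=quantization_config,
    device_map="auto",           # assumption: how the app places the model
    torch_dtype=torch.bfloat16,  # non-quantized modules kept in bfloat16
    trust_remote_code=True,
)

If 4-bit NF4 loading was actually the intent, the original bnb_4bit_* block was already the supported spelling; the change above only matters if the goal is genuine 8-bit (int8) quantization.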