Daemontatox committed on
Commit
75f78f2
·
verified ·
1 Parent(s): a76f205

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -181,7 +181,7 @@ quantization_config = BitsAndBytesConfig(
181
 
182
 
183
 
184
- model_id = "Daemontatox/MawaredT3"
185
  tokenizer = AutoTokenizer.from_pretrained(model_id)
186
 
187
  model = AutoModelForCausalLM.from_pretrained(
@@ -189,7 +189,7 @@ model = AutoModelForCausalLM.from_pretrained(
189
  torch_dtype=torch.float16,
190
  device_map="cuda",
191
  attn_implementation="flash_attention_2",
192
- quantization_config=quantization_config
193
  )
194
 
195
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=8192 )
 
181
 
182
 
183
 
184
+ model_id = "Daemontatox/Mawared_GS"
185
  tokenizer = AutoTokenizer.from_pretrained(model_id)
186
 
187
  model = AutoModelForCausalLM.from_pretrained(
 
189
  torch_dtype=torch.float16,
190
  device_map="cuda",
191
  attn_implementation="flash_attention_2",
192
+ #quantization_config=quantization_config
193
  )
194
 
195
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=8192 )