Lahiru Menikdiwela committed on
Commit
3ef330a
·
1 Parent(s): d004546

remove quantization due to lack of cuda

Browse files
Files changed (1) hide show
  1. model.py +1 -1
model.py CHANGED
@@ -26,7 +26,7 @@ def get_local_model(model_name_or_path:str)->pipeline:
26
  model = AutoModelForCausalLM.from_pretrained(
27
  model_name_or_path,
28
  torch_dtype=torch.bfloat16,
29
- load_in_4bit = True,
30
  token = hf_token
31
  )
32
  pipe = pipeline(
 
26
  model = AutoModelForCausalLM.from_pretrained(
27
  model_name_or_path,
28
  torch_dtype=torch.bfloat16,
29
+ # load_in_4bit = True,
30
  token = hf_token
31
  )
32
  pipe = pipeline(