cheberle committed
Commit 57b74e3 · 1 Parent(s): a863b4f
Files changed (1):
  1. app.py +1 -1
app.py CHANGED
@@ -13,7 +13,7 @@ print("PEFT Base Model:", peft_config.base_model_name_or_path)
 
 # 2. Load the tokenizer & base model
 tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
-base_model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, revision="4831ee1375be5b4ff5a4abf7984e13628db44e35", trust_remote_code=True)
+base_model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, revision="4831ee1375be5b4ff5a4abf7984e13628db44e35", ignore_mismatched_sizes=True, trust_remote_code=True)
 
 # 3. Load your LoRA adapter weights onto the base model
 model = PeftModel.from_pretrained(base_model, ADAPTER_REPO)
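
For context: ignore_mismatched_sizes=True tells from_pretrained to skip (and newly initialize) any checkpoint weights whose shapes disagree with the model config, instead of raising a size-mismatch error. Below is a minimal, self-contained sketch of the loading flow after this change; ADAPTER_REPO is a hypothetical placeholder (the real value is set earlier in app.py and not shown in this diff), and deriving BASE_MODEL from the PEFT config is an assumption based on the hunk's context line.

from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftConfig, PeftModel

ADAPTER_REPO = "user/my-lora-adapter"  # hypothetical placeholder; set earlier in app.py

# 1. Read the adapter's PEFT config to find its base model
#    (assumed from the hunk's context line, which prints this value)
peft_config = PeftConfig.from_pretrained(ADAPTER_REPO)
print("PEFT Base Model:", peft_config.base_model_name_or_path)
BASE_MODEL = peft_config.base_model_name_or_path

# 2. Load the tokenizer & base model at the pinned revision;
#    ignore_mismatched_sizes=True re-initializes layers whose checkpoint
#    shapes do not match the config instead of erroring out.
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
base_model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    revision="4831ee1375be5b4ff5a4abf7984e13628db44e35",
    ignore_mismatched_sizes=True,
    trust_remote_code=True,
)

# 3. Load the LoRA adapter weights onto the base model
model = PeftModel.from_pretrained(base_model, ADAPTER_REPO)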