app.py
CHANGED
@@ -13,7 +13,7 @@ print("PEFT Base Model:", peft_config.base_model_name_or_path)
 
 # 2. Load the tokenizer & base model
 tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
-base_model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, trust_remote_code=True)
+base_model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, revision="4831ee1375be5b4ff5a4abf7984e13628db44e35", trust_remote_code=True)
 
 # 3. Load your LoRA adapter weights onto the base model
 model = PeftModel.from_pretrained(base_model, ADAPTER_REPO)