mtasic85 committed on
Commit
9b1eed4
·
1 Parent(s): 586c286

cpt core 4

Browse files
Files changed (1) hide show
  1. scripts/cpt_core_model_4.py +2 -2
scripts/cpt_core_model_4.py CHANGED
@@ -29,7 +29,7 @@ model, tokenizer = FastLanguageModel.from_pretrained(
29
  model = FastLanguageModel.get_peft_model(
30
  model,
31
  # r = 256, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
32
- r = 64, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
33
  target_modules = [
34
  "q_proj", "k_proj", "v_proj", "o_proj",
35
  "gate_proj",
@@ -37,7 +37,7 @@ model = FastLanguageModel.get_peft_model(
37
  "embed_tokens", "lm_head",
38
  ],
39
  # lora_alpha = 32,
40
- lora_alpha = 8,
41
  lora_dropout = 0, # Supports any, but = 0 is optimized
42
  bias = "none", # Supports any, but = "none" is optimized
43
  # [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
 
29
  model = FastLanguageModel.get_peft_model(
30
  model,
31
  # r = 256, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
32
+ r = 16, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
33
  target_modules = [
34
  "q_proj", "k_proj", "v_proj", "o_proj",
35
  "gate_proj",
 
37
  "embed_tokens", "lm_head",
38
  ],
39
  # lora_alpha = 32,
40
+ lora_alpha = 2,
41
  lora_dropout = 0, # Supports any, but = 0 is optimized
42
  bias = "none", # Supports any, but = "none" is optimized
43
  # [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!