mtasic85 committed on
Commit
095febe
·
1 Parent(s): fa96477

cpt core 4

Browse files
Files changed (1) hide show
  1. scripts/cpt_core_model_4.py +4 -2
scripts/cpt_core_model_4.py CHANGED
@@ -28,14 +28,16 @@ model, tokenizer = FastLanguageModel.from_pretrained(
28
 
29
  model = FastLanguageModel.get_peft_model(
30
  model,
31
- r = 256, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
 
32
  target_modules = [
33
  "q_proj", "k_proj", "v_proj", "o_proj",
34
  "gate_proj",
35
  "up_proj", "down_proj",
36
  "embed_tokens", "lm_head",
37
  ],
38
- lora_alpha = 32,
 
39
  lora_dropout = 0, # Supports any, but = 0 is optimized
40
  bias = "none", # Supports any, but = "none" is optimized
41
  # [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
 
28
 
29
  model = FastLanguageModel.get_peft_model(
30
  model,
31
+ # r = 256, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
32
+ r = 64, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
33
  target_modules = [
34
  "q_proj", "k_proj", "v_proj", "o_proj",
35
  "gate_proj",
36
  "up_proj", "down_proj",
37
  "embed_tokens", "lm_head",
38
  ],
39
+ # lora_alpha = 32,
40
+ lora_alpha = 8,
41
  lora_dropout = 0, # Supports any, but = 0 is optimized
42
  bias = "none", # Supports any, but = "none" is optimized
43
  # [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!