ayuzawa committed (verified)
Commit 1817c1b · Parent(s): a3c0b5e

Model save
README.md CHANGED
@@ -42,7 +42,7 @@ The following hyperparameters were used during training:
  - optimizer: Use OptimizerNames.ADAMW_HF with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
  - lr_scheduler_type: linear
  - lr_scheduler_warmup_steps: 2
- - num_epochs: 200
+ - num_epochs: 1
  - mixed_precision_training: Native AMP
 
  ### Training results
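
For context, a minimal sketch (assumed, not taken from this repository's training script) of how the hyperparameters listed above could be expressed with `transformers.TrainingArguments`; `output_dir` is a placeholder, and `num_train_epochs` reflects the value after this commit:

```python
from transformers import TrainingArguments

# Sketch only: values mirror the README hyperparameters above.
training_args = TrainingArguments(
    output_dir="outputs",          # placeholder path, not part of this commit
    optim="adamw_hf",              # OptimizerNames.ADAMW_HF
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    warmup_steps=2,
    num_train_epochs=1,            # changed from 200 to 1 in this commit
    fp16=True,                     # Native AMP mixed-precision training
)
```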
adapter_config.json CHANGED
@@ -26,13 +26,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "gate_proj",
-    "down_proj",
-    "q_proj",
     "v_proj",
+    "q_proj",
     "up_proj",
+    "down_proj",
+    "k_proj",
     "o_proj",
-    "k_proj"
+    "gate_proj"
   ],
   "task_type": null,
   "use_dora": true,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2683308a30558ca889407a1ffeb0cd1fe45632a17e28fc7083426396fd5b3846
+ oid sha256:9df8be3d456dcc78d7ad14050738589f046087b89f227f8e1da5ec26e032ee4b
  size 125866776
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:696556ef4dee1a9991278385df2ab089a6b669e63fcab51ea38414b5a147ca93
+ oid sha256:1d23a1f32ab6bce0f981768bbac4bf8756b5e2cc4c71db976163be7a16401af7
  size 5304