WangXFng committed
Commit b891330 · verified · 1 Parent(s): 9c5668e

Model save

README.md CHANGED
@@ -50,8 +50,7 @@ The following hyperparameters were used during training:
 
  ### Framework versions
 
- - PEFT 0.13.0
+ - PEFT 0.13.1
  - Transformers 4.45.2
- - Pytorch 2.4.0
- - Datasets 2.21.0
+ - Pytorch 2.4.1
  - Tokenizers 0.20.0
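For reference, a quick way to check that a local environment matches the framework versions listed above is to print each package's reported version. This is a minimal sketch; the import names are assumed to be the standard PyPI distributions and are not specified in this commit.

```python
# Minimal sketch: print installed versions to compare against the
# "Framework versions" list above (PEFT 0.13.1, Transformers 4.45.2,
# PyTorch 2.4.1, Tokenizers 0.20.0).
import peft
import tokenizers
import torch
import transformers

for name, module in [
    ("PEFT", peft),
    ("Transformers", transformers),
    ("PyTorch", torch),
    ("Tokenizers", tokenizers),
]:
    print(f"{name}: {module.__version__}")
```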
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
-     "o_proj",
      "k_proj",
+     "q_proj",
      "gate_proj",
-     "v_proj",
+     "down_proj",
      "up_proj",
-     "q_proj",
-     "down_proj"
+     "o_proj",
+     "v_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:07d14821c09af9d8c759cd33f0d395dcdc4e73ecb506c1703f6c540efc107b38
+ oid sha256:39922ffb5f02f9d8789a865ec5315b1b6656a7e7016c1af19856418a3a045f90
  size 1635969696
trainer_state.json CHANGED
@@ -10,67 +10,67 @@
  "log_history": [
    {
      "epoch": 0.4854368932038835,
-     "grad_norm": 0.9584088921546936,
+     "grad_norm": 0.9415706992149353,
      "learning_rate": 8.794946550048592e-05,
-     "loss": 0.8701,
+     "loss": 0.8636,
      "step": 250
    },
    {
      "epoch": 0.970873786407767,
-     "grad_norm": 0.7143980264663696,
+     "grad_norm": 0.6905472278594971,
      "learning_rate": 7.580174927113704e-05,
-     "loss": 0.4271,
+     "loss": 0.4257,
      "step": 500
    },
    {
      "epoch": 1.4563106796116505,
-     "grad_norm": 0.627783477306366,
+     "grad_norm": 0.6437392234802246,
      "learning_rate": 6.365403304178815e-05,
-     "loss": 0.3804,
+     "loss": 0.3792,
      "step": 750
    },
    {
      "epoch": 1.941747572815534,
-     "grad_norm": 0.6241222620010376,
+     "grad_norm": 0.6311036348342896,
      "learning_rate": 5.150631681243926e-05,
-     "loss": 0.3638,
+     "loss": 0.3628,
      "step": 1000
    },
    {
      "epoch": 2.4271844660194173,
-     "grad_norm": 0.6115825176239014,
+     "grad_norm": 0.6062882542610168,
      "learning_rate": 3.9358600583090386e-05,
-     "loss": 0.3522,
+     "loss": 0.3511,
      "step": 1250
    },
    {
      "epoch": 2.912621359223301,
-     "grad_norm": 0.639370322227478,
+     "grad_norm": 0.6469098925590515,
      "learning_rate": 2.72108843537415e-05,
-     "loss": 0.3439,
+     "loss": 0.3425,
      "step": 1500
    },
    {
      "epoch": 3.3980582524271843,
-     "grad_norm": 0.6484368443489075,
+     "grad_norm": 0.6484191417694092,
      "learning_rate": 1.5063168124392615e-05,
-     "loss": 0.3313,
+     "loss": 0.329,
      "step": 1750
    },
    {
      "epoch": 3.883495145631068,
-     "grad_norm": 0.6229206919670105,
+     "grad_norm": 0.6347299218177795,
      "learning_rate": 2.915451895043732e-06,
-     "loss": 0.3234,
+     "loss": 0.3209,
      "step": 2000
    },
    {
      "epoch": 4.0,
      "step": 2060,
      "total_flos": 1.4445804612483994e+18,
-     "train_loss": 0.42104607832084584,
-     "train_runtime": 30133.5745,
-     "train_samples_per_second": 17.5,
+     "train_loss": 0.4188447378214123,
+     "train_runtime": 30284.0025,
+     "train_samples_per_second": 17.413,
      "train_steps_per_second": 0.068
    }
  ],
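The updated log_history keeps the same schema: per-step entries with epoch, grad_norm, learning_rate, loss, and step, plus a final summary entry. Below is a minimal sketch for inspecting it locally; the file path is an assumption and should point at the checkpoint directory.

```python
# Minimal sketch: read trainer_state.json and print the logged metrics.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:
        # Per-step logging entry (every 250 steps in this run).
        print(f"step {entry['step']:>4}  epoch {entry['epoch']:.2f}  "
              f"loss {entry['loss']:.4f}  grad_norm {entry['grad_norm']:.4f}")
    elif "train_loss" in entry:
        # Final summary entry written at the end of training.
        print(f"train_loss {entry['train_loss']:.4f}  "
              f"runtime {entry['train_runtime']:.0f}s")
```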
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:81da159f8e27ac8bb61e3f1aa284bb40d5b444177144114b63e2020a44f0308a
+ oid sha256:aef97ff5f47dce62db3654e9a89a27e915a0ff99aa25be9c7a88d60a99a91224
  size 5240