WangXFng committed on
Commit 1b2968f · verified · 1 Parent(s): ab4b734

Model save

README.md CHANGED
@@ -33,7 +33,7 @@ More information needed
  ### Training hyperparameters

  The following hyperparameters were used during training:
- - learning_rate: 1e-05
+ - learning_rate: 0.0003
  - train_batch_size: 16
  - eval_batch_size: 8
  - seed: 42
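The change above raises the learning rate from 1e-05 to 3e-4 while the batch sizes and seed stay fixed. A minimal sketch of how these values would typically be passed to transformers' TrainingArguments follows; the output directory is a placeholder, and num_train_epochs=4 is inferred from the final "epoch": 4.0 entry in trainer_state.json below, not stated in the README.

```python
# Sketch only, not the author's training script: maps the README hyperparameters
# onto transformers' TrainingArguments.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="outputs",            # placeholder, not shown in the diff
    learning_rate=3e-4,              # changed from 1e-05 to 0.0003 in this commit
    per_device_train_batch_size=16,  # train_batch_size: 16
    per_device_eval_batch_size=8,    # eval_batch_size: 8
    seed=42,                         # seed: 42
    num_train_epochs=4,              # inferred from trainer_state.json
)
```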
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
    "rank_pattern": {},
    "revision": null,
    "target_modules": [
-     "down_proj",
-     "gate_proj",
-     "v_proj",
-     "q_proj",
+     "up_proj",
      "k_proj",
+     "q_proj",
      "o_proj",
-     "up_proj"
+     "v_proj",
+     "gate_proj",
+     "down_proj"
    ],
    "task_type": "CAUSAL_LM",
    "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:94850a297cd381640bba24de3a30d3dc5dde5b7b1988dfb3a4e4229f304a02e5
+ oid sha256:74ff21d9c7e0e4c65ab5d4449e992f0abfe9cf1812364219522c4314b5ca3863
  size 1684597880
trainer_state.json CHANGED
@@ -10,67 +10,67 @@
   "log_history": [
     {
       "epoch": 0.4854368932038835,
-      "grad_norm": 0.4418950378894806,
-      "learning_rate": 8.786407766990292e-06,
-      "loss": 1.6619,
+      "grad_norm": 0.2625071704387665,
+      "learning_rate": 0.00026359223300970874,
+      "loss": 0.5849,
       "step": 250
     },
     {
       "epoch": 0.970873786407767,
-      "grad_norm": 0.5680270791053772,
-      "learning_rate": 7.572815533980583e-06,
-      "loss": 0.8461,
+      "grad_norm": 0.2989667057991028,
+      "learning_rate": 0.00022718446601941746,
+      "loss": 0.3555,
       "step": 500
     },
     {
       "epoch": 1.4563106796116505,
-      "grad_norm": 0.8350700736045837,
-      "learning_rate": 6.3592233009708745e-06,
-      "loss": 0.7226,
+      "grad_norm": 0.3658324182033539,
+      "learning_rate": 0.0001907766990291262,
+      "loss": 0.2941,
       "step": 750
     },
     {
       "epoch": 1.941747572815534,
-      "grad_norm": 0.9518159031867981,
-      "learning_rate": 5.145631067961165e-06,
-      "loss": 0.6288,
+      "grad_norm": 0.3304229974746704,
+      "learning_rate": 0.00015436893203883494,
+      "loss": 0.2256,
       "step": 1000
     },
     {
       "epoch": 2.4271844660194173,
-      "grad_norm": 0.9932662844657898,
-      "learning_rate": 3.932038834951457e-06,
-      "loss": 0.5693,
+      "grad_norm": 0.2993295192718506,
+      "learning_rate": 0.00011796116504854367,
+      "loss": 0.1753,
       "step": 1250
     },
     {
       "epoch": 2.912621359223301,
-      "grad_norm": 1.0723854303359985,
-      "learning_rate": 2.718446601941748e-06,
-      "loss": 0.538,
+      "grad_norm": 0.27283886075019836,
+      "learning_rate": 8.155339805825241e-05,
+      "loss": 0.1565,
       "step": 1500
     },
     {
       "epoch": 3.3980582524271843,
-      "grad_norm": 0.9740040898323059,
-      "learning_rate": 1.5048543689320389e-06,
-      "loss": 0.5171,
+      "grad_norm": 0.26180538535118103,
+      "learning_rate": 4.5145631067961155e-05,
+      "loss": 0.1362,
       "step": 1750
     },
     {
       "epoch": 3.883495145631068,
-      "grad_norm": 0.9811394214630127,
-      "learning_rate": 2.9126213592233014e-07,
-      "loss": 0.5091,
+      "grad_norm": 0.24231906235218048,
+      "learning_rate": 8.737864077669902e-06,
+      "loss": 0.1306,
       "step": 2000
     },
     {
       "epoch": 4.0,
       "step": 2060,
       "total_flos": 1.4514285460762153e+18,
-      "train_loss": 0.7420612372240973,
-      "train_runtime": 17990.3108,
-      "train_samples_per_second": 29.313,
+      "train_loss": 0.25361053179768683,
+      "train_runtime": 17986.2826,
+      "train_samples_per_second": 29.319,
       "train_steps_per_second": 0.115
     }
   ],
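The new log is consistent with the README change: the logged learning rates decay linearly from a peak of 3e-4 toward zero over the run's 2060 steps (the old log decays from 1e-05 in the same way). A small sketch that reproduces the logged values follows, under the assumption of linear decay with no warmup; the scheduler type itself is not part of this diff.

```python
# Sketch only: reproduce the logged learning rates assuming linear decay
# to zero with no warmup, starting from the new peak learning rate.
PEAK_LR = 3e-4
TOTAL_STEPS = 2060

def linear_lr(step: int) -> float:
    """Learning rate after `step` optimizer steps under linear decay to zero."""
    return PEAK_LR * (TOTAL_STEPS - step) / TOTAL_STEPS

for step in (250, 500, 1750, 2000):
    print(step, linear_lr(step))
# step 250 -> ~2.636e-04 and step 2000 -> ~8.738e-06, matching the logged values
```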
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:828428c006ac63ec165c2b2ac1b1c261b5bb0f794d0934d8aaf766389c6b022c
+ oid sha256:2c27cbe2b66ecac085709c4d00aea675c5f87a4099a5fb6eb37bf0deca39a560
  size 5240