Commit 93367ee (verified) by lleticiasilvaa · parent: 6cbed37

Training in progress, step 1500, checkpoint

checkpoint-1500/adapter_config.json CHANGED
@@ -27,11 +27,11 @@
   "revision": null,
   "target_modules": [
     "k_proj",
-    "down_proj",
-    "v_proj",
-    "o_proj",
     "gate_proj",
     "q_proj",
+    "v_proj",
+    "o_proj",
+    "down_proj",
     "up_proj"
   ],
   "task_type": null,
checkpoint-1500/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:736f9428472d49ff9bb10a089262016c44e499be7a443d0bf223235834eb7f52
+oid sha256:9698ea5b1699956d3ef5f28fd15a9097f914db4fac2e26ac81e9451e8e918213
 size 400616360
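
The adapter_config.json hunk above only reorders the target_modules list: the same seven projection layers (q_proj, k_proj, v_proj, o_proj, gate_proj, up_proj, down_proj) are targeted before and after, and the roughly 400 MB of adapter weights sit in adapter_model.safetensors behind the Git LFS pointer just shown. For reference, a minimal sketch, assuming the PEFT library produced this config, of how such a module list is typically declared; every hyperparameter other than target_modules and task_type is left at its default because this hunk does not show them:

```python
# Minimal sketch, assuming the PEFT library produced adapter_config.json.
# Only target_modules and task_type come from the hunk above; rank, alpha,
# dropout, etc. are left at PEFT defaults because this diff does not show them.
from peft import LoraConfig

lora_config = LoraConfig(
    target_modules=[
        "k_proj", "gate_proj", "q_proj",
        "v_proj", "o_proj", "down_proj", "up_proj",
    ],
    task_type=None,  # matches "task_type": null in the checkpoint config
)
print(sorted(lora_config.target_modules))
```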
checkpoint-1500/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9436e62b9451796e81f7647b2b8833e3eecd0409fd1d8f10bbc76514742a79c4
+oid sha256:10511162df0f726b38aeb6b98969b34695df0197203ebe76b60f019e22167883
 size 205100562
checkpoint-1500/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d3c1e7da703c784fbc3d8152b1fc8d564276ea9ef45d6aa522d9bed4c7ac16ba
+oid sha256:46fdba27324dcfa656fab0568648f1c046ad9feeecdb7f542c62dd255167ea2a
 size 14308
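
adapter_model.safetensors, optimizer.pt and rng_state.pth are tracked with Git LFS, so each diff above only swaps the sha256 oid in the pointer file while the byte size stays the same. A small sketch of how a locally downloaded file could be checked against the oid recorded in its pointer; the path and expected hash below are taken from the adapter_model.safetensors pointer above, adjust them for the other files:

```python
# Sketch: hash a downloaded LFS object and compare it with the sha256 oid
# recorded in the pointer file shown in the diff above.
import hashlib

def sha256_of(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest()

expected = "9698ea5b1699956d3ef5f28fd15a9097f914db4fac2e26ac81e9451e8e918213"
print(sha256_of("checkpoint-1500/adapter_model.safetensors") == expected)
```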
checkpoint-1500/trainer_state.json CHANGED
@@ -70,32 +70,32 @@
     },
     {
       "epoch": 1.167114015637764,
-      "grad_norm": 2.512282609939575,
+      "grad_norm": 2.2942659854888916,
       "learning_rate": 3.915853581228413e-05,
-      "loss": 0.614,
+      "loss": 0.7993,
       "step": 1250
     },
     {
       "epoch": 1.167114015637764,
-      "eval_loss": 0.1132238432765007,
-      "eval_runtime": 22.1933,
-      "eval_samples_per_second": 3.92,
-      "eval_steps_per_second": 3.92,
+      "eval_loss": 0.1208883598446846,
+      "eval_runtime": 21.9734,
+      "eval_samples_per_second": 3.959,
+      "eval_steps_per_second": 3.959,
       "step": 1250
     },
     {
       "epoch": 1.4005134788189988,
-      "grad_norm": 1.3006846904754639,
+      "grad_norm": 1.147830843925476,
       "learning_rate": 2.1903963223439395e-05,
-      "loss": 0.6601,
+      "loss": 0.7032,
       "step": 1500
     },
     {
       "epoch": 1.4005134788189988,
-      "eval_loss": 0.10768163949251175,
-      "eval_runtime": 22.1842,
-      "eval_samples_per_second": 3.922,
-      "eval_steps_per_second": 3.922,
+      "eval_loss": 0.10965924710035324,
+      "eval_runtime": 22.0058,
+      "eval_samples_per_second": 3.954,
+      "eval_steps_per_second": 3.954,
       "step": 1500
     }
   ],
@@ -116,7 +116,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.2348081207667507e+17,
+  "total_flos": 1.228651871265792e+17,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
checkpoint-1500/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:37d85e10062490083f3df78142b22b936ee4c10ad7bb1c35d68c6a9743be03a9
+oid sha256:944c7c22023831a73ee4b0a66805723ecef65f25064cb419b24a8d84b3daee22
 size 5560
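
trainer_state.json and training_args.bin round out the checkpoint: the hunks above replace the step-1250 and step-1500 log entries (training loss, grad norm, eval metrics; eval_loss at step 1500 moves from 0.1077 to 0.1097) together with the total_flos counter, and swap in a new pickled TrainingArguments. A hedged sketch of how both could be inspected from a local clone, assuming the standard Hugging Face Trainer file layout; the attribute names printed at the end are common TrainingArguments fields, not values taken from this repository:

```python
# Sketch: inspect trainer_state.json and training_args.bin from a local clone.
# Assumes the standard Hugging Face Trainer layout: trainer_state.json holds a
# "log_history" list, and training_args.bin is a pickled TrainingArguments
# object (hence weights_only=False on recent PyTorch; only load files you trust).
import json

import torch

with open("checkpoint-1500/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f'step {entry["step"]}: eval_loss={entry["eval_loss"]}')

training_args = torch.load("checkpoint-1500/training_args.bin", weights_only=False)
print(training_args.learning_rate, training_args.per_device_train_batch_size)
```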