infogep committed
Commit de8ab48 · verified · 1 Parent(s): c0c7184

Training in progress, step 100, checkpoint

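The diffs below cover the standard Trainer checkpoint artifacts under last-checkpoint/ (adapter weights, optimizer, RNG and scheduler state, trainer state). A minimal sketch of pulling exactly this revision with huggingface_hub; the repo id is a placeholder, only the short hash de8ab48 comes from the commit header above:

    from huggingface_hub import snapshot_download

    # Placeholder repo id (not shown on this commit page); the revision is the
    # commit hash from the header above.
    local_dir = snapshot_download(
        repo_id="<user>/<repo>",
        revision="de8ab48",
        allow_patterns=["last-checkpoint/*"],
    )
    print(local_dir)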
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5913556d423b9b4bddbae635045e1577865cfe90ae4bd4383775c30fed1de431
+oid sha256:374f7d3f96c037724c7c5329e4eab3a809b4fdbbb9095b031b1a7c497a5e92f9
 size 639691872
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:61943b5c8fa512a3e998c0c3a654991deeffaa8cee87eade4d977dbd93885536
+oid sha256:9497d4d04ddb7e9089cba9b807d767b815dd343704a171700e5ad9f79830ed9a
 size 325339796
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6bc572fa878f5e53027d72133d5a005349c43eaf762cabf369a01657ae2f2caf
+oid sha256:36f9d4df5fa80b8af0ae9e036c834f318a3d9c628c3b781762ee3d1f9db0527c
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:152cf5e1f9adf3dc2c608dbb3e394e09940ab42688cdb5a07d89f1cccb7f89a5
+oid sha256:d4d0f57336dd58c2282a758b1873df2644647c71e8296b0dab58cb3a9f5f7c78
 size 1064
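The four files above are Git LFS pointers: each stores only the spec version, the sha256 object id, and the byte size of the actual artifact. A minimal sketch of verifying a downloaded artifact against its pointer (the local path is an assumption; the expected oid is the new adapter hash from the diff above):

    import hashlib

    # Recompute the sha256 oid of a downloaded checkpoint file and compare it
    # with the value recorded in the LFS pointer.
    def sha256_oid(path: str) -> str:
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                digest.update(chunk)
        return digest.hexdigest()

    expected = "374f7d3f96c037724c7c5329e4eab3a809b4fdbbb9095b031b1a7c497a5e92f9"
    assert sha256_oid("last-checkpoint/adapter_model.safetensors") == expected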
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.6046526432037354,
-  "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.0429553264604811,
+  "best_metric": 1.560912847518921,
+  "best_model_checkpoint": "miner_id_24/checkpoint-100",
+  "epoch": 0.0859106529209622,
   "eval_steps": 50,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -58,6 +58,49 @@
       "eval_samples_per_second": 12.572,
       "eval_steps_per_second": 3.156,
       "step": 50
+    },
+    {
+      "epoch": 0.05154639175257732,
+      "grad_norm": 0.4915436804294586,
+      "learning_rate": 6.992307692307692e-06,
+      "loss": 1.5171,
+      "step": 60
+    },
+    {
+      "epoch": 0.06013745704467354,
+      "grad_norm": 0.4138346016407013,
+      "learning_rate": 6.215384615384615e-06,
+      "loss": 1.5871,
+      "step": 70
+    },
+    {
+      "epoch": 0.06872852233676977,
+      "grad_norm": 0.5473077297210693,
+      "learning_rate": 5.438461538461538e-06,
+      "loss": 1.557,
+      "step": 80
+    },
+    {
+      "epoch": 0.07731958762886598,
+      "grad_norm": 0.7715198397636414,
+      "learning_rate": 4.661538461538462e-06,
+      "loss": 1.5657,
+      "step": 90
+    },
+    {
+      "epoch": 0.0859106529209622,
+      "grad_norm": 1.106988787651062,
+      "learning_rate": 3.884615384615385e-06,
+      "loss": 1.6544,
+      "step": 100
+    },
+    {
+      "epoch": 0.0859106529209622,
+      "eval_loss": 1.560912847518921,
+      "eval_runtime": 38.5363,
+      "eval_samples_per_second": 12.715,
+      "eval_steps_per_second": 3.192,
+      "step": 100
     }
   ],
   "logging_steps": 10,
@@ -86,7 +129,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.52331425792e+16,
+  "total_flos": 4.9793401356288e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null