infogep committed
Commit 08e8026 · verified · 1 Parent(s): f4e57bc

Training in progress, step 100, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a50ebb3bf7bb08cce74dfa3745b60c17005fb3c6e793a59641d5aef73cd4ced7
+oid sha256:68930c58fce51d88a4b2b15bfcffc20cb8fbfb74f8d06364ec15ff69a6ef79c8
 size 91850362
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9c6f8574bd6156f25085f2a6d48d97e252e9242430e1fa6b1aab394343cae47d
+oid sha256:0b90f23af62305ba960d6f3da9c8fe5215a78ee5d6503478786db71489b3f020
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:152cf5e1f9adf3dc2c608dbb3e394e09940ab42688cdb5a07d89f1cccb7f89a5
+oid sha256:d4d0f57336dd58c2282a758b1873df2644647c71e8296b0dab58cb3a9f5f7c78
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": NaN,
   "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.02696871628910464,
+  "epoch": 0.05393743257820928,
   "eval_steps": 50,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -58,6 +58,49 @@
       "eval_samples_per_second": 59.243,
       "eval_steps_per_second": 14.868,
       "step": 50
+    },
+    {
+      "epoch": 0.032362459546925564,
+      "grad_norm": NaN,
+      "learning_rate": 6.992307692307692e-06,
+      "loss": 0.0,
+      "step": 60
+    },
+    {
+      "epoch": 0.037756202804746494,
+      "grad_norm": NaN,
+      "learning_rate": 6.215384615384615e-06,
+      "loss": 0.0,
+      "step": 70
+    },
+    {
+      "epoch": 0.043149946062567425,
+      "grad_norm": NaN,
+      "learning_rate": 5.438461538461538e-06,
+      "loss": 0.0,
+      "step": 80
+    },
+    {
+      "epoch": 0.04854368932038835,
+      "grad_norm": NaN,
+      "learning_rate": 4.661538461538462e-06,
+      "loss": 0.0,
+      "step": 90
+    },
+    {
+      "epoch": 0.05393743257820928,
+      "grad_norm": NaN,
+      "learning_rate": 3.884615384615385e-06,
+      "loss": 0.0,
+      "step": 100
+    },
+    {
+      "epoch": 0.05393743257820928,
+      "eval_loss": NaN,
+      "eval_runtime": 13.1871,
+      "eval_samples_per_second": 59.225,
+      "eval_steps_per_second": 14.863,
+      "step": 100
     }
   ],
   "logging_steps": 10,
@@ -72,7 +115,7 @@
         "early_stopping_threshold": 0.0
       },
       "attributes": {
-        "early_stopping_patience_counter": 0
+        "early_stopping_patience_counter": 1
       }
     },
     "TrainerControl": {
@@ -86,7 +129,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2552462321909760.0,
+  "total_flos": 5054876362997760.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null