infogep committed
Commit 690c02b (verified)
1 parent: 25e373b

Training in progress, step 100, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f36846934f00f1a43b88f2b423df5c7fb81d806bee717541a5c991361348e938
+oid sha256:23ec1a37f0f59fc6a05e9063f15da20a868ccbdb2dafe7ad28de654b4c53e73d
 size 198011252
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bff53100a06c7d9ec5de5d4d36860f8b452a856e09537e514ef940c12752dcb0
+oid sha256:facf61e19608174386a390900e13d0aa47506868d94138ed8774a017c650c68e
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:152cf5e1f9adf3dc2c608dbb3e394e09940ab42688cdb5a07d89f1cccb7f89a5
+oid sha256:d4d0f57336dd58c2282a758b1873df2644647c71e8296b0dab58cb3a9f5f7c78
 size 1064
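
optimizer.pt, rng_state.pth, and scheduler.pt are Git LFS pointer files, so each checkpoint commit only rewrites the sha256 oid while the byte size stays the same; the tensors themselves live in LFS storage. As a minimal sketch, assuming the LFS objects have been fetched (e.g. via git lfs pull) and that rng_state.pth follows the usual per-source layout (an assumption, not something this diff shows), the saved state can be inspected with PyTorch:

import torch

CKPT_DIR = "last-checkpoint"  # directory updated by this commit

# Load the raw state dicts written at step 100. torch.load only works on the
# real files after the LFS objects have been fetched; the pointer files above
# are just the three-line version/oid/size records shown in the diff.
optimizer_state = torch.load(f"{CKPT_DIR}/optimizer.pt", map_location="cpu")
scheduler_state = torch.load(f"{CKPT_DIR}/scheduler.pt", map_location="cpu")
rng_state = torch.load(f"{CKPT_DIR}/rng_state.pth", map_location="cpu")

# Inspect what was checkpointed. To actually resume, these dicts would be fed to
# optimizer.load_state_dict(...) / scheduler.load_state_dict(...) on objects that
# match the original training setup, which is not part of this repository.
print(list(optimizer_state))   # typically "state" and "param_groups"
print(scheduler_state)         # LR scheduler state (e.g. last step and last lr)
print(list(rng_state))         # per-source RNG states; exact keys are an assumption
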
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": NaN,
   "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.037091988130563795,
+  "epoch": 0.07418397626112759,
   "eval_steps": 50,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -58,6 +58,49 @@
       "eval_samples_per_second": 30.608,
       "eval_steps_per_second": 7.652,
       "step": 50
+    },
+    {
+      "epoch": 0.04451038575667656,
+      "grad_norm": NaN,
+      "learning_rate": 6.992307692307692e-06,
+      "loss": 0.0,
+      "step": 60
+    },
+    {
+      "epoch": 0.05192878338278932,
+      "grad_norm": NaN,
+      "learning_rate": 6.215384615384615e-06,
+      "loss": 0.0,
+      "step": 70
+    },
+    {
+      "epoch": 0.05934718100890208,
+      "grad_norm": NaN,
+      "learning_rate": 5.438461538461538e-06,
+      "loss": 0.0,
+      "step": 80
+    },
+    {
+      "epoch": 0.06676557863501484,
+      "grad_norm": NaN,
+      "learning_rate": 4.661538461538462e-06,
+      "loss": 0.0,
+      "step": 90
+    },
+    {
+      "epoch": 0.07418397626112759,
+      "grad_norm": NaN,
+      "learning_rate": 3.884615384615385e-06,
+      "loss": 0.0,
+      "step": 100
+    },
+    {
+      "epoch": 0.07418397626112759,
+      "eval_loss": NaN,
+      "eval_runtime": 18.5563,
+      "eval_samples_per_second": 30.61,
+      "eval_steps_per_second": 7.652,
+      "step": 100
     }
   ],
   "logging_steps": 10,
@@ -72,7 +115,7 @@
         "early_stopping_threshold": 0.0
       },
       "attributes": {
-        "early_stopping_patience_counter": 0
+        "early_stopping_patience_counter": 1
       }
     },
     "TrainerControl": {
@@ -86,7 +129,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 7166368520601600.0,
+  "total_flos": 1.43327370412032e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null