{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 50,
"global_step": 222,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06756756756756757,
"grad_norm": 18.415670170963544,
"learning_rate": 5e-07,
"loss": 1.7484,
"step": 5
},
{
"epoch": 0.13513513513513514,
"grad_norm": 12.979363840068762,
"learning_rate": 1e-06,
"loss": 1.6,
"step": 10
},
{
"epoch": 0.20270270270270271,
"grad_norm": 7.702031949468892,
"learning_rate": 9.98628141419305e-07,
"loss": 1.3118,
"step": 15
},
{
"epoch": 0.2702702702702703,
"grad_norm": 3.8773275511437153,
"learning_rate": 9.94520093661082e-07,
"loss": 1.1295,
"step": 20
},
{
"epoch": 0.33783783783783783,
"grad_norm": 4.237402807418457,
"learning_rate": 9.876983993675989e-07,
"loss": 1.1044,
"step": 25
},
{
"epoch": 0.40540540540540543,
"grad_norm": 3.642461133839574,
"learning_rate": 9.78200492138261e-07,
"loss": 1.0105,
"step": 30
},
{
"epoch": 0.47297297297297297,
"grad_norm": 3.608837853456925,
"learning_rate": 9.66078491115194e-07,
"loss": 0.991,
"step": 35
},
{
"epoch": 0.5405405405405406,
"grad_norm": 3.79449822480748,
"learning_rate": 9.513989149828717e-07,
"loss": 0.9674,
"step": 40
},
{
"epoch": 0.6081081081081081,
"grad_norm": 3.6384041234003823,
"learning_rate": 9.342423169512071e-07,
"loss": 0.9507,
"step": 45
},
{
"epoch": 0.6756756756756757,
"grad_norm": 3.47968028739859,
"learning_rate": 9.147028427251009e-07,
"loss": 0.9386,
"step": 50
},
{
"epoch": 0.6756756756756757,
"eval_loss": 0.9245508313179016,
"eval_runtime": 36.7866,
"eval_samples_per_second": 57.086,
"eval_steps_per_second": 0.897,
"step": 50
},
{
"epoch": 0.7432432432432432,
"grad_norm": 3.488579700124589,
"learning_rate": 8.928877138860706e-07,
"loss": 0.946,
"step": 55
},
{
"epoch": 0.8108108108108109,
"grad_norm": 3.3962328364213157,
"learning_rate": 8.689166395208636e-07,
"loss": 0.9186,
"step": 60
},
{
"epoch": 0.8783783783783784,
"grad_norm": 3.719808626989404,
"learning_rate": 8.429211593257052e-07,
"loss": 0.9203,
"step": 65
},
{
"epoch": 0.9459459459459459,
"grad_norm": 3.449706597029694,
"learning_rate": 8.150439217908556e-07,
"loss": 0.9104,
"step": 70
},
{
"epoch": 1.0135135135135136,
"grad_norm": 3.767188694894111,
"learning_rate": 7.854379014263876e-07,
"loss": 0.8925,
"step": 75
},
{
"epoch": 1.0810810810810811,
"grad_norm": 3.530860850867838,
"learning_rate": 7.542655593246103e-07,
"loss": 0.8628,
"step": 80
},
{
"epoch": 1.1486486486486487,
"grad_norm": 3.839676176721922,
"learning_rate": 7.216979516654943e-07,
"loss": 0.8289,
"step": 85
},
{
"epoch": 1.2162162162162162,
"grad_norm": 3.629466135685692,
"learning_rate": 6.87913791057119e-07,
"loss": 0.8346,
"step": 90
},
{
"epoch": 1.2837837837837838,
"grad_norm": 3.758252497307305,
"learning_rate": 6.530984658619733e-07,
"loss": 0.8442,
"step": 95
},
{
"epoch": 1.3513513513513513,
"grad_norm": 3.6510886764874098,
"learning_rate": 6.174430228904919e-07,
"loss": 0.8263,
"step": 100
},
{
"epoch": 1.3513513513513513,
"eval_loss": 0.8765044808387756,
"eval_runtime": 36.4454,
"eval_samples_per_second": 57.62,
"eval_steps_per_second": 0.905,
"step": 100
},
{
"epoch": 1.4189189189189189,
"grad_norm": 3.5720282153519287,
"learning_rate": 5.8114311904423e-07,
"loss": 0.8266,
"step": 105
},
{
"epoch": 1.4864864864864864,
"grad_norm": 3.362670058366568,
"learning_rate": 5.443979476614674e-07,
"loss": 0.8094,
"step": 110
},
{
"epoch": 1.554054054054054,
"grad_norm": 3.725544496637925,
"learning_rate": 5.074091454568463e-07,
"loss": 0.8107,
"step": 115
},
{
"epoch": 1.6216216216216215,
"grad_norm": 3.399706437395112,
"learning_rate": 4.703796860531429e-07,
"loss": 0.8112,
"step": 120
},
{
"epoch": 1.689189189189189,
"grad_norm": 3.4945021536853718,
"learning_rate": 4.3351276617684285e-07,
"loss": 0.8197,
"step": 125
},
{
"epoch": 1.7567567567567568,
"grad_norm": 3.423613980661302,
"learning_rate": 3.970106906294509e-07,
"loss": 0.8158,
"step": 130
},
{
"epoch": 1.8243243243243243,
"grad_norm": 3.4537863970020117,
"learning_rate": 3.610737621531781e-07,
"loss": 0.8297,
"step": 135
},
{
"epoch": 1.8918918918918919,
"grad_norm": 3.4702507620367875,
"learning_rate": 3.2589918228280066e-07,
"loss": 0.8031,
"step": 140
},
{
"epoch": 1.9594594594594594,
"grad_norm": 3.3271823791904884,
"learning_rate": 2.916799692151884e-07,
"loss": 0.8016,
"step": 145
},
{
"epoch": 2.027027027027027,
"grad_norm": 3.9798718651465848,
"learning_rate": 2.5860389863462763e-07,
"loss": 0.7963,
"step": 150
},
{
"epoch": 2.027027027027027,
"eval_loss": 0.8589270710945129,
"eval_runtime": 36.48,
"eval_samples_per_second": 57.566,
"eval_steps_per_second": 0.905,
"step": 150
},
{
"epoch": 2.0945945945945947,
"grad_norm": 3.542726689032495,
"learning_rate": 2.2685247330608414e-07,
"loss": 0.7712,
"step": 155
},
{
"epoch": 2.1621621621621623,
"grad_norm": 3.4799701202917244,
"learning_rate": 1.9659992709070344e-07,
"loss": 0.7662,
"step": 160
},
{
"epoch": 2.22972972972973,
"grad_norm": 3.594791003920676,
"learning_rate": 1.6801226884893893e-07,
"loss": 0.7538,
"step": 165
},
{
"epoch": 2.2972972972972974,
"grad_norm": 3.5480210206929637,
"learning_rate": 1.412463714778343e-07,
"loss": 0.7731,
"step": 170
},
{
"epoch": 2.364864864864865,
"grad_norm": 3.5836803836525606,
"learning_rate": 1.1644911108130434e-07,
"loss": 0.7792,
"step": 175
},
{
"epoch": 2.4324324324324325,
"grad_norm": 3.6501667972586693,
"learning_rate": 9.375656099715934e-08,
"loss": 0.757,
"step": 180
},
{
"epoch": 2.5,
"grad_norm": 3.686290348200621,
"learning_rate": 7.329324510360269e-08,
"loss": 0.7426,
"step": 185
},
{
"epoch": 2.5675675675675675,
"grad_norm": 3.574089956841002,
"learning_rate": 5.517145450262639e-08,
"loss": 0.754,
"step": 190
},
{
"epoch": 2.635135135135135,
"grad_norm": 3.6761723022132617,
"learning_rate": 3.9490631329964554e-08,
"loss": 0.7604,
"step": 195
},
{
"epoch": 2.7027027027027026,
"grad_norm": 3.5287319124762004,
"learning_rate": 2.63368230729043e-08,
"loss": 0.7574,
"step": 200
},
{
"epoch": 2.7027027027027026,
"eval_loss": 0.8600885272026062,
"eval_runtime": 36.5528,
"eval_samples_per_second": 57.451,
"eval_steps_per_second": 0.903,
"step": 200
},
{
"epoch": 2.77027027027027,
"grad_norm": 3.4745956970229845,
"learning_rate": 1.5782210390350713e-08,
"loss": 0.7485,
"step": 205
},
{
"epoch": 2.8378378378378377,
"grad_norm": 3.5410239237454446,
"learning_rate": 7.884711026201584e-09,
"loss": 0.7526,
"step": 210
},
{
"epoch": 2.9054054054054053,
"grad_norm": 3.717018997325894,
"learning_rate": 2.687661989531964e-09,
"loss": 0.7473,
"step": 215
},
{
"epoch": 2.972972972972973,
"grad_norm": 3.6243019256758044,
"learning_rate": 2.1958174560282594e-10,
"loss": 0.7482,
"step": 220
},
{
"epoch": 3.0,
"step": 222,
"total_flos": 1308778674782208.0,
"train_loss": 0.8914520751248609,
"train_runtime": 3092.5546,
"train_samples_per_second": 18.334,
"train_steps_per_second": 0.072
}
],
"logging_steps": 5,
"max_steps": 222,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1308778674782208.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}