{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.014035481697731866,
  "eval_steps": 63,
  "global_step": 250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 5.614192679092747e-05,
      "eval_loss": NaN,
      "eval_runtime": 202.2991,
      "eval_samples_per_second": 18.537,
      "eval_steps_per_second": 9.268,
      "step": 1
    },
    {
      "epoch": 0.0005614192679092747,
      "grad_norm": NaN,
      "learning_rate": 0.00019979453927503364,
      "loss": 0.0783,
      "step": 10
    },
    {
      "epoch": 0.0011228385358185494,
      "grad_norm": NaN,
      "learning_rate": 0.00019815591569910654,
      "loss": 0.0586,
      "step": 20
    },
    {
      "epoch": 0.0016842578037278239,
      "grad_norm": NaN,
      "learning_rate": 0.00019490557470106686,
      "loss": 0.1994,
      "step": 30
    },
    {
      "epoch": 0.0022456770716370988,
      "grad_norm": NaN,
      "learning_rate": 0.0001900968867902419,
      "loss": 0.0,
      "step": 40
    },
    {
      "epoch": 0.002807096339546373,
      "grad_norm": NaN,
      "learning_rate": 0.00018380881048918405,
      "loss": 0.0,
      "step": 50
    },
    {
      "epoch": 0.0033685156074556477,
      "grad_norm": NaN,
      "learning_rate": 0.00017614459583691346,
      "loss": 0.0,
      "step": 60
    },
    {
      "epoch": 0.0035369413878284304,
      "eval_loss": NaN,
      "eval_runtime": 201.4736,
      "eval_samples_per_second": 18.613,
      "eval_steps_per_second": 9.306,
      "step": 63
    },
    {
      "epoch": 0.003929934875364922,
      "grad_norm": NaN,
      "learning_rate": 0.0001672300890261317,
      "loss": 0.139,
      "step": 70
    },
    {
      "epoch": 0.0044913541432741975,
      "grad_norm": NaN,
      "learning_rate": 0.00015721166601221698,
      "loss": 0.0,
      "step": 80
    },
    {
      "epoch": 0.005052773411183472,
      "grad_norm": NaN,
      "learning_rate": 0.00014625382902408356,
      "loss": 0.1583,
      "step": 90
    },
    {
      "epoch": 0.005614192679092746,
      "grad_norm": NaN,
      "learning_rate": 0.00013453650544213076,
      "loss": 1.9159,
      "step": 100
    },
    {
      "epoch": 0.006175611947002021,
      "grad_norm": NaN,
      "learning_rate": 0.00012225209339563145,
      "loss": 0.265,
      "step": 110
    },
    {
      "epoch": 0.0067370312149112955,
      "grad_norm": NaN,
      "learning_rate": 0.00010960230259076818,
      "loss": 0.0,
      "step": 120
    },
    {
      "epoch": 0.007073882775656861,
      "eval_loss": NaN,
      "eval_runtime": 201.5026,
      "eval_samples_per_second": 18.61,
      "eval_steps_per_second": 9.305,
      "step": 126
    },
    {
      "epoch": 0.007298450482820571,
      "grad_norm": NaN,
      "learning_rate": 9.679484224283449e-05,
      "loss": 0.4924,
      "step": 130
    },
    {
      "epoch": 0.007859869750729845,
      "grad_norm": NaN,
      "learning_rate": 8.404001049666211e-05,
      "loss": 0.0,
      "step": 140
    },
    {
      "epoch": 0.00842128901863912,
      "grad_norm": NaN,
      "learning_rate": 7.154724133689677e-05,
      "loss": 0.0956,
      "step": 150
    },
    {
      "epoch": 0.008982708286548395,
      "grad_norm": NaN,
      "learning_rate": 5.952166568776062e-05,
      "loss": 0.042,
      "step": 160
    },
    {
      "epoch": 0.009544127554457668,
      "grad_norm": NaN,
      "learning_rate": 4.8160743168947496e-05,
      "loss": 0.6566,
      "step": 170
    },
    {
      "epoch": 0.010105546822366944,
      "grad_norm": NaN,
      "learning_rate": 3.7651019814126654e-05,
      "loss": 0.0,
      "step": 180
    },
    {
      "epoch": 0.010610824163485291,
      "eval_loss": NaN,
      "eval_runtime": 201.5094,
      "eval_samples_per_second": 18.61,
      "eval_steps_per_second": 9.305,
      "step": 189
    },
    {
      "epoch": 0.010666966090276219,
      "grad_norm": NaN,
      "learning_rate": 2.8165064990227252e-05,
      "loss": 0.0509,
      "step": 190
    },
    {
      "epoch": 0.011228385358185492,
      "grad_norm": NaN,
      "learning_rate": 1.985863781320435e-05,
      "loss": 0.0291,
      "step": 200
    },
    {
      "epoch": 0.011789804626094767,
      "grad_norm": NaN,
      "learning_rate": 1.286812958766106e-05,
      "loss": 0.0899,
      "step": 210
    },
    {
      "epoch": 0.012351223894004042,
      "grad_norm": NaN,
      "learning_rate": 7.308324265397836e-06,
      "loss": 0.0206,
      "step": 220
    },
    {
      "epoch": 0.012912643161913318,
      "grad_norm": NaN,
      "learning_rate": 3.270513696097055e-06,
      "loss": 0.0575,
      "step": 230
    },
    {
      "epoch": 0.013474062429822591,
      "grad_norm": NaN,
      "learning_rate": 8.209986176753948e-07,
      "loss": 0.0,
      "step": 240
    },
    {
      "epoch": 0.014035481697731866,
      "grad_norm": NaN,
      "learning_rate": 0.0,
      "loss": 0.1379,
      "step": 250
    }
  ],
  "logging_steps": 10,
  "max_steps": 250,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 63,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.814564604097331e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}