{
"best_metric": 5.645811716715495,
"best_model_checkpoint": "./results/checkpoint-2748",
"epoch": 4.0,
"eval_steps": 500,
"global_step": 3664,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1091703056768559,
"grad_norm": 25.480337142944336,
"learning_rate": 2.959061135371179e-05,
"loss": 56.923,
"step": 100
},
{
"epoch": 0.2183406113537118,
"grad_norm": 39.87223815917969,
"learning_rate": 2.918122270742358e-05,
"loss": 46.6475,
"step": 200
},
{
"epoch": 0.32751091703056767,
"grad_norm": 48.05048751831055,
"learning_rate": 2.877183406113537e-05,
"loss": 33.6867,
"step": 300
},
{
"epoch": 0.4366812227074236,
"grad_norm": 31.941883087158203,
"learning_rate": 2.8362445414847164e-05,
"loss": 21.1084,
"step": 400
},
{
"epoch": 0.5458515283842795,
"grad_norm": 55.025856018066406,
"learning_rate": 2.7953056768558954e-05,
"loss": 12.9495,
"step": 500
},
{
"epoch": 0.6550218340611353,
"grad_norm": 34.957523345947266,
"learning_rate": 2.7543668122270742e-05,
"loss": 10.0745,
"step": 600
},
{
"epoch": 0.7641921397379913,
"grad_norm": 24.020906448364258,
"learning_rate": 2.7134279475982533e-05,
"loss": 8.3541,
"step": 700
},
{
"epoch": 0.8733624454148472,
"grad_norm": 32.709571838378906,
"learning_rate": 2.6724890829694323e-05,
"loss": 7.5128,
"step": 800
},
{
"epoch": 0.982532751091703,
"grad_norm": 38.94672393798828,
"learning_rate": 2.6315502183406114e-05,
"loss": 7.2241,
"step": 900
},
{
"epoch": 1.0,
"eval_avg_mae": 7.529487609863281,
"eval_loss": 7.529487609863281,
"eval_mae_lex": 6.992014408111572,
"eval_mae_sem": 5.432034492492676,
"eval_mae_syn": 10.164413452148438,
"eval_runtime": 27.1764,
"eval_samples_per_second": 269.609,
"eval_steps_per_second": 8.426,
"step": 916
},
{
"epoch": 1.091703056768559,
"grad_norm": 26.391666412353516,
"learning_rate": 2.5906113537117905e-05,
"loss": 6.7699,
"step": 1000
},
{
"epoch": 1.2008733624454149,
"grad_norm": 22.994029998779297,
"learning_rate": 2.5496724890829696e-05,
"loss": 6.5552,
"step": 1100
},
{
"epoch": 1.3100436681222707,
"grad_norm": 20.722883224487305,
"learning_rate": 2.5087336244541486e-05,
"loss": 6.5897,
"step": 1200
},
{
"epoch": 1.4192139737991267,
"grad_norm": 32.02668380737305,
"learning_rate": 2.4677947598253277e-05,
"loss": 6.5073,
"step": 1300
},
{
"epoch": 1.5283842794759825,
"grad_norm": 32.40359115600586,
"learning_rate": 2.4268558951965064e-05,
"loss": 6.4684,
"step": 1400
},
{
"epoch": 1.6375545851528384,
"grad_norm": 47.73025131225586,
"learning_rate": 2.3859170305676855e-05,
"loss": 6.3165,
"step": 1500
},
{
"epoch": 1.7467248908296944,
"grad_norm": 47.35511016845703,
"learning_rate": 2.344978165938865e-05,
"loss": 6.2866,
"step": 1600
},
{
"epoch": 1.8558951965065502,
"grad_norm": 44.51765441894531,
"learning_rate": 2.3040393013100437e-05,
"loss": 6.3404,
"step": 1700
},
{
"epoch": 1.965065502183406,
"grad_norm": 26.496959686279297,
"learning_rate": 2.2631004366812227e-05,
"loss": 6.1681,
"step": 1800
},
{
"epoch": 2.0,
"eval_avg_mae": 6.067600250244141,
"eval_loss": 6.067600250244141,
"eval_mae_lex": 5.595421314239502,
"eval_mae_sem": 4.1164045333862305,
"eval_mae_syn": 8.490975379943848,
"eval_runtime": 27.2193,
"eval_samples_per_second": 269.184,
"eval_steps_per_second": 8.413,
"step": 1832
},
{
"epoch": 2.074235807860262,
"grad_norm": 36.847415924072266,
"learning_rate": 2.2221615720524018e-05,
"loss": 5.9928,
"step": 1900
},
{
"epoch": 2.183406113537118,
"grad_norm": 37.08506393432617,
"learning_rate": 2.181222707423581e-05,
"loss": 5.9648,
"step": 2000
},
{
"epoch": 2.2925764192139737,
"grad_norm": 35.595909118652344,
"learning_rate": 2.1402838427947596e-05,
"loss": 5.8648,
"step": 2100
},
{
"epoch": 2.4017467248908297,
"grad_norm": 23.82405662536621,
"learning_rate": 2.099344978165939e-05,
"loss": 5.9043,
"step": 2200
},
{
"epoch": 2.5109170305676853,
"grad_norm": 30.872852325439453,
"learning_rate": 2.058406113537118e-05,
"loss": 5.8428,
"step": 2300
},
{
"epoch": 2.6200873362445414,
"grad_norm": 42.079261779785156,
"learning_rate": 2.0174672489082972e-05,
"loss": 5.8529,
"step": 2400
},
{
"epoch": 2.7292576419213974,
"grad_norm": 23.549190521240234,
"learning_rate": 1.976528384279476e-05,
"loss": 5.8328,
"step": 2500
},
{
"epoch": 2.8384279475982535,
"grad_norm": 32.223079681396484,
"learning_rate": 1.935589519650655e-05,
"loss": 5.8484,
"step": 2600
},
{
"epoch": 2.947598253275109,
"grad_norm": 25.67125129699707,
"learning_rate": 1.894650655021834e-05,
"loss": 5.5861,
"step": 2700
},
{
"epoch": 3.0,
"eval_avg_mae": 5.645811716715495,
"eval_loss": 5.645811080932617,
"eval_mae_lex": 4.994715213775635,
"eval_mae_sem": 3.6993861198425293,
"eval_mae_syn": 8.24333381652832,
"eval_runtime": 27.1472,
"eval_samples_per_second": 269.899,
"eval_steps_per_second": 8.435,
"step": 2748
},
{
"epoch": 3.056768558951965,
"grad_norm": 31.40544319152832,
"learning_rate": 1.8537117903930135e-05,
"loss": 5.4937,
"step": 2800
},
{
"epoch": 3.165938864628821,
"grad_norm": 28.67197608947754,
"learning_rate": 1.8127729257641922e-05,
"loss": 5.5573,
"step": 2900
},
{
"epoch": 3.2751091703056767,
"grad_norm": 26.671180725097656,
"learning_rate": 1.7718340611353713e-05,
"loss": 5.6147,
"step": 3000
},
{
"epoch": 3.3842794759825328,
"grad_norm": 32.73609924316406,
"learning_rate": 1.7308951965065504e-05,
"loss": 5.2704,
"step": 3100
},
{
"epoch": 3.493449781659389,
"grad_norm": 26.268295288085938,
"learning_rate": 1.689956331877729e-05,
"loss": 5.4946,
"step": 3200
},
{
"epoch": 3.6026200873362444,
"grad_norm": 24.3873233795166,
"learning_rate": 1.649017467248908e-05,
"loss": 5.5757,
"step": 3300
},
{
"epoch": 3.7117903930131004,
"grad_norm": 26.872316360473633,
"learning_rate": 1.6080786026200872e-05,
"loss": 5.3305,
"step": 3400
},
{
"epoch": 3.8209606986899565,
"grad_norm": 31.78321647644043,
"learning_rate": 1.5671397379912666e-05,
"loss": 5.4091,
"step": 3500
},
{
"epoch": 3.930131004366812,
"grad_norm": 37.95060729980469,
"learning_rate": 1.5262008733624454e-05,
"loss": 5.464,
"step": 3600
},
{
"epoch": 4.0,
"eval_avg_mae": 5.841625213623047,
"eval_loss": 5.841624736785889,
"eval_mae_lex": 5.572142124176025,
"eval_mae_sem": 3.7872631549835205,
"eval_mae_syn": 8.1654691696167,
"eval_runtime": 27.1382,
"eval_samples_per_second": 269.988,
"eval_steps_per_second": 8.438,
"step": 3664
}
],
"logging_steps": 100,
"max_steps": 7328,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7711064933354496.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
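
The JSON above is the trainer_state.json that the Hugging Face Trainer saves inside each checkpoint directory. As a minimal, stdlib-only sketch of how it can be inspected (the path checkpoint-3664/trainer_state.json is an assumption based on global_step 3664, not taken from the page), the snippet below reloads the log, prints the best checkpoint, and recomputes the average MAE from the three per-aspect MAEs (lex, sem, syn), which matches the logged eval_avg_mae values in this file.

import json
from statistics import mean

# Assumed location: the Trainer writes this file into each checkpoint folder;
# global_step here is 3664, so checkpoint-3664 is a plausible directory name.
with open("checkpoint-3664/trainer_state.json") as f:
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best checkpoint: {state['best_model_checkpoint']} "
      f"(best_metric={state['best_metric']:.4f})")

for e in eval_logs:
    # In these logs, eval_avg_mae equals the mean of the three per-aspect MAEs.
    recomputed = mean([e["eval_mae_lex"], e["eval_mae_sem"], e["eval_mae_syn"]])
    print(f"epoch {e['epoch']:.0f}: "
          f"avg_mae={e['eval_avg_mae']:.3f} (recomputed {recomputed:.3f}), "
          f"lex={e['eval_mae_lex']:.3f}, "
          f"sem={e['eval_mae_sem']:.3f}, "
          f"syn={e['eval_mae_syn']:.3f}")

# Training loss is logged every `logging_steps` (100) optimizer steps.
print("last logged (step, loss) pairs:",
      [(e["step"], e["loss"]) for e in train_logs][-3:])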