{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.14657598499061913,
  "eval_steps": 500,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0029315196998123826,
      "grad_norm": 0.7248865962028503,
      "learning_rate": 4.9868042930033434e-05,
      "loss": 1.3593,
      "step": 100
    },
    {
      "epoch": 0.005863039399624765,
      "grad_norm": 0.3698488771915436,
      "learning_rate": 4.972142396340391e-05,
      "loss": 1.3285,
      "step": 200
    },
    {
      "epoch": 0.008794559099437148,
      "grad_norm": 0.24848727881908417,
      "learning_rate": 4.957480499677439e-05,
      "loss": 1.324,
      "step": 300
    },
    {
      "epoch": 0.01172607879924953,
      "grad_norm": 0.2648352086544037,
      "learning_rate": 4.942818603014486e-05,
      "loss": 1.3145,
      "step": 400
    },
    {
      "epoch": 0.014657598499061914,
      "grad_norm": 0.3995356559753418,
      "learning_rate": 4.928156706351534e-05,
      "loss": 1.3141,
      "step": 500
    },
    {
      "epoch": 0.017589118198874296,
      "grad_norm": 0.32299748063087463,
      "learning_rate": 4.913494809688581e-05,
      "loss": 1.3087,
      "step": 600
    },
    {
      "epoch": 0.02052063789868668,
      "grad_norm": 0.33237338066101074,
      "learning_rate": 4.898832913025629e-05,
      "loss": 1.3071,
      "step": 700
    },
    {
      "epoch": 0.02345215759849906,
      "grad_norm": 0.2716487646102905,
      "learning_rate": 4.8841710163626766e-05,
      "loss": 1.3048,
      "step": 800
    },
    {
      "epoch": 0.026383677298311446,
      "grad_norm": 0.8654017448425293,
      "learning_rate": 4.869509119699725e-05,
      "loss": 1.3087,
      "step": 900
    },
    {
      "epoch": 0.02931519699812383,
      "grad_norm": 0.27945375442504883,
      "learning_rate": 4.854847223036773e-05,
      "loss": 1.3015,
      "step": 1000
    },
    {
      "epoch": 0.03224671669793621,
      "grad_norm": 0.27838173508644104,
      "learning_rate": 4.84018532637382e-05,
      "loss": 1.2998,
      "step": 1100
    },
    {
      "epoch": 0.03517823639774859,
      "grad_norm": 0.24157749116420746,
      "learning_rate": 4.8255234297108676e-05,
      "loss": 1.3016,
      "step": 1200
    },
    {
      "epoch": 0.038109756097560975,
      "grad_norm": 0.45793822407722473,
      "learning_rate": 4.810861533047915e-05,
      "loss": 1.2983,
      "step": 1300
    },
    {
      "epoch": 0.04104127579737336,
      "grad_norm": 0.1312064826488495,
      "learning_rate": 4.796199636384963e-05,
      "loss": 1.2978,
      "step": 1400
    },
    {
      "epoch": 0.04397279549718574,
      "grad_norm": 0.33062976598739624,
      "learning_rate": 4.7815377397220105e-05,
      "loss": 1.2972,
      "step": 1500
    },
    {
      "epoch": 0.04690431519699812,
      "grad_norm": 0.29819390177726746,
      "learning_rate": 4.766875843059058e-05,
      "loss": 1.2945,
      "step": 1600
    },
    {
      "epoch": 0.0498358348968105,
      "grad_norm": 0.3396131694316864,
      "learning_rate": 4.752213946396106e-05,
      "loss": 1.2921,
      "step": 1700
    },
    {
      "epoch": 0.05276735459662289,
      "grad_norm": 0.3231181502342224,
      "learning_rate": 4.737552049733154e-05,
      "loss": 1.2932,
      "step": 1800
    },
    {
      "epoch": 0.055698874296435275,
      "grad_norm": 0.4015660285949707,
      "learning_rate": 4.7228901530702015e-05,
      "loss": 1.2899,
      "step": 1900
    },
    {
      "epoch": 0.05863039399624766,
      "grad_norm": 0.436213880777359,
      "learning_rate": 4.708228256407249e-05,
      "loss": 1.2906,
      "step": 2000
    },
    {
      "epoch": 0.06156191369606004,
      "grad_norm": 0.3451833426952362,
      "learning_rate": 4.693566359744297e-05,
      "loss": 1.2884,
      "step": 2100
    },
    {
      "epoch": 0.06449343339587242,
      "grad_norm": 0.41890543699264526,
      "learning_rate": 4.6789044630813445e-05,
      "loss": 1.2892,
      "step": 2200
    },
    {
      "epoch": 0.0674249530956848,
      "grad_norm": 0.3117181062698364,
      "learning_rate": 4.664242566418392e-05,
      "loss": 1.2866,
      "step": 2300
    },
    {
      "epoch": 0.07035647279549719,
      "grad_norm": 0.2703840136528015,
      "learning_rate": 4.64958066975544e-05,
      "loss": 1.2869,
      "step": 2400
    },
    {
      "epoch": 0.07328799249530957,
      "grad_norm": 0.31400740146636963,
      "learning_rate": 4.6349187730924874e-05,
      "loss": 1.2844,
      "step": 2500
    },
    {
      "epoch": 0.07621951219512195,
      "grad_norm": 0.36265355348587036,
      "learning_rate": 4.620256876429535e-05,
      "loss": 1.2847,
      "step": 2600
    },
    {
      "epoch": 0.07915103189493433,
      "grad_norm": 0.32774218916893005,
      "learning_rate": 4.605594979766583e-05,
      "loss": 1.2889,
      "step": 2700
    },
    {
      "epoch": 0.08208255159474671,
      "grad_norm": 0.26954421401023865,
      "learning_rate": 4.590933083103631e-05,
      "loss": 1.2823,
      "step": 2800
    },
    {
      "epoch": 0.0850140712945591,
      "grad_norm": 0.4042912423610687,
      "learning_rate": 4.5762711864406784e-05,
      "loss": 1.2869,
      "step": 2900
    },
    {
      "epoch": 0.08794559099437148,
      "grad_norm": 0.28043991327285767,
      "learning_rate": 4.561609289777726e-05,
      "loss": 1.2851,
      "step": 3000
    },
    {
      "epoch": 0.09087711069418386,
      "grad_norm": 0.35115665197372437,
      "learning_rate": 4.546947393114774e-05,
      "loss": 1.2816,
      "step": 3100
    },
    {
      "epoch": 0.09380863039399624,
      "grad_norm": 0.3325822651386261,
      "learning_rate": 4.532285496451821e-05,
      "loss": 1.2821,
      "step": 3200
    },
    {
      "epoch": 0.09674015009380862,
      "grad_norm": 0.14176355302333832,
      "learning_rate": 4.517623599788869e-05,
      "loss": 1.2808,
      "step": 3300
    },
    {
      "epoch": 0.099671669793621,
      "grad_norm": 0.3418448567390442,
      "learning_rate": 4.502961703125916e-05,
      "loss": 1.2808,
      "step": 3400
    },
    {
      "epoch": 0.1026031894934334,
      "grad_norm": 0.3579727113246918,
      "learning_rate": 4.488299806462964e-05,
      "loss": 1.2768,
      "step": 3500
    },
    {
      "epoch": 0.10553470919324578,
      "grad_norm": 0.2470945566892624,
      "learning_rate": 4.473637909800012e-05,
      "loss": 1.2812,
      "step": 3600
    },
    {
      "epoch": 0.10846622889305817,
      "grad_norm": 0.32134631276130676,
      "learning_rate": 4.45897601313706e-05,
      "loss": 1.2797,
      "step": 3700
    },
    {
      "epoch": 0.11139774859287055,
      "grad_norm": 0.2982274889945984,
      "learning_rate": 4.444314116474108e-05,
      "loss": 1.2762,
      "step": 3800
    },
    {
      "epoch": 0.11432926829268293,
      "grad_norm": 0.2519618570804596,
      "learning_rate": 4.429652219811155e-05,
      "loss": 1.2767,
      "step": 3900
    },
    {
      "epoch": 0.11726078799249531,
      "grad_norm": 0.30841368436813354,
      "learning_rate": 4.4149903231482026e-05,
      "loss": 1.2765,
      "step": 4000
    },
    {
      "epoch": 0.1201923076923077,
      "grad_norm": 0.3413415551185608,
      "learning_rate": 4.40032842648525e-05,
      "loss": 1.274,
      "step": 4100
    },
    {
      "epoch": 0.12312382739212008,
      "grad_norm": 0.3912579119205475,
      "learning_rate": 4.385666529822298e-05,
      "loss": 1.2694,
      "step": 4200
    },
    {
      "epoch": 0.12605534709193245,
      "grad_norm": 0.39514127373695374,
      "learning_rate": 4.3710046331593455e-05,
      "loss": 1.2742,
      "step": 4300
    },
    {
      "epoch": 0.12898686679174484,
      "grad_norm": 0.5046316385269165,
      "learning_rate": 4.356342736496393e-05,
      "loss": 1.2723,
      "step": 4400
    },
    {
      "epoch": 0.1319183864915572,
      "grad_norm": 0.40433749556541443,
      "learning_rate": 4.341680839833441e-05,
      "loss": 1.2714,
      "step": 4500
    },
    {
      "epoch": 0.1348499061913696,
      "grad_norm": 0.41701552271842957,
      "learning_rate": 4.327018943170489e-05,
      "loss": 1.2686,
      "step": 4600
    },
    {
      "epoch": 0.137781425891182,
      "grad_norm": 0.6152161359786987,
      "learning_rate": 4.3123570465075365e-05,
      "loss": 1.2642,
      "step": 4700
    },
    {
      "epoch": 0.14071294559099437,
      "grad_norm": 0.6579223275184631,
      "learning_rate": 4.297695149844584e-05,
      "loss": 1.2648,
      "step": 4800
    },
    {
      "epoch": 0.14364446529080677,
      "grad_norm": 0.47039562463760376,
      "learning_rate": 4.283033253181632e-05,
      "loss": 1.2623,
      "step": 4900
    },
    {
      "epoch": 0.14657598499061913,
      "grad_norm": 0.5053458213806152,
      "learning_rate": 4.2683713565186795e-05,
      "loss": 1.2581,
      "step": 5000
    }
  ],
  "logging_steps": 100,
  "max_steps": 34112,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.184673878016e+16,
  "train_batch_size": 36,
  "trial_name": null,
  "trial_params": null
}