{ "best_metric": 0.6502026915550232, "best_model_checkpoint": "gemma2_on_korean_summary/checkpoint-220", "epoch": 2.8947368421052633, "eval_steps": 20, "global_step": 220, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.26, "grad_norm": 0.9722719192504883, "learning_rate": 2e-05, "loss": 1.1522, "step": 20 }, { "epoch": 0.26, "eval_loss": 1.033677101135254, "eval_runtime": 209.4976, "eval_samples_per_second": 0.955, "eval_steps_per_second": 0.477, "step": 20 }, { "epoch": 0.53, "grad_norm": 1.436105489730835, "learning_rate": 4e-05, "loss": 0.9522, "step": 40 }, { "epoch": 0.53, "eval_loss": 0.8600569367408752, "eval_runtime": 206.2633, "eval_samples_per_second": 0.97, "eval_steps_per_second": 0.485, "step": 40 }, { "epoch": 0.79, "grad_norm": 1.6157077550888062, "learning_rate": 4.933333333333334e-05, "loss": 0.8405, "step": 60 }, { "epoch": 0.79, "eval_loss": 0.7875047326087952, "eval_runtime": 207.822, "eval_samples_per_second": 0.962, "eval_steps_per_second": 0.481, "step": 60 }, { "epoch": 1.05, "grad_norm": 1.4430443048477173, "learning_rate": 4.8e-05, "loss": 0.7687, "step": 80 }, { "epoch": 1.05, "eval_loss": 0.7403695583343506, "eval_runtime": 210.2054, "eval_samples_per_second": 0.951, "eval_steps_per_second": 0.476, "step": 80 }, { "epoch": 1.32, "grad_norm": 1.640892505645752, "learning_rate": 4.666666666666667e-05, "loss": 0.6901, "step": 100 }, { "epoch": 1.32, "eval_loss": 0.7110423445701599, "eval_runtime": 206.6708, "eval_samples_per_second": 0.968, "eval_steps_per_second": 0.484, "step": 100 }, { "epoch": 1.58, "grad_norm": 1.6705243587493896, "learning_rate": 4.5333333333333335e-05, "loss": 0.6588, "step": 120 }, { "epoch": 1.58, "eval_loss": 0.6947363018989563, "eval_runtime": 208.6184, "eval_samples_per_second": 0.959, "eval_steps_per_second": 0.479, "step": 120 }, { "epoch": 1.84, "grad_norm": 1.7401701211929321, "learning_rate": 4.4000000000000006e-05, "loss": 0.6698, "step": 140 }, { "epoch": 1.84, "eval_loss": 0.6739406585693359, "eval_runtime": 207.3092, "eval_samples_per_second": 0.965, "eval_steps_per_second": 0.482, "step": 140 }, { "epoch": 2.11, "grad_norm": 1.804783582687378, "learning_rate": 4.266666666666667e-05, "loss": 0.6075, "step": 160 }, { "epoch": 2.11, "eval_loss": 0.6695161461830139, "eval_runtime": 206.5895, "eval_samples_per_second": 0.968, "eval_steps_per_second": 0.484, "step": 160 }, { "epoch": 2.37, "grad_norm": 2.2475624084472656, "learning_rate": 4.133333333333333e-05, "loss": 0.5221, "step": 180 }, { "epoch": 2.37, "eval_loss": 0.6713693141937256, "eval_runtime": 209.2432, "eval_samples_per_second": 0.956, "eval_steps_per_second": 0.478, "step": 180 }, { "epoch": 2.63, "grad_norm": 2.268982172012329, "learning_rate": 4e-05, "loss": 0.4865, "step": 200 }, { "epoch": 2.63, "eval_loss": 0.6660403609275818, "eval_runtime": 205.9824, "eval_samples_per_second": 0.971, "eval_steps_per_second": 0.485, "step": 200 }, { "epoch": 2.89, "grad_norm": 2.3548617362976074, "learning_rate": 3.866666666666667e-05, "loss": 0.5239, "step": 220 }, { "epoch": 2.89, "eval_loss": 0.6502026915550232, "eval_runtime": 206.4589, "eval_samples_per_second": 0.969, "eval_steps_per_second": 0.484, "step": 220 } ], "logging_steps": 20, "max_steps": 800, "num_input_tokens_seen": 0, "num_train_epochs": 11, "save_steps": 20, "total_flos": 1.917650992083763e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }