poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.073544979095459,
"min": 2.073544979095459,
"max": 3.295722723007202,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 40011.125,
"min": 17774.3203125,
"max": 123155.9765625,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 47.407766990291265,
"min": 42.71052631578947,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19532.0,
"min": 13104.0,
"max": 27908.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1530.5214059668333,
"min": 1197.338280397431,
"max": 1536.2752351188674,
"count": 455
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 315287.40962916764,
"min": 2395.6406482225407,
"max": 338069.3745882581,
"count": 455
},
"SoccerTwos.Step.mean": {
"value": 4999941.0,
"min": 9972.0,
"max": 4999941.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999941.0,
"min": 9972.0,
"max": 4999941.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.044154439121484756,
"min": -0.11047197878360748,
"max": 0.18677304685115814,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 9.139968872070312,
"min": -20.76873207092285,
"max": 31.017196655273438,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.04715830460190773,
"min": -0.09901896119117737,
"max": 0.17889966070652008,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 9.76176929473877,
"min": -18.71782875061035,
"max": 31.339508056640625,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.15214299464571304,
"min": -0.6037285732371467,
"max": 0.461738462631519,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -31.493599891662598,
"min": -52.06119978427887,
"max": 56.96719992160797,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.15214299464571304,
"min": -0.6037285732371467,
"max": 0.461738462631519,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -31.493599891662598,
"min": -52.06119978427887,
"max": 56.96719992160797,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019907069373099755,
"min": 0.010999497660668567,
"max": 0.022822708737415574,
"count": 237
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019907069373099755,
"min": 0.010999497660668567,
"max": 0.022822708737415574,
"count": 237
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09903378958503405,
"min": 1.4826956430624706e-06,
"max": 0.10611215382814407,
"count": 237
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09903378958503405,
"min": 1.4826956430624706e-06,
"max": 0.10611215382814407,
"count": 237
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10096097265680631,
"min": 2.2768891881241872e-06,
"max": 0.10855208511153856,
"count": 237
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10096097265680631,
"min": 2.2768891881241872e-06,
"max": 0.10855208511153856,
"count": 237
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 237
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 237
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 237
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 237
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 237
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 237
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740015763",
"python_version": "3.10.12 (main, Jul 5 2023, 15:34:07) [Clang 14.0.6 ]",
"command_line_arguments": "/Users/christopherried/miniconda/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2",
"numpy_version": "1.23.5",
"end_time_seconds": "1740034466"
},
"total": 18703.406426667,
"count": 1,
"self": 0.5488614579990099,
"children": {
"run_training.setup": {
"total": 0.24192266699901666,
"count": 1,
"self": 0.24192266699901666
},
"TrainerController.start_learning": {
"total": 18702.615642542,
"count": 1,
"self": 3.039812376129703,
"children": {
"TrainerController._reset_env": {
"total": 8.344496417003029,
"count": 25,
"self": 8.344496417003029
},
"TrainerController.advance": {
"total": 18691.150191248867,
"count": 335723,
"self": 3.059594879101496,
"children": {
"env_step": {
"total": 14839.703953800014,
"count": 335723,
"self": 14373.612442651409,
"children": {
"SubprocessEnvManager._take_step": {
"total": 463.8422457955021,
"count": 335723,
"self": 15.268253896187161,
"children": {
"TorchPolicy.evaluate": {
"total": 448.57399189931493,
"count": 637778,
"self": 448.57399189931493
}
}
},
"workers": {
"total": 2.249265353102601,
"count": 335723,
"self": 0.0,
"children": {
"worker_root": {
"total": 18691.782607329926,
"count": 335723,
"is_parallel": true,
"self": 4684.161519784264,
"children": {
"steps_from_proto": {
"total": 0.03717392300859501,
"count": 50,
"is_parallel": true,
"self": 0.004857292031374527,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.032316630977220484,
"count": 200,
"is_parallel": true,
"self": 0.032316630977220484
}
}
},
"UnityEnvironment.step": {
"total": 14007.583913622653,
"count": 335723,
"is_parallel": true,
"self": 37.47121835343751,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 247.11113200856562,
"count": 335723,
"is_parallel": true,
"self": 247.11113200856562
},
"communicator.exchange": {
"total": 13255.785749119525,
"count": 335723,
"is_parallel": true,
"self": 13255.785749119525
},
"steps_from_proto": {
"total": 467.2158141411255,
"count": 671446,
"is_parallel": true,
"self": 54.13088047097335,
"children": {
"_process_rank_one_or_two_observation": {
"total": 413.08493367015217,
"count": 2685784,
"is_parallel": true,
"self": 413.08493367015217
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3848.3866425697506,
"count": 335723,
"self": 26.067895859150667,
"children": {
"process_trajectory": {
"total": 583.286690374598,
"count": 335723,
"self": 582.4373964575916,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8492939170064346,
"count": 10,
"self": 0.8492939170064346
}
}
},
"_update_policy": {
"total": 3239.032056336002,
"count": 237,
"self": 321.79181848685766,
"children": {
"TorchPOCAOptimizer.update": {
"total": 2917.2402378491443,
"count": 7110,
"self": 2917.2402378491443
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.000001692678779e-07,
"count": 1,
"self": 5.000001692678779e-07
},
"TrainerController._save_models": {
"total": 0.08114199999909033,
"count": 1,
"self": 0.0006171659988467582,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08052483400024357,
"count": 1,
"self": 0.08052483400024357
}
}
}
}
}
}
}
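
The file above follows the standard ML-Agents timer format: a "gauges" map of training statistics (each with the latest value, min, max, and update count) and a nested timer tree whose nodes carry total seconds, call count, self time, and optional children. Below is a minimal sketch, not part of the run artifacts, of how one might summarize it with only the Python standard library; the local path "run_logs/timers.json" is an assumption about where the file is saved.

import json

# Assumed local path to the file shown above.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the latest value plus its min, max, and update count.
for name, gauge in sorted(timers["gauges"].items()):
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# The timer block is a tree: walk it depth-first and print each node's
# wall-clock total, its share of the root total, and its call count.
def walk(name, node, root_total, depth=0):
    share = 100.0 * node["total"] / root_total
    print(f"{'  ' * depth}{name}: {node['total']:.1f}s ({share:.1f}%), count={node['count']}")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, root_total, depth + 1)

walk(timers["name"], timers, timers["total"])

For this run, such a walk would show that most of the 18703 s total sits under env_step (communicator.exchange in particular), with _update_policy accounting for roughly 3239 s across 237 policy updates.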