poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5225673913955688,
"min": 1.5225673913955688,
"max": 3.2957441806793213,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 29330.73828125,
"min": 5167.7197265625,
"max": 113668.515625,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 68.85915492957747,
"min": 52.236559139784944,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19556.0,
"min": 10664.0,
"max": 30600.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1564.132412604713,
"min": 1195.7582650991158,
"max": 1585.6010603712803,
"count": 997
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 222106.80258986924,
"min": 2391.6356690095045,
"max": 280817.67556786153,
"count": 997
},
"SoccerTwos.Step.mean": {
"value": 9999807.0,
"min": 9318.0,
"max": 9999807.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999807.0,
"min": 9318.0,
"max": 9999807.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.002926863729953766,
"min": -0.12521712481975555,
"max": 0.1654333770275116,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.41561466455459595,
"min": -17.57196807861328,
"max": 20.498958587646484,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0026975253131240606,
"min": -0.12621068954467773,
"max": 0.16394340991973877,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.3830485939979553,
"min": -17.406097412109375,
"max": 19.141633987426758,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.09942816890461344,
"min": -0.5555555555555556,
"max": 0.5586203336715698,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 14.118799984455109,
"min": -60.59880006313324,
"max": 50.470799803733826,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.09942816890461344,
"min": -0.5555555555555556,
"max": 0.5586203336715698,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 14.118799984455109,
"min": -60.59880006313324,
"max": 50.470799803733826,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01872166230416042,
"min": 0.014943431938481808,
"max": 0.03510042153124232,
"count": 242
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01872166230416042,
"min": 0.014943431938481808,
"max": 0.03510042153124232,
"count": 242
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09321882776916027,
"min": 0.0013188533706124871,
"max": 0.09674498125910759,
"count": 242
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09321882776916027,
"min": 0.0013188533706124871,
"max": 0.09674498125910759,
"count": 242
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09311030834913253,
"min": 0.001806568963220343,
"max": 0.09683528937399387,
"count": 242
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09311030834913253,
"min": 0.001806568963220343,
"max": 0.09683528937399387,
"count": 242
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 1.993699601279983e-06,
"min": 1.993699601279983e-06,
"max": 0.0004976956004608802,
"count": 242
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 1.993699601279983e-06,
"min": 1.993699601279983e-06,
"max": 0.0004976956004608802,
"count": 242
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.10039872000000001,
"min": 0.10039872000000001,
"max": 0.19953912000000007,
"count": 242
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.10039872000000001,
"min": 0.10039872000000001,
"max": 0.19953912000000007,
"count": 242
},
"SoccerTwos.Policy.Beta.mean": {
"value": 2.9896127999999837e-05,
"min": 2.9896127999999837e-05,
"max": 0.004977002087999999,
"count": 242
},
"SoccerTwos.Policy.Beta.sum": {
"value": 2.9896127999999837e-05,
"min": 2.9896127999999837e-05,
"max": 0.004977002087999999,
"count": 242
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740300544",
"python_version": "3.10.12 (main, Jul 5 2023, 15:02:25) [Clang 14.0.6 ]",
"command_line_arguments": "/opt/homebrew/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos-2.yaml --env=../SoccerTwos/SoccerTwos.app --run-id=SoccerTwos2 --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0",
"numpy_version": "1.23.5",
"end_time_seconds": "1740334107"
},
"total": 33562.44418449997,
"count": 1,
"self": 0.20067595795262605,
"children": {
"run_training.setup": {
"total": 0.01644987502368167,
"count": 1,
"self": 0.01644987502368167
},
"TrainerController.start_learning": {
"total": 33562.22705866699,
"count": 1,
"self": 4.7262668830226175,
"children": {
"TrainerController._reset_env": {
"total": 5.692300462920684,
"count": 100,
"self": 5.692300462920684
},
"TrainerController.advance": {
"total": 33551.71988719603,
"count": 682481,
"self": 4.18010855675675,
"children": {
"env_step": {
"total": 24651.01519800315,
"count": 682481,
"self": 23826.049291290867,
"children": {
"SubprocessEnvManager._take_step": {
"total": 821.7446218503756,
"count": 682481,
"self": 24.649129757133778,
"children": {
"TorchPolicy.evaluate": {
"total": 797.0954920932418,
"count": 1266852,
"self": 797.0954920932418
}
}
},
"workers": {
"total": 3.2212848619092256,
"count": 682481,
"self": 0.0,
"children": {
"worker_root": {
"total": 33552.37349840597,
"count": 682481,
"is_parallel": true,
"self": 10406.242987696955,
"children": {
"steps_from_proto": {
"total": 0.12810837605502456,
"count": 200,
"is_parallel": true,
"self": 0.014584886142984033,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.11352348991204053,
"count": 800,
"is_parallel": true,
"self": 0.11352348991204053
}
}
},
"UnityEnvironment.step": {
"total": 23146.00240233296,
"count": 682481,
"is_parallel": true,
"self": 60.65305095887743,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 403.53046373667894,
"count": 682481,
"is_parallel": true,
"self": 403.53046373667894
},
"communicator.exchange": {
"total": 21884.70877407695,
"count": 682481,
"is_parallel": true,
"self": 21884.70877407695
},
"steps_from_proto": {
"total": 797.1101135604549,
"count": 1364962,
"is_parallel": true,
"self": 86.14840883447323,
"children": {
"_process_rank_one_or_two_observation": {
"total": 710.9617047259817,
"count": 5459848,
"is_parallel": true,
"self": 710.9617047259817
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8896.52458063612,
"count": 682481,
"self": 40.46444412658457,
"children": {
"process_trajectory": {
"total": 1188.7088194735115,
"count": 682481,
"self": 1186.8568672216497,
"children": {
"RLTrainer._checkpoint": {
"total": 1.8519522518618032,
"count": 20,
"self": 1.8519522518618032
}
}
},
"_update_policy": {
"total": 7667.351317036024,
"count": 242,
"self": 871.9001685880357,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6795.451148447988,
"count": 24210,
"self": 6795.451148447988
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.3300602808594704e-07,
"count": 1,
"self": 3.3300602808594704e-07
},
"TrainerController._save_models": {
"total": 0.08860379201360047,
"count": 1,
"self": 0.001809125009458512,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08679466700414196,
"count": 1,
"self": 0.08679466700414196
}
}
}
}
}
}
}
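
For reference, a minimal sketch of how a file with this structure could be inspected in Python; the path run_logs/timers.json is an assumption, so adjust it to wherever the file actually lives:

import json

# Load the metric/timer dump written by mlagents-learn
# (path is assumed here, not taken from the file itself).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the latest value plus min/max across `count` updates.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4g} "
          f"min={gauge['min']:.4g} max={gauge['max']:.4g} (n={gauge['count']})")

# Total wall-clock time for the run, in seconds.
print("total seconds:", timers["total"])

The nested "children" entries under the root timer (for example TrainerController.advance and env_step) can be walked recursively in the same way to see where the wall-clock time was spent.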