{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.50038480758667,
"min": 1.4463025331497192,
"max": 3.2957005500793457,
"count": 1951
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 30487.8203125,
"min": 18397.40625,
"max": 158768.234375,
"count": 1951
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 61.160493827160494,
"min": 41.63793103448276,
"max": 999.0,
"count": 1951
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19816.0,
"min": 14264.0,
"max": 27236.0,
"count": 1951
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1662.573386692991,
"min": 1187.7017999143338,
"max": 1703.9293699092589,
"count": 1944
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 269336.88864426455,
"min": 2376.7834953756137,
"max": 377645.862949851,
"count": 1944
},
"SoccerTwos.Step.mean": {
"value": 19509902.0,
"min": 9634.0,
"max": 19509902.0,
"count": 1951
},
"SoccerTwos.Step.sum": {
"value": 19509902.0,
"min": 9634.0,
"max": 19509902.0,
"count": 1951
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.004716084338724613,
"min": -0.13077238202095032,
"max": 0.23248226940631866,
"count": 1951
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.7640056610107422,
"min": -26.023704528808594,
"max": 27.897872924804688,
"count": 1951
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0025035864673554897,
"min": -0.13233692944049835,
"max": 0.23026040196418762,
"count": 1951
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.405580997467041,
"min": -26.33504867553711,
"max": 27.631248474121094,
"count": 1951
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1951
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1951
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0590913560655382,
"min": -0.5454545454545454,
"max": 0.49807326569415555,
"count": 1951
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 9.572799682617188,
"min": -62.55239987373352,
"max": 52.70700013637543,
"count": 1951
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0590913560655382,
"min": -0.5454545454545454,
"max": 0.49807326569415555,
"count": 1951
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 9.572799682617188,
"min": -62.55239987373352,
"max": 52.70700013637543,
"count": 1951
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1951
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1951
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.0203987649991177,
"min": 0.010457840793969808,
"max": 0.025209849560633303,
"count": 945
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.0203987649991177,
"min": 0.010457840793969808,
"max": 0.025209849560633303,
"count": 945
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10580266863107682,
"min": 0.00023729713696714802,
"max": 0.1228718139231205,
"count": 945
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10580266863107682,
"min": 0.00023729713696714802,
"max": 0.1228718139231205,
"count": 945
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10765738462408384,
"min": 0.00024293343982814501,
"max": 0.12517891004681586,
"count": 945
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10765738462408384,
"min": 0.00024293343982814501,
"max": 0.12517891004681586,
"count": 945
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 945
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 945
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 945
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 945
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 945
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 945
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676901432",
"python_version": "3.9.16 (main, Jan 11 2023, 16:05:54) \n[GCC 11.2.0]",
"command_line_arguments": "/home/besa/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1",
"numpy_version": "1.21.2",
"end_time_seconds": "1676923073"
},
"total": 21640.979151537,
"count": 1,
"self": 0.1469642060037586,
"children": {
"run_training.setup": {
"total": 0.007614835999902425,
"count": 1,
"self": 0.007614835999902425
},
"TrainerController.start_learning": {
"total": 21640.824572495,
"count": 1,
"self": 26.142664178252744,
"children": {
"TrainerController._reset_env": {
"total": 3.028352016009876,
"count": 98,
"self": 3.028352016009876
},
"TrainerController.advance": {
"total": 21611.53240772474,
"count": 1343175,
"self": 25.12081788350406,
"children": {
"env_step": {
"total": 15813.638102928096,
"count": 1343175,
"self": 11907.680019729087,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3889.6485960010004,
"count": 1343175,
"self": 112.11267394682454,
"children": {
"TorchPolicy.evaluate": {
"total": 3777.535922054176,
"count": 2453406,
"self": 3777.535922054176
}
}
},
"workers": {
"total": 16.309487198009492,
"count": 1343174,
"self": 0.0,
"children": {
"worker_root": {
"total": 21612.656639202512,
"count": 1343174,
"is_parallel": true,
"self": 12094.475062970465,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017050439998911315,
"count": 2,
"is_parallel": true,
"self": 0.00046933500016166363,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001235708999729468,
"count": 8,
"is_parallel": true,
"self": 0.001235708999729468
}
}
},
"UnityEnvironment.step": {
"total": 0.02268114899993634,
"count": 1,
"is_parallel": true,
"self": 0.00046165399999154033,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00034718300003078184,
"count": 1,
"is_parallel": true,
"self": 0.00034718300003078184
},
"communicator.exchange": {
"total": 0.020472824999842487,
"count": 1,
"is_parallel": true,
"self": 0.020472824999842487
},
"steps_from_proto": {
"total": 0.001399487000071531,
"count": 2,
"is_parallel": true,
"self": 0.0002858620000552037,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011136250000163272,
"count": 8,
"is_parallel": true,
"self": 0.0011136250000163272
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 9518.028528892033,
"count": 1343173,
"is_parallel": true,
"self": 566.6882429159596,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 352.21802995038206,
"count": 1343173,
"is_parallel": true,
"self": 352.21802995038206
},
"communicator.exchange": {
"total": 6962.769831656675,
"count": 1343173,
"is_parallel": true,
"self": 6962.769831656675
},
"steps_from_proto": {
"total": 1636.3524243690176,
"count": 2686346,
"is_parallel": true,
"self": 317.62920514360985,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1318.7232192254078,
"count": 10745384,
"is_parallel": true,
"self": 1318.7232192254078
}
}
}
}
},
"steps_from_proto": {
"total": 0.15304734001392717,
"count": 194,
"is_parallel": true,
"self": 0.030386975050532783,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.12266036496339439,
"count": 776,
"is_parallel": true,
"self": 0.12266036496339439
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5772.773486913136,
"count": 1343174,
"self": 185.6607692266134,
"children": {
"process_trajectory": {
"total": 2217.7470853194845,
"count": 1343174,
"self": 2213.118357621486,
"children": {
"RLTrainer._checkpoint": {
"total": 4.628727697998329,
"count": 39,
"self": 4.628727697998329
}
}
},
"_update_policy": {
"total": 3369.3656323670384,
"count": 945,
"self": 1959.0800218040945,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1410.285610562944,
"count": 28353,
"self": 1410.285610562944
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.8860009731724858e-06,
"count": 1,
"self": 1.8860009731724858e-06
},
"TrainerController._save_models": {
"total": 0.12114668999856804,
"count": 1,
"self": 0.0012847439975303132,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11986194600103772,
"count": 1,
"self": 0.11986194600103772
}
}
}
}
}
}
}