{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.273122549057007,
"min": 3.273122549057007,
"max": 3.2956995964050293,
"count": 7
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 88819.453125,
"min": 36824.328125,
"max": 105462.390625,
"count": 7
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 986.3333333333334,
"min": 349.0,
"max": 999.0,
"count": 7
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 23672.0,
"min": 1396.0,
"max": 31968.0,
"count": 7
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1198.320921595067,
"min": 1197.273575898641,
"max": 1199.3173316810933,
"count": 6
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4793.283686380268,
"min": 2394.547151797282,
"max": 14377.498515805344,
"count": 6
},
"SoccerTwos.Step.mean": {
"value": 69978.0,
"min": 9354.0,
"max": 69978.0,
"count": 7
},
"SoccerTwos.Step.sum": {
"value": 69978.0,
"min": 9354.0,
"max": 69978.0,
"count": 7
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.029395604506134987,
"min": -0.029395604506134987,
"max": -0.027391143143177032,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.4703296720981598,
"min": -0.4703296720981598,
"max": -0.27665016055107117,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.027215536683797836,
"min": -0.027613306418061256,
"max": -0.025287168100476265,
"count": 5
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.4354485869407654,
"min": -0.4354485869407654,
"max": -0.2761234641075134,
"count": 5
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 7
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 7
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.08109090544960716,
"min": -0.38461538461538464,
"max": 0.3400000110268593,
"count": 7
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -0.8919999599456787,
"min": -5.045199990272522,
"max": 5.4400001764297485,
"count": 7
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.08109090544960716,
"min": -0.38461538461538464,
"max": 0.3400000110268593,
"count": 7
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -0.8919999599456787,
"min": -5.045199990272522,
"max": 5.4400001764297485,
"count": 7
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.020737362704433812,
"min": 0.019029662696647437,
"max": 0.020737362704433812,
"count": 2
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.020737362704433812,
"min": 0.019029662696647437,
"max": 0.020737362704433812,
"count": 2
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0020418568774836048,
"min": 0.0020418568774836048,
"max": 0.0020418568774836048,
"count": 1
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0020418568774836048,
"min": 0.0020418568774836048,
"max": 0.0020418568774836048,
"count": 1
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.001893633756221139,
"min": 0.001893633756221139,
"max": 0.001893633756221139,
"count": 1
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.001893633756221139,
"min": 0.001893633756221139,
"max": 0.001893633756221139,
"count": 1
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 2
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 2
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740688151",
"python_version": "3.10.12 (main, Jul 5 2023, 15:02:25) [Clang 14.0.6 ]",
"command_line_arguments": "/Users/amir/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0",
"numpy_version": "1.23.5",
"end_time_seconds": "1740688431"
},
"total": 280.2057829590012,
"count": 1,
"self": 0.224106584002584,
"children": {
"run_training.setup": {
"total": 0.022819750000053318,
"count": 1,
"self": 0.022819750000053318
},
"TrainerController.start_learning": {
"total": 279.9588566249986,
"count": 1,
"self": 0.0579930067397072,
"children": {
"TrainerController._reset_env": {
"total": 3.7447732920009003,
"count": 1,
"self": 3.7447732920009003
},
"TrainerController.advance": {
"total": 276.02421032625716,
"count": 5001,
"self": 0.053040216935187345,
"children": {
"env_step": {
"total": 225.45828245407756,
"count": 5001,
"self": 215.46659168500264,
"children": {
"SubprocessEnvManager._take_step": {
"total": 9.953332595054235,
"count": 5001,
"self": 0.24310704026720487,
"children": {
"TorchPolicy.evaluate": {
"total": 9.71022555478703,
"count": 9958,
"self": 9.71022555478703
}
}
},
"workers": {
"total": 0.03835817402068642,
"count": 5000,
"self": 0.0,
"children": {
"worker_root": {
"total": 276.7622806810941,
"count": 5000,
"is_parallel": true,
"self": 69.43057033927471,
"children": {
"steps_from_proto": {
"total": 0.001644916002987884,
"count": 2,
"is_parallel": true,
"self": 0.0002262499947391916,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014186660082486924,
"count": 8,
"is_parallel": true,
"self": 0.0014186660082486924
}
}
},
"UnityEnvironment.step": {
"total": 207.33006542581643,
"count": 5000,
"is_parallel": true,
"self": 0.6085860606945062,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.5735817189524823,
"count": 5000,
"is_parallel": true,
"self": 3.5735817189524823
},
"communicator.exchange": {
"total": 195.70611232615192,
"count": 5000,
"is_parallel": true,
"self": 195.70611232615192
},
"steps_from_proto": {
"total": 7.441785320017516,
"count": 10000,
"is_parallel": true,
"self": 0.8515734473185148,
"children": {
"_process_rank_one_or_two_observation": {
"total": 6.590211872699001,
"count": 40000,
"is_parallel": true,
"self": 6.590211872699001
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 50.51288765524441,
"count": 5000,
"self": 0.45996707937592873,
"children": {
"process_trajectory": {
"total": 7.4487364088709,
"count": 5000,
"self": 7.4487364088709
},
"_update_policy": {
"total": 42.604184166997584,
"count": 3,
"self": 4.489408828998421,
"children": {
"TorchPOCAOptimizer.update": {
"total": 38.11477533799916,
"count": 102,
"self": 38.11477533799916
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13188000000081956,
"count": 1,
"self": 2.0459003280848265e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13185954099753872,
"count": 1,
"self": 0.13185954099753872
}
}
}
}
}
}
}