{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.25369173288345337,
"min": 0.25369173288345337,
"max": 0.6018595695495605,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 7578.27978515625,
"min": 7578.27978515625,
"max": 18248.3828125,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 262.2413793103448,
"min": 253.47787610619469,
"max": 518.3214285714286,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30420.0,
"min": 16386.0,
"max": 32195.0,
"count": 33
},
"Pyramids.Step.mean": {
"value": 1979962.0,
"min": 1019995.0,
"max": 1979962.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 1979962.0,
"min": 1019995.0,
"max": 1979962.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6653643846511841,
"min": 0.33284303545951843,
"max": 0.7204941511154175,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 194.28640747070312,
"min": 60.96426010131836,
"max": 208.94329833984375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.00930364616215229,
"min": -0.009778780862689018,
"max": 0.018503595143556595,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.7166647911071777,
"min": -2.620713233947754,
"max": 5.384546279907227,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7205154999833683,
"min": 1.2316142600029707,
"max": 1.7465221175050314,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 199.57979799807072,
"min": 59.611199140548706,
"max": 201.0467983186245,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7205154999833683,
"min": 1.2316142600029707,
"max": 1.7465221175050314,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 199.57979799807072,
"min": 59.611199140548706,
"max": 201.0467983186245,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02222495211917076,
"min": 0.02222495211917076,
"max": 0.06723571608868692,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.578094445823808,
"min": 2.1225532993084926,
"max": 4.228889076592168,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.04914928098183332,
"min": 0.043349091670473655,
"max": 0.052878177308982866,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.2457464049091666,
"min": 0.1586345319269486,
"max": 0.258863853989169,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01729716836060915,
"min": 0.013927941858613244,
"max": 0.019205977144237194,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.08648584180304573,
"min": 0.044174738196827074,
"max": 0.09559099505551986,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 5.110238296619997e-06,
"min": 5.110238296619997e-06,
"max": 0.00014813580062141666,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.5551191483099982e-05,
"min": 2.5551191483099982e-05,
"max": 0.0007220291593237,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10170338000000001,
"min": 0.10170338000000001,
"max": 0.14937858333333334,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.5085169,
"min": 0.43093954999999995,
"max": 0.7406763000000001,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0001801676619999999,
"min": 0.0001801676619999999,
"max": 0.0049429204749999985,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0009008383099999995,
"min": 0.0009008383099999995,
"max": 0.024093562370000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0083088343963027,
"min": 0.008252114057540894,
"max": 0.013184490613639355,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.041544172912836075,
"min": 0.03425419330596924,
"max": 0.06364284455776215,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677356378",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677358858"
},
"total": 2480.2627164839996,
"count": 1,
"self": 0.4748123349991147,
"children": {
"run_training.setup": {
"total": 0.1431849840000723,
"count": 1,
"self": 0.1431849840000723
},
"TrainerController.start_learning": {
"total": 2479.6447191650004,
"count": 1,
"self": 1.4335110277315835,
"children": {
"TrainerController._reset_env": {
"total": 6.13497211000049,
"count": 1,
"self": 6.13497211000049
},
"TrainerController.advance": {
"total": 2471.8491541672674,
"count": 64964,
"self": 1.5000755995179134,
"children": {
"env_step": {
"total": 1815.7038514351125,
"count": 64964,
"self": 1695.797473493496,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.04096162380847,
"count": 64964,
"self": 4.827547457716719,
"children": {
"TorchPolicy.evaluate": {
"total": 114.21341416609175,
"count": 62553,
"self": 36.645060465072675,
"children": {
"TorchPolicy.sample_actions": {
"total": 77.56835370101908,
"count": 62553,
"self": 77.56835370101908
}
}
}
}
},
"workers": {
"total": 0.8654163178080125,
"count": 64964,
"self": 0.0,
"children": {
"worker_root": {
"total": 2473.8393299622276,
"count": 64964,
"is_parallel": true,
"self": 897.1545604761104,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017807859994718456,
"count": 1,
"is_parallel": true,
"self": 0.0006256019996726536,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001155183999799192,
"count": 8,
"is_parallel": true,
"self": 0.001155183999799192
}
}
},
"UnityEnvironment.step": {
"total": 0.050205796000227565,
"count": 1,
"is_parallel": true,
"self": 0.0005408490005720523,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004524260002654046,
"count": 1,
"is_parallel": true,
"self": 0.0004524260002654046
},
"communicator.exchange": {
"total": 0.04752084099982312,
"count": 1,
"is_parallel": true,
"self": 0.04752084099982312
},
"steps_from_proto": {
"total": 0.0016916799995669862,
"count": 1,
"is_parallel": true,
"self": 0.0004301779990782961,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00126150200048869,
"count": 8,
"is_parallel": true,
"self": 0.00126150200048869
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1576.6847694861171,
"count": 64963,
"is_parallel": true,
"self": 31.85400165772262,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.00679651914561,
"count": 64963,
"is_parallel": true,
"self": 23.00679651914561
},
"communicator.exchange": {
"total": 1427.6538177152506,
"count": 64963,
"is_parallel": true,
"self": 1427.6538177152506
},
"steps_from_proto": {
"total": 94.1701535939983,
"count": 64963,
"is_parallel": true,
"self": 22.45351872144056,
"children": {
"_process_rank_one_or_two_observation": {
"total": 71.71663487255773,
"count": 519704,
"is_parallel": true,
"self": 71.71663487255773
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 654.645227132637,
"count": 64964,
"self": 2.937748130804721,
"children": {
"process_trajectory": {
"total": 171.1994958168225,
"count": 64964,
"self": 170.8176565058211,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3818393110013858,
"count": 2,
"self": 0.3818393110013858
}
}
},
"_update_policy": {
"total": 480.50798318500983,
"count": 161,
"self": 217.61159531405883,
"children": {
"TorchPPOOptimizer.update": {
"total": 262.896387870951,
"count": 11529,
"self": 262.896387870951
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.360010153613985e-07,
"count": 1,
"self": 9.360010153613985e-07
},
"TrainerController._save_models": {
"total": 0.22708092399989255,
"count": 1,
"self": 0.010736704000009922,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21634421999988263,
"count": 1,
"self": 0.21634421999988263
}
}
}
}
}
}
}