{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.11865365505218506,
"min": 0.10541042685508728,
"max": 1.4739055633544922,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 3527.3359375,
"min": 3167.37255859375,
"max": 44712.3984375,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999877.0,
"min": 29952.0,
"max": 2999877.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999877.0,
"min": 29952.0,
"max": 2999877.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7665059566497803,
"min": -0.2936858534812927,
"max": 0.8855694532394409,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 231.48480224609375,
"min": -69.60354614257812,
"max": 267.74505615234375,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.008230501785874367,
"min": -0.00638219527900219,
"max": 0.4062139093875885,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.4856114387512207,
"min": -1.6338419914245605,
"max": 97.89755249023438,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06905125242476406,
"min": 0.06332516432774035,
"max": 0.07390458639689945,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9667175339466969,
"min": 0.46313406405375,
"max": 1.0899484995655562,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017052337334635445,
"min": 0.00041890551688544353,
"max": 0.017052337334635445,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23873272268489623,
"min": 0.003770149651968992,
"max": 0.24165693658013718,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4869137901095207e-06,
"min": 1.4869137901095207e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.081679306153329e-05,
"min": 2.081679306153329e-05,
"max": 0.004010769063077,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10049560476190478,
"min": 0.10049560476190478,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4069384666666669,
"min": 1.3962282666666668,
"max": 2.8275326333333326,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.951091571428563e-05,
"min": 5.951091571428563e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008331528199999989,
"min": 0.0008331528199999989,
"max": 0.1336986077,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00911895651370287,
"min": 0.008289322257041931,
"max": 0.435276061296463,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12766538560390472,
"min": 0.11605051159858704,
"max": 3.0469324588775635,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 220.56204379562044,
"min": 206.25174825174824,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30217.0,
"min": 15984.0,
"max": 32692.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.765347811460927,
"min": -1.0000000521540642,
"max": 1.7914130324902742,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 243.6179979816079,
"min": -30.72320168465376,
"max": 251.5047977566719,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.765347811460927,
"min": -1.0000000521540642,
"max": 1.7914130324902742,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 243.6179979816079,
"min": -30.72320168465376,
"max": 251.5047977566719,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.020827621046945737,
"min": 0.019732272974846744,
"max": 8.042547635734081,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.874211704478512,
"min": 2.5694642106536776,
"max": 128.6807621717453,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677414577",
"python_version": "3.7.12 | packaged by conda-forge | (default, Oct 26 2021, 06:08:53) \n[GCC 9.4.0]",
"command_line_arguments": "/opt/conda/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training_big --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1677424419"
},
"total": 9842.678765529,
"count": 1,
"self": 0.4795583190025354,
"children": {
"run_training.setup": {
"total": 0.8911535219999678,
"count": 1,
"self": 0.8911535219999678
},
"TrainerController.start_learning": {
"total": 9841.308053687999,
"count": 1,
"self": 7.903677182308456,
"children": {
"TrainerController._reset_env": {
"total": 11.629287737999903,
"count": 1,
"self": 11.629287737999903
},
"TrainerController.advance": {
"total": 9821.66706515569,
"count": 195510,
"self": 8.540107526465363,
"children": {
"env_step": {
"total": 6535.337809279264,
"count": 195510,
"self": 6105.571293547024,
"children": {
"SubprocessEnvManager._take_step": {
"total": 425.12835970008825,
"count": 195510,
"self": 22.061832125323235,
"children": {
"TorchPolicy.evaluate": {
"total": 403.066527574765,
"count": 187541,
"self": 85.2255642740588,
"children": {
"TorchPolicy.sample_actions": {
"total": 317.8409633007062,
"count": 187541,
"self": 317.8409633007062
}
}
}
}
},
"workers": {
"total": 4.638156032151983,
"count": 195510,
"self": 0.0,
"children": {
"worker_root": {
"total": 9827.289793181038,
"count": 195510,
"is_parallel": true,
"self": 4265.744621056255,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006846225000003869,
"count": 1,
"is_parallel": true,
"self": 0.004471972999908758,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023742520000951117,
"count": 8,
"is_parallel": true,
"self": 0.0023742520000951117
}
}
},
"UnityEnvironment.step": {
"total": 0.053986686000030204,
"count": 1,
"is_parallel": true,
"self": 0.0005002090000516546,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000490298999920924,
"count": 1,
"is_parallel": true,
"self": 0.000490298999920924
},
"communicator.exchange": {
"total": 0.05140145300003951,
"count": 1,
"is_parallel": true,
"self": 0.05140145300003951
},
"steps_from_proto": {
"total": 0.0015947250000181157,
"count": 1,
"is_parallel": true,
"self": 0.00046503400005803996,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011296909999600757,
"count": 8,
"is_parallel": true,
"self": 0.0011296909999600757
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5561.545172124784,
"count": 195509,
"is_parallel": true,
"self": 129.13871392320743,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.85391634096595,
"count": 195509,
"is_parallel": true,
"self": 79.85391634096595
},
"communicator.exchange": {
"total": 5003.162076848672,
"count": 195509,
"is_parallel": true,
"self": 5003.162076848672
},
"steps_from_proto": {
"total": 349.390465011938,
"count": 195509,
"is_parallel": true,
"self": 91.99124409877277,
"children": {
"_process_rank_one_or_two_observation": {
"total": 257.3992209131652,
"count": 1564072,
"is_parallel": true,
"self": 257.3992209131652
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3277.7891483499616,
"count": 195510,
"self": 15.45308893200854,
"children": {
"process_trajectory": {
"total": 706.9838165629701,
"count": 195510,
"self": 706.2673064459711,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7165101169989612,
"count": 6,
"self": 0.7165101169989612
}
}
},
"_update_policy": {
"total": 2555.352242854983,
"count": 1398,
"self": 881.4208923539818,
"children": {
"TorchPPOOptimizer.update": {
"total": 1673.9313505010014,
"count": 68373,
"self": 1673.9313505010014
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3999997463542968e-06,
"count": 1,
"self": 1.3999997463542968e-06
},
"TrainerController._save_models": {
"total": 0.10802221199992346,
"count": 1,
"self": 0.002791353001157404,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10523085899876605,
"count": 1,
"self": 0.10523085899876605
}
}
}
}
}
}
}
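
The object above is the run_logs/timers.json that ML-Agents writes at the end of training (here PPO with an RND curiosity signal on the Pyramids environment, per the command line recorded under "metadata"). As a minimal sketch of how to inspect it, the snippet below loads the file and prints a few of the recorded gauges; the local path "run_logs/timers.json" is an illustrative assumption, and each gauge exposes the same value/min/max/count fields shown above.

import json

# Sketch: summarize a few of the training gauges recorded above.
# The path is an assumption for illustration; point it at wherever the file lives.
with open("run_logs/timers.json") as f:
    stats = json.load(f)

for key in (
    "Pyramids.Step.mean",
    "Pyramids.Environment.CumulativeReward.mean",
    "Pyramids.Policy.Entropy.mean",
    "Pyramids.Policy.LearningRate.mean",
):
    g = stats["gauges"][key]
    print(f"{key}: value={g['value']:.4g} min={g['min']:.4g} max={g['max']:.4g} count={g['count']}")

For this run the cumulative-reward gauge ends near 1.77 after roughly 3.0M environment steps, while the learning rate was annealed from about 3.0e-4 down to 1.5e-6 over the same span.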
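The "total"/"count"/"self"/"children" entries in the lower half of the object are ML-Agents' hierarchical timer profile for the same run. A small sketch for reading it, again assuming the file is saved as run_logs/timers.json (the 2% threshold and the walk helper are illustrative choices, not part of ML-Agents):

import json

# Sketch: walk the timer tree and report where the wall-clock time went.
def walk(node, name="root", depth=0, grand_total=None):
    total = node.get("total", 0.0)
    if grand_total is None:
        grand_total = total
    if grand_total and total / grand_total >= 0.02:  # skip printing blocks under 2% of the run
        print(f"{'  ' * depth}{name}: {total:,.1f} s ({100 * total / grand_total:.1f}%), calls={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1, grand_total)

with open("run_logs/timers.json") as f:
    walk(json.load(f))

On this run most of the wall clock goes to stepping the Unity executable: communicator.exchange alone accounts for about 5,003 s (recorded in the parallel worker subtree) out of the ~9,841 s spent in TrainerController.start_learning, while TorchPPOOptimizer.update takes roughly 1,674 s. The root total of ~9,842.7 s also matches the metadata timestamps (1677424419 - 1677414577 = 9,842 s).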