{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3878195285797119,
"min": 0.3855995237827301,
"max": 0.4187244474887848,
"count": 7
},
"Pyramids.Policy.Entropy.sum": {
"value": 11603.560546875,
"min": 8532.943359375,
"max": 12642.12890625,
"count": 7
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 321.2087912087912,
"min": 264.78125,
"max": 351.15294117647056,
"count": 7
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29230.0,
"min": 16946.0,
"max": 32099.0,
"count": 7
},
"Pyramids.Step.mean": {
"value": 1199961.0,
"min": 1019965.0,
"max": 1199961.0,
"count": 7
},
"Pyramids.Step.sum": {
"value": 1199961.0,
"min": 1019965.0,
"max": 1199961.0,
"count": 7
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.594951868057251,
"min": 0.563068151473999,
"max": 0.6747236847877502,
"count": 7
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 167.77642822265625,
"min": 117.49083709716797,
"max": 192.97097778320312,
"count": 7
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.020892998203635216,
"min": -0.020892998203635216,
"max": 0.1663450300693512,
"count": 7
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -5.8918256759643555,
"min": -5.8918256759643555,
"max": 31.106521606445312,
"count": 7
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6592217233517896,
"min": 1.590455893226849,
"max": 1.735218738205731,
"count": 7
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 152.64839854836464,
"min": 111.05399924516678,
"max": 168.34579879045486,
"count": 7
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6592217233517896,
"min": 1.590455893226849,
"max": 1.735218738205731,
"count": 7
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 152.64839854836464,
"min": 111.05399924516678,
"max": 168.34579879045486,
"count": 7
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.027534870463964384,
"min": 0.02347926075117357,
"max": 0.030307473896148924,
"count": 7
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.533208082684723,
"min": 1.5026726880751085,
"max": 2.721572883456247,
"count": 7
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06903699719890331,
"min": 0.06500396157415318,
"max": 0.07014347480227505,
"count": 7
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0355549579835497,
"min": 0.6154614066666303,
"max": 1.0355549579835497,
"count": 7
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01606998879578896,
"min": 0.012300257131304716,
"max": 0.016534775501765707,
"count": 7
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24104983193683438,
"min": 0.1303693948769554,
"max": 0.24104983193683438,
"count": 7
},
"Pyramids.Policy.LearningRate.mean": {
"value": 3.725732091455558e-06,
"min": 3.725732091455558e-06,
"max": 4.737477865287963e-05,
"count": 7
},
"Pyramids.Policy.LearningRate.sum": {
"value": 5.588598137183337e-05,
"min": 5.588598137183337e-05,
"max": 0.0005783179572277499,
"count": 7
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10124187777777777,
"min": 0.10124187777777777,
"max": 0.11579156481481483,
"count": 7
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5186281666666666,
"min": 1.0421240833333334,
"max": 1.631316166666667,
"count": 7
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0001340635900000001,
"min": 0.0001340635900000001,
"max": 0.0015875773250000002,
"count": 7
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0020109538500000016,
"min": 0.0020109538500000016,
"max": 0.019397947775,
"count": 7
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008361710235476494,
"min": 0.008012756705284119,
"max": 0.008607222698628902,
"count": 7
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12542565166950226,
"min": 0.0763489156961441,
"max": 0.12542565166950226,
"count": 7
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678766396",
"python_version": "3.9.5 (tags/v3.9.5:0a7dcbd, May 3 2021, 17:27:52) [MSC v.1928 64 bit (AMD64)]",
"command_line_arguments": "D:\\Documents\\Tesis\\thesis_env\\Scripts\\mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cu117",
"numpy_version": "1.21.2",
"end_time_seconds": "1678766663"
},
"total": 267.3865055,
"count": 1,
"self": 0.21886089999998148,
"children": {
"run_training.setup": {
"total": 0.08111299999999999,
"count": 1,
"self": 0.08111299999999999
},
"TrainerController.start_learning": {
"total": 267.0865316,
"count": 1,
"self": 0.22487059999775738,
"children": {
"TrainerController._reset_env": {
"total": 4.0322518,
"count": 1,
"self": 4.0322518
},
"TrainerController.advance": {
"total": 262.7266952000022,
"count": 13063,
"self": 0.18488890000031688,
"children": {
"env_step": {
"total": 139.6591986000016,
"count": 13063,
"self": 104.98798670000139,
"children": {
"SubprocessEnvManager._take_step": {
"total": 34.54033080000055,
"count": 13063,
"self": 0.5879332999996407,
"children": {
"TorchPolicy.evaluate": {
"total": 33.952397500000906,
"count": 12563,
"self": 33.952397500000906
}
}
},
"workers": {
"total": 0.13088109999966413,
"count": 13063,
"self": 0.0,
"children": {
"worker_root": {
"total": 263.49024339999903,
"count": 13063,
"is_parallel": true,
"self": 171.8897700999985,
"children": {
"steps_from_proto": {
"total": 0.0007260000000002265,
"count": 1,
"is_parallel": true,
"self": 0.00017640000000040956,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005495999999998169,
"count": 8,
"is_parallel": true,
"self": 0.0005495999999998169
}
}
},
"UnityEnvironment.step": {
"total": 91.59974730000053,
"count": 13063,
"is_parallel": true,
"self": 2.975661800000111,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.0260214000000465,
"count": 13063,
"is_parallel": true,
"self": 2.0260214000000465
},
"communicator.exchange": {
"total": 78.60042420000096,
"count": 13063,
"is_parallel": true,
"self": 78.60042420000096
},
"steps_from_proto": {
"total": 7.997639899999415,
"count": 13063,
"is_parallel": true,
"self": 1.8950215000030557,
"children": {
"_process_rank_one_or_two_observation": {
"total": 6.102618399996359,
"count": 104504,
"is_parallel": true,
"self": 6.102618399996359
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 122.88260770000034,
"count": 13063,
"self": 0.44139130000050386,
"children": {
"process_trajectory": {
"total": 20.549448899999906,
"count": 13063,
"self": 20.549448899999906
},
"_update_policy": {
"total": 101.89176749999993,
"count": 95,
"self": 48.98610009999946,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.90566740000047,
"count": 4566,
"self": 52.90566740000047
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.000000330961484e-07,
"count": 1,
"self": 4.000000330961484e-07
},
"TrainerController._save_models": {
"total": 0.1027136000000155,
"count": 1,
"self": 0.006911699999989196,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09580190000002631,
"count": 1,
"self": 0.09580190000002631
}
}
}
}
}
}
}