{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6313811540603638,
"min": 0.6034187078475952,
"max": 1.4409093856811523,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19103.068359375,
"min": 18025.32421875,
"max": 43711.42578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989979.0,
"min": 29952.0,
"max": 989979.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989979.0,
"min": 29952.0,
"max": 989979.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4560580849647522,
"min": -0.10676559805870056,
"max": 0.47675904631614685,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 121.31144714355469,
"min": -25.73050880432129,
"max": 128.72494506835938,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.011600414291024208,
"min": -0.011600414291024208,
"max": 0.5159286260604858,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.085710287094116,
"min": -3.085710287094116,
"max": 122.27507781982422,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06993448793322646,
"min": 0.0652113833164957,
"max": 0.07423224825233685,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9790828310651705,
"min": 0.5196257377663579,
"max": 1.0350402918217394,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014704336578558599,
"min": 0.0008042464154037947,
"max": 0.014704336578558599,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20586071209982038,
"min": 0.010455203400249332,
"max": 0.20586071209982038,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.68406172439285e-06,
"min": 7.68406172439285e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010757686414149991,
"min": 0.00010757686414149991,
"max": 0.0035075399308200994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256132142857144,
"min": 0.10256132142857144,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4358585000000001,
"min": 1.3886848,
"max": 2.5691799,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026587601071428554,
"min": 0.00026587601071428554,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037222641499999975,
"min": 0.0037222641499999975,
"max": 0.11694107201000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012524468824267387,
"min": 0.012524468824267387,
"max": 0.6499853134155273,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17534255981445312,
"min": 0.17534255981445312,
"max": 4.549897193908691,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 383.71232876712327,
"min": 383.71232876712327,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28011.0,
"min": 15984.0,
"max": 32730.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.561468472729807,
"min": -1.0000000521540642,
"max": 1.561468472729807,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 113.98719850927591,
"min": -30.72200171649456,
"max": 113.98719850927591,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.561468472729807,
"min": -1.0000000521540642,
"max": 1.561468472729807,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 113.98719850927591,
"min": -30.72200171649456,
"max": 113.98719850927591,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.050786195242636334,
"min": 0.050786195242636334,
"max": 13.118744229897857,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.7073922527124523,
"min": 3.7073922527124523,
"max": 209.8999076783657,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677046262",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1677048537"
},
"total": 2274.664001583,
"count": 1,
"self": 1.2393360470005064,
"children": {
"run_training.setup": {
"total": 0.11436614099989129,
"count": 1,
"self": 0.11436614099989129
},
"TrainerController.start_learning": {
"total": 2273.3102993949997,
"count": 1,
"self": 1.3646838680629116,
"children": {
"TrainerController._reset_env": {
"total": 7.055377204000024,
"count": 1,
"self": 7.055377204000024
},
"TrainerController.advance": {
"total": 2264.762320562936,
"count": 63558,
"self": 1.424368734868949,
"children": {
"env_step": {
"total": 1531.2463507810062,
"count": 63558,
"self": 1416.9655686490212,
"children": {
"SubprocessEnvManager._take_step": {
"total": 113.44411475696711,
"count": 63558,
"self": 4.609372502006863,
"children": {
"TorchPolicy.evaluate": {
"total": 108.83474225496025,
"count": 62569,
"self": 36.27760122697168,
"children": {
"TorchPolicy.sample_actions": {
"total": 72.55714102798856,
"count": 62569,
"self": 72.55714102798856
}
}
}
}
},
"workers": {
"total": 0.8366673750178961,
"count": 63558,
"self": 0.0,
"children": {
"worker_root": {
"total": 2268.2089421470323,
"count": 63558,
"is_parallel": true,
"self": 963.3069745310763,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017414640001334192,
"count": 1,
"is_parallel": true,
"self": 0.000648170000204118,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010932939999293012,
"count": 8,
"is_parallel": true,
"self": 0.0010932939999293012
}
}
},
"UnityEnvironment.step": {
"total": 0.07500119199994515,
"count": 1,
"is_parallel": true,
"self": 0.0006081899998662266,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004354430000148568,
"count": 1,
"is_parallel": true,
"self": 0.0004354430000148568
},
"communicator.exchange": {
"total": 0.07228820299997096,
"count": 1,
"is_parallel": true,
"self": 0.07228820299997096
},
"steps_from_proto": {
"total": 0.001669356000093103,
"count": 1,
"is_parallel": true,
"self": 0.0004230270005791681,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012463289995139348,
"count": 8,
"is_parallel": true,
"self": 0.0012463289995139348
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1304.901967615956,
"count": 63557,
"is_parallel": true,
"self": 30.926272155952574,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.230609080029808,
"count": 63557,
"is_parallel": true,
"self": 22.230609080029808
},
"communicator.exchange": {
"total": 1159.0714750829775,
"count": 63557,
"is_parallel": true,
"self": 1159.0714750829775
},
"steps_from_proto": {
"total": 92.67361129699611,
"count": 63557,
"is_parallel": true,
"self": 21.912204534977263,
"children": {
"_process_rank_one_or_two_observation": {
"total": 70.76140676201885,
"count": 508456,
"is_parallel": true,
"self": 70.76140676201885
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 732.0916010470607,
"count": 63558,
"self": 2.6004121770647544,
"children": {
"process_trajectory": {
"total": 160.82726048799304,
"count": 63558,
"self": 160.64144108099254,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18581940700050836,
"count": 2,
"self": 0.18581940700050836
}
}
},
"_update_policy": {
"total": 568.663928382003,
"count": 445,
"self": 216.8288978460057,
"children": {
"TorchPPOOptimizer.update": {
"total": 351.83503053599725,
"count": 22809,
"self": 351.83503053599725
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0310000106983352e-06,
"count": 1,
"self": 1.0310000106983352e-06
},
"TrainerController._save_models": {
"total": 0.12791672900038975,
"count": 1,
"self": 0.0018859690003409924,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12603076000004876,
"count": 1,
"self": 0.12603076000004876
}
}
}
}
}
}
}