{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.412527322769165,
"min": 1.412527322769165,
"max": 1.4286195039749146,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70504.890625,
"min": 69039.0,
"max": 76162.625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.61754385964912,
"min": 82.87248322147651,
"max": 399.0,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49372.0,
"min": 48796.0,
"max": 50249.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999952.0,
"min": 49673.0,
"max": 1999952.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999952.0,
"min": 49673.0,
"max": 1999952.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.417802333831787,
"min": 0.03630182892084122,
"max": 2.433789014816284,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1378.1473388671875,
"min": 4.501426696777344,
"max": 1381.04296875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8368468147620822,
"min": 1.7603933972817274,
"max": 3.951794030864316,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2187.0026844143867,
"min": 218.2887812629342,
"max": 2189.293893098831,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8368468147620822,
"min": 1.7603933972817274,
"max": 3.951794030864316,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2187.0026844143867,
"min": 218.2887812629342,
"max": 2189.293893098831,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017072539894757533,
"min": 0.012367759072609865,
"max": 0.01985546180876554,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0512176196842726,
"min": 0.02473551814521973,
"max": 0.057222999299907915,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05808950687448183,
"min": 0.02323387327293555,
"max": 0.060146518113712466,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1742685206234455,
"min": 0.048964376933872705,
"max": 0.1761999849230051,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7964487345500032e-06,
"min": 3.7964487345500032e-06,
"max": 0.00029532292655902497,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.138934620365001e-05,
"min": 1.138934620365001e-05,
"max": 0.0008440440186519999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10126544999999998,
"min": 0.10126544999999998,
"max": 0.19844097500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30379634999999994,
"min": 0.2077036,
"max": 0.5813479999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.314595500000007e-05,
"min": 7.314595500000007e-05,
"max": 0.0049222046525,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002194378650000002,
"min": 0.0002194378650000002,
"max": 0.014069265200000004,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670680707",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670682958"
},
"total": 2251.002594623,
"count": 1,
"self": 0.44284688500010816,
"children": {
"run_training.setup": {
"total": 0.11971305800000209,
"count": 1,
"self": 0.11971305800000209
},
"TrainerController.start_learning": {
"total": 2250.44003468,
"count": 1,
"self": 3.9290990220388267,
"children": {
"TrainerController._reset_env": {
"total": 10.988467049000064,
"count": 1,
"self": 10.988467049000064
},
"TrainerController.advance": {
"total": 2235.3918095999607,
"count": 232005,
"self": 4.044793448121254,
"children": {
"env_step": {
"total": 1741.4256529628665,
"count": 232005,
"self": 1460.2780648927196,
"children": {
"SubprocessEnvManager._take_step": {
"total": 278.4826052060573,
"count": 232005,
"self": 14.666893420998917,
"children": {
"TorchPolicy.evaluate": {
"total": 263.8157117850584,
"count": 222923,
"self": 66.48538989407132,
"children": {
"TorchPolicy.sample_actions": {
"total": 197.33032189098708,
"count": 222923,
"self": 197.33032189098708
}
}
}
}
},
"workers": {
"total": 2.664982864089666,
"count": 232005,
"self": 0.0,
"children": {
"worker_root": {
"total": 2242.776377656912,
"count": 232005,
"is_parallel": true,
"self": 1045.8251354709325,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005006766000064999,
"count": 1,
"is_parallel": true,
"self": 0.0003521530001080464,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004654612999956953,
"count": 2,
"is_parallel": true,
"self": 0.004654612999956953
}
}
},
"UnityEnvironment.step": {
"total": 0.02804875300000731,
"count": 1,
"is_parallel": true,
"self": 0.0003007030001072053,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023332699993261485,
"count": 1,
"is_parallel": true,
"self": 0.00023332699993261485
},
"communicator.exchange": {
"total": 0.026760359999911998,
"count": 1,
"is_parallel": true,
"self": 0.026760359999911998
},
"steps_from_proto": {
"total": 0.0007543630000554913,
"count": 1,
"is_parallel": true,
"self": 0.00026274000015291676,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004916229999025745,
"count": 2,
"is_parallel": true,
"self": 0.0004916229999025745
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1196.9512421859793,
"count": 232004,
"is_parallel": true,
"self": 35.33197642808136,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.66660132195625,
"count": 232004,
"is_parallel": true,
"self": 75.66660132195625
},
"communicator.exchange": {
"total": 991.9481841939524,
"count": 232004,
"is_parallel": true,
"self": 991.9481841939524
},
"steps_from_proto": {
"total": 94.00448024198909,
"count": 232004,
"is_parallel": true,
"self": 38.51650048094132,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.48797976104777,
"count": 464008,
"is_parallel": true,
"self": 55.48797976104777
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 489.9213631889728,
"count": 232005,
"self": 6.031210014979024,
"children": {
"process_trajectory": {
"total": 150.1205823259936,
"count": 232005,
"self": 149.63221382299366,
"children": {
"RLTrainer._checkpoint": {
"total": 0.48836850299994694,
"count": 4,
"self": 0.48836850299994694
}
}
},
"_update_policy": {
"total": 333.76957084800017,
"count": 97,
"self": 278.9866704039913,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.782900444008874,
"count": 2910,
"self": 54.782900444008874
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.760002856433857e-07,
"count": 1,
"self": 8.760002856433857e-07
},
"TrainerController._save_models": {
"total": 0.13065813299999718,
"count": 1,
"self": 0.002744568999787589,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1279135640002096,
"count": 1,
"self": 0.1279135640002096
}
}
}
}
}
}
}