{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8154339790344238,
"min": 0.8154339790344238,
"max": 2.8451220989227295,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7785.763671875,
"min": 7785.763671875,
"max": 29136.89453125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.91140365600586,
"min": 0.49776172637939453,
"max": 12.942049980163574,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2517.7236328125,
"min": 96.5657730102539,
"max": 2640.17822265625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06359168905114262,
"min": 0.06202297932377763,
"max": 0.0800945878888574,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2543667562045705,
"min": 0.2502903652291256,
"max": 0.3573350444091072,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20380407243090518,
"min": 0.15080880857867135,
"max": 0.27437453570903514,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8152162897236207,
"min": 0.6032352343146854,
"max": 1.3458589439225548,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.068181818181817,
"min": 4.090909090909091,
"max": 25.772727272727273,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1103.0,
"min": 180.0,
"max": 1409.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.068181818181817,
"min": 4.090909090909091,
"max": 25.772727272727273,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1103.0,
"min": 180.0,
"max": 1409.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673991217",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ../config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673991639"
},
"total": 422.3092733899998,
"count": 1,
"self": 0.4323425570000836,
"children": {
"run_training.setup": {
"total": 0.11196192399984284,
"count": 1,
"self": 0.11196192399984284
},
"TrainerController.start_learning": {
"total": 421.7649689089999,
"count": 1,
"self": 0.5011394040038795,
"children": {
"TrainerController._reset_env": {
"total": 10.77781719199993,
"count": 1,
"self": 10.77781719199993
},
"TrainerController.advance": {
"total": 410.36675780299583,
"count": 18204,
"self": 0.2566635199477787,
"children": {
"env_step": {
"total": 410.11009428304806,
"count": 18204,
"self": 267.57270200406265,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.2821796570165,
"count": 18204,
"self": 1.3734700509949107,
"children": {
"TorchPolicy.evaluate": {
"total": 140.9087096060216,
"count": 18204,
"self": 31.18787847998601,
"children": {
"TorchPolicy.sample_actions": {
"total": 109.72083112603559,
"count": 18204,
"self": 109.72083112603559
}
}
}
}
},
"workers": {
"total": 0.2552126219688944,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 420.5395425530387,
"count": 18204,
"is_parallel": true,
"self": 202.454788750018,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006746048999957566,
"count": 1,
"is_parallel": true,
"self": 0.003949422999994567,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0027966259999629983,
"count": 10,
"is_parallel": true,
"self": 0.0027966259999629983
}
}
},
"UnityEnvironment.step": {
"total": 0.04130765599984443,
"count": 1,
"is_parallel": true,
"self": 0.0005060180001237313,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002774599997792393,
"count": 1,
"is_parallel": true,
"self": 0.0002774599997792393
},
"communicator.exchange": {
"total": 0.03873581500010914,
"count": 1,
"is_parallel": true,
"self": 0.03873581500010914
},
"steps_from_proto": {
"total": 0.0017883629998323158,
"count": 1,
"is_parallel": true,
"self": 0.0004015099993921467,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013868530004401691,
"count": 10,
"is_parallel": true,
"self": 0.0013868530004401691
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 218.0847538030207,
"count": 18203,
"is_parallel": true,
"self": 8.223686669050949,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.774689711994142,
"count": 18203,
"is_parallel": true,
"self": 4.774689711994142
},
"communicator.exchange": {
"total": 173.81289335797464,
"count": 18203,
"is_parallel": true,
"self": 173.81289335797464
},
"steps_from_proto": {
"total": 31.273484064000968,
"count": 18203,
"is_parallel": true,
"self": 6.312190574005854,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.961293489995114,
"count": 182030,
"is_parallel": true,
"self": 24.961293489995114
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.9330000112822745e-05,
"count": 1,
"self": 5.9330000112822745e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 407.5103010859975,
"count": 340208,
"is_parallel": true,
"self": 8.908186267210112,
"children": {
"process_trajectory": {
"total": 234.80310253978723,
"count": 340208,
"is_parallel": true,
"self": 233.67606060378694,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1270419360002961,
"count": 4,
"is_parallel": true,
"self": 1.1270419360002961
}
}
},
"_update_policy": {
"total": 163.79901227900018,
"count": 90,
"is_parallel": true,
"self": 41.22448571901259,
"children": {
"TorchPPOOptimizer.update": {
"total": 122.57452655998759,
"count": 4584,
"is_parallel": true,
"self": 122.57452655998759
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1191951800001334,
"count": 1,
"self": 0.0008971280003606807,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11829805199977272,
"count": 1,
"self": 0.11829805199977272
}
}
}
}
}
}
}
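
The JSON above is a training-statistics dump in the format that mlagents-learn writes out at the end of a run (here, --run-id=SnowballTarget1): a flat "gauges" map of per-metric value/min/max/count summaries, a "metadata" block describing the run, and a nested wall-clock timer tree in which each node carries "total"/"count"/"self" seconds plus optional "children". A minimal Python sketch for inspecting it follows; the local filename "timers.json" is an assumption, as ML-Agents normally places this file under results/<run-id>/run_logs/.

import json

with open("timers.json") as f:
    data = json.load(f)

# Each gauge stores the most recent value plus the min/max observed
# across the run's summary periods ("count" of them; 20 in this run).
print(f"{'gauge':<52} {'value':>12} {'min':>12} {'max':>12}")
for name, g in data["gauges"].items():
    print(f"{name:<52} {g['value']:>12.6g} {g['min']:>12.6g} {g['max']:>12.6g}")

# The timer tree nests "children" dicts; walking it shows where
# wall-clock time went. "total" includes children, "self" does not
# (e.g. at the root: 0.432 self + 0.112 + 421.765 children = 422.309 total).
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: total={node.get('total', 0.0):.2f}s "
          f"self={node.get('self', 0.0):.2f}s count={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(data)

On this file, walk() shows that of the ~422 s total, env_step dominates (~410 s, most of it communicator.exchange with the Unity executable and observation processing), while _update_policy (TorchPPOOptimizer.update) accounts for roughly 164 s on the parallel trainer thread.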