{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0346591472625732,
"min": 0.9746885895729065,
"max": 2.890343427658081,
"count": 500
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 819.4500122070312,
"min": 688.1884765625,
"max": 4323.953125,
"count": 500
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 960.0,
"max": 499976.0,
"count": 500
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 960.0,
"max": 499976.0,
"count": 500
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.153270721435547,
"min": 0.007912376895546913,
"max": 13.162095069885254,
"count": 500
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 328.8317565917969,
"min": 0.14325539767742157,
"max": 341.9119567871094,
"count": 500
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 227
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 2189.0,
"min": 2189.0,
"max": 2189.0,
"count": 227
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.0,
"min": 2.0,
"max": 30.0,
"count": 272
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 297.0,
"min": 2.0,
"max": 298.0,
"count": 272
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.0,
"min": 2.0,
"max": 30.0,
"count": 272
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 297.0,
"min": 2.0,
"max": 298.0,
"count": 272
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06900935006078726,
"min": 0.06337211986610783,
"max": 0.07876657630501237,
"count": 47
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.06900935006078726,
"min": 0.06337211986610783,
"max": 0.07876657630501237,
"count": 47
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18842259502659242,
"min": 0.08759426939136841,
"max": 0.3012457628423969,
"count": 47
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.18842259502659242,
"min": 0.08759426939136841,
"max": 0.3012457628423969,
"count": 47
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.217698260799989e-06,
"min": 5.217698260799989e-06,
"max": 0.00029345280218239997,
"count": 47
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.217698260799989e-06,
"min": 5.217698260799989e-06,
"max": 0.00029345280218239997,
"count": 47
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10173919999999997,
"min": 0.10173919999999997,
"max": 0.1978176,
"count": 47
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10173919999999997,
"min": 0.10173919999999997,
"max": 0.1978176,
"count": 47
},
"SnowballTarget.Policy.Beta.mean": {
"value": 9.678607999999985e-05,
"min": 9.678607999999985e-05,
"max": 0.004891098240000001,
"count": 47
},
"SnowballTarget.Policy.Beta.sum": {
"value": 9.678607999999985e-05,
"min": 9.678607999999985e-05,
"max": 0.004891098240000001,
"count": 47
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688463815",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688464841"
},
"total": 1026.132813287,
"count": 1,
"self": 0.43033897400005117,
"children": {
"run_training.setup": {
"total": 0.07422579600006429,
"count": 1,
"self": 0.07422579600006429
},
"TrainerController.start_learning": {
"total": 1025.628248517,
"count": 1,
"self": 0.950450523988593,
"children": {
"TrainerController._reset_env": {
"total": 4.177745281999933,
"count": 1,
"self": 4.177745281999933
},
"TrainerController.advance": {
"total": 1020.3662540700116,
"count": 45464,
"self": 0.9553208119812098,
"children": {
"env_step": {
"total": 725.645627825035,
"count": 45464,
"self": 596.7440147520272,
"children": {
"SubprocessEnvManager._take_step": {
"total": 128.32300786600422,
"count": 45464,
"self": 4.700736279005241,
"children": {
"TorchPolicy.evaluate": {
"total": 123.62227158699898,
"count": 45464,
"self": 123.62227158699898
}
}
},
"workers": {
"total": 0.5786052070036476,
"count": 45464,
"self": 0.0,
"children": {
"worker_root": {
"total": 1022.3441723780186,
"count": 45464,
"is_parallel": true,
"self": 494.5149807870155,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005877820000023348,
"count": 1,
"is_parallel": true,
"self": 0.004268005999847446,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001609814000175902,
"count": 10,
"is_parallel": true,
"self": 0.001609814000175902
}
}
},
"UnityEnvironment.step": {
"total": 0.03400904399995852,
"count": 1,
"is_parallel": true,
"self": 0.0005881500001123641,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041894099990713585,
"count": 1,
"is_parallel": true,
"self": 0.00041894099990713585
},
"communicator.exchange": {
"total": 0.031019872999877407,
"count": 1,
"is_parallel": true,
"self": 0.031019872999877407
},
"steps_from_proto": {
"total": 0.0019820800000616146,
"count": 1,
"is_parallel": true,
"self": 0.0003639249998741434,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016181550001874712,
"count": 10,
"is_parallel": true,
"self": 0.0016181550001874712
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 527.8291915910031,
"count": 45463,
"is_parallel": true,
"self": 25.398675316996787,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.252279808996263,
"count": 45463,
"is_parallel": true,
"self": 13.252279808996263
},
"communicator.exchange": {
"total": 407.52850907999346,
"count": 45463,
"is_parallel": true,
"self": 407.52850907999346
},
"steps_from_proto": {
"total": 81.6497273850166,
"count": 45463,
"is_parallel": true,
"self": 14.465802212058179,
"children": {
"_process_rank_one_or_two_observation": {
"total": 67.18392517295842,
"count": 454630,
"is_parallel": true,
"self": 67.18392517295842
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 293.7653054329953,
"count": 45464,
"self": 1.1334024810230403,
"children": {
"process_trajectory": {
"total": 59.26647733897289,
"count": 45464,
"self": 57.49198115897275,
"children": {
"RLTrainer._checkpoint": {
"total": 1.774496180000142,
"count": 10,
"self": 1.774496180000142
}
}
},
"_update_policy": {
"total": 233.3654256129994,
"count": 47,
"self": 126.88102578098528,
"children": {
"TorchPPOOptimizer.update": {
"total": 106.4843998320141,
"count": 11460,
"self": 106.4843998320141
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0670000847312622e-06,
"count": 1,
"self": 1.0670000847312622e-06
},
"TrainerController._save_models": {
"total": 0.13379757399980008,
"count": 1,
"self": 0.0010999399996762804,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1326976340001238,
"count": 1,
"self": 0.1326976340001238
}
}
}
}
}
}
}