{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.5269735455513,
"min": 0.5269735455513,
"max": 2.8357203006744385,
"count": 200
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5437.31298828125,
"min": 5105.45654296875,
"max": 29196.576171875,
"count": 200
},
"SnowballTarget.Step.mean": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Step.sum": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 15.033926963806152,
"min": 0.4628317654132843,
"max": 15.097548484802246,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 3081.955078125,
"min": 89.78936004638672,
"max": 3085.62646484375,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07078048211322954,
"min": 0.06503697658053118,
"max": 0.07793410775942475,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3539024105661477,
"min": 0.26219758718789843,
"max": 0.3824035105034401,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.11107677399673883,
"min": 0.0889188205001547,
"max": 0.24954896199352605,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.5553838699836942,
"min": 0.3556752820006188,
"max": 1.2477448099676303,
"count": 200
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.032997656000038e-07,
"min": 7.032997656000038e-07,
"max": 0.00029918820027059994,
"count": 200
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.516498828000019e-06,
"min": 3.516498828000019e-06,
"max": 0.001488516003828,
"count": 200
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10023440000000002,
"min": 0.10023440000000002,
"max": 0.1997294,
"count": 200
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5011720000000001,
"min": 0.4029176,
"max": 0.9961720000000001,
"count": 200
},
"SnowballTarget.Policy.Beta.mean": {
"value": 2.1696560000000063e-05,
"min": 2.1696560000000063e-05,
"max": 0.00498649706,
"count": 200
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00010848280000000032,
"min": 0.00010848280000000032,
"max": 0.024808982800000004,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 29.454545454545453,
"min": 3.7045454545454546,
"max": 29.75,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1620.0,
"min": 163.0,
"max": 1629.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 29.454545454545453,
"min": 3.7045454545454546,
"max": 29.75,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1620.0,
"min": 163.0,
"max": 1629.0,
"count": 200
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673520245",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673526354"
},
"total": 6108.450217839001,
"count": 1,
"self": 0.38215958300133934,
"children": {
"run_training.setup": {
"total": 0.10299456399980045,
"count": 1,
"self": 0.10299456399980045
},
"TrainerController.start_learning": {
"total": 6107.965063692,
"count": 1,
"self": 4.670113652085092,
"children": {
"TrainerController._reset_env": {
"total": 5.953856518000066,
"count": 1,
"self": 5.953856518000066
},
"TrainerController.advance": {
"total": 6097.172750531914,
"count": 181879,
"self": 2.3390944627126373,
"children": {
"env_step": {
"total": 6094.833656069201,
"count": 181879,
"self": 4547.217765226418,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1545.1520703661208,
"count": 181879,
"self": 13.06588448885941,
"children": {
"TorchPolicy.evaluate": {
"total": 1532.0861858772614,
"count": 181879,
"self": 303.9904630213814,
"children": {
"TorchPolicy.sample_actions": {
"total": 1228.09572285588,
"count": 181879,
"self": 1228.09572285588
}
}
}
}
},
"workers": {
"total": 2.4638204766624767,
"count": 181879,
"self": 0.0,
"children": {
"worker_root": {
"total": 6097.303847972106,
"count": 181879,
"is_parallel": true,
"self": 4068.3793781252202,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018383219999122957,
"count": 1,
"is_parallel": true,
"self": 0.0006221360004019516,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012161859995103441,
"count": 10,
"is_parallel": true,
"self": 0.0012161859995103441
}
}
},
"UnityEnvironment.step": {
"total": 0.03766531800010853,
"count": 1,
"is_parallel": true,
"self": 0.00045596099971589865,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00027069700036008726,
"count": 1,
"is_parallel": true,
"self": 0.00027069700036008726
},
"communicator.exchange": {
"total": 0.035113329000068916,
"count": 1,
"is_parallel": true,
"self": 0.035113329000068916
},
"steps_from_proto": {
"total": 0.0018253309999636258,
"count": 1,
"is_parallel": true,
"self": 0.0004180810001344071,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014072499998292187,
"count": 10,
"is_parallel": true,
"self": 0.0014072499998292187
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2028.924469846886,
"count": 181878,
"is_parallel": true,
"self": 80.32223734537229,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 48.73312923412368,
"count": 181878,
"is_parallel": true,
"self": 48.73312923412368
},
"communicator.exchange": {
"total": 1584.0304515685566,
"count": 181878,
"is_parallel": true,
"self": 1584.0304515685566
},
"steps_from_proto": {
"total": 315.8386516988335,
"count": 181878,
"is_parallel": true,
"self": 62.35403560249006,
"children": {
"_process_rank_one_or_two_observation": {
"total": 253.48461609634342,
"count": 1818780,
"is_parallel": true,
"self": 253.48461609634342
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.2252999264746904e-05,
"count": 1,
"self": 4.2252999264746904e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 6073.711755697596,
"count": 2898609,
"is_parallel": true,
"self": 71.1986653173426,
"children": {
"process_trajectory": {
"total": 2506.8915254542585,
"count": 2898609,
"is_parallel": true,
"self": 2496.2475137062534,
"children": {
"RLTrainer._checkpoint": {
"total": 10.644011748005141,
"count": 40,
"is_parallel": true,
"self": 10.644011748005141
}
}
},
"_update_policy": {
"total": 3495.6215649259952,
"count": 909,
"is_parallel": true,
"self": 1329.040408200864,
"children": {
"TorchPPOOptimizer.update": {
"total": 2166.581156725131,
"count": 154480,
"is_parallel": true,
"self": 2166.581156725131
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.16830073700111825,
"count": 1,
"self": 0.0011941270022362005,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16710660999888205,
"count": 1,
"self": 0.16710660999888205
}
}
}
}
}
}
}
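
The file above follows the ML-Agents timer format ("timer_format_version": "0.1.0"): the "gauges" block summarises each training metric (last value, min, max, count), and the nested "children" entries form a wall-clock profile of the run. A minimal sketch of reading it with Python's standard json module; the run_logs/timers.json path is an assumption about where the file sits in the repository:

import json

# Assumed location of the dump shown above; adjust to the actual repo path.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Gauges: per-metric summaries recorded during training.
for name, stats in timers["gauges"].items():
    if name.endswith(".mean"):
        print(f"{name}: last={stats['value']:.3f} "
              f"(min={stats['min']:.3f}, max={stats['max']:.3f}, n={stats['count']})")

# Timer tree: each node reports total seconds, call count, and its children.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s "
          f"over {node.get('count', 0)} calls")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)

Running this against the data above would show, for example, that of the ~6108 s total, most time sits under TrainerController.advance (env_step plus the parallel trainer_advance thread).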