{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8590760231018066,
"min": 0.8590760231018066,
"max": 2.8730478286743164,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8202.4580078125,
"min": 8202.4580078125,
"max": 29422.8828125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.923359870910645,
"min": 0.2999325394630432,
"max": 12.923359870910645,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2520.05517578125,
"min": 58.186912536621094,
"max": 2608.63916015625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07054675454147917,
"min": 0.0655887644096248,
"max": 0.07737811971450106,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2821870181659167,
"min": 0.2623550576384992,
"max": 0.3868905985725053,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19822799180652578,
"min": 0.11528187102807101,
"max": 0.2886352606994264,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7929119672261031,
"min": 0.46112748411228405,
"max": 1.3075181587654001,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.681818181818183,
"min": 2.7045454545454546,
"max": 25.681818181818183,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1130.0,
"min": 119.0,
"max": 1399.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.681818181818183,
"min": 2.7045454545454546,
"max": 25.681818181818183,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1130.0,
"min": 119.0,
"max": 1399.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673377665",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673378124"
},
"total": 458.48562408600014,
"count": 1,
"self": 0.38695118100008585,
"children": {
"run_training.setup": {
"total": 0.1127663360000497,
"count": 1,
"self": 0.1127663360000497
},
"TrainerController.start_learning": {
"total": 457.985906569,
"count": 1,
"self": 0.5020680219984115,
"children": {
"TrainerController._reset_env": {
"total": 7.1941388030002145,
"count": 1,
"self": 7.1941388030002145
},
"TrainerController.advance": {
"total": 450.1670302730013,
"count": 18202,
"self": 0.28670219800415,
"children": {
"env_step": {
"total": 449.8803280749971,
"count": 18202,
"self": 295.74020761400016,
"children": {
"SubprocessEnvManager._take_step": {
"total": 153.84246501600614,
"count": 18202,
"self": 1.4576251920038885,
"children": {
"TorchPolicy.evaluate": {
"total": 152.38483982400226,
"count": 18202,
"self": 34.05815577301405,
"children": {
"TorchPolicy.sample_actions": {
"total": 118.3266840509882,
"count": 18202,
"self": 118.3266840509882
}
}
}
}
},
"workers": {
"total": 0.2976554449908235,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 456.68086615200855,
"count": 18202,
"is_parallel": true,
"self": 220.70415246399625,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006128154000180075,
"count": 1,
"is_parallel": true,
"self": 0.0034503159999985655,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0026778380001815094,
"count": 10,
"is_parallel": true,
"self": 0.0026778380001815094
}
}
},
"UnityEnvironment.step": {
"total": 0.03690045299981648,
"count": 1,
"is_parallel": true,
"self": 0.000542112999710298,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003666739999061974,
"count": 1,
"is_parallel": true,
"self": 0.0003666739999061974
},
"communicator.exchange": {
"total": 0.033805561999997735,
"count": 1,
"is_parallel": true,
"self": 0.033805561999997735
},
"steps_from_proto": {
"total": 0.002186104000202249,
"count": 1,
"is_parallel": true,
"self": 0.0005286969999360736,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016574070002661756,
"count": 10,
"is_parallel": true,
"self": 0.0016574070002661756
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 235.9767136880123,
"count": 18201,
"is_parallel": true,
"self": 8.874657269032014,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.45929784899181,
"count": 18201,
"is_parallel": true,
"self": 5.45929784899181
},
"communicator.exchange": {
"total": 188.18157183399808,
"count": 18201,
"is_parallel": true,
"self": 188.18157183399808
},
"steps_from_proto": {
"total": 33.461186735990395,
"count": 18201,
"is_parallel": true,
"self": 7.147022061956704,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.31416467403369,
"count": 182010,
"is_parallel": true,
"self": 26.31416467403369
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.489000000125088e-05,
"count": 1,
"self": 5.489000000125088e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 446.57723848507385,
"count": 357751,
"is_parallel": true,
"self": 9.875825593059744,
"children": {
"process_trajectory": {
"total": 254.8778461280142,
"count": 357751,
"is_parallel": true,
"self": 254.0966950870145,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7811510409997027,
"count": 4,
"is_parallel": true,
"self": 0.7811510409997027
}
}
},
"_update_policy": {
"total": 181.8235667639999,
"count": 90,
"is_parallel": true,
"self": 48.07120142498911,
"children": {
"TorchPPOOptimizer.update": {
"total": 133.7523653390108,
"count": 4587,
"is_parallel": true,
"self": 133.7523653390108
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12261458100010714,
"count": 1,
"self": 0.0008544990000700636,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12176008200003707,
"count": 1,
"self": 0.12176008200003707
}
}
}
}
}
}
}