{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.41740021109580994,
"min": 0.4084530770778656,
"max": 1.427616000175476,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12709.001953125,
"min": 12096.74609375,
"max": 43308.16015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989878.0,
"min": 29904.0,
"max": 989878.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989878.0,
"min": 29904.0,
"max": 989878.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5382217764854431,
"min": -0.08618258684873581,
"max": 0.5507494211196899,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 150.7021026611328,
"min": -20.770004272460938,
"max": 156.412841796875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.023785008117556572,
"min": -0.008291223086416721,
"max": 0.1980447918176651,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.659802436828613,
"min": -2.188882827758789,
"max": 47.583709716796875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06912106197354767,
"min": 0.0654575943099648,
"max": 0.07515509265010176,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0368159296032151,
"min": 0.5260856485507123,
"max": 1.0771723379051157,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014856788707216835,
"min": 0.00016853591737855364,
"max": 0.017409081426670865,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22285183060825253,
"min": 0.002359502843299751,
"max": 0.2437271399733921,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.499737500120003e-06,
"min": 7.499737500120003e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011249606250180004,
"min": 0.00011249606250180004,
"max": 0.0036324901891699995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249988000000002,
"min": 0.10249988000000002,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5374982000000001,
"min": 1.3886848,
"max": 2.6108300000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025973801200000014,
"min": 0.00025973801200000014,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003896070180000002,
"min": 0.003896070180000002,
"max": 0.121101917,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0102609908208251,
"min": 0.0102609908208251,
"max": 0.4233998656272888,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15391486883163452,
"min": 0.14916081726551056,
"max": 2.963798999786377,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 335.314606741573,
"min": 335.314606741573,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29843.0,
"min": 16831.0,
"max": 34764.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.597251668404997,
"min": -0.9996833850940069,
"max": 1.6564073893758986,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 142.15539848804474,
"min": -29.65840169787407,
"max": 143.73559817671776,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.597251668404997,
"min": -0.9996833850940069,
"max": 1.6564073893758986,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 142.15539848804474,
"min": -29.65840169787407,
"max": 143.73559817671776,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.035534471815440206,
"min": 0.035534471815440206,
"max": 8.957720973035869,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1625679915741784,
"min": 3.1625679915741784,
"max": 152.28125654160976,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679758430",
"python_version": "3.9.9 | packaged by conda-forge | (main, Dec 20 2021, 02:40:17) \n[GCC 9.4.0]",
"command_line_arguments": "/home/nazar/anaconda3/envs/ml-agents/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.20.0",
"end_time_seconds": "1679762309"
},
"total": 3879.031270471,
"count": 1,
"self": 0.9498452970001381,
"children": {
"run_training.setup": {
"total": 0.014423399999941466,
"count": 1,
"self": 0.014423399999941466
},
"TrainerController.start_learning": {
"total": 3878.067001774,
"count": 1,
"self": 2.0195851750108886,
"children": {
"TrainerController._reset_env": {
"total": 5.801457889000176,
"count": 1,
"self": 5.801457889000176
},
"TrainerController.advance": {
"total": 3870.0836300099886,
"count": 63758,
"self": 2.1410235050429947,
"children": {
"env_step": {
"total": 2616.4802834339544,
"count": 63758,
"self": 2284.8185880236897,
"children": {
"SubprocessEnvManager._take_step": {
"total": 330.4751811121182,
"count": 63758,
"self": 8.284652147167435,
"children": {
"TorchPolicy.evaluate": {
"total": 322.1905289649508,
"count": 62569,
"self": 145.7316923569624,
"children": {
"TorchPolicy.sample_actions": {
"total": 176.45883660798836,
"count": 62569,
"self": 176.45883660798836
}
}
}
}
},
"workers": {
"total": 1.1865142981464487,
"count": 63758,
"self": 0.0,
"children": {
"worker_root": {
"total": 3871.6739979709573,
"count": 63758,
"is_parallel": true,
"self": 1728.1290358479673,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001718299999993178,
"count": 1,
"is_parallel": true,
"self": 0.0006205999993653677,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010977000006278104,
"count": 8,
"is_parallel": true,
"self": 0.0010977000006278104
}
}
},
"UnityEnvironment.step": {
"total": 0.05197199999975055,
"count": 1,
"is_parallel": true,
"self": 0.0006343999998534855,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00033540000003995374,
"count": 1,
"is_parallel": true,
"self": 0.00033540000003995374
},
"communicator.exchange": {
"total": 0.049614800000199466,
"count": 1,
"is_parallel": true,
"self": 0.049614800000199466
},
"steps_from_proto": {
"total": 0.0013873999996576458,
"count": 1,
"is_parallel": true,
"self": 0.0002951999990727927,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010922000005848531,
"count": 8,
"is_parallel": true,
"self": 0.0010922000005848531
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2143.54496212299,
"count": 63757,
"is_parallel": true,
"self": 29.05216696012485,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 20.692247121888613,
"count": 63757,
"is_parallel": true,
"self": 20.692247121888613
},
"communicator.exchange": {
"total": 2007.1528463029636,
"count": 63757,
"is_parallel": true,
"self": 2007.1528463029636
},
"steps_from_proto": {
"total": 86.64770173801298,
"count": 63757,
"is_parallel": true,
"self": 23.28825018173029,
"children": {
"_process_rank_one_or_two_observation": {
"total": 63.35945155628269,
"count": 510056,
"is_parallel": true,
"self": 63.35945155628269
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1251.4623230709913,
"count": 63758,
"self": 3.262913673921048,
"children": {
"process_trajectory": {
"total": 234.30480888806915,
"count": 63758,
"self": 233.92338785206857,
"children": {
"RLTrainer._checkpoint": {
"total": 0.38142103600057453,
"count": 2,
"self": 0.38142103600057453
}
}
},
"_update_policy": {
"total": 1013.8946005090011,
"count": 454,
"self": 251.09265204299345,
"children": {
"TorchPPOOptimizer.update": {
"total": 762.8019484660076,
"count": 22794,
"self": 762.8019484660076
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2000000424450263e-06,
"count": 1,
"self": 1.2000000424450263e-06
},
"TrainerController._save_models": {
"total": 0.16232750000017404,
"count": 1,
"self": 0.002415100000689563,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15991239999948448,
"count": 1,
"self": 0.15991239999948448
}
}
}
}
}
}
}