{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.46871811151504517,
"min": 0.46871811151504517,
"max": 1.4347219467163086,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14091.541015625,
"min": 14091.541015625,
"max": 43523.7265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989902.0,
"min": 29910.0,
"max": 989902.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989902.0,
"min": 29910.0,
"max": 989902.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.41535234451293945,
"min": -0.12590618431568146,
"max": 0.41535234451293945,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 110.89907836914062,
"min": -30.217483520507812,
"max": 110.89907836914062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.00023446280101779848,
"min": -0.059020914137363434,
"max": 0.203892782330513,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.06260156631469727,
"min": -15.109354019165039,
"max": 49.138160705566406,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07050569612918133,
"min": 0.06477310515656401,
"max": 0.07619860619156672,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9870797458085385,
"min": 0.600380818041384,
"max": 1.0669108343883598,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015642743894810945,
"min": 0.00023774593981955463,
"max": 0.015642743894810945,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21899841452735322,
"min": 0.002615205338015101,
"max": 0.21899841452735322,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.403283246557142e-06,
"min": 7.403283246557142e-06,
"max": 0.00029528835157055,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001036459654518,
"min": 0.0001036459654518,
"max": 0.003223104125632,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246772857142858,
"min": 0.10246772857142858,
"max": 0.19842945,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4345482,
"min": 1.4345482,
"max": 2.4425243000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002565260842857143,
"min": 0.0002565260842857143,
"max": 0.009843102055,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00359136518,
"min": 0.00359136518,
"max": 0.10744936319999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00963502936065197,
"min": 0.00963502936065197,
"max": 0.34898096323013306,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13489040732383728,
"min": 0.13489040732383728,
"max": 2.7918477058410645,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 408.34722222222223,
"min": 408.34722222222223,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29401.0,
"min": 16709.0,
"max": 32349.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4527110898246367,
"min": -0.9999742455059483,
"max": 1.4527110898246367,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 104.59519846737385,
"min": -31.998401656746864,
"max": 104.59519846737385,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4527110898246367,
"min": -0.9999742455059483,
"max": 1.4527110898246367,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 104.59519846737385,
"min": -31.998401656746864,
"max": 104.59519846737385,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.040899573755773924,
"min": 0.040899573755773924,
"max": 6.261625605470994,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.9447693104157224,
"min": 2.9447693104157224,
"max": 106.4476352930069,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739994216",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739996344"
},
"total": 2127.233976557,
"count": 1,
"self": 0.8204664959998809,
"children": {
"run_training.setup": {
"total": 0.02116938900007881,
"count": 1,
"self": 0.02116938900007881
},
"TrainerController.start_learning": {
"total": 2126.392340672,
"count": 1,
"self": 1.2762381159786855,
"children": {
"TrainerController._reset_env": {
"total": 2.729924062000009,
"count": 1,
"self": 2.729924062000009
},
"TrainerController.advance": {
"total": 2122.252585556022,
"count": 63602,
"self": 1.3035594428183686,
"children": {
"env_step": {
"total": 1433.6145528131192,
"count": 63602,
"self": 1283.9428846619885,
"children": {
"SubprocessEnvManager._take_step": {
"total": 148.9488474340751,
"count": 63602,
"self": 4.615752209114817,
"children": {
"TorchPolicy.evaluate": {
"total": 144.3330952249603,
"count": 62575,
"self": 144.3330952249603
}
}
},
"workers": {
"total": 0.7228207170555834,
"count": 63602,
"self": 0.0,
"children": {
"worker_root": {
"total": 2121.6632734419863,
"count": 63602,
"is_parallel": true,
"self": 945.3097322231006,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001988934000110021,
"count": 1,
"is_parallel": true,
"self": 0.0006815709998591046,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013073630002509162,
"count": 8,
"is_parallel": true,
"self": 0.0013073630002509162
}
}
},
"UnityEnvironment.step": {
"total": 0.0921219540000493,
"count": 1,
"is_parallel": true,
"self": 0.0005481130001498968,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045441699967341265,
"count": 1,
"is_parallel": true,
"self": 0.00045441699967341265
},
"communicator.exchange": {
"total": 0.08925518700016255,
"count": 1,
"is_parallel": true,
"self": 0.08925518700016255
},
"steps_from_proto": {
"total": 0.00186423700006344,
"count": 1,
"is_parallel": true,
"self": 0.0004173259994786349,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014469110005848052,
"count": 8,
"is_parallel": true,
"self": 0.0014469110005848052
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1176.3535412188858,
"count": 63601,
"is_parallel": true,
"self": 31.592030050806443,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.815404827963448,
"count": 63601,
"is_parallel": true,
"self": 22.815404827963448
},
"communicator.exchange": {
"total": 1027.6999074809878,
"count": 63601,
"is_parallel": true,
"self": 1027.6999074809878
},
"steps_from_proto": {
"total": 94.24619885912807,
"count": 63601,
"is_parallel": true,
"self": 18.42477378976946,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.82142506935861,
"count": 508808,
"is_parallel": true,
"self": 75.82142506935861
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 687.3344733000845,
"count": 63602,
"self": 2.3620640490817095,
"children": {
"process_trajectory": {
"total": 127.72672320000402,
"count": 63602,
"self": 127.48533132000375,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2413918800002648,
"count": 2,
"self": 0.2413918800002648
}
}
},
"_update_policy": {
"total": 557.2456860509988,
"count": 445,
"self": 305.39139048403376,
"children": {
"TorchPPOOptimizer.update": {
"total": 251.854295566965,
"count": 22845,
"self": 251.854295566965
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.469996828353032e-07,
"count": 1,
"self": 8.469996828353032e-07
},
"TrainerController._save_models": {
"total": 0.13359209099962754,
"count": 1,
"self": 0.0016830699996717158,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13190902099995583,
"count": 1,
"self": 0.13190902099995583
}
}
}
}
}
}
}