Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4090205430984497,
"min": 1.4090205430984497,
"max": 1.4288980960845947,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70336.8984375,
"min": 69106.875,
"max": 77800.0859375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.06551724137931,
"min": 84.66380789022298,
"max": 396.244094488189,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49338.0,
"min": 48858.0,
"max": 50323.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999932.0,
"min": 49868.0,
"max": 1999932.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999932.0,
"min": 49868.0,
"max": 1999932.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3656845092773438,
"min": 0.19065605103969574,
"max": 2.4591500759124756,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1372.0970458984375,
"min": 24.022663116455078,
"max": 1391.8194580078125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6786433417221596,
"min": 1.9913737050124578,
"max": 3.9373653899834777,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2133.6131381988525,
"min": 250.91308683156967,
"max": 2214.2372445464134,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6786433417221596,
"min": 1.9913737050124578,
"max": 3.9373653899834777,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2133.6131381988525,
"min": 250.91308683156967,
"max": 2214.2372445464134,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017963706068500566,
"min": 0.013177558867027983,
"max": 0.019900381929376938,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0538911182055017,
"min": 0.026355117734055966,
"max": 0.05937973916685829,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.057828951088918584,
"min": 0.020810776234914858,
"max": 0.0649347893272837,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17348685326675575,
"min": 0.041621552469829716,
"max": 0.17348685326675575,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1990989336666635e-06,
"min": 3.1990989336666635e-06,
"max": 0.00029525190158269995,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.59729680099999e-06,
"min": 9.59729680099999e-06,
"max": 0.0008435047688317499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10106633333333331,
"min": 0.10106633333333331,
"max": 0.19841730000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30319899999999994,
"min": 0.20728289999999994,
"max": 0.5811682499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.321003333333329e-05,
"min": 6.321003333333329e-05,
"max": 0.00492102327,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018963009999999986,
"min": 0.00018963009999999986,
"max": 0.014060295675,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1723129000",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1723131567"
},
"total": 2566.392008751,
"count": 1,
"self": 0.43919195399985256,
"children": {
"run_training.setup": {
"total": 0.06110564699997667,
"count": 1,
"self": 0.06110564699997667
},
"TrainerController.start_learning": {
"total": 2565.89171115,
"count": 1,
"self": 4.655692809002176,
"children": {
"TrainerController._reset_env": {
"total": 2.8081102490000376,
"count": 1,
"self": 2.8081102490000376
},
"TrainerController.advance": {
"total": 2558.3028797119973,
"count": 232124,
"self": 5.0217358349600545,
"children": {
"env_step": {
"total": 2047.6721557990031,
"count": 232124,
"self": 1683.4300232600572,
"children": {
"SubprocessEnvManager._take_step": {
"total": 361.06037756997745,
"count": 232124,
"self": 18.415958877051253,
"children": {
"TorchPolicy.evaluate": {
"total": 342.6444186929262,
"count": 222982,
"self": 342.6444186929262
}
}
},
"workers": {
"total": 3.181754968968562,
"count": 232124,
"self": 0.0,
"children": {
"worker_root": {
"total": 2558.237593478964,
"count": 232124,
"is_parallel": true,
"self": 1202.8018644110289,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009550340000146207,
"count": 1,
"is_parallel": true,
"self": 0.0002550290000158384,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007000049999987823,
"count": 2,
"is_parallel": true,
"self": 0.0007000049999987823
}
}
},
"UnityEnvironment.step": {
"total": 0.03139646400001084,
"count": 1,
"is_parallel": true,
"self": 0.0003792909999447147,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004044870000257106,
"count": 1,
"is_parallel": true,
"self": 0.0004044870000257106
},
"communicator.exchange": {
"total": 0.02985961799998904,
"count": 1,
"is_parallel": true,
"self": 0.02985961799998904
},
"steps_from_proto": {
"total": 0.0007530680000513712,
"count": 1,
"is_parallel": true,
"self": 0.00022807200002716854,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005249960000242027,
"count": 2,
"is_parallel": true,
"self": 0.0005249960000242027
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1355.435729067935,
"count": 232123,
"is_parallel": true,
"self": 40.61517104509471,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 89.88829578889658,
"count": 232123,
"is_parallel": true,
"self": 89.88829578889658
},
"communicator.exchange": {
"total": 1127.5410466139858,
"count": 232123,
"is_parallel": true,
"self": 1127.5410466139858
},
"steps_from_proto": {
"total": 97.3912156199579,
"count": 232123,
"is_parallel": true,
"self": 37.053249980946646,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.33796563901126,
"count": 464246,
"is_parallel": true,
"self": 60.33796563901126
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 505.60898807803403,
"count": 232124,
"self": 7.189335773075982,
"children": {
"process_trajectory": {
"total": 164.1343395859567,
"count": 232124,
"self": 162.873557335957,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2607822499996928,
"count": 10,
"self": 1.2607822499996928
}
}
},
"_update_policy": {
"total": 334.28531271900135,
"count": 97,
"self": 270.0239606930005,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.26135202600085,
"count": 2910,
"self": 64.26135202600085
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0020003173849545e-06,
"count": 1,
"self": 1.0020003173849545e-06
},
"TrainerController._save_models": {
"total": 0.12502737800014074,
"count": 1,
"self": 0.0018980919999194157,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12312928600022133,
"count": 1,
"self": 0.12312928600022133
}
}
}
}
}
}
}
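
Note: the JSON above is the timer/gauge summary that ML-Agents writes at the end of a run. "gauges" holds flat value/min/max/count statistics per metric, while the timer tree nests total/count/self/children records for each profiled block. As a minimal sketch of how this structure could be inspected (assuming the file is saved locally as run_logs/timers.json; the walk helper below is illustrative, not part of the ML-Agents API):

import json

def walk(node, name="root", depth=0):
    # Recursively print each timer block's total seconds and call count.
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.2f}s over {count} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Gauges: one summary line per tracked training statistic.
for gauge_name, gauge in timers["gauges"].items():
    print(f"{gauge_name}: value={gauge['value']} (min={gauge['min']}, max={gauge['max']})")

# Timer hierarchy: the root node itself carries total/count/self/children.
walk(timers)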