{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.40323805809021, "min": 1.40323805809021, "max": 1.4302598237991333, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 68740.421875, "min": 67141.3515625, "max": 79955.078125, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 90.63853211009175, "min": 85.88020833333333, "max": 397.4803149606299, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49398.0, "min": 49092.0, "max": 50480.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999990.0, "min": 49857.0, "max": 1999990.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999990.0, "min": 49857.0, "max": 1999990.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.4186103343963623, "min": 0.16601163148880005, "max": 2.477508544921875, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1318.142578125, "min": 20.917465209960938, "max": 1384.45556640625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.7144423652132716, "min": 1.8318118902425917, "max": 3.915887971197239, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2024.371089041233, "min": 230.80829817056656, "max": 2200.6938104629517, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.7144423652132716, "min": 1.8318118902425917, "max": 3.915887971197239, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2024.371089041233, "min": 230.80829817056656, "max": 2200.6938104629517, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.01774986506061396, "min": 0.013674028885361622, "max": 0.02203119363758661, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.05324959518184187, "min": 0.027348057770723244, "max": 0.06609358091275983, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.05452859364449978, "min": 0.021118822652432655, "max": 0.05989588455607493, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.16358578093349935, "min": 0.04261930889139573, "max": 0.17750743019084136, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.292298902599989e-06, "min": 3.292298902599989e-06, "max": 0.00029532352655882487, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 9.876896707799966e-06, "min": 9.876896707799966e-06, "max": 0.0008441331186222999, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10109739999999996, "min": 0.10109739999999996, "max": 0.19844117500000003, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.3032921999999999, "min": 0.20736680000000002, "max": 0.5813777, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.476025999999983e-05, "min": 6.476025999999983e-05, "max": 0.004922214632500001, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00019428077999999948, "min": 0.00019428077999999948, "max": 0.01407074723, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1722697558", "python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.3.1+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1722700224" }, 
"total": 2666.717331223, "count": 1, "self": 0.44740705100048217, "children": { "run_training.setup": { "total": 0.06784105199994883, "count": 1, "self": 0.06784105199994883 }, "TrainerController.start_learning": { "total": 2666.2020831199998, "count": 1, "self": 5.080133799105624, "children": { "TrainerController._reset_env": { "total": 2.8099093759999505, "count": 1, "self": 2.8099093759999505 }, "TrainerController.advance": { "total": 2658.1976548578946, "count": 231953, "self": 5.247483616919908, "children": { "env_step": { "total": 2106.6803967290366, "count": 231953, "self": 1737.936243831074, "children": { "SubprocessEnvManager._take_step": { "total": 365.38651336699434, "count": 231953, "self": 19.38338673302485, "children": { "TorchPolicy.evaluate": { "total": 346.0031266339695, "count": 222866, "self": 346.0031266339695 } } }, "workers": { "total": 3.357639530968072, "count": 231953, "self": 0.0, "children": { "worker_root": { "total": 2657.9212782031505, "count": 231953, "is_parallel": true, "self": 1263.1014508200926, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0009576319999951011, "count": 1, "is_parallel": true, "self": 0.00024629799997910595, "children": { "_process_rank_one_or_two_observation": { "total": 0.0007113340000159951, "count": 2, "is_parallel": true, "self": 0.0007113340000159951 } } }, "UnityEnvironment.step": { "total": 0.03078109900002346, "count": 1, "is_parallel": true, "self": 0.0003897830000596514, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00021379400004661875, "count": 1, "is_parallel": true, "self": 0.00021379400004661875 }, "communicator.exchange": { "total": 0.02941253599999527, "count": 1, "is_parallel": true, "self": 0.02941253599999527 }, "steps_from_proto": { "total": 0.0007649859999219188, "count": 1, "is_parallel": true, "self": 0.00022860799992940883, "children": { "_process_rank_one_or_two_observation": { "total": 0.00053637799999251, "count": 2, "is_parallel": true, "self": 0.00053637799999251 } } } } } } }, "UnityEnvironment.step": { "total": 1394.8198273830578, "count": 231952, "is_parallel": true, "self": 41.501642598296485, "children": { "UnityEnvironment._generate_step_input": { "total": 87.52638170584976, "count": 231952, "is_parallel": true, "self": 87.52638170584976 }, "communicator.exchange": { "total": 1166.6290221039258, "count": 231952, "is_parallel": true, "self": 1166.6290221039258 }, "steps_from_proto": { "total": 99.16278097498582, "count": 231952, "is_parallel": true, "self": 36.86654930699865, "children": { "_process_rank_one_or_two_observation": { "total": 62.29623166798717, "count": 463904, "is_parallel": true, "self": 62.29623166798717 } } } } } } } } } } }, "trainer_advance": { "total": 546.2697745119378, "count": 231953, "self": 7.74574797385776, "children": { "process_trajectory": { "total": 170.29569338108092, "count": 231953, "self": 168.80397751508133, "children": { "RLTrainer._checkpoint": { "total": 1.4917158659995948, "count": 10, "self": 1.4917158659995948 } } }, "_update_policy": { "total": 368.22833315699916, "count": 97, "self": 301.12879475899194, "children": { "TorchPPOOptimizer.update": { "total": 67.09953839800721, "count": 2910, "self": 67.09953839800721 } } } } } } }, "trainer_threads": { "total": 1.0049998309114017e-06, "count": 1, "self": 1.0049998309114017e-06 }, "TrainerController._save_models": { "total": 0.11438408200001504, "count": 1, "self": 0.0020385090001582284, 
"children": { "RLTrainer._checkpoint": { "total": 0.11234557299985681, "count": 1, "self": 0.11234557299985681 } } } } } } }