{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8022698163986206,
"min": 1.8022698163986206,
"max": 1.8022698163986206,
"count": 1
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 20646.802734375,
"min": 20646.802734375,
"max": 20646.802734375,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 52.61363636363637,
"min": 52.61363636363637,
"max": 52.61363636363637,
"count": 1
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 9260.0,
"min": 9260.0,
"max": 9260.0,
"count": 1
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1703.0449882685107,
"min": 1703.0449882685107,
"max": 1703.0449882685107,
"count": 1
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 149867.95896762895,
"min": 149867.95896762895,
"max": 149867.95896762895,
"count": 1
},
"SoccerTwos.Step.mean": {
"value": 14399980.0,
"min": 14399980.0,
"max": 14399980.0,
"count": 1
},
"SoccerTwos.Step.sum": {
"value": 14399980.0,
"min": 14399980.0,
"max": 14399980.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.12761053442955017,
"min": -0.12761053442955017,
"max": -0.12761053442955017,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -10.974505424499512,
"min": -10.974505424499512,
"max": -10.974505424499512,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.12688703835010529,
"min": -0.12688703835010529,
"max": -0.12688703835010529,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -10.912284851074219,
"min": -10.912284851074219,
"max": -10.912284851074219,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.3230837209280147,
"min": -0.3230837209280147,
"max": -0.3230837209280147,
"count": 1
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -27.785199999809265,
"min": -27.785199999809265,
"max": -27.785199999809265,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.3230837209280147,
"min": -0.3230837209280147,
"max": -0.3230837209280147,
"count": 1
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -27.785199999809265,
"min": -27.785199999809265,
"max": -27.785199999809265,
"count": 1
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740246558",
"python_version": "3.10.12 (main, Jul 5 2023, 15:02:25) [Clang 14.0.6 ]",
"command_line_arguments": "/Users/Rava/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.app --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0",
"numpy_version": "1.23.5",
"end_time_seconds": "1740246574"
},
"total": 16.636009916997864,
"count": 1,
"self": 0.18583970901090652,
"children": {
"run_training.setup": {
"total": 0.03656129199953284,
"count": 1,
"self": 0.03656129199953284
},
"TrainerController.start_learning": {
"total": 16.413608915987425,
"count": 1,
"self": 0.002974370145238936,
"children": {
"TrainerController._reset_env": {
"total": 1.857082791015273,
"count": 2,
"self": 1.857082791015273
},
"TrainerController.advance": {
"total": 14.452344170829747,
"count": 387,
"self": 0.0027541567687876523,
"children": {
"env_step": {
"total": 13.922965923979064,
"count": 387,
"self": 13.51434391582734,
"children": {
"SubprocessEnvManager._take_step": {
"total": 0.40676737106696237,
"count": 387,
"self": 0.012269758910406381,
"children": {
"TorchPolicy.evaluate": {
"total": 0.394497612156556,
"count": 716,
"self": 0.394497612156556
}
}
},
"workers": {
"total": 0.0018546370847616345,
"count": 387,
"self": 0.0,
"children": {
"worker_root": {
"total": 14.876430246149539,
"count": 387,
"is_parallel": true,
"self": 1.7209492633846821,
"children": {
"steps_from_proto": {
"total": 0.002650748996529728,
"count": 4,
"is_parallel": true,
"self": 0.00036129201180301607,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002289456984726712,
"count": 16,
"is_parallel": true,
"self": 0.002289456984726712
}
}
},
"UnityEnvironment.step": {
"total": 13.152830233768327,
"count": 387,
"is_parallel": true,
"self": 0.03488348981773015,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.2457014889077982,
"count": 387,
"is_parallel": true,
"self": 0.2457014889077982
},
"communicator.exchange": {
"total": 12.421524163917638,
"count": 387,
"is_parallel": true,
"self": 12.421524163917638
},
"steps_from_proto": {
"total": 0.45072109112516046,
"count": 774,
"is_parallel": true,
"self": 0.048481980338692665,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.4022391107864678,
"count": 3096,
"is_parallel": true,
"self": 0.4022391107864678
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 0.5266240900818957,
"count": 387,
"self": 0.018710285075940192,
"children": {
"process_trajectory": {
"total": 0.5079138050059555,
"count": 387,
"self": 0.5079138050059555
}
}
}
}
},
"trainer_threads": {
"total": 2.9199873097240925e-07,
"count": 1,
"self": 2.9199873097240925e-07
},
"TrainerController._save_models": {
"total": 0.10120729199843481,
"count": 1,
"self": 0.0005302920035319403,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10067699999490287,
"count": 1,
"self": 0.10067699999490287
}
}
}
}
}
}
}