{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.6313775777816772,
"min": 0.687566876411438,
"max": 3.2957448959350586,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 31792.28515625,
"min": 11772.873046875,
"max": 123214.2890625,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 73.01449275362319,
"min": 47.64705882352941,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20152.0,
"min": 11556.0,
"max": 28580.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1515.705751923598,
"min": 1168.9205235601728,
"max": 1628.994339523214,
"count": 3288
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 209167.3937654565,
"min": 2337.8410471203456,
"max": 319277.6253038919,
"count": 3288
},
"SoccerTwos.Step.mean": {
"value": 49999996.0,
"min": 9306.0,
"max": 49999996.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999996.0,
"min": 9306.0,
"max": 49999996.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.060066141188144684,
"min": -0.1634523570537567,
"max": 0.10722830146551132,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -8.229061126708984,
"min": -24.354400634765625,
"max": 16.566972732543945,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.06014878302812576,
"min": -0.16678248345851898,
"max": 0.10946665704250336,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -8.24038314819336,
"min": -24.850589752197266,
"max": 16.529464721679688,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.21258978164979142,
"min": -0.8,
"max": 0.50735999584198,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -29.124800086021423,
"min": -64.3296000957489,
"max": 52.583200216293335,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.21258978164979142,
"min": -0.8,
"max": 0.50735999584198,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -29.124800086021423,
"min": -64.3296000957489,
"max": 52.583200216293335,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017765630949967696,
"min": 0.009146430075149207,
"max": 0.025482347095385195,
"count": 2367
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017765630949967696,
"min": 0.009146430075149207,
"max": 0.025482347095385195,
"count": 2367
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0721655418475469,
"min": 1.8455612356647426e-11,
"max": 0.0993954966465632,
"count": 2367
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0721655418475469,
"min": 1.8455612356647426e-11,
"max": 0.0993954966465632,
"count": 2367
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.07412757948040963,
"min": 2.315871429810675e-11,
"max": 0.1015111818909645,
"count": 2367
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.07412757948040963,
"min": 2.315871429810675e-11,
"max": 0.1015111818909645,
"count": 2367
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2367
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2367
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2367
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 2367
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2367
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 2367
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682441296",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:39:03) \n[GCC 11.3.0]",
"command_line_arguments": "/home/naiqing/miniconda3/envs/soccer/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-execuables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682545661"
},
"total": 104364.96162325889,
"count": 1,
"self": 0.3219199627637863,
"children": {
"run_training.setup": {
"total": 0.009581569582223892,
"count": 1,
"self": 0.009581569582223892
},
"TrainerController.start_learning": {
"total": 104364.63012172654,
"count": 1,
"self": 69.6984545700252,
"children": {
"TrainerController._reset_env": {
"total": 17.646922294050455,
"count": 250,
"self": 17.646922294050455
},
"TrainerController.advance": {
"total": 104277.09735712782,
"count": 3329221,
"self": 66.82268615439534,
"children": {
"env_step": {
"total": 87675.0121406503,
"count": 3329221,
"self": 76123.50536464527,
"children": {
"SubprocessEnvManager._take_step": {
"total": 11511.910114817321,
"count": 3329221,
"self": 405.9245461076498,
"children": {
"TorchPolicy.evaluate": {
"total": 11105.985568709671,
"count": 6366322,
"self": 11105.985568709671
}
}
},
"workers": {
"total": 39.59666118770838,
"count": 3329221,
"self": 0.0,
"children": {
"worker_root": {
"total": 104202.1654753685,
"count": 3329221,
"is_parallel": true,
"self": 36788.327809829265,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0073916055262088776,
"count": 2,
"is_parallel": true,
"self": 0.0013881660997867584,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006003439426422119,
"count": 8,
"is_parallel": true,
"self": 0.006003439426422119
}
}
},
"UnityEnvironment.step": {
"total": 0.04743001610040665,
"count": 1,
"is_parallel": true,
"self": 0.001555703580379486,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000791698694229126,
"count": 1,
"is_parallel": true,
"self": 0.000791698694229126
},
"communicator.exchange": {
"total": 0.04074963927268982,
"count": 1,
"is_parallel": true,
"self": 0.04074963927268982
},
"steps_from_proto": {
"total": 0.004332974553108215,
"count": 2,
"is_parallel": true,
"self": 0.0007183775305747986,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0036145970225334167,
"count": 8,
"is_parallel": true,
"self": 0.0036145970225334167
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 67412.90954201296,
"count": 3329220,
"is_parallel": true,
"self": 4111.922827441245,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2588.0620989464223,
"count": 3329220,
"is_parallel": true,
"self": 2588.0620989464223
},
"communicator.exchange": {
"total": 49042.05218397081,
"count": 3329220,
"is_parallel": true,
"self": 49042.05218397081
},
"steps_from_proto": {
"total": 11670.872431654483,
"count": 6658440,
"is_parallel": true,
"self": 2099.1705483794212,
"children": {
"_process_rank_one_or_two_observation": {
"total": 9571.701883275062,
"count": 26633760,
"is_parallel": true,
"self": 9571.701883275062
}
}
}
}
},
"steps_from_proto": {
"total": 0.9281235262751579,
"count": 498,
"is_parallel": true,
"self": 0.169519305229187,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.7586042210459709,
"count": 1992,
"is_parallel": true,
"self": 0.7586042210459709
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 16535.262530323118,
"count": 3329221,
"self": 556.31035291031,
"children": {
"process_trajectory": {
"total": 5058.4026073515415,
"count": 3329221,
"self": 5040.401682328433,
"children": {
"RLTrainer._checkpoint": {
"total": 18.00092502310872,
"count": 100,
"self": 18.00092502310872
}
}
},
"_update_policy": {
"total": 10920.549570061266,
"count": 2367,
"self": 6795.289920583367,
"children": {
"TorchPOCAOptimizer.update": {
"total": 4125.259649477899,
"count": 71010,
"self": 4125.259649477899
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.897615432739258e-07,
"count": 1,
"self": 7.897615432739258e-07
},
"TrainerController._save_models": {
"total": 0.18738694489002228,
"count": 1,
"self": 0.0015438683331012726,
"children": {
"RLTrainer._checkpoint": {
"total": 0.185843076556921,
"count": 1,
"self": 0.185843076556921
}
}
}
}
}
}
}