{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8236401081085205,
"min": 1.816789984703064,
"max": 3.2957539558410645,
"count": 542
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 36414.4453125,
"min": 17382.8515625,
"max": 106336.59375,
"count": 542
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 57.96470588235294,
"min": 44.81651376146789,
"max": 999.0,
"count": 542
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19708.0,
"min": 15984.0,
"max": 24368.0,
"count": 542
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1600.8162501543295,
"min": 1202.5799518752465,
"max": 1612.1312020510723,
"count": 536
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 272138.762526236,
"min": 2409.3842854206932,
"max": 342556.84415041376,
"count": 536
},
"SoccerTwos.Step.mean": {
"value": 5419987.0,
"min": 9202.0,
"max": 5419987.0,
"count": 542
},
"SoccerTwos.Step.sum": {
"value": 5419987.0,
"min": 9202.0,
"max": 5419987.0,
"count": 542
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.009928393177688122,
"min": -0.1049918532371521,
"max": 0.18253982067108154,
"count": 542
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.6878268718719482,
"min": -16.2851505279541,
"max": 25.03551483154297,
"count": 542
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.014193202368915081,
"min": -0.10672014206647873,
"max": 0.1740785837173462,
"count": 542
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 2.41284441947937,
"min": -17.065570831298828,
"max": 24.605972290039062,
"count": 542
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 542
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 542
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.02713294169482063,
"min": -0.5833333333333334,
"max": 0.6075979586766691,
"count": 542
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 4.612600088119507,
"min": -53.564000248909,
"max": 72.31839990615845,
"count": 542
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.02713294169482063,
"min": -0.5833333333333334,
"max": 0.6075979586766691,
"count": 542
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 4.612600088119507,
"min": -53.564000248909,
"max": 72.31839990615845,
"count": 542
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 542
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 542
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015720353148693297,
"min": 0.011549799980518098,
"max": 0.024669724563136698,
"count": 260
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015720353148693297,
"min": 0.011549799980518098,
"max": 0.024669724563136698,
"count": 260
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09480199714501698,
"min": 0.00014383122373449927,
"max": 0.12502698227763176,
"count": 260
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09480199714501698,
"min": 0.00014383122373449927,
"max": 0.12502698227763176,
"count": 260
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09643616129954656,
"min": 0.00014710981534638752,
"max": 0.12777931094169617,
"count": 260
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09643616129954656,
"min": 0.00014710981534638752,
"max": 0.12777931094169617,
"count": 260
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 260
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 260
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 260
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 260
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 260
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 260
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1705133508",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\karth\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1705150124"
},
"total": 16617.011633299997,
"count": 1,
"self": 4.2295673999906285,
"children": {
"run_training.setup": {
"total": 0.07817600000271341,
"count": 1,
"self": 0.07817600000271341
},
"TrainerController.start_learning": {
"total": 16612.703889900004,
"count": 1,
"self": 11.447416600472934,
"children": {
"TrainerController._reset_env": {
"total": 4.540296700019098,
"count": 28,
"self": 4.540296700019098
},
"TrainerController.advance": {
"total": 16596.522686399505,
"count": 369636,
"self": 11.744608300512482,
"children": {
"env_step": {
"total": 7099.750584297573,
"count": 369636,
"self": 5413.150237094487,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1679.5166994022293,
"count": 369636,
"self": 64.66714889794093,
"children": {
"TorchPolicy.evaluate": {
"total": 1614.8495505042883,
"count": 685802,
"self": 1614.8495505042883
}
}
},
"workers": {
"total": 7.083647800856852,
"count": 369635,
"self": 0.0,
"children": {
"worker_root": {
"total": 16594.738982003495,
"count": 369635,
"is_parallel": true,
"self": 12384.536784907097,
"children": {
"steps_from_proto": {
"total": 0.04792250000900822,
"count": 56,
"is_parallel": true,
"self": 0.010319100074411836,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03760339993459638,
"count": 224,
"is_parallel": true,
"self": 0.03760339993459638
}
}
},
"UnityEnvironment.step": {
"total": 4210.154274596389,
"count": 369635,
"is_parallel": true,
"self": 209.84940049712895,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 183.11874929901387,
"count": 369635,
"is_parallel": true,
"self": 183.11874929901387
},
"communicator.exchange": {
"total": 3168.7093459001335,
"count": 369635,
"is_parallel": true,
"self": 3168.7093459001335
},
"steps_from_proto": {
"total": 648.4767789001125,
"count": 739270,
"is_parallel": true,
"self": 136.11069480020524,
"children": {
"_process_rank_one_or_two_observation": {
"total": 512.3660840999073,
"count": 2957080,
"is_parallel": true,
"self": 512.3660840999073
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 9485.027493801419,
"count": 369635,
"self": 85.1049199026238,
"children": {
"process_trajectory": {
"total": 1673.9583995988505,
"count": 369635,
"self": 1672.38175119886,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5766483999905176,
"count": 10,
"self": 1.5766483999905176
}
}
},
"_update_policy": {
"total": 7725.964174299945,
"count": 261,
"self": 1030.1647698001543,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6695.79940449979,
"count": 7839,
"self": 6695.79940449979
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.1000014385208488e-06,
"count": 1,
"self": 2.1000014385208488e-06
},
"TrainerController._save_models": {
"total": 0.19348810000519734,
"count": 1,
"self": 0.013423200012766756,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18006489999243058,
"count": 1,
"self": 0.18006489999243058
}
}
}
}
}
}
}