{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.934769630432129,
"min": 2.8051741123199463,
"max": 3.295713424682617,
"count": 114
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 69495.34375,
"min": 18866.408203125,
"max": 125462.734375,
"count": 114
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 757.2857142857143,
"min": 451.8333333333333,
"max": 999.0,
"count": 114
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 21204.0,
"min": 11988.0,
"max": 29620.0,
"count": 114
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1197.274812620925,
"min": 1195.1826354648726,
"max": 1206.890414376457,
"count": 75
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 4789.0992504837,
"min": 2390.8609154375636,
"max": 16786.735059929753,
"count": 75
},
"SoccerTwos.Step.mean": {
"value": 1139466.0,
"min": 9990.0,
"max": 1139466.0,
"count": 114
},
"SoccerTwos.Step.sum": {
"value": 1139466.0,
"min": 9990.0,
"max": 1139466.0,
"count": 114
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0007185080321505666,
"min": -0.006864693481475115,
"max": 0.01163996197283268,
"count": 114
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.009340604767203331,
"min": -0.08386926352977753,
"max": 0.12948094308376312,
"count": 114
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.001403563772328198,
"min": -0.007754822261631489,
"max": 0.012222911231219769,
"count": 114
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.018246328458189964,
"min": -0.08467042446136475,
"max": 0.13665226101875305,
"count": 114
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 114
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 114
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.26036922748272234,
"min": -0.39789473383050217,
"max": 0.3159714341163635,
"count": 114
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 3.3847999572753906,
"min": -7.559999942779541,
"max": 4.423600077629089,
"count": 114
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.26036922748272234,
"min": -0.39789473383050217,
"max": 0.3159714341163635,
"count": 114
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 3.3847999572753906,
"min": -7.559999942779541,
"max": 4.423600077629089,
"count": 114
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 114
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 114
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016350780782158836,
"min": 0.014799594972118938,
"max": 0.032501664088649705,
"count": 52
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016350780782158836,
"min": 0.014799594972118938,
"max": 0.032501664088649705,
"count": 52
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.00013933395156906652,
"min": 2.502497588352526e-07,
"max": 0.00679068033748384,
"count": 52
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.00013933395156906652,
"min": 2.502497588352526e-07,
"max": 0.00679068033748384,
"count": 52
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0004637145543074439,
"min": 2.637474003582459e-07,
"max": 0.007021134167637986,
"count": 52
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0004637145543074439,
"min": 2.637474003582459e-07,
"max": 0.007021134167637986,
"count": 52
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 52
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 52
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.19999999999999998,
"min": 0.19999999999999998,
"max": 0.2,
"count": 52
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.19999999999999998,
"min": 0.19999999999999998,
"max": 0.2,
"count": 52
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005,
"min": 0.004999999999999999,
"max": 0.005,
"count": 52
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005,
"min": 0.004999999999999999,
"max": 0.005,
"count": 52
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681072315",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "D:\\Anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.exe --run-id=SoccerTwos4 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.0+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1681101959"
},
"total": 29643.936036299998,
"count": 1,
"self": 2.311513199998444,
"children": {
"run_training.setup": {
"total": 0.7151799000000025,
"count": 1,
"self": 0.7151799000000025
},
"TrainerController.start_learning": {
"total": 29640.9093432,
"count": 1,
"self": 3.1426214995444752,
"children": {
"TrainerController._reset_env": {
"total": 29.073369800000478,
"count": 6,
"self": 29.073369800000478
},
"TrainerController.advance": {
"total": 29607.404473400456,
"count": 74319,
"self": 3.3410247998981504,
"children": {
"env_step": {
"total": 2784.290730200159,
"count": 74319,
"self": 2194.6706589003516,
"children": {
"SubprocessEnvManager._take_step": {
"total": 587.4366180999746,
"count": 74319,
"self": 19.001051999797255,
"children": {
"TorchPolicy.evaluate": {
"total": 568.4355661001773,
"count": 147606,
"self": 568.4355661001773
}
}
},
"workers": {
"total": 2.18345319983311,
"count": 74319,
"self": 0.0,
"children": {
"worker_root": {
"total": 29268.696971899622,
"count": 74319,
"is_parallel": true,
"self": 27494.989020299494,
"children": {
"steps_from_proto": {
"total": 0.8304188999958981,
"count": 12,
"is_parallel": true,
"self": 0.010917399994976762,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.8195015000009214,
"count": 48,
"is_parallel": true,
"self": 0.8195015000009214
}
}
},
"UnityEnvironment.step": {
"total": 1772.8775327001335,
"count": 74319,
"is_parallel": true,
"self": 92.96242089916927,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.49146770031265,
"count": 74319,
"is_parallel": true,
"self": 78.49146770031265
},
"communicator.exchange": {
"total": 1295.150091900374,
"count": 74319,
"is_parallel": true,
"self": 1295.150091900374
},
"steps_from_proto": {
"total": 306.27355220027766,
"count": 148638,
"is_parallel": true,
"self": 60.68742410255092,
"children": {
"_process_rank_one_or_two_observation": {
"total": 245.58612809772674,
"count": 594552,
"is_parallel": true,
"self": 245.58612809772674
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 26819.7727184004,
"count": 74319,
"self": 19.934344800534745,
"children": {
"process_trajectory": {
"total": 467.00601679987153,
"count": 74319,
"self": 461.79728719987247,
"children": {
"RLTrainer._checkpoint": {
"total": 5.20872959999906,
"count": 2,
"self": 5.20872959999906
}
}
},
"_update_policy": {
"total": 26332.832356799994,
"count": 53,
"self": 3235.4327074000284,
"children": {
"TorchPOCAOptimizer.update": {
"total": 23097.399649399966,
"count": 15854,
"self": 23097.399649399966
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.1999989005271345e-06,
"count": 1,
"self": 3.1999989005271345e-06
},
"TrainerController._save_models": {
"total": 1.2888753000006545,
"count": 1,
"self": 0.03987529999722028,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2490000000034343,
"count": 1,
"self": 1.2490000000034343
}
}
}
}
}
}
}