{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.503925085067749,
"min": 1.2870746850967407,
"max": 3.2957651615142822,
"count": 2643
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 32581.033203125,
"min": 3673.99755859375,
"max": 105464.484375,
"count": 2643
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 54.142857142857146,
"min": 38.359375,
"max": 999.0,
"count": 2643
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19708.0,
"min": 3996.0,
"max": 30516.0,
"count": 2643
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1721.2802988760939,
"min": 1187.1543862478682,
"max": 1740.170175564165,
"count": 2568
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 313273.0143954491,
"min": 2380.369359055664,
"max": 409485.1632906863,
"count": 2568
},
"SoccerTwos.Step.mean": {
"value": 26429936.0,
"min": 9274.0,
"max": 26429936.0,
"count": 2643
},
"SoccerTwos.Step.sum": {
"value": 26429936.0,
"min": 9274.0,
"max": 26429936.0,
"count": 2643
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.06270360946655273,
"min": -0.13885830342769623,
"max": 0.18183642625808716,
"count": 2643
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -11.474761009216309,
"min": -23.88362693786621,
"max": 28.002809524536133,
"count": 2643
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.06207377091050148,
"min": -0.1356102079153061,
"max": 0.18300901353359222,
"count": 2643
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -11.35949993133545,
"min": -23.588298797607422,
"max": 28.183387756347656,
"count": 2643
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 2643
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 2643
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.06408524382961252,
"min": -0.5377428574221474,
"max": 0.44646154000208926,
"count": 2643
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -11.727599620819092,
"min": -64.78880047798157,
"max": 59.410799741744995,
"count": 2643
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.06408524382961252,
"min": -0.5377428574221474,
"max": 0.44646154000208926,
"count": 2643
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -11.727599620819092,
"min": -64.78880047798157,
"max": 59.410799741744995,
"count": 2643
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2643
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2643
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015245851777338733,
"min": 0.008800792263355107,
"max": 0.025791054552731413,
"count": 1277
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015245851777338733,
"min": 0.008800792263355107,
"max": 0.025791054552731413,
"count": 1277
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10285454814632734,
"min": 1.4963562714835157e-07,
"max": 0.13516210541129112,
"count": 1277
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10285454814632734,
"min": 1.4963562714835157e-07,
"max": 0.13516210541129112,
"count": 1277
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10417892883221308,
"min": 1.7443839558003069e-07,
"max": 0.1369783416390419,
"count": 1277
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10417892883221308,
"min": 1.7443839558003069e-07,
"max": 0.1369783416390419,
"count": 1277
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1277
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 1277
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 1277
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 1277
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 1277
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 1277
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695620799",
"python_version": "3.9.18 (main, Sep 11 2023, 13:41:44) \n[GCC 11.2.0]",
"command_line_arguments": "/home/genly/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695695757"
},
"total": 74957.5998916002,
"count": 1,
"self": 0.37824017368257046,
"children": {
"run_training.setup": {
"total": 0.013932876754552126,
"count": 1,
"self": 0.013932876754552126
},
"TrainerController.start_learning": {
"total": 74957.20771854976,
"count": 1,
"self": 28.325114775449038,
"children": {
"TrainerController._reset_env": {
"total": 3.6112374193035066,
"count": 133,
"self": 3.6112374193035066
},
"TrainerController.advance": {
"total": 74924.97800013516,
"count": 1819864,
"self": 26.490538095124066,
"children": {
"env_step": {
"total": 26447.454270750284,
"count": 1819864,
"self": 22078.23588775145,
"children": {
"SubprocessEnvManager._take_step": {
"total": 4352.479857348371,
"count": 1819864,
"self": 161.50091971829534,
"children": {
"TorchPolicy.evaluate": {
"total": 4190.978937630076,
"count": 3326822,
"self": 4190.978937630076
}
}
},
"workers": {
"total": 16.738525650464,
"count": 1819863,
"self": 0.0,
"children": {
"worker_root": {
"total": 74914.41346712876,
"count": 1819863,
"is_parallel": true,
"self": 56305.386541230604,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002260328270494938,
"count": 2,
"is_parallel": true,
"self": 0.00043610529974102974,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018242229707539082,
"count": 8,
"is_parallel": true,
"self": 0.0018242229707539082
}
}
},
"UnityEnvironment.step": {
"total": 0.024057872127741575,
"count": 1,
"is_parallel": true,
"self": 0.0007963343523442745,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0009081787429749966,
"count": 1,
"is_parallel": true,
"self": 0.0009081787429749966
},
"communicator.exchange": {
"total": 0.020508917048573494,
"count": 1,
"is_parallel": true,
"self": 0.020508917048573494
},
"steps_from_proto": {
"total": 0.0018444419838488102,
"count": 2,
"is_parallel": true,
"self": 0.00036615971475839615,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001478282269090414,
"count": 8,
"is_parallel": true,
"self": 0.001478282269090414
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 18608.784390117973,
"count": 1819862,
"is_parallel": true,
"self": 1090.4689699783921,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 696.5572505993769,
"count": 1819862,
"is_parallel": true,
"self": 696.5572505993769
},
"communicator.exchange": {
"total": 13802.903025161475,
"count": 1819862,
"is_parallel": true,
"self": 13802.903025161475
},
"steps_from_proto": {
"total": 3018.855144378729,
"count": 3639724,
"is_parallel": true,
"self": 468.9475295683369,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2549.9076148103923,
"count": 14558896,
"is_parallel": true,
"self": 2549.9076148103923
}
}
}
}
},
"steps_from_proto": {
"total": 0.24253578018397093,
"count": 264,
"is_parallel": true,
"self": 0.03884504083544016,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.20369073934853077,
"count": 1056,
"is_parallel": true,
"self": 0.20369073934853077
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 48451.03319128975,
"count": 1819863,
"self": 244.69135109847412,
"children": {
"process_trajectory": {
"total": 7197.596126302145,
"count": 1819863,
"self": 7184.11433515558,
"children": {
"RLTrainer._checkpoint": {
"total": 13.481791146565229,
"count": 52,
"self": 13.481791146565229
}
}
},
"_update_policy": {
"total": 41008.74571388913,
"count": 1277,
"self": 3996.2130596269853,
"children": {
"TorchPOCAOptimizer.update": {
"total": 37012.53265426215,
"count": 38322,
"self": 37012.53265426215
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.809139788150787e-07,
"count": 1,
"self": 7.809139788150787e-07
},
"TrainerController._save_models": {
"total": 0.2933654389344156,
"count": 1,
"self": 0.0068657975643873215,
"children": {
"RLTrainer._checkpoint": {
"total": 0.28649964137002826,
"count": 1,
"self": 0.28649964137002826
}
}
}
}
}
}
}