{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2326748371124268,
"min": 3.204035997390747,
"max": 3.2957241535186768,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 44895.38671875,
"min": 25380.65625,
"max": 105972.609375,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 832.0,
"min": 403.0,
"max": 999.0,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 16640.0,
"min": 7992.0,
"max": 29596.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1198.1028114958895,
"min": 1195.924945860192,
"max": 1203.7108461116661,
"count": 41
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2396.205622991779,
"min": 2394.1214805350583,
"max": 14421.224961350163,
"count": 41
},
"SoccerTwos.Step.mean": {
"value": 499250.0,
"min": 9518.0,
"max": 499250.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 499250.0,
"min": 9518.0,
"max": 499250.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.015718918293714523,
"min": -0.0082920016720891,
"max": 0.026124007999897003,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.17290809750556946,
"min": -0.11519016325473785,
"max": 0.41663283109664917,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.013119292445480824,
"min": -0.013039717450737953,
"max": 0.02596515603363514,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.14431221783161163,
"min": -0.13039717078208923,
"max": 0.4378693699836731,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.1517818190834739,
"min": -0.4618285724094936,
"max": 0.2395777768558926,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 1.669600009918213,
"min": -6.761199951171875,
"max": 4.312399983406067,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.1517818190834739,
"min": -0.4618285724094936,
"max": 0.2395777768558926,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 1.669600009918213,
"min": -6.761199951171875,
"max": 4.312399983406067,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017385409353300928,
"min": 0.012158198804718268,
"max": 0.019352194482538228,
"count": 22
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017385409353300928,
"min": 0.012158198804718268,
"max": 0.019352194482538228,
"count": 22
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.002847444956811766,
"min": 0.0001978763655642979,
"max": 0.005287822421329717,
"count": 22
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.002847444956811766,
"min": 0.0001978763655642979,
"max": 0.005287822421329717,
"count": 22
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.003071119050340106,
"min": 0.00018883093192319697,
"max": 0.00531188848738869,
"count": 22
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.003071119050340106,
"min": 0.00018883093192319697,
"max": 0.00531188848738869,
"count": 22
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 22
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 22
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 22
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 22
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 22
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 22
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1735913681",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/poca/SoccerTwos.yaml --env=/content/ml-agents/training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos_run3 --no-graphics --force",
"mlagents_version": "1.1.0",
"mlagents_envs_version": "1.1.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1735916781"
},
"total": 3100.012670545,
"count": 1,
"self": 0.6521458779998284,
"children": {
"run_training.setup": {
"total": 0.08977682400018239,
"count": 1,
"self": 0.08977682400018239
},
"TrainerController.start_learning": {
"total": 3099.270747843,
"count": 1,
"self": 1.57983341695126,
"children": {
"TrainerController._reset_env": {
"total": 3.4483198789994276,
"count": 3,
"self": 3.4483198789994276
},
"TrainerController.advance": {
"total": 3093.9372038460497,
"count": 32621,
"self": 1.9241502010345357,
"children": {
"env_step": {
"total": 1352.9505150630157,
"count": 32621,
"self": 1088.1388273300536,
"children": {
"SubprocessEnvManager._take_step": {
"total": 263.83665651387855,
"count": 32621,
"self": 10.851296125117187,
"children": {
"TorchPolicy.evaluate": {
"total": 252.98536038876136,
"count": 64782,
"self": 252.98536038876136
}
}
},
"workers": {
"total": 0.9750312190835757,
"count": 32621,
"self": 0.0,
"children": {
"worker_root": {
"total": 3090.7365345539756,
"count": 32621,
"is_parallel": true,
"self": 2205.778387420889,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005868459999874176,
"count": 2,
"is_parallel": true,
"self": 0.0016513699974893825,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0042170900023847935,
"count": 8,
"is_parallel": true,
"self": 0.0042170900023847935
}
}
},
"UnityEnvironment.step": {
"total": 0.05771823299983225,
"count": 1,
"is_parallel": true,
"self": 0.00201151599867444,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0010542660002101911,
"count": 1,
"is_parallel": true,
"self": 0.0010542660002101911
},
"communicator.exchange": {
"total": 0.04582123500040325,
"count": 1,
"is_parallel": true,
"self": 0.04582123500040325
},
"steps_from_proto": {
"total": 0.008831216000544373,
"count": 2,
"is_parallel": true,
"self": 0.0009418929994353675,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.007889323001109005,
"count": 8,
"is_parallel": true,
"self": 0.007889323001109005
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 884.9523733590859,
"count": 32620,
"is_parallel": true,
"self": 61.99416238412232,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 34.18122957793457,
"count": 32620,
"is_parallel": true,
"self": 34.18122957793457
},
"communicator.exchange": {
"total": 623.3286414490258,
"count": 32620,
"is_parallel": true,
"self": 623.3286414490258
},
"steps_from_proto": {
"total": 165.4483399480032,
"count": 65240,
"is_parallel": true,
"self": 31.228980546780804,
"children": {
"_process_rank_one_or_two_observation": {
"total": 134.2193594012224,
"count": 260960,
"is_parallel": true,
"self": 134.2193594012224
}
}
}
}
},
"steps_from_proto": {
"total": 0.0057737740007723914,
"count": 4,
"is_parallel": true,
"self": 0.0012993949985684594,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004474379002203932,
"count": 16,
"is_parallel": true,
"self": 0.004474379002203932
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1739.0625385819994,
"count": 32621,
"self": 14.798050648002572,
"children": {
"process_trajectory": {
"total": 228.75329991899616,
"count": 32621,
"self": 228.39128140299636,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3620185159998073,
"count": 1,
"self": 0.3620185159998073
}
}
},
"_update_policy": {
"total": 1495.5111880150007,
"count": 22,
"self": 119.33729730502273,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1376.173890709978,
"count": 669,
"self": 1376.173890709978
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4610004654969089e-06,
"count": 1,
"self": 1.4610004654969089e-06
},
"TrainerController._save_models": {
"total": 0.3053892399993856,
"count": 1,
"self": 0.004988144998606003,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3004010950007796,
"count": 1,
"self": 0.3004010950007796
}
}
}
}
}
}
}