{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.2169365882873535,
"min": 2.187488079071045,
"max": 3.2957346439361572,
"count": 310
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 43274.6015625,
"min": 22407.81640625,
"max": 155723.5,
"count": 310
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 63.80263157894737,
"min": 51.58064516129032,
"max": 999.0,
"count": 310
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19396.0,
"min": 16232.0,
"max": 24364.0,
"count": 310
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1271.8346698323467,
"min": 1178.8655320001328,
"max": 1290.899356399355,
"count": 270
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 193318.86981451669,
"min": 2390.197286959138,
"max": 246259.7132427406,
"count": 270
},
"SoccerTwos.Step.mean": {
"value": 3099768.0,
"min": 9198.0,
"max": 3099768.0,
"count": 310
},
"SoccerTwos.Step.sum": {
"value": 3099768.0,
"min": 9198.0,
"max": 3099768.0,
"count": 310
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.019117312505841255,
"min": -0.14292658865451813,
"max": 0.144206702709198,
"count": 310
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.9249486923217773,
"min": -24.726299285888672,
"max": 21.775211334228516,
"count": 310
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.015083525329828262,
"min": -0.1498643457889557,
"max": 0.15344072878360748,
"count": 310
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.307779312133789,
"min": -25.926530838012695,
"max": 23.1695499420166,
"count": 310
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 310
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 310
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.0033464049981310477,
"min": -0.6,
"max": 0.4062083338697751,
"count": 310
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -0.5119999647140503,
"min": -43.0907998085022,
"max": 48.10540014505386,
"count": 310
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.0033464049981310477,
"min": -0.6,
"max": 0.4062083338697751,
"count": 310
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -0.5119999647140503,
"min": -43.0907998085022,
"max": 48.10540014505386,
"count": 310
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 310
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 310
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016340021406467713,
"min": 0.011538018301071134,
"max": 0.021530094879344687,
"count": 148
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016340021406467713,
"min": 0.011538018301071134,
"max": 0.021530094879344687,
"count": 148
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.07169456645846367,
"min": 6.737703222370328e-07,
"max": 0.08741842091083527,
"count": 148
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.07169456645846367,
"min": 6.737703222370328e-07,
"max": 0.08741842091083527,
"count": 148
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0750076299905777,
"min": 8.355482589195162e-07,
"max": 0.09159686267375947,
"count": 148
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0750076299905777,
"min": 8.355482589195162e-07,
"max": 0.09159686267375947,
"count": 148
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.00030000000000000003,
"count": 148
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.00030000000000000003,
"count": 148
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.19999999999999996,
"min": 0.1999999999999999,
"max": 0.19999999999999996,
"count": 148
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.19999999999999996,
"min": 0.1999999999999999,
"max": 0.19999999999999996,
"count": 148
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 148
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 148
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686596276",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686605075"
},
"total": 8799.348452148,
"count": 1,
"self": 0.4373315820012067,
"children": {
"run_training.setup": {
"total": 0.03887811200002034,
"count": 1,
"self": 0.03887811200002034
},
"TrainerController.start_learning": {
"total": 8798.872242454,
"count": 1,
"self": 6.221857014985289,
"children": {
"TrainerController._reset_env": {
"total": 16.689204636003296,
"count": 150,
"self": 16.689204636003296
},
"TrainerController.advance": {
"total": 8775.96114672701,
"count": 245185,
"self": 6.302414115654756,
"children": {
"env_step": {
"total": 6701.484014687017,
"count": 245185,
"self": 5292.895381895895,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1405.0207310632172,
"count": 245185,
"self": 43.28221063819183,
"children": {
"TorchPolicy.evaluate": {
"total": 1361.7385204250254,
"count": 476844,
"self": 1361.7385204250254
}
}
},
"workers": {
"total": 3.5679017279051095,
"count": 245184,
"self": 0.0,
"children": {
"worker_root": {
"total": 8783.511651326256,
"count": 245184,
"is_parallel": true,
"self": 4296.102282267753,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006961368000020229,
"count": 2,
"is_parallel": true,
"self": 0.004563440000310948,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023979279997092817,
"count": 8,
"is_parallel": true,
"self": 0.0023979279997092817
}
}
},
"UnityEnvironment.step": {
"total": 0.0665321590001895,
"count": 1,
"is_parallel": true,
"self": 0.0011703550003403507,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0009083689999442868,
"count": 1,
"is_parallel": true,
"self": 0.0009083689999442868
},
"communicator.exchange": {
"total": 0.06072293499983061,
"count": 1,
"is_parallel": true,
"self": 0.06072293499983061
},
"steps_from_proto": {
"total": 0.003730500000074244,
"count": 2,
"is_parallel": true,
"self": 0.0007338499999605119,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002996650000113732,
"count": 8,
"is_parallel": true,
"self": 0.002996650000113732
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4487.028668356498,
"count": 245183,
"is_parallel": true,
"self": 273.5608405467674,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 182.9845698220397,
"count": 245183,
"is_parallel": true,
"self": 182.9845698220397
},
"communicator.exchange": {
"total": 3133.187533543928,
"count": 245183,
"is_parallel": true,
"self": 3133.187533543928
},
"steps_from_proto": {
"total": 897.2957244437628,
"count": 490366,
"is_parallel": true,
"self": 149.70583543937755,
"children": {
"_process_rank_one_or_two_observation": {
"total": 747.5898890043852,
"count": 1961464,
"is_parallel": true,
"self": 747.5898890043852
}
}
}
}
},
"steps_from_proto": {
"total": 0.3807007020052424,
"count": 298,
"is_parallel": true,
"self": 0.06820461699453517,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.31249608501070725,
"count": 1192,
"is_parallel": true,
"self": 0.31249608501070725
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2068.1747179243384,
"count": 245184,
"self": 46.041670692545495,
"children": {
"process_trajectory": {
"total": 438.4016234238052,
"count": 245184,
"self": 436.9350880198017,
"children": {
"RLTrainer._checkpoint": {
"total": 1.46653540400348,
"count": 6,
"self": 1.46653540400348
}
}
},
"_update_policy": {
"total": 1583.7314238079878,
"count": 148,
"self": 1005.2171037679418,
"children": {
"TorchPOCAOptimizer.update": {
"total": 578.514320040046,
"count": 7405,
"self": 578.514320040046
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3850003597326577e-06,
"count": 1,
"self": 1.3850003597326577e-06
},
"TrainerController._save_models": {
"total": 3.2691001251805574e-05,
"count": 1,
"self": 3.2691001251805574e-05
}
}
}
}
}