{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2872161865234375,
"min": 3.283947467803955,
"max": 3.2958364486694336,
"count": 13
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 43759.421875,
"min": 42781.015625,
"max": 105466.765625,
"count": 13
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 811.5,
"min": 494.7142857142857,
"max": 906.6,
"count": 13
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19476.0,
"min": 13852.0,
"max": 27716.0,
"count": 13
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1202.1297790220663,
"min": 1198.6916415169974,
"max": 1202.9673173073832,
"count": 13
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 7212.778674132397,
"min": 4798.067989990133,
"max": 14399.635556919653,
"count": 13
},
"SoccerTwos.Step.mean": {
"value": 129712.0,
"min": 9876.0,
"max": 129712.0,
"count": 13
},
"SoccerTwos.Step.sum": {
"value": 129712.0,
"min": 9876.0,
"max": 129712.0,
"count": 13
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.0014713257551193237,
"min": -0.0014713257551193237,
"max": 0.05451347306370735,
"count": 13
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.017655909061431885,
"min": -0.017655909061431885,
"max": 0.8722155690193176,
"count": 13
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0014713257551193237,
"min": -0.0014713257551193237,
"max": 0.05451347306370735,
"count": 13
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.017655909061431885,
"min": -0.017655909061431885,
"max": 0.8722155690193176,
"count": 13
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 13
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 13
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.5,
"min": -0.5,
"max": 0.2147230735191932,
"count": 13
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -6.0,
"min": -6.0,
"max": 2.7913999557495117,
"count": 13
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.5,
"min": -0.5,
"max": 0.2147230735191932,
"count": 13
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -6.0,
"min": -6.0,
"max": 2.7913999557495117,
"count": 13
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 13
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 13
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.012890987443582465,
"min": 0.00523900882108137,
"max": 0.013176202323908608,
"count": 5
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.012890987443582465,
"min": 0.00523900882108137,
"max": 0.013176202323908608,
"count": 5
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.001682706782594323,
"min": 0.0012276781412462394,
"max": 0.003556596456716458,
"count": 5
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.001682706782594323,
"min": 0.0012276781412462394,
"max": 0.003556596456716458,
"count": 5
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.001682706782594323,
"min": 0.0012276781412462394,
"max": 0.003556596456716458,
"count": 5
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.001682706782594323,
"min": 0.0012276781412462394,
"max": 0.003556596456716458,
"count": 5
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 5
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 5
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 5
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 5
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 5
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005,
"min": 0.005,
"max": 0.005,
"count": 5
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703591226",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/cybertron/anaconda3/envs/rl/bin/mlagents-learn /home/cybertron/Desktop/ml-agents/config/poca/SoccerTwos.yaml --env=training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.0.0",
"mlagents_envs_version": "1.0.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1",
"numpy_version": "1.21.2",
"end_time_seconds": "1703591510"
},
"total": 284.3635966809961,
"count": 1,
"self": 0.21882801299216226,
"children": {
"run_training.setup": {
"total": 0.012106276000849903,
"count": 1,
"self": 0.012106276000849903
},
"TrainerController.start_learning": {
"total": 284.1326623920031,
"count": 1,
"self": 0.13699659491248894,
"children": {
"TrainerController._reset_env": {
"total": 2.0738567980006337,
"count": 1,
"self": 2.0738567980006337
},
"TrainerController.advance": {
"total": 279.10317788109387,
"count": 8577,
"self": 0.1445215416897554,
"children": {
"env_step": {
"total": 198.89272247839108,
"count": 8577,
"self": 100.6482229002795,
"children": {
"SubprocessEnvManager._take_step": {
"total": 98.15602654993563,
"count": 8577,
"self": 0.9458562079526018,
"children": {
"TorchPolicy.evaluate": {
"total": 97.21017034198303,
"count": 17038,
"self": 97.21017034198303
}
}
},
"workers": {
"total": 0.08847302817594027,
"count": 8577,
"self": 0.0,
"children": {
"worker_root": {
"total": 279.9393852657813,
"count": 8577,
"is_parallel": true,
"self": 195.6091849016957,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018885289973695762,
"count": 2,
"is_parallel": true,
"self": 0.0004485460085561499,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014399829888134263,
"count": 8,
"is_parallel": true,
"self": 0.0014399829888134263
}
}
},
"UnityEnvironment.step": {
"total": 0.019859125000948552,
"count": 1,
"is_parallel": true,
"self": 0.00042441000550752506,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005746079987147823,
"count": 1,
"is_parallel": true,
"self": 0.0005746079987147823
},
"communicator.exchange": {
"total": 0.017522023001220077,
"count": 1,
"is_parallel": true,
"self": 0.017522023001220077
},
"steps_from_proto": {
"total": 0.0013380839955061674,
"count": 2,
"is_parallel": true,
"self": 0.0002767289915936999,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010613550039124675,
"count": 8,
"is_parallel": true,
"self": 0.0010613550039124675
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 84.33020036408561,
"count": 8576,
"is_parallel": true,
"self": 4.363296162802726,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.5144244331968366,
"count": 8576,
"is_parallel": true,
"self": 2.5144244331968366
},
"communicator.exchange": {
"total": 64.61790556683991,
"count": 8576,
"is_parallel": true,
"self": 64.61790556683991
},
"steps_from_proto": {
"total": 12.834574201246141,
"count": 17152,
"is_parallel": true,
"self": 2.373371579713421,
"children": {
"_process_rank_one_or_two_observation": {
"total": 10.46120262153272,
"count": 68608,
"is_parallel": true,
"self": 10.46120262153272
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 80.06593386101304,
"count": 8577,
"self": 1.3821683759160805,
"children": {
"process_trajectory": {
"total": 11.169169155109557,
"count": 8577,
"self": 11.169169155109557
},
"_update_policy": {
"total": 67.5145963299874,
"count": 6,
"self": 12.408137330006866,
"children": {
"TorchPOCAOptimizer.update": {
"total": 55.10645899998053,
"count": 76,
"self": 55.10645899998053
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 2.8186311179961194,
"count": 1,
"self": 2.9962000553496182e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 2.818601155995566,
"count": 1,
"self": 2.818601155995566
}
}
}
}
}
}
}