{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7809149026870728,
"min": 1.7258027791976929,
"max": 3.295712471008301,
"count": 676
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 34421.5234375,
"min": 19026.62890625,
"max": 111146.296875,
"count": 676
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 46.528301886792455,
"min": 38.110236220472444,
"max": 999.0,
"count": 676
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19728.0,
"min": 14532.0,
"max": 27812.0,
"count": 676
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1546.1583655500951,
"min": 1197.674072253018,
"max": 1559.373964110729,
"count": 667
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 327785.57349662017,
"min": 2395.348144506036,
"max": 380382.98396793095,
"count": 667
},
"SoccerTwos.Step.mean": {
"value": 6759994.0,
"min": 9922.0,
"max": 6759994.0,
"count": 676
},
"SoccerTwos.Step.sum": {
"value": 6759994.0,
"min": 9922.0,
"max": 6759994.0,
"count": 676
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.05039449781179428,
"min": -0.12812939286231995,
"max": 0.13954395055770874,
"count": 676
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -10.633238792419434,
"min": -23.59006118774414,
"max": 22.925369262695312,
"count": 676
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.048979852348566055,
"min": -0.12161581963300705,
"max": 0.1427195519208908,
"count": 676
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -10.334749221801758,
"min": -22.27959632873535,
"max": 22.355365753173828,
"count": 676
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 676
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 676
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.19450900452961853,
"min": -0.5804499983787537,
"max": 0.46777999997138975,
"count": 676
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -41.04139995574951,
"min": -55.459999799728394,
"max": 50.888399720191956,
"count": 676
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.19450900452961853,
"min": -0.5804499983787537,
"max": 0.46777999997138975,
"count": 676
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -41.04139995574951,
"min": -55.459999799728394,
"max": 50.888399720191956,
"count": 676
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 676
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 676
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.020284115670559308,
"min": 0.0111350942015027,
"max": 0.02531395632152756,
"count": 326
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.020284115670559308,
"min": 0.0111350942015027,
"max": 0.02531395632152756,
"count": 326
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11815406481424967,
"min": 1.1465595531317927e-05,
"max": 0.12800546834866205,
"count": 326
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11815406481424967,
"min": 1.1465595531317927e-05,
"max": 0.12800546834866205,
"count": 326
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.119600277642409,
"min": 1.2429546269700179e-05,
"max": 0.1303256573776404,
"count": 326
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.119600277642409,
"min": 1.2429546269700179e-05,
"max": 0.1303256573776404,
"count": 326
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 326
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 326
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 326
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 326
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 326
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 326
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1698758718",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/loris/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu117",
"numpy_version": "1.23.5",
"end_time_seconds": "1698764424"
},
"total": 5706.621346769001,
"count": 1,
"self": 0.08893190000162576,
"children": {
"run_training.setup": {
"total": 0.009767644000021392,
"count": 1,
"self": 0.009767644000021392
},
"TrainerController.start_learning": {
"total": 5706.522647225,
"count": 1,
"self": 5.924937289886657,
"children": {
"TrainerController._reset_env": {
"total": 1.847396195998499,
"count": 34,
"self": 1.847396195998499
},
"TrainerController.advance": {
"total": 5698.672009086115,
"count": 466832,
"self": 5.84066097311279,
"children": {
"env_step": {
"total": 4304.882862541857,
"count": 466832,
"self": 3284.06622609237,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1017.2349912601608,
"count": 466832,
"self": 35.81308714550414,
"children": {
"TorchPolicy.evaluate": {
"total": 981.4219041146566,
"count": 851017,
"self": 981.4219041146566
}
}
},
"workers": {
"total": 3.5816451893265366,
"count": 466831,
"self": 0.0,
"children": {
"worker_root": {
"total": 5698.93553781256,
"count": 466831,
"is_parallel": true,
"self": 3065.1170296588193,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018119219998879998,
"count": 2,
"is_parallel": true,
"self": 0.00044821299979957985,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00136370900008842,
"count": 8,
"is_parallel": true,
"self": 0.00136370900008842
}
}
},
"UnityEnvironment.step": {
"total": 0.012116857999899366,
"count": 1,
"is_parallel": true,
"self": 0.0002898419998018653,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021020799977122806,
"count": 1,
"is_parallel": true,
"self": 0.00021020799977122806
},
"communicator.exchange": {
"total": 0.01082476100009444,
"count": 1,
"is_parallel": true,
"self": 0.01082476100009444
},
"steps_from_proto": {
"total": 0.0007920470002318325,
"count": 2,
"is_parallel": true,
"self": 0.00015755899948999286,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006344880007418396,
"count": 8,
"is_parallel": true,
"self": 0.0006344880007418396
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2633.7929615657454,
"count": 466830,
"is_parallel": true,
"self": 144.23465086849137,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.530094460501,
"count": 466830,
"is_parallel": true,
"self": 82.530094460501
},
"communicator.exchange": {
"total": 2016.3691967651735,
"count": 466830,
"is_parallel": true,
"self": 2016.3691967651735
},
"steps_from_proto": {
"total": 390.65901947157954,
"count": 933660,
"is_parallel": true,
"self": 73.53245093436863,
"children": {
"_process_rank_one_or_two_observation": {
"total": 317.1265685372109,
"count": 3734640,
"is_parallel": true,
"self": 317.1265685372109
}
}
}
}
},
"steps_from_proto": {
"total": 0.025546587995449954,
"count": 66,
"is_parallel": true,
"self": 0.005225847999554389,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.020320739995895565,
"count": 264,
"is_parallel": true,
"self": 0.020320739995895565
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1387.9484855711457,
"count": 466831,
"self": 41.307249054396834,
"children": {
"process_trajectory": {
"total": 549.9510260567513,
"count": 466831,
"self": 548.7462396517508,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2047864050005046,
"count": 13,
"self": 1.2047864050005046
}
}
},
"_update_policy": {
"total": 796.6902104599976,
"count": 326,
"self": 422.1617266649687,
"children": {
"TorchPOCAOptimizer.update": {
"total": 374.52848379502893,
"count": 9783,
"self": 374.52848379502893
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.40999132883735e-07,
"count": 1,
"self": 6.40999132883735e-07
},
"TrainerController._save_models": {
"total": 0.07830401200044435,
"count": 1,
"self": 0.0008399329999519978,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07746407900049235,
"count": 1,
"self": 0.07746407900049235
}
}
}
}
}
}
}