{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5112178325653076,
"min": 1.4550443887710571,
"max": 3.2957026958465576,
"count": 929
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 28193.279296875,
"min": 6817.09765625,
"max": 115370.15625,
"count": 929
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 68.85915492957747,
"min": 37.64341085271318,
"max": 999.0,
"count": 929
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19556.0,
"min": 3996.0,
"max": 30336.0,
"count": 929
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1604.2751922925138,
"min": 1191.3060540824733,
"max": 1639.8098799595462,
"count": 921
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 227807.07730553695,
"min": 2383.8214602694097,
"max": 417643.9510141752,
"count": 921
},
"SoccerTwos.Step.mean": {
"value": 9289971.0,
"min": 9236.0,
"max": 9289971.0,
"count": 929
},
"SoccerTwos.Step.sum": {
"value": 9289971.0,
"min": 9236.0,
"max": 9289971.0,
"count": 929
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.012447405606508255,
"min": -0.12627191841602325,
"max": 0.20163512229919434,
"count": 929
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.7799789905548096,
"min": -26.01201629638672,
"max": 37.30249786376953,
"count": 929
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.013726651668548584,
"min": -0.13103096187114716,
"max": 0.19688361883163452,
"count": 929
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.9629111289978027,
"min": -26.99237823486328,
"max": 36.42346954345703,
"count": 929
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 929
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 929
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.028352448990294984,
"min": -0.631578947368421,
"max": 0.44630256371620375,
"count": 929
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -4.054400205612183,
"min": -73.70879977941513,
"max": 65.5147989988327,
"count": 929
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.028352448990294984,
"min": -0.631578947368421,
"max": 0.44630256371620375,
"count": 929
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -4.054400205612183,
"min": -73.70879977941513,
"max": 65.5147989988327,
"count": 929
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 929
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 929
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014112015633145347,
"min": 0.01134627233647431,
"max": 0.024001921977226934,
"count": 448
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014112015633145347,
"min": 0.01134627233647431,
"max": 0.024001921977226934,
"count": 448
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10346591497461001,
"min": 0.0002900623774621636,
"max": 0.1367385797202587,
"count": 448
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10346591497461001,
"min": 0.0002900623774621636,
"max": 0.1367385797202587,
"count": 448
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10490976000825564,
"min": 0.0002921819883340504,
"max": 0.1405584176381429,
"count": 448
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10490976000825564,
"min": 0.0002921819883340504,
"max": 0.1405584176381429,
"count": 448
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 448
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 448
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 448
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 448
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 448
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 448
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1726908023",
"python_version": "3.10.12 (main, Jul 5 2023, 15:02:25) [Clang 14.0.6 ]",
"command_line_arguments": "/Users/applewang/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1",
"numpy_version": "1.23.5",
"end_time_seconds": "1726934588"
},
"total": 26565.178129000007,
"count": 1,
"self": 0.09012758400058374,
"children": {
"run_training.setup": {
"total": 0.4649967079749331,
"count": 1,
"self": 0.4649967079749331
},
"TrainerController.start_learning": {
"total": 26564.62300470803,
"count": 1,
"self": 5.512216902105138,
"children": {
"TrainerController._reset_env": {
"total": 4.287491457012948,
"count": 47,
"self": 4.287491457012948
},
"TrainerController.advance": {
"total": 26554.74454468087,
"count": 646127,
"self": 4.995779757737182,
"children": {
"env_step": {
"total": 21410.067916036467,
"count": 646127,
"self": 20712.225156166125,
"children": {
"SubprocessEnvManager._take_step": {
"total": 694.1660744313267,
"count": 646127,
"self": 22.008323678863235,
"children": {
"TorchPolicy.evaluate": {
"total": 672.1577507524635,
"count": 1168202,
"self": 672.1577507524635
}
}
},
"workers": {
"total": 3.676685439015273,
"count": 646127,
"self": 0.0,
"children": {
"worker_root": {
"total": 26548.379569957906,
"count": 646127,
"is_parallel": true,
"self": 6584.593463205092,
"children": {
"steps_from_proto": {
"total": 0.05394041957333684,
"count": 94,
"is_parallel": true,
"self": 0.0066166718024760485,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04732374777086079,
"count": 376,
"is_parallel": true,
"self": 0.04732374777086079
}
}
},
"UnityEnvironment.step": {
"total": 19963.73216633324,
"count": 646127,
"is_parallel": true,
"self": 55.028023537714034,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 372.25428532867227,
"count": 646127,
"is_parallel": true,
"self": 372.25428532867227
},
"communicator.exchange": {
"total": 18852.889559973322,
"count": 646127,
"is_parallel": true,
"self": 18852.889559973322
},
"steps_from_proto": {
"total": 683.5602974935318,
"count": 1292254,
"is_parallel": true,
"self": 80.48080466187093,
"children": {
"_process_rank_one_or_two_observation": {
"total": 603.0794928316609,
"count": 5169016,
"is_parallel": true,
"self": 603.0794928316609
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 5139.680848886666,
"count": 646127,
"self": 41.696486964356154,
"children": {
"process_trajectory": {
"total": 1118.7872105536517,
"count": 646127,
"self": 1117.3099692627438,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4772412909078412,
"count": 18,
"self": 1.4772412909078412
}
}
},
"_update_policy": {
"total": 3979.1971513686585,
"count": 449,
"self": 445.7941814964288,
"children": {
"TorchPOCAOptimizer.update": {
"total": 3533.4029698722297,
"count": 13470,
"self": 3533.4029698722297
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.090275175869465e-07,
"count": 1,
"self": 7.090275175869465e-07
},
"TrainerController._save_models": {
"total": 0.07875095901545137,
"count": 1,
"self": 0.0008666260400786996,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07788433297537267,
"count": 1,
"self": 0.07788433297537267
}
}
}
}
}
}
}