{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7588081359863281,
"min": 1.6908797025680542,
"max": 3.295719861984253,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 33825.3984375,
"min": 5273.1435546875,
"max": 143272.90625,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 60.93827160493827,
"min": 37.59230769230769,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19744.0,
"min": 11236.0,
"max": 29340.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1561.6993778235915,
"min": 1182.9172301275994,
"max": 1572.7152468564125,
"count": 930
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 252995.29920742183,
"min": 2365.834460255199,
"max": 396588.2787541444,
"count": 930
},
"SoccerTwos.Step.mean": {
"value": 9999946.0,
"min": 9670.0,
"max": 9999946.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999946.0,
"min": 9670.0,
"max": 9999946.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.0005929771577939391,
"min": -0.10993397235870361,
"max": 0.14778369665145874,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.0960623025894165,
"min": -19.898048400878906,
"max": 25.182292938232422,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.004628224764019251,
"min": -0.11462147533893585,
"max": 0.1500168740749359,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.7497724294662476,
"min": -20.74648666381836,
"max": 25.204845428466797,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.016464196605446898,
"min": -0.6153846153846154,
"max": 0.37161367673140305,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -2.6671998500823975,
"min": -52.334999799728394,
"max": 61.50600028038025,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.016464196605446898,
"min": -0.6153846153846154,
"max": 0.37161367673140305,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -2.6671998500823975,
"min": -52.334999799728394,
"max": 61.50600028038025,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01662024947630319,
"min": 0.010978754617584248,
"max": 0.02382210058373554,
"count": 478
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01662024947630319,
"min": 0.010978754617584248,
"max": 0.02382210058373554,
"count": 478
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10861011470357577,
"min": 2.6484625664124907e-06,
"max": 0.12980319807926813,
"count": 478
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10861011470357577,
"min": 2.6484625664124907e-06,
"max": 0.12980319807926813,
"count": 478
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11008004421989123,
"min": 2.90544399679978e-06,
"max": 0.1320348324875037,
"count": 478
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11008004421989123,
"min": 2.90544399679978e-06,
"max": 0.1320348324875037,
"count": 478
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 478
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 478
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 478
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 478
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 478
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 478
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679983956",
"python_version": "3.9.16 (main, Mar 8 2023, 04:29:44) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/nagendrak/opt/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1680033683"
},
"total": 49724.372690283,
"count": 1,
"self": 0.47004871400713455,
"children": {
"run_training.setup": {
"total": 0.053126408000000014,
"count": 1,
"self": 0.053126408000000014
},
"TrainerController.start_learning": {
"total": 49723.849515161,
"count": 1,
"self": 18.957341480265313,
"children": {
"TrainerController._reset_env": {
"total": 5.264957535999057,
"count": 50,
"self": 5.264957535999057
},
"TrainerController.advance": {
"total": 49699.35271289774,
"count": 679857,
"self": 19.628694441627886,
"children": {
"env_step": {
"total": 14516.588156632326,
"count": 679857,
"self": 11866.56155001186,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2638.729141626691,
"count": 679857,
"self": 123.66091293688487,
"children": {
"TorchPolicy.evaluate": {
"total": 2515.0682286898063,
"count": 1269224,
"self": 2515.0682286898063
}
}
},
"workers": {
"total": 11.297464993775279,
"count": 679857,
"self": 0.0,
"children": {
"worker_root": {
"total": 49685.76442921223,
"count": 679857,
"is_parallel": true,
"self": 40025.68255846995,
"children": {
"steps_from_proto": {
"total": 0.11668987000523767,
"count": 100,
"is_parallel": true,
"self": 0.02464874798996819,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.09204112201526948,
"count": 400,
"is_parallel": true,
"self": 0.09204112201526948
}
}
},
"UnityEnvironment.step": {
"total": 9659.965180872274,
"count": 679857,
"is_parallel": true,
"self": 544.2142709174022,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 353.90709360012596,
"count": 679857,
"is_parallel": true,
"self": 353.90709360012596
},
"communicator.exchange": {
"total": 7078.385508205885,
"count": 679857,
"is_parallel": true,
"self": 7078.385508205885
},
"steps_from_proto": {
"total": 1683.4583081488595,
"count": 1359714,
"is_parallel": true,
"self": 355.3363779909266,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1328.1219301579329,
"count": 5438856,
"is_parallel": true,
"self": 1328.1219301579329
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 35163.13586182378,
"count": 679857,
"self": 127.06356120001874,
"children": {
"process_trajectory": {
"total": 4555.459942281793,
"count": 679857,
"self": 4550.090779544793,
"children": {
"RLTrainer._checkpoint": {
"total": 5.369162737000352,
"count": 20,
"self": 5.369162737000352
}
}
},
"_update_policy": {
"total": 30480.61235834197,
"count": 478,
"self": 2022.1009502181769,
"children": {
"TorchPOCAOptimizer.update": {
"total": 28458.51140812379,
"count": 14352,
"self": 28458.51140812379
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.319996954407543e-06,
"count": 1,
"self": 1.319996954407543e-06
},
"TrainerController._save_models": {
"total": 0.27450192700052867,
"count": 1,
"self": 0.001933756997459568,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2725681700030691,
"count": 1,
"self": 0.2725681700030691
}
}
}
}
}
}
}