{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.8700988292694092,
"min": 1.8700988292694092,
"max": 3.295753002166748,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 37820.87890625,
"min": 24672.630859375,
"max": 150897.5625,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 46.4811320754717,
"min": 39.967213114754095,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19708.0,
"min": 15064.0,
"max": 27916.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1597.737497674885,
"min": 1199.9771334302864,
"max": 1597.737497674885,
"count": 486
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 338720.3495070756,
"min": 2400.4631656037986,
"max": 379875.8964168029,
"count": 486
},
"SoccerTwos.Step.mean": {
"value": 4999914.0,
"min": 9980.0,
"max": 4999914.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999914.0,
"min": 9980.0,
"max": 4999914.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.02873898483812809,
"min": -0.10513824969530106,
"max": 0.17450928688049316,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -6.063925743103027,
"min": -19.9420108795166,
"max": 24.678964614868164,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.02994154952466488,
"min": -0.1047244593501091,
"max": 0.17646732926368713,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -6.317667007446289,
"min": -20.840167999267578,
"max": 24.9876708984375,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.01920378999122511,
"min": -0.631578947368421,
"max": 0.4256228600229536,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -4.0519996881484985,
"min": -52.351199984550476,
"max": 50.92179983854294,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.01920378999122511,
"min": -0.631578947368421,
"max": 0.4256228600229536,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -4.0519996881484985,
"min": -52.351199984550476,
"max": 50.92179983854294,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016084134335202786,
"min": 0.01000754886190407,
"max": 0.024137182723886024,
"count": 240
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016084134335202786,
"min": 0.01000754886190407,
"max": 0.024137182723886024,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10967138409614563,
"min": 0.00010499681508614837,
"max": 0.11730341464281083,
"count": 240
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10967138409614563,
"min": 0.00010499681508614837,
"max": 0.11730341464281083,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11196163892745972,
"min": 0.00010591759055387229,
"max": 0.12087150886654854,
"count": 240
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11196163892745972,
"min": 0.00010591759055387229,
"max": 0.12087150886654854,
"count": 240
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 240
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 240
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 240
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689808656",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/home/arran/.local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689813487"
},
"total": 4830.369840984,
"count": 1,
"self": 0.1677002939977683,
"children": {
"run_training.setup": {
"total": 0.008510141000442673,
"count": 1,
"self": 0.008510141000442673
},
"TrainerController.start_learning": {
"total": 4830.193630549002,
"count": 1,
"self": 4.295788931449351,
"children": {
"TrainerController._reset_env": {
"total": 3.0721712700324133,
"count": 25,
"self": 3.0721712700324133
},
"TrainerController.advance": {
"total": 4822.658342429517,
"count": 339801,
"self": 3.9652985607026494,
"children": {
"env_step": {
"total": 3438.6393926086166,
"count": 339801,
"self": 2587.7817735170393,
"children": {
"SubprocessEnvManager._take_step": {
"total": 848.2348908918575,
"count": 339801,
"self": 23.944038431567606,
"children": {
"TorchPolicy.evaluate": {
"total": 824.2908524602899,
"count": 632342,
"self": 824.2908524602899
}
}
},
"workers": {
"total": 2.622728199719859,
"count": 339801,
"self": 0.0,
"children": {
"worker_root": {
"total": 4824.33143751225,
"count": 339801,
"is_parallel": true,
"self": 2717.224129579714,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016139149956870824,
"count": 2,
"is_parallel": true,
"self": 0.0005800950093544088,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010338199863326736,
"count": 8,
"is_parallel": true,
"self": 0.0010338199863326736
}
}
},
"UnityEnvironment.step": {
"total": 0.014475203002803028,
"count": 1,
"is_parallel": true,
"self": 0.000361960002919659,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003173540026182309,
"count": 1,
"is_parallel": true,
"self": 0.0003173540026182309
},
"communicator.exchange": {
"total": 0.012671557000430766,
"count": 1,
"is_parallel": true,
"self": 0.012671557000430766
},
"steps_from_proto": {
"total": 0.0011243319968343712,
"count": 2,
"is_parallel": true,
"self": 0.0002327539841644466,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008915780126699246,
"count": 8,
"is_parallel": true,
"self": 0.0008915780126699246
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2107.0740649625513,
"count": 339800,
"is_parallel": true,
"self": 128.00429471890675,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 87.17355650067475,
"count": 339800,
"is_parallel": true,
"self": 87.17355650067475
},
"communicator.exchange": {
"total": 1505.6371768408208,
"count": 339800,
"is_parallel": true,
"self": 1505.6371768408208
},
"steps_from_proto": {
"total": 386.2590369021491,
"count": 679600,
"is_parallel": true,
"self": 73.62846255295153,
"children": {
"_process_rank_one_or_two_observation": {
"total": 312.63057434919756,
"count": 2718400,
"is_parallel": true,
"self": 312.63057434919756
}
}
}
}
},
"steps_from_proto": {
"total": 0.033242969984712545,
"count": 48,
"is_parallel": true,
"self": 0.006481821008492261,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.026761148976220284,
"count": 192,
"is_parallel": true,
"self": 0.026761148976220284
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1380.0536512601975,
"count": 339801,
"self": 35.37263698402239,
"children": {
"process_trajectory": {
"total": 442.39681883015874,
"count": 339801,
"self": 440.756553943138,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6402648870207486,
"count": 10,
"self": 1.6402648870207486
}
}
},
"_update_policy": {
"total": 902.2841954460164,
"count": 240,
"self": 484.9813328000673,
"children": {
"TorchPOCAOptimizer.update": {
"total": 417.30286264594906,
"count": 7200,
"self": 417.30286264594906
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.410045898519456e-07,
"count": 1,
"self": 6.410045898519456e-07
},
"TrainerController._save_models": {
"total": 0.16732727699854877,
"count": 1,
"self": 0.0011759909975808114,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16615128600096796,
"count": 1,
"self": 0.16615128600096796
}
}
}
}
}
}
}