{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.105114459991455,
"min": 3.105114459991455,
"max": 3.2957065105438232,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 48688.1953125,
"min": 42019.0859375,
"max": 109748.3671875,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 243.05555555555554,
"min": 206.66666666666666,
"max": 956.2,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 17500.0,
"min": 16512.0,
"max": 25152.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1245.8481626765551,
"min": 1199.5531159985107,
"max": 1245.8481626765551,
"count": 50
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 44850.533856355985,
"min": 2415.6549218818986,
"max": 64590.14878167217,
"count": 50
},
"SoccerTwos.Step.mean": {
"value": 499886.0,
"min": 9592.0,
"max": 499886.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 499886.0,
"min": 9592.0,
"max": 499886.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.03981245681643486,
"min": -0.03981245681643486,
"max": 0.03757605701684952,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.4332484006881714,
"min": -1.4332484006881714,
"max": 1.127281665802002,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.04524083435535431,
"min": -0.04524083435535431,
"max": 0.04333335533738136,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.6286699771881104,
"min": -1.6286699771881104,
"max": 1.3000006675720215,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.14067777825726402,
"min": -0.4166666666666667,
"max": 0.44489090551029553,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -5.064400017261505,
"min": -7.836000025272369,
"max": 9.787599921226501,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.14067777825726402,
"min": -0.4166666666666667,
"max": 0.44489090551029553,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -5.064400017261505,
"min": -7.836000025272369,
"max": 9.787599921226501,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017825303140368003,
"min": 0.014612562129817283,
"max": 0.020802132273092865,
"count": 23
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017825303140368003,
"min": 0.014612562129817283,
"max": 0.020802132273092865,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.02149607219422857,
"min": 0.0014562032088482131,
"max": 0.02149607219422857,
"count": 23
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.02149607219422857,
"min": 0.0014562032088482131,
"max": 0.02149607219422857,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.02265563899030288,
"min": 0.001528622421513622,
"max": 0.02265563899030288,
"count": 23
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.02265563899030288,
"min": 0.001528622421513622,
"max": 0.02265563899030288,
"count": 23
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 23
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 23
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 23
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680725815",
"python_version": "3.9.13 (main, Aug 17 2022, 04:34:27) \n[Clang 11.1.0 ]",
"command_line_arguments": "/Users/romanmalygin/.venvs/aivsai/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1680731565"
},
"total": 5750.20565045,
"count": 1,
"self": 0.4288838359998408,
"children": {
"run_training.setup": {
"total": 0.05599138600000009,
"count": 1,
"self": 0.05599138600000009
},
"TrainerController.start_learning": {
"total": 5749.720775228,
"count": 1,
"self": 1.0230867321197366,
"children": {
"TrainerController._reset_env": {
"total": 5.452135400999538,
"count": 3,
"self": 5.452135400999538
},
"TrainerController.advance": {
"total": 5742.88802891688,
"count": 32593,
"self": 1.0805058188434487,
"children": {
"env_step": {
"total": 4398.036322860026,
"count": 32593,
"self": 4261.753033149064,
"children": {
"SubprocessEnvManager._take_step": {
"total": 135.63617563993319,
"count": 32593,
"self": 5.957987499062369,
"children": {
"TorchPolicy.evaluate": {
"total": 129.67818814087082,
"count": 64428,
"self": 129.67818814087082
}
}
},
"workers": {
"total": 0.6471140710290397,
"count": 32592,
"self": 0.0,
"children": {
"worker_root": {
"total": 5742.35773433805,
"count": 32592,
"is_parallel": true,
"self": 1604.6105905860404,
"children": {
"steps_from_proto": {
"total": 0.01477887199999639,
"count": 6,
"is_parallel": true,
"self": 0.0021083460009911903,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0126705259990052,
"count": 24,
"is_parallel": true,
"self": 0.0126705259990052
}
}
},
"UnityEnvironment.step": {
"total": 4137.73236488001,
"count": 32592,
"is_parallel": true,
"self": 11.103684912936842,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 82.81244763706641,
"count": 32592,
"is_parallel": true,
"self": 82.81244763706641
},
"communicator.exchange": {
"total": 3894.978669442948,
"count": 32592,
"is_parallel": true,
"self": 3894.978669442948
},
"steps_from_proto": {
"total": 148.83756288705914,
"count": 65184,
"is_parallel": true,
"self": 20.501095915977203,
"children": {
"_process_rank_one_or_two_observation": {
"total": 128.33646697108193,
"count": 260736,
"is_parallel": true,
"self": 128.33646697108193
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1343.7712002380104,
"count": 32592,
"self": 7.544408949050194,
"children": {
"process_trajectory": {
"total": 161.65574759096305,
"count": 32592,
"self": 161.22057021996258,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4351773710004636,
"count": 1,
"self": 0.4351773710004636
}
}
},
"_update_policy": {
"total": 1174.571043697997,
"count": 23,
"self": 121.14498982900113,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1053.426053868996,
"count": 690,
"self": 1053.426053868996
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.354000338935293e-06,
"count": 1,
"self": 4.354000338935293e-06
},
"TrainerController._save_models": {
"total": 0.3575198240005193,
"count": 1,
"self": 0.007720928000708227,
"children": {
"RLTrainer._checkpoint": {
"total": 0.34979889599981107,
"count": 1,
"self": 0.34979889599981107
}
}
}
}
}
}
}