{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8770827651023865,
"min": 0.8770827651023865,
"max": 2.8713061809539795,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8384.0341796875,
"min": 8384.0341796875,
"max": 29373.462890625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.818958282470703,
"min": 0.43189752101898193,
"max": 12.818958282470703,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2499.69677734375,
"min": 83.78811645507812,
"max": 2598.1533203125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06862373603079878,
"min": 0.061229430602158096,
"max": 0.07675846736303499,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27449494412319514,
"min": 0.24491772240863238,
"max": 0.3747985126615009,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21265170818158224,
"min": 0.1089165052216427,
"max": 0.3115755416014615,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.850606832726329,
"min": 0.4356660208865708,
"max": 1.440588690486609,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.136363636363637,
"min": 2.909090909090909,
"max": 25.227272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1106.0,
"min": 128.0,
"max": 1384.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.136363636363637,
"min": 2.909090909090909,
"max": 25.227272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1106.0,
"min": 128.0,
"max": 1384.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680713983",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/SnowballTarget.yaml --env=/content/ml-agents/training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680714480"
},
"total": 496.94472222800005,
"count": 1,
"self": 0.850025397999957,
"children": {
"run_training.setup": {
"total": 0.1178061580000076,
"count": 1,
"self": 0.1178061580000076
},
"TrainerController.start_learning": {
"total": 495.9768906720001,
"count": 1,
"self": 0.6815948190003382,
"children": {
"TrainerController._reset_env": {
"total": 4.1662111589999995,
"count": 1,
"self": 4.1662111589999995
},
"TrainerController.advance": {
"total": 490.90605817699964,
"count": 18211,
"self": 0.3127388300001712,
"children": {
"env_step": {
"total": 490.59331934699946,
"count": 18211,
"self": 361.80261983099524,
"children": {
"SubprocessEnvManager._take_step": {
"total": 128.47257683999146,
"count": 18211,
"self": 1.963056829997754,
"children": {
"TorchPolicy.evaluate": {
"total": 126.50952000999371,
"count": 18211,
"self": 126.50952000999371
}
}
},
"workers": {
"total": 0.3181226760127629,
"count": 18211,
"self": 0.0,
"children": {
"worker_root": {
"total": 494.18160865001005,
"count": 18211,
"is_parallel": true,
"self": 223.56228600701536,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005593989000090005,
"count": 1,
"is_parallel": true,
"self": 0.0034814590001133183,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021125299999766867,
"count": 10,
"is_parallel": true,
"self": 0.0021125299999766867
}
}
},
"UnityEnvironment.step": {
"total": 0.03595542799996565,
"count": 1,
"is_parallel": true,
"self": 0.0006279530000483646,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00034473400000933907,
"count": 1,
"is_parallel": true,
"self": 0.00034473400000933907
},
"communicator.exchange": {
"total": 0.033028054999931555,
"count": 1,
"is_parallel": true,
"self": 0.033028054999931555
},
"steps_from_proto": {
"total": 0.001954685999976391,
"count": 1,
"is_parallel": true,
"self": 0.0003730619998805196,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015816240000958715,
"count": 10,
"is_parallel": true,
"self": 0.0015816240000958715
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 270.6193226429947,
"count": 18210,
"is_parallel": true,
"self": 10.639678645995332,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.750280540984477,
"count": 18210,
"is_parallel": true,
"self": 5.750280540984477
},
"communicator.exchange": {
"total": 220.03125686300393,
"count": 18210,
"is_parallel": true,
"self": 220.03125686300393
},
"steps_from_proto": {
"total": 34.19810659301095,
"count": 18210,
"is_parallel": true,
"self": 7.055226320990869,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.14288027202008,
"count": 182100,
"is_parallel": true,
"self": 27.14288027202008
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00016067199999270088,
"count": 1,
"self": 0.00016067199999270088,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 487.0925316189831,
"count": 440499,
"is_parallel": true,
"self": 11.195191423990536,
"children": {
"process_trajectory": {
"total": 270.08584103299165,
"count": 440499,
"is_parallel": true,
"self": 269.0222231689917,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0636178639999798,
"count": 4,
"is_parallel": true,
"self": 1.0636178639999798
}
}
},
"_update_policy": {
"total": 205.81149916200093,
"count": 90,
"is_parallel": true,
"self": 75.07080814299786,
"children": {
"TorchPPOOptimizer.update": {
"total": 130.74069101900307,
"count": 4587,
"is_parallel": true,
"self": 130.74069101900307
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.22286584500011486,
"count": 1,
"self": 0.0012177630001133366,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22164808200000152,
"count": 1,
"self": 0.22164808200000152
}
}
}
}
}
}
}