{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.44080689549446106,
"min": 0.3898777961730957,
"max": 2.872626304626465,
"count": 200
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 4538.5478515625,
"min": 3726.841796875,
"max": 29481.763671875,
"count": 200
},
"SnowballTarget.Step.mean": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Step.sum": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.357747077941895,
"min": 0.33006829023361206,
"max": 14.494757652282715,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2943.338134765625,
"min": 64.03324890136719,
"max": 2971.42529296875,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07380807045516193,
"min": 0.05873057815915672,
"max": 0.07915356373711141,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3690403522758096,
"min": 0.23492231263662688,
"max": 0.3885683102367586,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.15582360873619716,
"min": 0.12412929655436208,
"max": 0.2838145378173566,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7791180436809858,
"min": 0.49651718621744834,
"max": 1.419072689086783,
"count": 200
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.032997656000039e-07,
"min": 7.032997656000039e-07,
"max": 0.00029918820027059994,
"count": 200
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.5164988280000196e-06,
"min": 3.5164988280000196e-06,
"max": 0.0014885160038279998,
"count": 200
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10023440000000002,
"min": 0.10023440000000002,
"max": 0.1997294,
"count": 200
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5011720000000001,
"min": 0.4029176,
"max": 0.996172,
"count": 200
},
"SnowballTarget.Policy.Beta.mean": {
"value": 2.1696560000000067e-05,
"min": 2.1696560000000067e-05,
"max": 0.004986497059999999,
"count": 200
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00010848280000000034,
"min": 0.00010848280000000034,
"max": 0.024808982800000004,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.963636363636365,
"min": 3.477272727272727,
"max": 28.618181818181817,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1538.0,
"min": 153.0,
"max": 1574.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.963636363636365,
"min": 3.477272727272727,
"max": 28.618181818181817,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1538.0,
"min": 153.0,
"max": 1574.0,
"count": 200
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1709101425",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1709105808"
},
"total": 4382.766206389,
"count": 1,
"self": 0.4401724290000857,
"children": {
"run_training.setup": {
"total": 0.052345207000030314,
"count": 1,
"self": 0.052345207000030314
},
"TrainerController.start_learning": {
"total": 4382.273688753,
"count": 1,
"self": 5.642875814211948,
"children": {
"TrainerController._reset_env": {
"total": 3.731413597000028,
"count": 1,
"self": 3.731413597000028
},
"TrainerController.advance": {
"total": 4372.803531027788,
"count": 181870,
"self": 2.747611305758255,
"children": {
"env_step": {
"total": 4370.05591972203,
"count": 181870,
"self": 2832.0021802771644,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1535.2323182479709,
"count": 181870,
"self": 14.661662464929123,
"children": {
"TorchPolicy.evaluate": {
"total": 1520.5706557830417,
"count": 181870,
"self": 1520.5706557830417
}
}
},
"workers": {
"total": 2.821421196894562,
"count": 181870,
"self": 0.0,
"children": {
"worker_root": {
"total": 4371.0514845270445,
"count": 181870,
"is_parallel": true,
"self": 2159.8460239319893,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005962534999980562,
"count": 1,
"is_parallel": true,
"self": 0.004241137999883904,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017213970000966583,
"count": 10,
"is_parallel": true,
"self": 0.0017213970000966583
}
}
},
"UnityEnvironment.step": {
"total": 0.038267865999955575,
"count": 1,
"is_parallel": true,
"self": 0.000743744999908813,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042082100003426604,
"count": 1,
"is_parallel": true,
"self": 0.00042082100003426604
},
"communicator.exchange": {
"total": 0.03525418900005661,
"count": 1,
"is_parallel": true,
"self": 0.03525418900005661
},
"steps_from_proto": {
"total": 0.001849110999955883,
"count": 1,
"is_parallel": true,
"self": 0.00035809499968308955,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014910160002727935,
"count": 10,
"is_parallel": true,
"self": 0.0014910160002727935
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2211.2054605950552,
"count": 181869,
"is_parallel": true,
"self": 105.39146143787366,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 52.13857466595198,
"count": 181869,
"is_parallel": true,
"self": 52.13857466595198
},
"communicator.exchange": {
"total": 1718.5258254620935,
"count": 181869,
"is_parallel": true,
"self": 1718.5258254620935
},
"steps_from_proto": {
"total": 335.14959902913586,
"count": 181869,
"is_parallel": true,
"self": 62.38845233499592,
"children": {
"_process_rank_one_or_two_observation": {
"total": 272.76114669413994,
"count": 1818690,
"is_parallel": true,
"self": 272.76114669413994
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00034551699991425266,
"count": 1,
"self": 0.00034551699991425266,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 4319.845725604172,
"count": 6587674,
"is_parallel": true,
"self": 140.6138805344599,
"children": {
"process_trajectory": {
"total": 2370.324112816709,
"count": 6587674,
"is_parallel": true,
"self": 2363.9729371537114,
"children": {
"RLTrainer._checkpoint": {
"total": 6.351175662998003,
"count": 40,
"is_parallel": true,
"self": 6.351175662998003
}
}
},
"_update_policy": {
"total": 1808.907732253003,
"count": 909,
"is_parallel": true,
"self": 514.1640507789311,
"children": {
"TorchPPOOptimizer.update": {
"total": 1294.7436814740718,
"count": 46341,
"is_parallel": true,
"self": 1294.7436814740718
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09552279699983046,
"count": 1,
"self": 0.001027610999699391,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09449518600013107,
"count": 1,
"self": 0.09449518600013107
}
}
}
}
}
}
}