{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4606909155845642,
"min": 0.43714478611946106,
"max": 1.3890351057052612,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13798.6142578125,
"min": 12981.451171875,
"max": 42137.76953125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4690605700016022,
"min": -0.11727041006088257,
"max": 0.4980018138885498,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 126.17729187011719,
"min": -28.262168884277344,
"max": 135.95449829101562,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.014398873783648014,
"min": -0.006284828297793865,
"max": 0.4995509088039398,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.8732969760894775,
"min": -1.7157580852508545,
"max": 118.39356231689453,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0712941121762722,
"min": 0.0655800352868114,
"max": 0.07387820006300679,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.069411682644083,
"min": 0.4838501576275027,
"max": 1.069411682644083,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015411954958755008,
"min": 0.000288538282866579,
"max": 0.015411954958755008,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23117932438132513,
"min": 0.003173921111532369,
"max": 0.23117932438132513,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.573057475680003e-06,
"min": 7.573057475680003e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011359586213520004,
"min": 0.00011359586213520004,
"max": 0.003137091254303,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252432000000004,
"min": 0.10252432000000004,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5378648000000006,
"min": 1.3886848,
"max": 2.401435,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002621795680000001,
"min": 0.0002621795680000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0039326935200000015,
"min": 0.0039326935200000015,
"max": 0.1045951303,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010643676854670048,
"min": 0.010643676854670048,
"max": 0.5520880818367004,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1596551537513733,
"min": 0.15644726157188416,
"max": 3.864616632461548,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 372.1875,
"min": 352.04938271604937,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29775.0,
"min": 15984.0,
"max": 32962.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5265265623979931,
"min": -1.0000000521540642,
"max": 1.5985431862466128,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 120.59559842944145,
"min": -31.998401656746864,
"max": 129.48199808597565,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5265265623979931,
"min": -1.0000000521540642,
"max": 1.5985431862466128,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 120.59559842944145,
"min": -31.998401656746864,
"max": 129.48199808597565,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.040932992914038756,
"min": 0.040932992914038756,
"max": 12.415960829705,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.233706440209062,
"min": 3.233706440209062,
"max": 198.65537327528,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703601342",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703603487"
},
"total": 2145.310186941,
"count": 1,
"self": 0.4764474190005785,
"children": {
"run_training.setup": {
"total": 0.07238672099992982,
"count": 1,
"self": 0.07238672099992982
},
"TrainerController.start_learning": {
"total": 2144.7613528009997,
"count": 1,
"self": 1.2716352280117462,
"children": {
"TrainerController._reset_env": {
"total": 2.2448563319999266,
"count": 1,
"self": 2.2448563319999266
},
"TrainerController.advance": {
"total": 2141.1640665119885,
"count": 63626,
"self": 1.3900983371318034,
"children": {
"env_step": {
"total": 1524.6544232209749,
"count": 63626,
"self": 1397.6830240319946,
"children": {
"SubprocessEnvManager._take_step": {
"total": 126.147081246987,
"count": 63626,
"self": 4.555509202920803,
"children": {
"TorchPolicy.evaluate": {
"total": 121.5915720440662,
"count": 62558,
"self": 121.5915720440662
}
}
},
"workers": {
"total": 0.8243179419932858,
"count": 63626,
"self": 0.0,
"children": {
"worker_root": {
"total": 2139.8374637500406,
"count": 63626,
"is_parallel": true,
"self": 858.5519115900561,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016692220001459646,
"count": 1,
"is_parallel": true,
"self": 0.0005258419996607699,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011433800004851946,
"count": 8,
"is_parallel": true,
"self": 0.0011433800004851946
}
}
},
"UnityEnvironment.step": {
"total": 0.04900357199994687,
"count": 1,
"is_parallel": true,
"self": 0.0005782889998044993,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005131570001140062,
"count": 1,
"is_parallel": true,
"self": 0.0005131570001140062
},
"communicator.exchange": {
"total": 0.046311034000154905,
"count": 1,
"is_parallel": true,
"self": 0.046311034000154905
},
"steps_from_proto": {
"total": 0.0016010919998734607,
"count": 1,
"is_parallel": true,
"self": 0.00034431200083417934,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012567799990392814,
"count": 8,
"is_parallel": true,
"self": 0.0012567799990392814
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1281.2855521599845,
"count": 63625,
"is_parallel": true,
"self": 34.264528917010466,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.14103483302506,
"count": 63625,
"is_parallel": true,
"self": 24.14103483302506
},
"communicator.exchange": {
"total": 1124.4779347919575,
"count": 63625,
"is_parallel": true,
"self": 1124.4779347919575
},
"steps_from_proto": {
"total": 98.40205361799144,
"count": 63625,
"is_parallel": true,
"self": 19.655919320888188,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.74613429710325,
"count": 509000,
"is_parallel": true,
"self": 78.74613429710325
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 615.1195449538818,
"count": 63626,
"self": 2.5070267587857415,
"children": {
"process_trajectory": {
"total": 123.97319268609726,
"count": 63626,
"self": 123.78747826509743,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1857144209998296,
"count": 2,
"self": 0.1857144209998296
}
}
},
"_update_policy": {
"total": 488.6393255089988,
"count": 443,
"self": 287.8816277979399,
"children": {
"TorchPPOOptimizer.update": {
"total": 200.75769771105888,
"count": 22863,
"self": 200.75769771105888
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.970000635599717e-07,
"count": 1,
"self": 8.970000635599717e-07
},
"TrainerController._save_models": {
"total": 0.08079383199947188,
"count": 1,
"self": 0.001319702999353467,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07947412900011841,
"count": 1,
"self": 0.07947412900011841
}
}
}
}
}
}
}