{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.791020929813385,
"min": 0.791020929813385,
"max": 1.489605188369751,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 23502.814453125,
"min": 23502.814453125,
"max": 45188.6640625,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479943.0,
"min": 29952.0,
"max": 479943.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479943.0,
"min": 29952.0,
"max": 479943.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.029185013845562935,
"min": -0.17986860871315002,
"max": -0.029185013845562935,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -7.091958522796631,
"min": -42.62886047363281,
"max": -7.091958522796631,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01778501830995083,
"min": 0.015657901763916016,
"max": 0.3353683650493622,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.321759223937988,
"min": 3.7735543251037598,
"max": 79.4822998046875,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07145601879996581,
"min": 0.064684027601684,
"max": 0.0731654304498678,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0003842631995215,
"min": 0.49195109521361313,
"max": 1.0003842631995215,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.004851729566885157,
"min": 0.0002235255053754145,
"max": 0.009355851846310973,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.0679242139363922,
"min": 0.002682306064504974,
"max": 0.0679242139363922,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.0848935907528568e-05,
"min": 2.0848935907528568e-05,
"max": 0.00029030126037577137,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00029188510270539993,
"min": 0.00029188510270539993,
"max": 0.0030633577788807993,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10694961428571428,
"min": 0.10694961428571428,
"max": 0.19676708571428575,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4972946,
"min": 1.3773696000000002,
"max": 2.2502988,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000704266467142857,
"min": 0.000704266467142857,
"max": 0.00967703186285714,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.009859730539999998,
"min": 0.009859730539999998,
"max": 0.10212980808000001,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.016758795827627182,
"min": 0.016758795827627182,
"max": 0.4105682671070099,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.23462314903736115,
"min": 0.23462314903736115,
"max": 2.8739778995513916,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 855.3428571428572,
"min": 855.3428571428572,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29937.0,
"min": 15984.0,
"max": 33121.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.2844743368881089,
"min": -1.0000000521540642,
"max": -0.2844743368881089,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -9.956601791083813,
"min": -31.997201666235924,
"max": -9.956601791083813,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.2844743368881089,
"min": -1.0000000521540642,
"max": -0.2844743368881089,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -9.956601791083813,
"min": -31.997201666235924,
"max": -9.956601791083813,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.1487522017210722,
"min": 0.1487522017210722,
"max": 9.225888344459236,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.206327060237527,
"min": 5.206327060237527,
"max": 147.61421351134777,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1667817504",
"python_version": "3.7.15 (default, Oct 12 2022, 19:14:55) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1667818491"
},
"total": 986.4942468400001,
"count": 1,
"self": 0.4389137450000362,
"children": {
"run_training.setup": {
"total": 0.04243495499997607,
"count": 1,
"self": 0.04243495499997607
},
"TrainerController.start_learning": {
"total": 986.0128981400001,
"count": 1,
"self": 0.6611331060109933,
"children": {
"TrainerController._reset_env": {
"total": 11.128449637000017,
"count": 1,
"self": 11.128449637000017
},
"TrainerController.advance": {
"total": 974.124794237989,
"count": 31501,
"self": 0.7555307889666665,
"children": {
"env_step": {
"total": 624.1961802460218,
"count": 31501,
"self": 569.6990803320596,
"children": {
"SubprocessEnvManager._take_step": {
"total": 54.12975068195851,
"count": 31501,
"self": 2.4211058029628703,
"children": {
"TorchPolicy.evaluate": {
"total": 51.70864487899564,
"count": 31315,
"self": 17.842696481973803,
"children": {
"TorchPolicy.sample_actions": {
"total": 33.86594839702184,
"count": 31315,
"self": 33.86594839702184
}
}
}
}
},
"workers": {
"total": 0.36734923200367575,
"count": 31501,
"self": 0.0,
"children": {
"worker_root": {
"total": 983.7755190219989,
"count": 31501,
"is_parallel": true,
"self": 465.48476912400895,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005653015999996569,
"count": 1,
"is_parallel": true,
"self": 0.003978053999844633,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001674962000151936,
"count": 8,
"is_parallel": true,
"self": 0.001674962000151936
}
}
},
"UnityEnvironment.step": {
"total": 0.04765813399990293,
"count": 1,
"is_parallel": true,
"self": 0.0005224259999749847,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004901739999922938,
"count": 1,
"is_parallel": true,
"self": 0.0004901739999922938
},
"communicator.exchange": {
"total": 0.045026978000009876,
"count": 1,
"is_parallel": true,
"self": 0.045026978000009876
},
"steps_from_proto": {
"total": 0.0016185559999257748,
"count": 1,
"is_parallel": true,
"self": 0.0004709169998022844,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011476390001234904,
"count": 8,
"is_parallel": true,
"self": 0.0011476390001234904
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 518.2907498979899,
"count": 31500,
"is_parallel": true,
"self": 14.437553175974813,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.815026046980734,
"count": 31500,
"is_parallel": true,
"self": 12.815026046980734
},
"communicator.exchange": {
"total": 432.01970813502703,
"count": 31500,
"is_parallel": true,
"self": 432.01970813502703
},
"steps_from_proto": {
"total": 59.01846254000736,
"count": 31500,
"is_parallel": true,
"self": 12.572338579994266,
"children": {
"_process_rank_one_or_two_observation": {
"total": 46.4461239600131,
"count": 252000,
"is_parallel": true,
"self": 46.4461239600131
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 349.1730832030006,
"count": 31501,
"self": 1.075169043019855,
"children": {
"process_trajectory": {
"total": 82.57310347397959,
"count": 31501,
"self": 82.46444172897952,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10866174500006309,
"count": 1,
"self": 0.10866174500006309
}
}
},
"_update_policy": {
"total": 265.52481068600116,
"count": 209,
"self": 104.70122470001297,
"children": {
"TorchPPOOptimizer.update": {
"total": 160.8235859859882,
"count": 11412,
"self": 160.8235859859882
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.899999895424116e-07,
"count": 1,
"self": 9.899999895424116e-07
},
"TrainerController._save_models": {
"total": 0.09852016900003946,
"count": 1,
"self": 0.0016686769999978424,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09685149200004162,
"count": 1,
"self": 0.09685149200004162
}
}
}
}
}
}
}