{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3666500449180603,
"min": 0.365275114774704,
"max": 1.4103314876556396,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11140.294921875,
"min": 10812.1435546875,
"max": 42783.81640625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989978.0,
"min": 29952.0,
"max": 989978.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989978.0,
"min": 29952.0,
"max": 989978.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5927366018295288,
"min": -0.09950734674930573,
"max": 0.7072423100471497,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 166.55899047851562,
"min": -23.98126983642578,
"max": 203.685791015625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007375817280262709,
"min": -0.15236683189868927,
"max": 0.36191678047180176,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.0726046562194824,
"min": -42.20561218261719,
"max": 85.77427673339844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06848967331335247,
"min": 0.06385736586526036,
"max": 0.07223557148351144,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9588554263869346,
"min": 0.49361261027703196,
"max": 1.0455625720399744,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016851221644306552,
"min": 0.0008628091922366876,
"max": 0.016851221644306552,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23591710302029173,
"min": 0.006039664345656813,
"max": 0.24718905597304305,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.599590323978567e-06,
"min": 7.599590323978567e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010639426453569993,
"min": 0.00010639426453569993,
"max": 0.0033785570738144,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253316428571428,
"min": 0.10253316428571428,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4354643,
"min": 1.3691136000000002,
"max": 2.5261856000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002630631121428571,
"min": 0.0002630631121428571,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003682883569999999,
"min": 0.003682883569999999,
"max": 0.11264594144,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009887446649372578,
"min": 0.009700154885649681,
"max": 0.5670167803764343,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13842424750328064,
"min": 0.13580216467380524,
"max": 3.9691174030303955,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 302.4059405940594,
"min": 264.7787610619469,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30543.0,
"min": 15984.0,
"max": 33477.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6381702853901552,
"min": -1.0000000521540642,
"max": 1.735221225076017,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 165.45519882440567,
"min": -32.000001668930054,
"max": 196.07999843358994,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6381702853901552,
"min": -1.0000000521540642,
"max": 1.735221225076017,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 165.45519882440567,
"min": -32.000001668930054,
"max": 196.07999843358994,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03118187372672422,
"min": 0.027140908946558233,
"max": 12.443020125851035,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1493692463991465,
"min": 2.8974459202800062,
"max": 199.08832201361656,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682275494",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682277846"
},
"total": 2351.741900282,
"count": 1,
"self": 0.4407295290006914,
"children": {
"run_training.setup": {
"total": 0.11050554300004478,
"count": 1,
"self": 0.11050554300004478
},
"TrainerController.start_learning": {
"total": 2351.1906652099997,
"count": 1,
"self": 1.4478382790252908,
"children": {
"TrainerController._reset_env": {
"total": 4.475606990000188,
"count": 1,
"self": 4.475606990000188
},
"TrainerController.advance": {
"total": 2345.168154553974,
"count": 64210,
"self": 1.5859775070994147,
"children": {
"env_step": {
"total": 1714.9181611279012,
"count": 64210,
"self": 1600.5561538948468,
"children": {
"SubprocessEnvManager._take_step": {
"total": 113.43676104301767,
"count": 64210,
"self": 5.128247995074844,
"children": {
"TorchPolicy.evaluate": {
"total": 108.30851304794282,
"count": 62566,
"self": 108.30851304794282
}
}
},
"workers": {
"total": 0.9252461900366598,
"count": 64210,
"self": 0.0,
"children": {
"worker_root": {
"total": 2345.7025924619657,
"count": 64210,
"is_parallel": true,
"self": 867.1610060259945,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020848320000368403,
"count": 1,
"is_parallel": true,
"self": 0.0007272169996213051,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013576150004155352,
"count": 8,
"is_parallel": true,
"self": 0.0013576150004155352
}
}
},
"UnityEnvironment.step": {
"total": 0.10299721900014447,
"count": 1,
"is_parallel": true,
"self": 0.00571646100001999,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000479107000046497,
"count": 1,
"is_parallel": true,
"self": 0.000479107000046497
},
"communicator.exchange": {
"total": 0.09163864500010277,
"count": 1,
"is_parallel": true,
"self": 0.09163864500010277
},
"steps_from_proto": {
"total": 0.005163005999975212,
"count": 1,
"is_parallel": true,
"self": 0.003592226999671766,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001570779000303446,
"count": 8,
"is_parallel": true,
"self": 0.001570779000303446
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1478.541586435971,
"count": 64209,
"is_parallel": true,
"self": 33.95437562697339,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.890193911974166,
"count": 64209,
"is_parallel": true,
"self": 25.890193911974166
},
"communicator.exchange": {
"total": 1314.9918897530245,
"count": 64209,
"is_parallel": true,
"self": 1314.9918897530245
},
"steps_from_proto": {
"total": 103.7051271439991,
"count": 64209,
"is_parallel": true,
"self": 22.416614795071382,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.28851234892772,
"count": 513672,
"is_parallel": true,
"self": 81.28851234892772
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 628.6640159189737,
"count": 64210,
"self": 2.749028301927865,
"children": {
"process_trajectory": {
"total": 114.37113254304609,
"count": 64210,
"self": 114.15667400604593,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21445853700015505,
"count": 2,
"self": 0.21445853700015505
}
}
},
"_update_policy": {
"total": 511.5438550739998,
"count": 452,
"self": 326.9257647660304,
"children": {
"TorchPPOOptimizer.update": {
"total": 184.6180903079694,
"count": 22833,
"self": 184.6180903079694
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0429998837935273e-06,
"count": 1,
"self": 1.0429998837935273e-06
},
"TrainerController._save_models": {
"total": 0.09906434399999853,
"count": 1,
"self": 0.001476240000101825,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0975881039998967,
"count": 1,
"self": 0.0975881039998967
}
}
}
}
}
}
}