yaystevek · First Push · 36d56c3
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2836567461490631,
"min": 0.2836567461490631,
"max": 1.4694892168045044,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8432.5478515625,
"min": 8432.5478515625,
"max": 44578.42578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989996.0,
"min": 29952.0,
"max": 989996.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989996.0,
"min": 29952.0,
"max": 989996.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.49994927644729614,
"min": -0.1305467188358307,
"max": 0.5332083106040955,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 137.48605346679688,
"min": -31.46175765991211,
"max": 148.23190307617188,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.01603609137237072,
"min": -0.01603609137237072,
"max": 0.6299103498458862,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -4.4099249839782715,
"min": -4.4099249839782715,
"max": 149.28875732421875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07233204942825806,
"min": 0.06281718738986307,
"max": 0.07366803193513707,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0126486919956128,
"min": 0.4947505360863964,
"max": 1.0636879104228378,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017735935442033227,
"min": 0.00046141909700384985,
"max": 0.021521358250481716,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24830309618846516,
"min": 0.005998448261050048,
"max": 0.2542119495931319,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.370197543300007e-06,
"min": 7.370197543300007e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010318276560620009,
"min": 0.00010318276560620009,
"max": 0.0035087885304038993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245669999999998,
"min": 0.10245669999999998,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4343937999999998,
"min": 1.3886848,
"max": 2.5695961000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002554243300000002,
"min": 0.0002554243300000002,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035759406200000033,
"min": 0.0035759406200000033,
"max": 0.11698265039,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.017071325331926346,
"min": 0.016508202999830246,
"max": 0.7039014101028442,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.23899856209754944,
"min": 0.23899856209754944,
"max": 4.927309989929199,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 341.0595238095238,
"min": 336.0,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28649.0,
"min": 15984.0,
"max": 32758.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.587590455299332,
"min": -1.0000000521540642,
"max": 1.6278870386235855,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 133.3575982451439,
"min": -30.99660161137581,
"max": 142.74779879301786,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.587590455299332,
"min": -1.0000000521540642,
"max": 1.6278870386235855,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 133.3575982451439,
"min": -30.99660161137581,
"max": 142.74779879301786,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06399763829280605,
"min": 0.061672155620496584,
"max": 15.730707893148065,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.375801616595709,
"min": 5.118788916501217,
"max": 251.69132629036903,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693509408",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693511732"
},
"total": 2323.430040903,
"count": 1,
"self": 0.5381145810006274,
"children": {
"run_training.setup": {
"total": 0.03974262399992767,
"count": 1,
"self": 0.03974262399992767
},
"TrainerController.start_learning": {
"total": 2322.852183698,
"count": 1,
"self": 1.438442913979543,
"children": {
"TrainerController._reset_env": {
"total": 4.250111778000019,
"count": 1,
"self": 4.250111778000019
},
"TrainerController.advance": {
"total": 2317.0679981280205,
"count": 63860,
"self": 1.44059738699616,
"children": {
"env_step": {
"total": 1636.3489704890226,
"count": 63860,
"self": 1523.0756310509892,
"children": {
"SubprocessEnvManager._take_step": {
"total": 112.39315418705041,
"count": 63860,
"self": 4.852765732069884,
"children": {
"TorchPolicy.evaluate": {
"total": 107.54038845498053,
"count": 62564,
"self": 107.54038845498053
}
}
},
"workers": {
"total": 0.8801852509830042,
"count": 63860,
"self": 0.0,
"children": {
"worker_root": {
"total": 2317.3007173260658,
"count": 63860,
"is_parallel": true,
"self": 911.8025160309294,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018380120000074385,
"count": 1,
"is_parallel": true,
"self": 0.000559856000563741,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012781559994436975,
"count": 8,
"is_parallel": true,
"self": 0.0012781559994436975
}
}
},
"UnityEnvironment.step": {
"total": 0.053314542999942205,
"count": 1,
"is_parallel": true,
"self": 0.0006273129999954108,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005255620001207717,
"count": 1,
"is_parallel": true,
"self": 0.0005255620001207717
},
"communicator.exchange": {
"total": 0.05012770999996974,
"count": 1,
"is_parallel": true,
"self": 0.05012770999996974
},
"steps_from_proto": {
"total": 0.002033957999856284,
"count": 1,
"is_parallel": true,
"self": 0.00036881299979540927,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016651450000608747,
"count": 8,
"is_parallel": true,
"self": 0.0016651450000608747
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1405.4982012951364,
"count": 63859,
"is_parallel": true,
"self": 34.57477382917409,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.143688888997758,
"count": 63859,
"is_parallel": true,
"self": 23.143688888997758
},
"communicator.exchange": {
"total": 1240.7701623919672,
"count": 63859,
"is_parallel": true,
"self": 1240.7701623919672
},
"steps_from_proto": {
"total": 107.00957618499729,
"count": 63859,
"is_parallel": true,
"self": 20.941761565002935,
"children": {
"_process_rank_one_or_two_observation": {
"total": 86.06781461999435,
"count": 510872,
"is_parallel": true,
"self": 86.06781461999435
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 679.2784302520017,
"count": 63860,
"self": 2.701362380014416,
"children": {
"process_trajectory": {
"total": 114.66758121898897,
"count": 63860,
"self": 114.46157129398875,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20600992500021675,
"count": 2,
"self": 0.20600992500021675
}
}
},
"_update_policy": {
"total": 561.9094866529983,
"count": 453,
"self": 364.5136580320068,
"children": {
"TorchPPOOptimizer.update": {
"total": 197.39582862099155,
"count": 22803,
"self": 197.39582862099155
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.259997568733525e-07,
"count": 1,
"self": 9.259997568733525e-07
},
"TrainerController._save_models": {
"total": 0.09562995200030855,
"count": 1,
"self": 0.001476261000334489,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09415369099997406,
"count": 1,
"self": 0.09415369099997406
}
}
}
}
}
}
}
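
The block above is the gauge and timer summary that mlagents-learn writes out at the end of a run (typically saved as run_logs/timers.json). The gauges section tracks training statistics such as policy entropy, losses, and cumulative reward, while the nested total/count/self/children nodes form a hierarchical wall-clock profile of the trainer. Below is a minimal Python sketch for inspecting the file; the local filename timers.json is an assumption for illustration, not a path recorded in the log itself.

import json

# Load the dump; "timers.json" is an assumed local filename.
with open("timers.json") as f:
    root = json.load(f)

# Each gauge stores a value together with the min, max, and count
# recorded over the run (33 summary points in this log).
for name, gauge in root["gauges"].items():
    print(f"{name}: {gauge['value']:.4f} "
          f"(min {gauge['min']:.4f}, max {gauge['max']:.4f}, count {gauge['count']})")

# The rest of the file is a nested timer tree: every node has total
# wall-clock seconds, a call count, "self" seconds, and optional children.
def walk(node, label="root", depth=0):
    print(f"{'  ' * depth}{label}: total {node['total']:.1f}s, "
          f"self {node['self']:.1f}s, count {node['count']}")
    for child_label, child in node.get("children", {}).items():
        walk(child, child_label, depth + 1)

walk(root)

As a quick read of the profile, most of the wall clock in this run is spent in communicator.exchange (about 1240 s of the 2323 s total), i.e. the data exchange with the Unity executable, which dominates env_step.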