{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4052486419677734,
"min": 1.4052486419677734,
"max": 1.4266725778579712,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70256.8125,
"min": 68729.4375,
"max": 77493.4296875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 109.75892857142857,
"min": 97.25343811394892,
"max": 389.5891472868217,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49172.0,
"min": 49041.0,
"max": 50257.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999997.0,
"min": 49986.0,
"max": 1999997.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999997.0,
"min": 49986.0,
"max": 1999997.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.294196605682373,
"min": 0.1307801753282547,
"max": 2.3597233295440674,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1030.09423828125,
"min": 16.7398624420166,
"max": 1174.8623046875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.611140880128058,
"min": 1.878663670271635,
"max": 3.8454045072495275,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1621.4022551774979,
"min": 240.4689497947693,
"max": 1897.3989784121513,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.611140880128058,
"min": 1.878663670271635,
"max": 3.8454045072495275,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1621.4022551774979,
"min": 240.4689497947693,
"max": 1897.3989784121513,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01927030493776935,
"min": 0.014023554455222135,
"max": 0.019888148088163388,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0385406098755387,
"min": 0.02804710891044427,
"max": 0.05833836422631672,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04992628240336974,
"min": 0.02224392993375659,
"max": 0.06284678857773542,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09985256480673949,
"min": 0.04448785986751318,
"max": 0.17118708056708176,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.261973579374993e-06,
"min": 4.261973579374993e-06,
"max": 0.00029536567654477497,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.523947158749987e-06,
"min": 8.523947158749987e-06,
"max": 0.0008441770686076499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10142062500000001,
"min": 0.10142062500000001,
"max": 0.198455225,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20284125000000003,
"min": 0.20284125000000003,
"max": 0.5813923500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.088918749999989e-05,
"min": 8.088918749999989e-05,
"max": 0.004922915727499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016177837499999978,
"min": 0.00016177837499999978,
"max": 0.014071478264999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671528384",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671530619"
},
"total": 2235.3781056360003,
"count": 1,
"self": 0.38309998600016115,
"children": {
"run_training.setup": {
"total": 0.10707217399999536,
"count": 1,
"self": 0.10707217399999536
},
"TrainerController.start_learning": {
"total": 2234.887933476,
"count": 1,
"self": 3.8759958280479623,
"children": {
"TrainerController._reset_env": {
"total": 8.821190142000034,
"count": 1,
"self": 8.821190142000034
},
"TrainerController.advance": {
"total": 2222.075152219952,
"count": 230912,
"self": 4.1952491280271715,
"children": {
"env_step": {
"total": 1751.020026449938,
"count": 230912,
"self": 1471.6803988340905,
"children": {
"SubprocessEnvManager._take_step": {
"total": 276.7337545139449,
"count": 230912,
"self": 14.501645317848556,
"children": {
"TorchPolicy.evaluate": {
"total": 262.23210919609636,
"count": 222980,
"self": 65.13605106008367,
"children": {
"TorchPolicy.sample_actions": {
"total": 197.0960581360127,
"count": 222980,
"self": 197.0960581360127
}
}
}
}
},
"workers": {
"total": 2.6058731019026595,
"count": 230912,
"self": 0.0,
"children": {
"worker_root": {
"total": 2226.977033661014,
"count": 230912,
"is_parallel": true,
"self": 1016.4231299550727,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002055599000016173,
"count": 1,
"is_parallel": true,
"self": 0.0003541829999562651,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017014160000599077,
"count": 2,
"is_parallel": true,
"self": 0.0017014160000599077
}
}
},
"UnityEnvironment.step": {
"total": 0.028023077000000285,
"count": 1,
"is_parallel": true,
"self": 0.00029007899996713604,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021137400000270645,
"count": 1,
"is_parallel": true,
"self": 0.00021137400000270645
},
"communicator.exchange": {
"total": 0.026779949999991004,
"count": 1,
"is_parallel": true,
"self": 0.026779949999991004
},
"steps_from_proto": {
"total": 0.0007416740000394384,
"count": 1,
"is_parallel": true,
"self": 0.0002490760000455339,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004925979999939045,
"count": 2,
"is_parallel": true,
"self": 0.0004925979999939045
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1210.5539037059414,
"count": 230911,
"is_parallel": true,
"self": 35.2116830399018,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.01947066399799,
"count": 230911,
"is_parallel": true,
"self": 77.01947066399799
},
"communicator.exchange": {
"total": 1004.344182356959,
"count": 230911,
"is_parallel": true,
"self": 1004.344182356959
},
"steps_from_proto": {
"total": 93.97856764508265,
"count": 230911,
"is_parallel": true,
"self": 38.46682495208552,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.51174269299713,
"count": 461822,
"is_parallel": true,
"self": 55.51174269299713
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 466.85987664198694,
"count": 230912,
"self": 6.48704936304199,
"children": {
"process_trajectory": {
"total": 143.97738890394385,
"count": 230912,
"self": 142.66842361894396,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3089652849998856,
"count": 10,
"self": 1.3089652849998856
}
}
},
"_update_policy": {
"total": 316.3954383750011,
"count": 96,
"self": 262.3259128720064,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.06952550299468,
"count": 2880,
"self": 54.06952550299468
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2479999895731453e-06,
"count": 1,
"self": 1.2479999895731453e-06
},
"TrainerController._save_models": {
"total": 0.11559403799992651,
"count": 1,
"self": 0.0020307859999775246,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11356325199994899,
"count": 1,
"self": 0.11356325199994899
}
}
}
}
}
}
}