ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4079903364181519,
"min": 1.4079903364181519,
"max": 1.4265122413635254,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68766.25,
"min": 67947.984375,
"max": 75918.6875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 104.04989604989605,
"min": 89.4376130198915,
"max": 406.6747967479675,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 50048.0,
"min": 48814.0,
"max": 50242.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999810.0,
"min": 49817.0,
"max": 1999810.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999810.0,
"min": 49817.0,
"max": 1999810.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3961269855499268,
"min": -0.015536925755441189,
"max": 2.432816505432129,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1152.537109375,
"min": -1.8955049514770508,
"max": 1307.8060302734375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.632701588395727,
"min": 1.8142023823056064,
"max": 3.8283403637785542,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1747.3294640183449,
"min": 221.332690641284,
"max": 2065.695347547531,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.632701588395727,
"min": 1.8142023823056064,
"max": 3.8283403637785542,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1747.3294640183449,
"min": 221.332690641284,
"max": 2065.695347547531,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01556914671461224,
"min": 0.011998171018300733,
"max": 0.02152308144335014,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03113829342922448,
"min": 0.023996342036601465,
"max": 0.05420544985002683,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.049449754133820537,
"min": 0.023886641052862007,
"max": 0.05960163635512193,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09889950826764107,
"min": 0.04777328210572401,
"max": 0.17338562421500683,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.440998519700004e-06,
"min": 4.440998519700004e-06,
"max": 0.000295309726563425,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.881997039400008e-06,
"min": 8.881997039400008e-06,
"max": 0.0008438329687223499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10148030000000005,
"min": 0.10148030000000005,
"max": 0.19843657500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.2029606000000001,
"min": 0.2029606000000001,
"max": 0.58127765,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.386697000000005e-05,
"min": 8.386697000000005e-05,
"max": 0.0049219850925,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001677339400000001,
"min": 0.0001677339400000001,
"max": 0.014065754735000005,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699273337",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1699275814"
},
"total": 2477.532648086,
"count": 1,
"self": 0.8581486309999491,
"children": {
"run_training.setup": {
"total": 0.04424926600000845,
"count": 1,
"self": 0.04424926600000845
},
"TrainerController.start_learning": {
"total": 2476.630250189,
"count": 1,
"self": 4.88211746212437,
"children": {
"TrainerController._reset_env": {
"total": 8.661474005999992,
"count": 1,
"self": 8.661474005999992
},
"TrainerController.advance": {
"total": 2462.9230945668755,
"count": 231602,
"self": 4.755541033933696,
"children": {
"env_step": {
"total": 1954.3447599129645,
"count": 231602,
"self": 1612.1959377699789,
"children": {
"SubprocessEnvManager._take_step": {
"total": 339.172397134044,
"count": 231602,
"self": 17.068590137957983,
"children": {
"TorchPolicy.evaluate": {
"total": 322.103806996086,
"count": 222966,
"self": 322.103806996086
}
}
},
"workers": {
"total": 2.9764250089417033,
"count": 231602,
"self": 0.0,
"children": {
"worker_root": {
"total": 2468.9527001350225,
"count": 231602,
"is_parallel": true,
"self": 1155.1973128900372,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009051010000007409,
"count": 1,
"is_parallel": true,
"self": 0.0002643939999984468,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006407070000022941,
"count": 2,
"is_parallel": true,
"self": 0.0006407070000022941
}
}
},
"UnityEnvironment.step": {
"total": 0.02831523199995445,
"count": 1,
"is_parallel": true,
"self": 0.0003171039999756431,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002132959999698869,
"count": 1,
"is_parallel": true,
"self": 0.0002132959999698869
},
"communicator.exchange": {
"total": 0.027089869000008093,
"count": 1,
"is_parallel": true,
"self": 0.027089869000008093
},
"steps_from_proto": {
"total": 0.0006949630000008256,
"count": 1,
"is_parallel": true,
"self": 0.0002009119999684117,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004940510000324139,
"count": 2,
"is_parallel": true,
"self": 0.0004940510000324139
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1313.7553872449853,
"count": 231601,
"is_parallel": true,
"self": 41.2994857260594,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 85.00423623099385,
"count": 231601,
"is_parallel": true,
"self": 85.00423623099385
},
"communicator.exchange": {
"total": 1094.6785461629684,
"count": 231601,
"is_parallel": true,
"self": 1094.6785461629684
},
"steps_from_proto": {
"total": 92.77311912496356,
"count": 231601,
"is_parallel": true,
"self": 32.2949340290059,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.478185095957656,
"count": 463202,
"is_parallel": true,
"self": 60.478185095957656
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 503.8227936199775,
"count": 231602,
"self": 6.887431806958318,
"children": {
"process_trajectory": {
"total": 152.88567115602035,
"count": 231602,
"self": 151.65682356502072,
"children": {
"RLTrainer._checkpoint": {
"total": 1.228847590999635,
"count": 10,
"self": 1.228847590999635
}
}
},
"_update_policy": {
"total": 344.04969065699885,
"count": 96,
"self": 281.4475129269944,
"children": {
"TorchPPOOptimizer.update": {
"total": 62.60217773000443,
"count": 2880,
"self": 62.60217773000443
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5979999261617195e-06,
"count": 1,
"self": 1.5979999261617195e-06
},
"TrainerController._save_models": {
"total": 0.1635625560002154,
"count": 1,
"self": 0.003242940000291128,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16031961599992428,
"count": 1,
"self": 0.16031961599992428
}
}
}
}
}
}
}