{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.3999437093734741, "min": 1.3999437093734741, "max": 1.4285030364990234, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 70085.3828125, "min": 66764.15625, "max": 79140.09375, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 94.03396226415094, "min": 91.31608133086876, "max": 400.024, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49838.0, "min": 49126.0, "max": 50065.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999622.0, "min": 49656.0, "max": 1999622.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999622.0, "min": 49656.0, "max": 1999622.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.397925615310669, "min": 0.05017944425344467, "max": 2.4193365573883057, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1270.900634765625, "min": 6.222250938415527, "max": 1284.6795654296875, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.6920536279678347, "min": 1.8245773524526627, "max": 3.9014876728989725, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 1956.7884228229523, "min": 226.24759170413017, "max": 2055.776079416275, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.6920536279678347, "min": 1.8245773524526627, "max": 3.9014876728989725, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 1956.7884228229523, "min": 226.24759170413017, "max": 2055.776079416275, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.01705421171189098, "min": 0.015023611470436057, "max": 0.020261879435080725, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.05116263513567294, "min": 0.030047222940872115, "max": 0.06003784423616405, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.05097933519217703, "min": 0.02114582872018218, "max": 0.06138971615582704, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.1529380055765311, "min": 0.04229165744036436, "max": 0.1831122178584337, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.1847489384499994e-06, "min": 3.1847489384499994e-06, "max": 0.000295344676551775, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 9.554246815349998e-06, "min": 9.554246815349998e-06, "max": 0.0008440066686644498, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10106155, "min": 0.10106155, "max": 0.19844822499999998, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30318465, "min": 0.2072853, "max": 0.58133555, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.297134500000001e-05, "min": 6.297134500000001e-05, "max": 0.0049225664275, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.000188914035, "min": 0.000188914035, "max": 0.014068643944999999, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1688413974", "python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1688416554" }, "total": 2579.5717428669996, "count": 1, "self": 
0.4415809649995026, "children": { "run_training.setup": { "total": 0.06368844400003582, "count": 1, "self": 0.06368844400003582 }, "TrainerController.start_learning": { "total": 2579.066473458, "count": 1, "self": 4.831018100967867, "children": { "TrainerController._reset_env": { "total": 4.033720960000096, "count": 1, "self": 4.033720960000096 }, "TrainerController.advance": { "total": 2570.0754704990322, "count": 231361, "self": 4.914061988830781, "children": { "env_step": { "total": 2022.4176952921218, "count": 231361, "self": 1699.4563710621471, "children": { "SubprocessEnvManager._take_step": { "total": 319.8591727119318, "count": 231361, "self": 18.347048281982893, "children": { "TorchPolicy.evaluate": { "total": 301.5121244299489, "count": 222926, "self": 301.5121244299489 } } }, "workers": { "total": 3.102151518042888, "count": 231361, "self": 0.0, "children": { "worker_root": { "total": 2570.7813655960886, "count": 231361, "is_parallel": true, "self": 1187.3743230310915, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0009996259999525137, "count": 1, "is_parallel": true, "self": 0.000308626999981243, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006909989999712707, "count": 2, "is_parallel": true, "self": 0.0006909989999712707 } } }, "UnityEnvironment.step": { "total": 0.03056026000001566, "count": 1, "is_parallel": true, "self": 0.0004133719999117602, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0002422910000632328, "count": 1, "is_parallel": true, "self": 0.0002422910000632328 }, "communicator.exchange": { "total": 0.029108437999980197, "count": 1, "is_parallel": true, "self": 0.029108437999980197 }, "steps_from_proto": { "total": 0.0007961590000604701, "count": 1, "is_parallel": true, "self": 0.00022087900015321793, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005752799999072522, "count": 2, "is_parallel": true, "self": 0.0005752799999072522 } } } } } } }, "UnityEnvironment.step": { "total": 1383.407042564997, "count": 231360, "is_parallel": true, "self": 41.01791864291499, "children": { "UnityEnvironment._generate_step_input": { "total": 87.54069668203351, "count": 231360, "is_parallel": true, "self": 87.54069668203351 }, "communicator.exchange": { "total": 1154.0104692830705, "count": 231360, "is_parallel": true, "self": 1154.0104692830705 }, "steps_from_proto": { "total": 100.83795795697813, "count": 231360, "is_parallel": true, "self": 37.912775251935045, "children": { "_process_rank_one_or_two_observation": { "total": 62.925182705043085, "count": 462720, "is_parallel": true, "self": 62.925182705043085 } } } } } } } } } } }, "trainer_advance": { "total": 542.7437132180795, "count": 231361, "self": 6.863624049178725, "children": { "process_trajectory": { "total": 142.80693770490097, "count": 231361, "self": 141.28448808790108, "children": { "RLTrainer._checkpoint": { "total": 1.522449616999893, "count": 10, "self": 1.522449616999893 } } }, "_update_policy": { "total": 393.0731514639998, "count": 97, "self": 332.0210478679818, "children": { "TorchPPOOptimizer.update": { "total": 61.052103596018014, "count": 2910, "self": 61.052103596018014 } } } } } } }, "trainer_threads": { "total": 9.989998943638057e-07, "count": 1, "self": 9.989998943638057e-07 }, "TrainerController._save_models": { "total": 0.12626289899981202, "count": 1, "self": 0.0021466819998749997, "children": { "RLTrainer._checkpoint": { "total": 
0.12411621699993702, "count": 1, "self": 0.12411621699993702 } } } } } } }