{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4075660705566406, "min": 1.4075660705566406, "max": 1.4293336868286133, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 69321.21875, "min": 68312.25, "max": 76080.4140625, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 106.96544276457884, "min": 104.27426160337552, "max": 405.6910569105691, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49525.0, "min": 48951.0, "max": 50243.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999958.0, "min": 49774.0, "max": 1999958.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999958.0, "min": 49774.0, "max": 1999958.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.3100593090057373, "min": 0.0704134926199913, "max": 2.354785919189453, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1069.5574951171875, "min": 8.590446472167969, "max": 1093.516845703125, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.62864886838983, "min": 1.9503000793886967, "max": 3.659143411300399, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 1680.0644260644913, "min": 237.936609685421, "max": 1698.3010804057121, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.62864886838983, "min": 1.9503000793886967, "max": 3.659143411300399, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 1680.0644260644913, "min": 237.936609685421, "max": 1698.3010804057121, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.01677426046032148, "min": 0.013943707008244625, "max": 0.021377448205991335, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.03354852092064296, "min": 0.02788741401648925, "max": 0.05665091521126063, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.04835579072435697, "min": 0.022203938538829487, "max": 0.05614446024927828, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.09671158144871395, "min": 0.044407877077658975, "max": 0.16843338074783484, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.94217368597499e-06, "min": 3.94217368597499e-06, "max": 0.0002953602015466001, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 7.88434737194998e-06, "min": 7.88434737194998e-06, "max": 0.0008442639185786999, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10131402499999999, "min": 0.10131402499999999, "max": 0.1984534, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.20262804999999998, "min": 0.20262804999999998, "max": 0.5814212999999999, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 7.55698474999998e-05, "min": 7.55698474999998e-05, "max": 0.00492282466, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.0001511396949999996, "min": 0.0001511396949999996, "max": 0.014072922869999999, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1692753819", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": "1692756409" }, 
"total": 2589.540690286, "count": 1, "self": 0.4902193419998184, "children": { "run_training.setup": { "total": 0.043547620000026654, "count": 1, "self": 0.043547620000026654 }, "TrainerController.start_learning": { "total": 2589.006923324, "count": 1, "self": 4.846563719141159, "children": { "TrainerController._reset_env": { "total": 4.30752451699999, "count": 1, "self": 4.30752451699999 }, "TrainerController.advance": { "total": 2579.723552740859, "count": 230235, "self": 5.259380008871176, "children": { "env_step": { "total": 1998.2664047241108, "count": 230235, "self": 1687.3728217310797, "children": { "SubprocessEnvManager._take_step": { "total": 307.78703181103924, "count": 230235, "self": 17.80349465097845, "children": { "TorchPolicy.evaluate": { "total": 289.9835371600608, "count": 222802, "self": 289.9835371600608 } } }, "workers": { "total": 3.106551181991847, "count": 230235, "self": 0.0, "children": { "worker_root": { "total": 2580.882450069147, "count": 230235, "is_parallel": true, "self": 1206.1625066411036, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0009556869999869377, "count": 1, "is_parallel": true, "self": 0.0002983159999985219, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006573709999884159, "count": 2, "is_parallel": true, "self": 0.0006573709999884159 } } }, "UnityEnvironment.step": { "total": 0.028550101000007544, "count": 1, "is_parallel": true, "self": 0.00035991599997942103, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0002275620000204981, "count": 1, "is_parallel": true, "self": 0.0002275620000204981 }, "communicator.exchange": { "total": 0.02723165700001573, "count": 1, "is_parallel": true, "self": 0.02723165700001573 }, "steps_from_proto": { "total": 0.0007309659999918949, "count": 1, "is_parallel": true, "self": 0.00019747200002484533, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005334939999670496, "count": 2, "is_parallel": true, "self": 0.0005334939999670496 } } } } } } }, "UnityEnvironment.step": { "total": 1374.7199434280435, "count": 230234, "is_parallel": true, "self": 42.20831402499266, "children": { "UnityEnvironment._generate_step_input": { "total": 86.16023797505892, "count": 230234, "is_parallel": true, "self": 86.16023797505892 }, "communicator.exchange": { "total": 1143.0921464429196, "count": 230234, "is_parallel": true, "self": 1143.0921464429196 }, "steps_from_proto": { "total": 103.25924498507237, "count": 230234, "is_parallel": true, "self": 35.73554114699135, "children": { "_process_rank_one_or_two_observation": { "total": 67.52370383808102, "count": 460468, "is_parallel": true, "self": 67.52370383808102 } } } } } } } } } } }, "trainer_advance": { "total": 576.1977680078769, "count": 230235, "self": 7.452969028903794, "children": { "process_trajectory": { "total": 136.96136128497255, "count": 230235, "self": 135.58578068097233, "children": { "RLTrainer._checkpoint": { "total": 1.3755806040002199, "count": 10, "self": 1.3755806040002199 } } }, "_update_policy": { "total": 431.78343769400055, "count": 96, "self": 368.64476604399846, "children": { "TorchPPOOptimizer.update": { "total": 63.138671650002095, "count": 2880, "self": 63.138671650002095 } } } } } } }, "trainer_threads": { "total": 1.2830000741814729e-06, "count": 1, "self": 1.2830000741814729e-06 }, "TrainerController._save_models": { "total": 0.12928106400022443, "count": 1, "self": 0.0021375060000536905, 
"children": { "RLTrainer._checkpoint": { "total": 0.12714355800017074, "count": 1, "self": 0.12714355800017074 } } } } } } }