{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.399813175201416, "min": 1.399813175201416, "max": 1.4258381128311157, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 71212.6953125, "min": 67749.8515625, "max": 78581.53125, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 80.29641693811075, "min": 71.0966810966811, "max": 400.824, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49302.0, "min": 49124.0, "max": 50103.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999979.0, "min": 49894.0, "max": 1999979.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999979.0, "min": 49894.0, "max": 1999979.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.4706525802612305, "min": 0.110715351998806, "max": 2.5088210105895996, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1516.980712890625, "min": 13.728703498840332, "max": 1697.364990234375, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.7920406282142243, "min": 1.8181173510609134, "max": 4.030835138508147, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2328.3129457235336, "min": 225.44655153155327, "max": 2665.428489089012, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.7920406282142243, "min": 1.8181173510609134, "max": 4.030835138508147, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2328.3129457235336, "min": 225.44655153155327, "max": 2665.428489089012, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.016909646920652853, "min": 0.013951941918882464, "max": 0.019242488412419333, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.05072894076195856, "min": 0.027903883837764928, "max": 0.057727465237258004, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.05324819187323252, "min": 0.022064437996596097, "max": 0.0583293691277504, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.15974457561969757, "min": 0.04412887599319219, "max": 0.1696402760843436, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.873648708816659e-06, "min": 3.873648708816659e-06, "max": 0.00029533815155394997, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.1620946126449976e-05, "min": 1.1620946126449976e-05, "max": 0.0008441910186029999, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10129118333333338, "min": 0.10129118333333338, "max": 0.19844605000000004, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30387355000000016, "min": 0.20770819999999998, "max": 0.5813969999999999, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 7.443004833333323e-05, "min": 7.443004833333323e-05, "max": 0.004922457894999999, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.0002232901449999997, "min": 0.0002232901449999997, "max": 0.014071710299999995, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1682465003", "python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": 
"1682467482" }, "total": 2478.39916345, "count": 1, "self": 0.4479210179997608, "children": { "run_training.setup": { "total": 0.1182288070000368, "count": 1, "self": 0.1182288070000368 }, "TrainerController.start_learning": { "total": 2477.833013625, "count": 1, "self": 4.734263780009314, "children": { "TrainerController._reset_env": { "total": 3.961996576000047, "count": 1, "self": 3.961996576000047 }, "TrainerController.advance": { "total": 2469.012443841991, "count": 233303, "self": 5.188346829116654, "children": { "env_step": { "total": 1922.4210250749898, "count": 233303, "self": 1624.6235194570331, "children": { "SubprocessEnvManager._take_step": { "total": 294.6630824560066, "count": 233303, "self": 17.633886762898783, "children": { "TorchPolicy.evaluate": { "total": 277.0291956931078, "count": 223036, "self": 277.0291956931078 } } }, "workers": { "total": 3.1344231619499396, "count": 233303, "self": 0.0, "children": { "worker_root": { "total": 2469.4410244409737, "count": 233303, "is_parallel": true, "self": 1145.2777186070662, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.0009284129999969082, "count": 1, "is_parallel": true, "self": 0.00026633000004494534, "children": { "_process_rank_one_or_two_observation": { "total": 0.0006620829999519628, "count": 2, "is_parallel": true, "self": 0.0006620829999519628 } } }, "UnityEnvironment.step": { "total": 0.029443786000001637, "count": 1, "is_parallel": true, "self": 0.000314745999958177, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0002569590000121025, "count": 1, "is_parallel": true, "self": 0.0002569590000121025 }, "communicator.exchange": { "total": 0.028133712999988347, "count": 1, "is_parallel": true, "self": 0.028133712999988347 }, "steps_from_proto": { "total": 0.0007383680000430104, "count": 1, "is_parallel": true, "self": 0.00020272500000828586, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005356430000347245, "count": 2, "is_parallel": true, "self": 0.0005356430000347245 } } } } } } }, "UnityEnvironment.step": { "total": 1324.1633058339075, "count": 233302, "is_parallel": true, "self": 39.76315860212753, "children": { "UnityEnvironment._generate_step_input": { "total": 82.43119639593851, "count": 233302, "is_parallel": true, "self": 82.43119639593851 }, "communicator.exchange": { "total": 1108.7606108309124, "count": 233302, "is_parallel": true, "self": 1108.7606108309124 }, "steps_from_proto": { "total": 93.20834000492914, "count": 233302, "is_parallel": true, "self": 34.78264819095341, "children": { "_process_rank_one_or_two_observation": { "total": 58.42569181397573, "count": 466604, "is_parallel": true, "self": 58.42569181397573 } } } } } } } } } } }, "trainer_advance": { "total": 541.4030719378849, "count": 233303, "self": 7.22744367282462, "children": { "process_trajectory": { "total": 144.32088708606045, "count": 233303, "self": 142.77204345306006, "children": { "RLTrainer._checkpoint": { "total": 1.5488436330003879, "count": 10, "self": 1.5488436330003879 } } }, "_update_policy": { "total": 389.8547411789998, "count": 97, "self": 328.37150484099845, "children": { "TorchPPOOptimizer.update": { "total": 61.48323633800135, "count": 2910, "self": 61.48323633800135 } } } } } } }, "trainer_threads": { "total": 9.73000169324223e-07, "count": 1, "self": 9.73000169324223e-07 }, "TrainerController._save_models": { "total": 0.12430845399967438, "count": 1, "self": 0.0024240639995696256, 
"children": { "RLTrainer._checkpoint": { "total": 0.12188439000010476, "count": 1, "self": 0.12188439000010476 } } } } } } }