{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.400834560394287, "min": 1.400834560394287, "max": 1.4264026880264282, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 70466.1796875, "min": 69089.640625, "max": 76395.65625, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 85.04819277108433, "min": 74.88922610015175, "max": 397.13492063492066, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49413.0, "min": 49015.0, "max": 50039.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999998.0, "min": 49750.0, "max": 1999998.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999998.0, "min": 49750.0, "max": 1999998.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.4485244750976562, "min": 0.14035387337207794, "max": 2.5303995609283447, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 1422.5927734375, "min": 17.544233322143555, "max": 1618.607666015625, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.8196921147495866, "min": 1.7715250940322875, "max": 4.056997240994908, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 2219.24111866951, "min": 221.44063675403595, "max": 2549.6304953098297, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.8196921147495866, "min": 1.7715250940322875, "max": 4.056997240994908, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 2219.24111866951, "min": 221.44063675403595, "max": 2549.6304953098297, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.016411527072583948, "min": 0.013990065602896114, "max": 0.0216104364120838, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.049234581217751844, "min": 0.02798013120579223, "max": 0.05724306039725585, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.056322639518313934, "min": 0.02113166100655993, "max": 0.059272315415243304, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.1689679185549418, "min": 0.04226332201311986, "max": 0.17313722024361294, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.758148747316668e-06, "min": 3.758148747316668e-06, "max": 0.00029529352656882497, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 1.1274446241950004e-05, "min": 1.1274446241950004e-05, "max": 0.00084377056874315, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10125268333333333, "min": 0.10125268333333333, "max": 0.19843117500000002, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.30375805, "min": 0.20766645, "max": 0.5812568499999999, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 7.250889833333336e-05, "min": 7.250889833333336e-05, "max": 0.0049217156324999995, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00021752669500000007, "min": 0.00021752669500000007, "max": 0.014064716815, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1670370821", "python_version": "3.8.15 (default, Oct 12 2022, 19:14:39) \n[GCC 7.5.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.29.0.dev0", "mlagents_envs_version": "0.29.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.8.1+cu102", "numpy_version": "1.21.6", "end_time_seconds": "1670373060" 
}, "total": 2239.793010078, "count": 1, "self": 0.39011656199954814, "children": { "run_training.setup": { "total": 0.10533825800007435, "count": 1, "self": 0.10533825800007435 }, "TrainerController.start_learning": { "total": 2239.297555258, "count": 1, "self": 4.000962914018601, "children": { "TrainerController._reset_env": { "total": 9.561084074000064, "count": 1, "self": 9.561084074000064 }, "TrainerController.advance": { "total": 2225.606969208982, "count": 233196, "self": 4.346713943867599, "children": { "env_step": { "total": 1745.7826211000406, "count": 233196, "self": 1459.4584872649434, "children": { "SubprocessEnvManager._take_step": { "total": 283.78720511806205, "count": 233196, "self": 14.91189688206282, "children": { "TorchPolicy.evaluate": { "total": 268.8753082359992, "count": 222937, "self": 67.41189006007869, "children": { "TorchPolicy.sample_actions": { "total": 201.46341817592054, "count": 222937, "self": 201.46341817592054 } } } } }, "workers": { "total": 2.5369287170352663, "count": 233196, "self": 0.0, "children": { "worker_root": { "total": 2231.3477673079933, "count": 233196, "is_parallel": true, "self": 1030.328633991026, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.002102179999951659, "count": 1, "is_parallel": true, "self": 0.00036543999976856867, "children": { "_process_rank_one_or_two_observation": { "total": 0.0017367400001830902, "count": 2, "is_parallel": true, "self": 0.0017367400001830902 } } }, "UnityEnvironment.step": { "total": 0.02649363800003357, "count": 1, "is_parallel": true, "self": 0.00025844300023436517, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00018194899996615277, "count": 1, "is_parallel": true, "self": 0.00018194899996615277 }, "communicator.exchange": { "total": 0.025334246999932475, "count": 1, "is_parallel": true, "self": 0.025334246999932475 }, "steps_from_proto": { "total": 0.0007189989999005775, "count": 1, "is_parallel": true, "self": 0.00023940999994920276, "children": { "_process_rank_one_or_two_observation": { "total": 0.0004795889999513747, "count": 2, "is_parallel": true, "self": 0.0004795889999513747 } } } } } } }, "UnityEnvironment.step": { "total": 1201.0191333169673, "count": 233195, "is_parallel": true, "self": 35.268761107795854, "children": { "UnityEnvironment._generate_step_input": { "total": 75.05779445203814, "count": 233195, "is_parallel": true, "self": 75.05779445203814 }, "communicator.exchange": { "total": 997.681426953055, "count": 233195, "is_parallel": true, "self": 997.681426953055 }, "steps_from_proto": { "total": 93.01115080407806, "count": 233195, "is_parallel": true, "self": 38.41529455895784, "children": { "_process_rank_one_or_two_observation": { "total": 54.59585624512022, "count": 466390, "is_parallel": true, "self": 54.59585624512022 } } } } } } } } } } }, "trainer_advance": { "total": 475.4776341650737, "count": 233196, "self": 5.886477873088779, "children": { "process_trajectory": { "total": 154.1455589369856, "count": 233196, "self": 153.64874828498466, "children": { "RLTrainer._checkpoint": { "total": 0.49681065200093144, "count": 4, "self": 0.49681065200093144 } } }, "_update_policy": { "total": 315.44559735499934, "count": 97, "self": 261.9413641190039, "children": { "TorchPPOOptimizer.update": { "total": 53.50423323599546, "count": 2910, "self": 53.50423323599546 } } } } } } }, "trainer_threads": { "total": 9.129998943535611e-07, "count": 1, "self": 
9.129998943535611e-07 }, "TrainerController._save_models": { "total": 0.12853814799973406, "count": 1, "self": 0.002721903999827191, "children": { "RLTrainer._checkpoint": { "total": 0.12581624399990687, "count": 1, "self": 0.12581624399990687 } } } } } } }