{ "name": "root", "gauges": { "Pyramids.Policy.Entropy.mean": { "value": 0.4947219789028168, "min": 0.49431654810905457, "max": 1.3941538333892822, "count": 33 }, "Pyramids.Policy.Entropy.sum": { "value": 14865.40625, "min": 14774.1328125, "max": 42293.05078125, "count": 33 }, "Pyramids.Step.mean": { "value": 989900.0, "min": 29952.0, "max": 989900.0, "count": 33 }, "Pyramids.Step.sum": { "value": 989900.0, "min": 29952.0, "max": 989900.0, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.mean": { "value": 0.5117311477661133, "min": -0.09958836436271667, "max": 0.5539224743843079, "count": 33 }, "Pyramids.Policy.ExtrinsicValueEstimate.sum": { "value": 140.72605895996094, "min": -23.901206970214844, "max": 152.88259887695312, "count": 33 }, "Pyramids.Policy.RndValueEstimate.mean": { "value": 0.017790410667657852, "min": -0.02425825409591198, "max": 0.2648756802082062, "count": 33 }, "Pyramids.Policy.RndValueEstimate.sum": { "value": 4.89236307144165, "min": -6.404179096221924, "max": 63.570159912109375, "count": 33 }, "Pyramids.Losses.PolicyLoss.mean": { "value": 0.06726003096105351, "min": 0.06481000737953678, "max": 0.07749080261592531, "count": 33 }, "Pyramids.Losses.PolicyLoss.sum": { "value": 0.9416404334547491, "min": 0.4785749078285011, "max": 1.034316851501239, "count": 33 }, "Pyramids.Losses.ValueLoss.mean": { "value": 0.014233913520246376, "min": 0.00024020661403899171, "max": 0.015983302794469925, "count": 33 }, "Pyramids.Losses.ValueLoss.sum": { "value": 0.19927478928344927, "min": 0.002402066140389917, "max": 0.22376623912257895, "count": 33 }, "Pyramids.Policy.LearningRate.mean": { "value": 7.4025975325000036e-06, "min": 7.4025975325000036e-06, "max": 0.00029515063018788575, "count": 33 }, "Pyramids.Policy.LearningRate.sum": { "value": 0.00010363636545500005, "min": 0.00010363636545500005, "max": 0.0030215477928175003, "count": 33 }, "Pyramids.Policy.Epsilon.mean": { "value": 0.10246750000000002, "min": 0.10246750000000002, "max": 0.19838354285714285, "count": 33 }, "Pyramids.Policy.Epsilon.sum": { "value": 1.4345450000000002, "min": 1.3691136000000002, "max": 2.3598540999999997, "count": 33 }, "Pyramids.Policy.Beta.mean": { "value": 0.00025650325000000013, "min": 0.00025650325000000013, "max": 0.00983851593142857, "count": 33 }, "Pyramids.Policy.Beta.sum": { "value": 0.003591045500000002, "min": 0.003591045500000002, "max": 0.10074753174999998, "count": 33 }, "Pyramids.Losses.RNDLoss.mean": { "value": 0.011489548720419407, "min": 0.011489548720419407, "max": 0.49717387557029724, "count": 33 }, "Pyramids.Losses.RNDLoss.sum": { "value": 0.16085368394851685, "min": 0.16085368394851685, "max": 3.480217218399048, "count": 33 }, "Pyramids.Environment.EpisodeLength.mean": { "value": 371.7439024390244, "min": 337.71264367816093, "max": 999.0, "count": 33 }, "Pyramids.Environment.EpisodeLength.sum": { "value": 30483.0, "min": 15984.0, "max": 32912.0, "count": 33 }, "Pyramids.Environment.CumulativeReward.mean": { "value": 1.530660966184081, "min": -1.0000000521540642, "max": 1.594544818260889, "count": 33 }, "Pyramids.Environment.CumulativeReward.sum": { "value": 125.51419922709465, "min": -32.000001668930054, "max": 138.72539918869734, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.mean": { "value": 1.530660966184081, "min": -1.0000000521540642, "max": 1.594544818260889, "count": 33 }, "Pyramids.Policy.ExtrinsicReward.sum": { "value": 125.51419922709465, "min": -32.000001668930054, "max": 138.72539918869734, "count": 33 }, "Pyramids.Policy.RndReward.mean": { "value": 
0.04393152308134964, "min": 0.043744574081495236, "max": 10.124448403716087, "count": 33 }, "Pyramids.Policy.RndReward.sum": { "value": 3.6023848926706705, "min": 3.6023848926706705, "max": 161.9911744594574, "count": 33 }, "Pyramids.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 }, "Pyramids.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 33 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1724331427", "python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics", "mlagents_version": "1.1.0.dev0", "mlagents_envs_version": "1.1.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "2.3.1+cu121", "numpy_version": "1.23.5", "end_time_seconds": "1724334045" }, "total": 2618.299379684, "count": 1, "self": 0.5396367970001847, "children": { "run_training.setup": { "total": 0.10401400000000649, "count": 1, "self": 0.10401400000000649 }, "TrainerController.start_learning": { "total": 2617.655728887, "count": 1, "self": 2.158611226046105, "children": { "TrainerController._reset_env": { "total": 2.481118510999977, "count": 1, "self": 2.481118510999977 }, "TrainerController.advance": { "total": 2612.9196889229534, "count": 63536, "self": 2.2398819829168133, "children": { "env_step": { "total": 1883.1549628880825, "count": 63536, "self": 1694.7234224530448, "children": { "SubprocessEnvManager._take_step": { "total": 187.15182436203463, "count": 63536, "self": 6.3785908850884425, "children": { "TorchPolicy.evaluate": { "total": 180.7732334769462, "count": 62555, "self": 180.7732334769462 } } }, "workers": { "total": 1.279716073003101, "count": 63536, "self": 0.0, "children": { "worker_root": { "total": 2610.6248320409873, "count": 63536, "is_parallel": true, "self": 1083.644460354009, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.003274492000173268, "count": 1, "is_parallel": true, "self": 0.0010586860003058973, "children": { "_process_rank_one_or_two_observation": { "total": 0.0022158059998673707, "count": 8, "is_parallel": true, "self": 0.0022158059998673707 } } }, "UnityEnvironment.step": { "total": 0.05563268000014432, "count": 1, "is_parallel": true, "self": 0.0007230130001971702, "children": { "UnityEnvironment._generate_step_input": { "total": 0.0004846330000418675, "count": 1, "is_parallel": true, "self": 0.0004846330000418675 }, "communicator.exchange": { "total": 0.05244895999999244, "count": 1, "is_parallel": true, "self": 0.05244895999999244 }, "steps_from_proto": { "total": 0.001976073999912842, "count": 1, "is_parallel": true, "self": 0.0004465020001589437, "children": { "_process_rank_one_or_two_observation": { "total": 0.0015295719997538981, "count": 8, "is_parallel": true, "self": 0.0015295719997538981 } } } } } } }, "UnityEnvironment.step": { "total": 1526.9803716869783, "count": 63535, "is_parallel": true, "self": 43.25341842305647, "children": { "UnityEnvironment._generate_step_input": { "total": 28.77956093798116, "count": 63535, "is_parallel": true, "self": 28.77956093798116 }, "communicator.exchange": { "total": 1331.5953607839972, "count": 63535, "is_parallel": true, "self": 1331.5953607839972 }, "steps_from_proto": { "total": 123.35203154194346, "count": 63535, "is_parallel": true, "self": 
26.642491105838417, "children": { "_process_rank_one_or_two_observation": { "total": 96.70954043610504, "count": 508280, "is_parallel": true, "self": 96.70954043610504 } } } } } } } } } } }, "trainer_advance": { "total": 727.5248440519538, "count": 63536, "self": 3.71878911492081, "children": { "process_trajectory": { "total": 146.17663741703427, "count": 63536, "self": 145.9538230670346, "children": { "RLTrainer._checkpoint": { "total": 0.22281434999968042, "count": 2, "self": 0.22281434999968042 } } }, "_update_policy": { "total": 577.6294175199987, "count": 431, "self": 338.85613470200997, "children": { "TorchPPOOptimizer.update": { "total": 238.77328281798873, "count": 22869, "self": 238.77328281798873 } } } } } } }, "trainer_threads": { "total": 1.0990002010657918e-06, "count": 1, "self": 1.0990002010657918e-06 }, "TrainerController._save_models": { "total": 0.09630912800002989, "count": 1, "self": 0.0014756300001863565, "children": { "RLTrainer._checkpoint": { "total": 0.09483349799984353, "count": 1, "self": 0.09483349799984353 } } } } } } }