{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3509610891342163,
"min": 0.3087957203388214,
"max": 0.36777380108833313,
"count": 14
},
"Pyramids.Policy.Entropy.sum": {
"value": 10590.6015625,
"min": 3339.9345703125,
"max": 11009.6767578125,
"count": 14
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 261.5739130434783,
"min": 208.8709677419355,
"max": 333.15555555555557,
"count": 14
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30081.0,
"min": 6475.0,
"max": 30810.0,
"count": 14
},
"Pyramids.Step.mean": {
"value": 1799967.0,
"min": 1409913.0,
"max": 1799967.0,
"count": 14
},
"Pyramids.Step.sum": {
"value": 1799967.0,
"min": 1409913.0,
"max": 1799967.0,
"count": 14
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6859020590782166,
"min": 0.578832745552063,
"max": 0.6859020590782166,
"count": 14
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 200.2834014892578,
"min": 53.8724479675293,
"max": 200.2834014892578,
"count": 14
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 21.957359313964844,
"min": 2.9718246459960938,
"max": 21.957359313964844,
"count": 14
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6411.548828125,
"min": 264.4924011230469,
"max": 6411.548828125,
"count": 14
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.720510334675682,
"min": 1.629107512453551,
"max": 1.7911290161071285,
"count": 14
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 199.5791988223791,
"min": 55.524999499320984,
"max": 199.5791988223791,
"count": 14
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.720510334675682,
"min": 1.629107512453551,
"max": 1.7911290161071285,
"count": 14
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 199.5791988223791,
"min": 55.524999499320984,
"max": 199.5791988223791,
"count": 14
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.22894134176172445,
"min": 0.22894134176172445,
"max": 0.3622605609319483,
"count": 14
},
"Pyramids.Policy.RndReward.sum": {
"value": 26.557195644360036,
"min": 7.3203921407694,
"max": 32.60345048387535,
"count": 14
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07270944114134181,
"min": 0.06266858351548708,
"max": 0.07270944114134181,
"count": 14
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0179321759787854,
"min": 0.27831401882576756,
"max": 1.0420461169269402,
"count": 14
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 21.68147027705397,
"min": 0.471697402519307,
"max": 21.68147027705397,
"count": 14
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 303.54058387875557,
"min": 1.9932998442091048,
"max": 303.54058387875557,
"count": 14
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.424611096591275e-06,
"min": 2.424611096591275e-06,
"max": 6.578182807274999e-05,
"count": 14
},
"Pyramids.Policy.LearningRate.sum": {
"value": 3.394455535227785e-05,
"min": 3.394455535227785e-05,
"max": 0.0008766917744364446,
"count": 14
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10080817063492063,
"min": 0.10080817063492063,
"max": 0.12192725000000001,
"count": 14
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4113143888888888,
"min": 0.48770900000000006,
"max": 1.7875094444444446,
"count": 14
},
"Pyramids.Policy.Beta.mean": {
"value": 9.073624642857163e-05,
"min": 9.073624642857163e-05,
"max": 0.002200532275,
"count": 14
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0012703074500000028,
"min": 0.0012703074500000028,
"max": 0.0293337992,
"count": 14
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.027669908478856087,
"min": 0.027669908478856087,
"max": 0.03692689538002014,
"count": 14
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.3873787224292755,
"min": 0.14770758152008057,
"max": 0.49223601818084717,
"count": 14
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 14
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 14
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1728319279",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1728320196"
},
"total": 916.5547743960005,
"count": 1,
"self": 1.1799581080003918,
"children": {
"run_training.setup": {
"total": 0.06476543600001605,
"count": 1,
"self": 0.06476543600001605
},
"TrainerController.start_learning": {
"total": 915.3100508520001,
"count": 1,
"self": 0.5411470719573117,
"children": {
"TrainerController._reset_env": {
"total": 2.24071387100048,
"count": 1,
"self": 2.24071387100048
},
"TrainerController.advance": {
"total": 912.4310510820433,
"count": 26117,
"self": 0.5757557350352727,
"children": {
"env_step": {
"total": 710.9093044849951,
"count": 26117,
"self": 649.9562996429831,
"children": {
"SubprocessEnvManager._take_step": {
"total": 60.62759578393161,
"count": 26117,
"self": 1.8369352299632737,
"children": {
"TorchPolicy.evaluate": {
"total": 58.79066055396834,
"count": 25071,
"self": 58.79066055396834
}
}
},
"workers": {
"total": 0.3254090580803677,
"count": 26117,
"self": 0.0,
"children": {
"worker_root": {
"total": 913.3407672259527,
"count": 26117,
"is_parallel": true,
"self": 311.62826983305786,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0031315679998442647,
"count": 1,
"is_parallel": true,
"self": 0.0009635180003897403,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021680499994545244,
"count": 8,
"is_parallel": true,
"self": 0.0021680499994545244
}
}
},
"UnityEnvironment.step": {
"total": 0.09401083200009452,
"count": 1,
"is_parallel": true,
"self": 0.0006287340002018027,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005019809996156255,
"count": 1,
"is_parallel": true,
"self": 0.0005019809996156255
},
"communicator.exchange": {
"total": 0.09124633700048435,
"count": 1,
"is_parallel": true,
"self": 0.09124633700048435
},
"steps_from_proto": {
"total": 0.0016337799997927505,
"count": 1,
"is_parallel": true,
"self": 0.0003371640004843357,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012966159993084148,
"count": 8,
"is_parallel": true,
"self": 0.0012966159993084148
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 601.7124973928949,
"count": 26116,
"is_parallel": true,
"self": 13.173401732853563,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.298754634973193,
"count": 26116,
"is_parallel": true,
"self": 9.298754634973193
},
"communicator.exchange": {
"total": 539.8244959001067,
"count": 26116,
"is_parallel": true,
"self": 539.8244959001067
},
"steps_from_proto": {
"total": 39.41584512496138,
"count": 26116,
"is_parallel": true,
"self": 8.129025020915833,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.286820104045546,
"count": 208928,
"is_parallel": true,
"self": 31.286820104045546
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 200.94599086201288,
"count": 26117,
"self": 1.07309663295473,
"children": {
"process_trajectory": {
"total": 54.34334475605556,
"count": 26117,
"self": 54.253657134056084,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08968762199947378,
"count": 1,
"self": 0.08968762199947378
}
}
},
"_update_policy": {
"total": 145.5295494730026,
"count": 189,
"self": 81.035269021967,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.49428045103559,
"count": 6066,
"self": 64.49428045103559
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5699997675255872e-06,
"count": 1,
"self": 1.5699997675255872e-06
},
"TrainerController._save_models": {
"total": 0.09713725699930364,
"count": 1,
"self": 0.0016087909989437321,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09552846600035991,
"count": 1,
"self": 0.09552846600035991
}
}
}
}
}
}
}