{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.5873121023178101,
"min": 0.5873121023178101,
"max": 2.8776166439056396,
"count": 40
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6001.7421875,
"min": 5743.4130859375,
"max": 29596.287109375,
"count": 40
},
"SnowballTarget.Step.mean": {
"value": 399992.0,
"min": 9952.0,
"max": 399992.0,
"count": 40
},
"SnowballTarget.Step.sum": {
"value": 399992.0,
"min": 9952.0,
"max": 399992.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.320497512817383,
"min": 0.3165181279182434,
"max": 13.346643447875977,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2597.4970703125,
"min": 61.404518127441406,
"max": 2730.20263671875,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04042731446800948,
"min": 0.03864792560045771,
"max": 0.0543256036387902,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.1617092578720379,
"min": 0.15459170240183084,
"max": 0.271628018193951,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17447406332939863,
"min": 0.10754449265853812,
"max": 0.2994558504472176,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6978962533175945,
"min": 0.4301779706341525,
"max": 1.497279252236088,
"count": 40
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 6.311287896239998e-05,
"min": 6.311287896239998e-05,
"max": 0.00029675280108239997,
"count": 40
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0002524515158495999,
"min": 0.0002524515158495999,
"max": 0.001454064015312,
"count": 40
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.12103760000000001,
"min": 0.12103760000000001,
"max": 0.19891760000000003,
"count": 40
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.48415040000000004,
"min": 0.48415040000000004,
"max": 0.984688,
"count": 40
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00105977624,
"min": 0.00105977624,
"max": 0.00494598824,
"count": 40
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00423910496,
"min": 0.00423910496,
"max": 0.0242359312,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.386363636363637,
"min": 3.0681818181818183,
"max": 26.386363636363637,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1161.0,
"min": 135.0,
"max": 1441.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.386363636363637,
"min": 3.0681818181818183,
"max": 26.386363636363637,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1161.0,
"min": 135.0,
"max": 1441.0,
"count": 40
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679786025",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679786925"
},
"total": 899.543656866,
"count": 1,
"self": 0.004955608000045686,
"children": {
"run_training.setup": {
"total": 0.11370095000000902,
"count": 1,
"self": 0.11370095000000902
},
"TrainerController.start_learning": {
"total": 899.425000308,
"count": 1,
"self": 1.8777937260049384,
"children": {
"TrainerController._reset_env": {
"total": 9.79628939600002,
"count": 1,
"self": 9.79628939600002
},
"TrainerController.advance": {
"total": 887.614315038995,
"count": 36798,
"self": 0.6274243129998922,
"children": {
"env_step": {
"total": 886.9868907259951,
"count": 36798,
"self": 612.9160585160167,
"children": {
"SubprocessEnvManager._take_step": {
"total": 273.518237752001,
"count": 36798,
"self": 4.460513541009789,
"children": {
"TorchPolicy.evaluate": {
"total": 269.05772421099124,
"count": 36798,
"self": 269.05772421099124
}
}
},
"workers": {
"total": 0.5525944579773068,
"count": 36797,
"self": 0.0,
"children": {
"worker_root": {
"total": 895.7632007220026,
"count": 36797,
"is_parallel": true,
"self": 404.99374099399586,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006248698000035802,
"count": 1,
"is_parallel": true,
"self": 0.00464364299995168,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016050550000841213,
"count": 10,
"is_parallel": true,
"self": 0.0016050550000841213
}
}
},
"UnityEnvironment.step": {
"total": 0.03538946300000134,
"count": 1,
"is_parallel": true,
"self": 0.0005583369999726528,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00024463300002253163,
"count": 1,
"is_parallel": true,
"self": 0.00024463300002253163
},
"communicator.exchange": {
"total": 0.03251898600001368,
"count": 1,
"is_parallel": true,
"self": 0.03251898600001368
},
"steps_from_proto": {
"total": 0.0020675069999924744,
"count": 1,
"is_parallel": true,
"self": 0.0004112799999802519,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016562270000122226,
"count": 10,
"is_parallel": true,
"self": 0.0016562270000122226
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 490.7694597280067,
"count": 36796,
"is_parallel": true,
"self": 19.456491647977998,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 10.298666377005475,
"count": 36796,
"is_parallel": true,
"self": 10.298666377005475
},
"communicator.exchange": {
"total": 397.0950145830155,
"count": 36796,
"is_parallel": true,
"self": 397.0950145830155
},
"steps_from_proto": {
"total": 63.919287120007766,
"count": 36796,
"is_parallel": true,
"self": 12.63411040098481,
"children": {
"_process_rank_one_or_two_observation": {
"total": 51.28517671902296,
"count": 367960,
"is_parallel": true,
"self": 51.28517671902296
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00021386999992500932,
"count": 1,
"self": 0.00021386999992500932,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 881.1373914719685,
"count": 841953,
"is_parallel": true,
"self": 21.111690848929697,
"children": {
"process_trajectory": {
"total": 528.1684591510384,
"count": 841953,
"is_parallel": true,
"self": 525.4359121230385,
"children": {
"RLTrainer._checkpoint": {
"total": 2.7325470279998854,
"count": 8,
"is_parallel": true,
"self": 2.7325470279998854
}
}
},
"_update_policy": {
"total": 331.8572414720004,
"count": 184,
"is_parallel": true,
"self": 152.52067994600662,
"children": {
"TorchPPOOptimizer.update": {
"total": 179.33656152599377,
"count": 4416,
"is_parallel": true,
"self": 179.33656152599377
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.136388277000151,
"count": 1,
"self": 0.0010142660000838077,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13537401100006718,
"count": 1,
"self": 0.13537401100006718
}
}
}
}
}
}
}
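
For reference, a minimal Python sketch of how a gauges/timer dump like the one above could be inspected after downloading the repo. The `run_logs/timers.json` path is an assumption based on where ML-Agents pushes usually place this file; adjust it to wherever the file lives in your checkout. The metric names used below are taken directly from the gauges section above.

import json

# Assumed location of the dump shown above; change if your repo stores it elsewhere.
TIMERS_PATH = "run_logs/timers.json"

with open(TIMERS_PATH) as f:
    timers = json.load(f)

# Each gauge records the latest value plus the min/max/count seen over the run
# (count is 40 here, i.e. 40 summary writes over the ~400k training steps).
gauges = timers["gauges"]

for name in (
    "SnowballTarget.Environment.CumulativeReward.mean",
    "SnowballTarget.Policy.Entropy.mean",
    "SnowballTarget.Losses.ValueLoss.mean",
):
    g = gauges[name]
    print(f"{name}: last={g['value']:.3f} min={g['min']:.3f} max={g['max']:.3f} (n={g['count']})")

# The timer tree ("total", "self", "children") breaks down wall-clock time;
# the top-level total for this run was roughly 900 seconds.
print("total wall-clock seconds:", timers["total"])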