First Push (665ee61)
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9993101954460144,
"min": 0.9993101954460144,
"max": 2.866244316101074,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9563.3984375,
"min": 9563.3984375,
"max": 29416.265625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.576066970825195,
"min": 0.27747318148612976,
"max": 12.576066970825195,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2452.3330078125,
"min": 53.82979965209961,
"max": 2550.501953125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07345555118780454,
"min": 0.06034702958776524,
"max": 0.07458168667485462,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2938222047512182,
"min": 0.25231527956316757,
"max": 0.37290843337427315,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1992142649330929,
"min": 0.11732224045965055,
"max": 0.30621126919400454,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7968570597323716,
"min": 0.4692889618386022,
"max": 1.5310563459700228,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.931818181818183,
"min": 2.8863636363636362,
"max": 25.10909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1097.0,
"min": 127.0,
"max": 1381.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.931818181818183,
"min": 2.8863636363636362,
"max": 25.10909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1097.0,
"min": 127.0,
"max": 1381.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680443550",
"python_version": "3.9.0 (default, Nov 15 2020, 14:28:56) \n[GCC 7.3.0]",
"command_line_arguments": "/mnt/slurm-home/h_kambe/miniconda3/envs/py39-deep-rl-class/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.2+cu111",
"numpy_version": "1.21.2",
"end_time_seconds": "1680443756"
},
"total": 206.35802875366062,
"count": 1,
"self": 0.32059095706790686,
"children": {
"run_training.setup": {
"total": 0.05711450334638357,
"count": 1,
"self": 0.05711450334638357
},
"TrainerController.start_learning": {
"total": 205.98032329324633,
"count": 1,
"self": 0.2908992441371083,
"children": {
"TrainerController._reset_env": {
"total": 9.076232192106545,
"count": 1,
"self": 9.076232192106545
},
"TrainerController.advance": {
"total": 196.45816499460489,
"count": 18208,
"self": 0.15620674844831228,
"children": {
"env_step": {
"total": 196.30195824615657,
"count": 18208,
"self": 128.4781814245507,
"children": {
"SubprocessEnvManager._take_step": {
"total": 67.68129522632807,
"count": 18208,
"self": 0.8802304044365883,
"children": {
"TorchPolicy.evaluate": {
"total": 66.80106482189149,
"count": 18208,
"self": 66.80106482189149
}
}
},
"workers": {
"total": 0.14248159527778625,
"count": 18208,
"self": 0.0,
"children": {
"worker_root": {
"total": 205.5498168328777,
"count": 18208,
"is_parallel": true,
"self": 117.13109902944416,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010628467425704002,
"count": 1,
"is_parallel": true,
"self": 0.00029126182198524475,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007715849205851555,
"count": 10,
"is_parallel": true,
"self": 0.0007715849205851555
}
}
},
"UnityEnvironment.step": {
"total": 0.020847042091190815,
"count": 1,
"is_parallel": true,
"self": 0.00025931745767593384,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008026650175452232,
"count": 1,
"is_parallel": true,
"self": 0.0008026650175452232
},
"communicator.exchange": {
"total": 0.019063190557062626,
"count": 1,
"is_parallel": true,
"self": 0.019063190557062626
},
"steps_from_proto": {
"total": 0.000721869058907032,
"count": 1,
"is_parallel": true,
"self": 0.00016753468662500381,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005543343722820282,
"count": 10,
"is_parallel": true,
"self": 0.0005543343722820282
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 88.41871780343354,
"count": 18207,
"is_parallel": true,
"self": 3.683372021652758,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.2040326464921236,
"count": 18207,
"is_parallel": true,
"self": 2.2040326464921236
},
"communicator.exchange": {
"total": 69.78663670737296,
"count": 18207,
"is_parallel": true,
"self": 69.78663670737296
},
"steps_from_proto": {
"total": 12.744676427915692,
"count": 18207,
"is_parallel": true,
"self": 2.797866209410131,
"children": {
"_process_rank_one_or_two_observation": {
"total": 9.946810218505561,
"count": 182070,
"is_parallel": true,
"self": 9.946810218505561
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.832461804151535e-05,
"count": 1,
"self": 6.832461804151535e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 196.30190872680396,
"count": 126072,
"is_parallel": true,
"self": 1.0359489414840937,
"children": {
"process_trajectory": {
"total": 105.1361311962828,
"count": 126072,
"is_parallel": true,
"self": 104.29716876335442,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8389624329283834,
"count": 4,
"is_parallel": true,
"self": 0.8389624329283834
}
}
},
"_update_policy": {
"total": 90.12982858903706,
"count": 90,
"is_parallel": true,
"self": 27.997073888778687,
"children": {
"TorchPPOOptimizer.update": {
"total": 62.132754700258374,
"count": 4587,
"is_parallel": true,
"self": 62.132754700258374
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15495853777974844,
"count": 1,
"self": 0.005292384885251522,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14966615289449692,
"count": 1,
"self": 0.14966615289449692
}
}
}
}
}
}
}
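
The JSON above is the timers report that ML-Agents writes at the end of a training run (typically results/<run-id>/run_logs/timers.json). Each entry under "gauges" records the latest value plus the min, max, and count accumulated over the run's summary periods, while the nested timer nodes record total/count/self wall-clock seconds for each profiled call, with sub-calls under "children". The Python sketch below shows one way to load and summarize such a file; the default path run_logs/timers.json and the helper name summarize_timers are assumptions for illustration, not part of the run artifacts.

import json
from pathlib import Path


def summarize_timers(path: str = "run_logs/timers.json") -> None:
    """Print a quick summary of an ML-Agents timers.json file (path is an assumed default)."""
    data = json.loads(Path(path).read_text())

    # Gauges: one entry per tracked metric, with the latest value plus
    # min/max over `count` summary periods.
    print("Gauges:")
    for name, gauge in data.get("gauges", {}).items():
        print(f"  {name}: value={gauge['value']:.4g} "
              f"(min={gauge['min']:.4g}, max={gauge['max']:.4g}, n={gauge['count']})")

    # Timers: a tree of nodes, each with total/count/self seconds and optional children.
    print("\nTop-level timers (seconds):")
    for name, node in data.get("children", {}).items():
        print(f"  {name}: total={node['total']:.2f}, count={node['count']}")


if __name__ == "__main__":
    summarize_timers()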