{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0416849851608276,
"min": 1.0416849851608276,
"max": 2.8730111122131348,
"count": 40
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10599.14453125,
"min": 10084.0576171875,
"max": 29454.109375,
"count": 40
},
"SnowballTarget.Step.mean": {
"value": 399992.0,
"min": 9952.0,
"max": 399992.0,
"count": 40
},
"SnowballTarget.Step.sum": {
"value": 399992.0,
"min": 9952.0,
"max": 399992.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.130623817443848,
"min": 0.289381206035614,
"max": 11.230559349060059,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2170.4716796875,
"min": 56.13995361328125,
"max": 2302.2646484375,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 40
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0688682206425309,
"min": 0.05710199580461645,
"max": 0.07429165160968243,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2754728825701236,
"min": 0.2284079832184658,
"max": 0.37145825804841215,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20234382422823532,
"min": 0.10474962289627715,
"max": 0.27552788390540606,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8093752969129413,
"min": 0.4189984915851086,
"max": 1.3386810939101612,
"count": 40
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.2970987029999984e-06,
"min": 1.2970987029999984e-06,
"max": 9.864700135300001e-05,
"count": 40
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.188394811999994e-06,
"min": 5.188394811999994e-06,
"max": 0.00048086001914,
"count": 40
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.101297,
"min": 0.101297,
"max": 0.19864700000000002,
"count": 40
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.405188,
"min": 0.405188,
"max": 0.98086,
"count": 40
},
"SnowballTarget.Policy.Beta.mean": {
"value": 7.472029999999992e-05,
"min": 7.472029999999992e-05,
"max": 0.004932485299999999,
"count": 40
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00029888119999999966,
"min": 0.00029888119999999966,
"max": 0.024044914,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 21.59090909090909,
"min": 3.0,
"max": 22.136363636363637,
"count": 40
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 950.0,
"min": 132.0,
"max": 1215.0,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 21.59090909090909,
"min": 3.0,
"max": 22.136363636363637,
"count": 40
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 950.0,
"min": 132.0,
"max": 1215.0,
"count": 40
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679756672",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679757582"
},
"total": 909.717008863,
"count": 1,
"self": 0.7933130740001388,
"children": {
"run_training.setup": {
"total": 0.18333119900000838,
"count": 1,
"self": 0.18333119900000838
},
"TrainerController.start_learning": {
"total": 908.7403645899999,
"count": 1,
"self": 1.09179119198825,
"children": {
"TrainerController._reset_env": {
"total": 9.496369007999988,
"count": 1,
"self": 9.496369007999988
},
"TrainerController.advance": {
"total": 897.9243211960114,
"count": 36414,
"self": 0.5435109040219004,
"children": {
"env_step": {
"total": 897.3808102919895,
"count": 36414,
"self": 652.420863595949,
"children": {
"SubprocessEnvManager._take_step": {
"total": 244.26463223900902,
"count": 36414,
"self": 4.615086561015744,
"children": {
"TorchPolicy.evaluate": {
"total": 239.64954567799327,
"count": 36414,
"self": 239.64954567799327
}
}
},
"workers": {
"total": 0.6953144570315999,
"count": 36414,
"self": 0.0,
"children": {
"worker_root": {
"total": 905.974836733007,
"count": 36414,
"is_parallel": true,
"self": 433.5379857939995,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007040260999986003,
"count": 1,
"is_parallel": true,
"self": 0.0046135780000895465,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024266829998964567,
"count": 10,
"is_parallel": true,
"self": 0.0024266829998964567
}
}
},
"UnityEnvironment.step": {
"total": 0.034218174999978146,
"count": 1,
"is_parallel": true,
"self": 0.0004686660000174925,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003951859999915541,
"count": 1,
"is_parallel": true,
"self": 0.0003951859999915541
},
"communicator.exchange": {
"total": 0.03175645400000349,
"count": 1,
"is_parallel": true,
"self": 0.03175645400000349
},
"steps_from_proto": {
"total": 0.0015978689999656126,
"count": 1,
"is_parallel": true,
"self": 0.0003817210001102467,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001216147999855366,
"count": 10,
"is_parallel": true,
"self": 0.001216147999855366
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 472.4368509390075,
"count": 36413,
"is_parallel": true,
"self": 18.833929333997446,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 10.495369288991355,
"count": 36413,
"is_parallel": true,
"self": 10.495369288991355
},
"communicator.exchange": {
"total": 382.1101855460067,
"count": 36413,
"is_parallel": true,
"self": 382.1101855460067
},
"steps_from_proto": {
"total": 60.997366770012036,
"count": 36413,
"is_parallel": true,
"self": 11.913829619958904,
"children": {
"_process_rank_one_or_two_observation": {
"total": 49.08353715005313,
"count": 364130,
"is_parallel": true,
"self": 49.08353715005313
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002851220001502952,
"count": 1,
"self": 0.0002851220001502952,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 891.6988349539561,
"count": 772114,
"is_parallel": true,
"self": 18.933748354950694,
"children": {
"process_trajectory": {
"total": 491.21797334200596,
"count": 772114,
"is_parallel": true,
"self": 488.34927612500616,
"children": {
"RLTrainer._checkpoint": {
"total": 2.8686972169998057,
"count": 8,
"is_parallel": true,
"self": 2.8686972169998057
}
}
},
"_update_policy": {
"total": 381.5471132569994,
"count": 181,
"is_parallel": true,
"self": 145.71545479900095,
"children": {
"TorchPPOOptimizer.update": {
"total": 235.83165845799846,
"count": 9228,
"is_parallel": true,
"self": 235.83165845799846
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.22759807200009163,
"count": 1,
"self": 0.0012908180001431901,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22630725399994844,
"count": 1,
"self": 0.22630725399994844
}
}
}
}
}
}
}
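
The "gauges" section above holds, for each ML-Agents metric, the latest recorded value together with its min/max over the 40 summary points. A minimal sketch of how one might load and summarize them with the standard library, assuming this file is checked out locally as run_logs/timers.json (the path is an assumption, not something recorded in the file itself):

import json

# Load the metrics/timing dump that mlagents-learn wrote for this run.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the latest value plus min/max over the summary points.
for name, gauge in timers["gauges"].items():
    print(f"{name}: last={gauge['value']:.4f} "
          f"min={gauge['min']:.4f} max={gauge['max']:.4f} (n={gauge['count']})")

# The cumulative-reward gauge is the headline result for SnowballTarget.
reward = timers["gauges"]["SnowballTarget.Environment.CumulativeReward.mean"]
print(f"Final mean reward: {reward['value']:.2f} (best {reward['max']:.2f})")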
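
The rest of the file is the hierarchical timer tree ("total" / "count" / "self" / "children") that ML-Agents records for the run, covering roughly 909.7 s of wall-clock time. A small sketch, under the same local-path assumption, that walks this tree and prints every block above a time threshold:

import json

def walk(node, name="root", depth=0, min_seconds=5.0):
    # Print this timer block if it used enough wall-clock time, then recurse.
    total = node.get("total", 0.0)
    if total >= min_seconds:
        print(f"{'  ' * depth}{name}: {total:.1f}s "
              f"(self {node.get('self', 0.0):.1f}s, count {node.get('count', 0)})")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1, min_seconds)

with open("run_logs/timers.json") as f:
    walk(json.load(f))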