{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4132341146469116,
"min": 0.4132341146469116,
"max": 1.4406471252441406,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12397.0234375,
"min": 12397.0234375,
"max": 43703.47265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989942.0,
"min": 29952.0,
"max": 989942.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989942.0,
"min": 29952.0,
"max": 989942.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.48929283022880554,
"min": -0.12017884850502014,
"max": 0.5124750137329102,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 132.59835815429688,
"min": -28.963102340698242,
"max": 138.36825561523438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.018091673031449318,
"min": -0.026741527020931244,
"max": 0.25665026903152466,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.902843475341797,
"min": -7.113245964050293,
"max": 61.852718353271484,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06863422296531707,
"min": 0.0658006212940493,
"max": 0.07398144861675748,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.960879121514439,
"min": 0.5150755072245374,
"max": 1.1028333769063463,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014768146047011899,
"min": 0.00016870158462346566,
"max": 0.015481836002558749,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20675404465816657,
"min": 0.0020244190154815878,
"max": 0.225003132771235,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.464140369128568e-06,
"min": 7.464140369128568e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010449796516779996,
"min": 0.00010449796516779996,
"max": 0.0033821213726262997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024880142857143,
"min": 0.1024880142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348322000000002,
"min": 1.3886848,
"max": 2.5273737,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002585526271428571,
"min": 0.0002585526271428571,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00361973678,
"min": 0.00361973678,
"max": 0.11276463263,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01035116147249937,
"min": 0.01035116147249937,
"max": 0.4738081097602844,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14491626620292664,
"min": 0.14491626620292664,
"max": 3.3166568279266357,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 383.4938271604938,
"min": 353.45569620253167,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31063.0,
"min": 15984.0,
"max": 32820.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5177110896250348,
"min": -1.0000000521540642,
"max": 1.5916051736899786,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 122.93459825962782,
"min": -30.85000169277191,
"max": 123.51919828355312,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5177110896250348,
"min": -1.0000000521540642,
"max": 1.5916051736899786,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 122.93459825962782,
"min": -30.85000169277191,
"max": 123.51919828355312,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04121891087336813,
"min": 0.04121891087336813,
"max": 10.271578721702099,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.338731780742819,
"min": 3.2211904839641647,
"max": 164.34525954723358,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684331088",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684333206"
},
"total": 2118.8536290330003,
"count": 1,
"self": 0.4761866649996591,
"children": {
"run_training.setup": {
"total": 0.06294374999993124,
"count": 1,
"self": 0.06294374999993124
},
"TrainerController.start_learning": {
"total": 2118.3144986180005,
"count": 1,
"self": 1.2774693589994968,
"children": {
"TrainerController._reset_env": {
"total": 4.364788193000095,
"count": 1,
"self": 4.364788193000095
},
"TrainerController.advance": {
"total": 2112.581181055001,
"count": 63759,
"self": 1.30456350688155,
"children": {
"env_step": {
"total": 1480.8401113810694,
"count": 63759,
"self": 1376.3278705410298,
"children": {
"SubprocessEnvManager._take_step": {
"total": 103.76324061001128,
"count": 63759,
"self": 4.530021170024156,
"children": {
"TorchPolicy.evaluate": {
"total": 99.23321943998712,
"count": 62558,
"self": 99.23321943998712
}
}
},
"workers": {
"total": 0.7490002300282868,
"count": 63759,
"self": 0.0,
"children": {
"worker_root": {
"total": 2113.6116026639697,
"count": 63759,
"is_parallel": true,
"self": 843.6818232279738,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002562724999961574,
"count": 1,
"is_parallel": true,
"self": 0.000748421999560378,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018143030004011962,
"count": 8,
"is_parallel": true,
"self": 0.0018143030004011962
}
}
},
"UnityEnvironment.step": {
"total": 0.04841044299996611,
"count": 1,
"is_parallel": true,
"self": 0.0005627749999348453,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004886090000582044,
"count": 1,
"is_parallel": true,
"self": 0.0004886090000582044
},
"communicator.exchange": {
"total": 0.045408102999999755,
"count": 1,
"is_parallel": true,
"self": 0.045408102999999755
},
"steps_from_proto": {
"total": 0.0019509559999733028,
"count": 1,
"is_parallel": true,
"self": 0.0005128550001245458,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001438100999848757,
"count": 8,
"is_parallel": true,
"self": 0.001438100999848757
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1269.929779435996,
"count": 63758,
"is_parallel": true,
"self": 30.930485187087925,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.173014170944043,
"count": 63758,
"is_parallel": true,
"self": 22.173014170944043
},
"communicator.exchange": {
"total": 1121.3699579790016,
"count": 63758,
"is_parallel": true,
"self": 1121.3699579790016
},
"steps_from_proto": {
"total": 95.45632209896235,
"count": 63758,
"is_parallel": true,
"self": 19.346193637020633,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.11012846194171,
"count": 510064,
"is_parallel": true,
"self": 76.11012846194171
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 630.4365061670499,
"count": 63759,
"self": 2.4547871341060272,
"children": {
"process_trajectory": {
"total": 102.98223040994662,
"count": 63759,
"self": 102.78236632394646,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19986408600016148,
"count": 2,
"self": 0.19986408600016148
}
}
},
"_update_policy": {
"total": 524.9994886229972,
"count": 446,
"self": 340.117573063995,
"children": {
"TorchPPOOptimizer.update": {
"total": 184.8819155590022,
"count": 22860,
"self": 184.8819155590022
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0109997674589977e-06,
"count": 1,
"self": 1.0109997674589977e-06
},
"TrainerController._save_models": {
"total": 0.09105900000031397,
"count": 1,
"self": 0.0012841980001212505,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08977480200019272,
"count": 1,
"self": 0.08977480200019272
}
}
}
}
}
}
}
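For reference, below is a minimal Python sketch of how the summary statistics above could be inspected offline. The filename timers.json is an assumption based on where ML-Agents typically writes this run summary; it is not stated anywhere in the log itself.

import json

# Load the end-of-run summary (the path is illustrative, not confirmed by the log).
with open("timers.json") as f:
    stats = json.load(f)

# Each gauge stores the most recent value plus the min/max and the number of
# summary periods recorded during training.
for name, gauge in stats["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# The remaining top-level keys form the wall-clock timer tree for the run.
print("total wall-clock time (s):", stats["total"])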