First Push: run00 (commit 4719ef2)
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4323994219303131,
"min": 0.42558759450912476,
"max": 1.4111346006393433,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13068.83984375,
"min": 12708.2802734375,
"max": 42808.1796875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989897.0,
"min": 29952.0,
"max": 989897.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989897.0,
"min": 29952.0,
"max": 989897.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.23714713752269745,
"min": -0.11596640944480896,
"max": 0.23943552374839783,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 60.235374450683594,
"min": -27.947904586791992,
"max": 61.295494079589844,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.024222437292337418,
"min": -0.0004362513718660921,
"max": 0.5149962902069092,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.152499198913574,
"min": -0.10949909687042236,
"max": 122.05412292480469,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06984255315091249,
"min": 0.06586573332272104,
"max": 0.07364441527634438,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9777957441127748,
"min": 0.47621837681883294,
"max": 1.0557523152674548,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011547288048276228,
"min": 0.00032395096276193874,
"max": 0.011547288048276228,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16166203267586718,
"min": 0.0035634605903813264,
"max": 0.16166203267586718,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.6062331789071375e-06,
"min": 7.6062331789071375e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010648726450469992,
"min": 0.00010648726450469992,
"max": 0.002952448015850699,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253537857142858,
"min": 0.10253537857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4354953000000001,
"min": 1.3691136000000002,
"max": 2.3173151000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026328431928571416,
"min": 0.00026328431928571416,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003685980469999998,
"min": 0.003685980469999998,
"max": 0.09842651507,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00925134215503931,
"min": 0.00925134215503931,
"max": 0.43075990676879883,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1295187920331955,
"min": 0.1295187920331955,
"max": 3.015319347381592,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 595.2173913043479,
"min": 595.2173913043479,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27380.0,
"min": 15984.0,
"max": 33324.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1003173641536548,
"min": -1.0000000521540642,
"max": 1.1003173641536548,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 50.614598751068115,
"min": -32.000001668930054,
"max": 50.614598751068115,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1003173641536548,
"min": -1.0000000521540642,
"max": 1.1003173641536548,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 50.614598751068115,
"min": -32.000001668930054,
"max": 50.614598751068115,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.057707393316693764,
"min": 0.057707393316693764,
"max": 9.031589467078447,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.6545400925679132,
"min": 2.6545400925679132,
"max": 144.50543147325516,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674983791",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674985678"
},
"total": 1886.977632475,
"count": 1,
"self": 0.42595810400007394,
"children": {
"run_training.setup": {
"total": 0.10376513299979706,
"count": 1,
"self": 0.10376513299979706
},
"TrainerController.start_learning": {
"total": 1886.4479092380002,
"count": 1,
"self": 1.162080894992414,
"children": {
"TrainerController._reset_env": {
"total": 5.991686069000025,
"count": 1,
"self": 5.991686069000025
},
"TrainerController.advance": {
"total": 1879.1853473600077,
"count": 63250,
"self": 1.2101099029569014,
"children": {
"env_step": {
"total": 1227.2462294220786,
"count": 63250,
"self": 1124.0561617401127,
"children": {
"SubprocessEnvManager._take_step": {
"total": 102.45223329203145,
"count": 63250,
"self": 4.329887695001162,
"children": {
"TorchPolicy.evaluate": {
"total": 98.12234559703029,
"count": 62562,
"self": 33.16463572095972,
"children": {
"TorchPolicy.sample_actions": {
"total": 64.95770987607057,
"count": 62562,
"self": 64.95770987607057
}
}
}
}
},
"workers": {
"total": 0.7378343899345055,
"count": 63250,
"self": 0.0,
"children": {
"worker_root": {
"total": 1882.7261828861328,
"count": 63250,
"is_parallel": true,
"self": 855.1712093141296,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001976140999886411,
"count": 1,
"is_parallel": true,
"self": 0.0007039239990263013,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012722170008601097,
"count": 8,
"is_parallel": true,
"self": 0.0012722170008601097
}
}
},
"UnityEnvironment.step": {
"total": 0.045827803000065614,
"count": 1,
"is_parallel": true,
"self": 0.0005458510004245909,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004494749996410974,
"count": 1,
"is_parallel": true,
"self": 0.0004494749996410974
},
"communicator.exchange": {
"total": 0.04322895499990409,
"count": 1,
"is_parallel": true,
"self": 0.04322895499990409
},
"steps_from_proto": {
"total": 0.0016035220000958361,
"count": 1,
"is_parallel": true,
"self": 0.0004287870001462579,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011747349999495782,
"count": 8,
"is_parallel": true,
"self": 0.0011747349999495782
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1027.5549735720033,
"count": 63249,
"is_parallel": true,
"self": 27.399509397981546,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.155437891039583,
"count": 63249,
"is_parallel": true,
"self": 23.155437891039583
},
"communicator.exchange": {
"total": 883.8404013919421,
"count": 63249,
"is_parallel": true,
"self": 883.8404013919421
},
"steps_from_proto": {
"total": 93.15962489104004,
"count": 63249,
"is_parallel": true,
"self": 21.866928645941243,
"children": {
"_process_rank_one_or_two_observation": {
"total": 71.2926962450988,
"count": 505992,
"is_parallel": true,
"self": 71.2926962450988
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 650.7290080349721,
"count": 63250,
"self": 2.041293625074104,
"children": {
"process_trajectory": {
"total": 144.1042223848999,
"count": 63250,
"self": 143.92428578789895,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17993659700096032,
"count": 2,
"self": 0.17993659700096032
}
}
},
"_update_policy": {
"total": 504.5834920249981,
"count": 428,
"self": 193.7105264219299,
"children": {
"TorchPPOOptimizer.update": {
"total": 310.8729656030682,
"count": 22854,
"self": 310.8729656030682
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0919993655988947e-06,
"count": 1,
"self": 1.0919993655988947e-06
},
"TrainerController._save_models": {
"total": 0.10879382200073451,
"count": 1,
"self": 0.0013750810012425063,
"children": {
"RLTrainer._checkpoint": {
"total": 0.107418740999492,
"count": 1,
"self": 0.107418740999492
}
}
}
}
}
}
}
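
The JSON above is the run log ML-Agents produces for this Pyramids training run: a flat `gauges` map of training statistics (value/min/max over 33 summary points) plus a nested timer tree that attributes the ~1887 s of wall-clock time to individual call sites. A minimal sketch of how one might inspect it, assuming the standard output path `results/<run-id>/run_logs/timers.json` (the path and run-id below are assumptions, not taken from this commit):

```python
import json

# Assumed location: ML-Agents normally writes this file to
# results/<run-id>/run_logs/timers.json, with <run-id> taken from --run-id.
with open("results/Pyramids Training/run_logs/timers.json") as f:
    run_log = json.load(f)

# Each gauge holds the latest value plus the min/max seen across the summary points.
gauges = run_log["gauges"]
reward = gauges["Pyramids.Environment.CumulativeReward.mean"]
steps = gauges["Pyramids.Step.mean"]
print(f"steps: {steps['value']:.0f}")
print(f"mean cumulative reward: {reward['value']:.3f} "
      f"(min {reward['min']:.3f}, max {reward['max']:.3f})")

# The timer tree breaks total wall-clock time down by call site, e.g. environment
# stepping vs. policy updates inside TrainerController.advance.
advance = (run_log["children"]["TrainerController.start_learning"]
                  ["children"]["TrainerController.advance"]["children"])
total = run_log["total"]
print(f"env_step:        {advance['env_step']['total'] / total:.1%} of {total:.0f} s")
print(f"trainer_advance: {advance['trainer_advance']['total'] / total:.1%} of {total:.0f} s")
```

For this run that breakdown shows most of the time spent in `env_step` (dominated by `communicator.exchange`, i.e. waiting on the Unity environment), with the remainder in `trainer_advance` (trajectory processing and PPO updates).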