{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.2991029024124146,
"min": 1.2991029024124146,
"max": 2.884037494659424,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 12160.90234375,
"min": 12160.90234375,
"max": 31014.328125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199992.0,
"min": 9984.0,
"max": 199992.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199992.0,
"min": 9984.0,
"max": 199992.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 6.463836669921875,
"min": 0.2772822678089142,
"max": 6.463836669921875,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 976.0393676757812,
"min": 41.037776947021484,
"max": 976.0393676757812,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.80392156862745,
"min": 3.090909090909091,
"max": 25.80392156862745,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1316.0,
"min": 136.0,
"max": 1359.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.80392156862745,
"min": 3.090909090909091,
"max": 25.80392156862745,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1316.0,
"min": 136.0,
"max": 1359.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.05945388550963539,
"min": 0.05945388550963539,
"max": 0.06657232236554722,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.23781554203854155,
"min": 0.23781554203854155,
"max": 0.3284966726993195,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.183262244425714,
"min": 0.09850787479829576,
"max": 0.2549174259283713,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.733048977702856,
"min": 0.39403149919318303,
"max": 1.2277166165411473,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.950097350000008e-06,
"min": 7.950097350000008e-06,
"max": 0.00029175000275,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.180038940000003e-05,
"min": 3.180038940000003e-05,
"max": 0.0013845000385,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.09999999999999998,
"min": 0.09999999999999998,
"max": 0.09999999999999998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.3999999999999999,
"min": 0.3999999999999999,
"max": 0.4999999999999999,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 3.623500000000002e-05,
"min": 3.623500000000002e-05,
"max": 0.0009727749999999999,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00014494000000000009,
"min": 0.00014494000000000009,
"max": 0.004618850000000001,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696276818",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1696277972"
},
"total": 1154.428818762,
"count": 1,
"self": 1.0753539990000718,
"children": {
"run_training.setup": {
"total": 0.08416951399999562,
"count": 1,
"self": 0.08416951399999562
},
"TrainerController.start_learning": {
"total": 1153.269295249,
"count": 1,
"self": 1.1662224659935418,
"children": {
"TrainerController._reset_env": {
"total": 4.575206643999991,
"count": 1,
"self": 4.575206643999991
},
"TrainerController.advance": {
"total": 1147.1329956800066,
"count": 18236,
"self": 0.510800703026689,
"children": {
"env_step": {
"total": 1146.62219497698,
"count": 18236,
"self": 999.805822083998,
"children": {
"SubprocessEnvManager._take_step": {
"total": 146.34128918899745,
"count": 18236,
"self": 2.886309995996129,
"children": {
"TorchPolicy.evaluate": {
"total": 143.45497919300132,
"count": 18236,
"self": 143.45497919300132
}
}
},
"workers": {
"total": 0.47508370398441,
"count": 18236,
"self": 0.0,
"children": {
"worker_root": {
"total": 1150.283555413998,
"count": 18236,
"is_parallel": true,
"self": 780.1486684300111,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00773595199999022,
"count": 1,
"is_parallel": true,
"self": 0.004757176999987678,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0029787750000025426,
"count": 10,
"is_parallel": true,
"self": 0.0029787750000025426
}
}
},
"UnityEnvironment.step": {
"total": 0.060158758000000034,
"count": 1,
"is_parallel": true,
"self": 0.0008301029999699949,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005832270000212247,
"count": 1,
"is_parallel": true,
"self": 0.0005832270000212247
},
"communicator.exchange": {
"total": 0.054616455999990876,
"count": 1,
"is_parallel": true,
"self": 0.054616455999990876
},
"steps_from_proto": {
"total": 0.004128972000017939,
"count": 1,
"is_parallel": true,
"self": 0.0005346530000167604,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003594319000001178,
"count": 10,
"is_parallel": true,
"self": 0.003594319000001178
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 370.13488698398703,
"count": 18235,
"is_parallel": true,
"self": 15.883491138994145,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.7538960629972,
"count": 18235,
"is_parallel": true,
"self": 7.7538960629972
},
"communicator.exchange": {
"total": 294.34221507598284,
"count": 18235,
"is_parallel": true,
"self": 294.34221507598284
},
"steps_from_proto": {
"total": 52.15528470601279,
"count": 18235,
"is_parallel": true,
"self": 10.470978443024336,
"children": {
"_process_rank_one_or_two_observation": {
"total": 41.68430626298846,
"count": 182350,
"is_parallel": true,
"self": 41.68430626298846
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00040532899993195315,
"count": 1,
"self": 0.00040532899993195315,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1146.375508204033,
"count": 200925,
"is_parallel": true,
"self": 4.437364087048536,
"children": {
"process_trajectory": {
"total": 187.06477452898466,
"count": 200925,
"is_parallel": true,
"self": 163.72388005998377,
"children": {
"RLTrainer._checkpoint": {
"total": 23.340894469000887,
"count": 40,
"is_parallel": true,
"self": 23.340894469000887
}
}
},
"_update_policy": {
"total": 954.8733695879998,
"count": 90,
"is_parallel": true,
"self": 262.44180190999714,
"children": {
"TorchPPOOptimizer.update": {
"total": 692.4315676780027,
"count": 12600,
"is_parallel": true,
"self": 692.4315676780027
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.39446512999984407,
"count": 1,
"self": 0.003891258999829006,
"children": {
"RLTrainer._checkpoint": {
"total": 0.39057387100001506,
"count": 1,
"self": 0.39057387100001506
}
}
}
}
}
}
}
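The JSON above is the training-statistics dump that mlagents-learn produces for this SnowballTarget run: "gauges" holds the tracked metrics (latest value plus min/max/count over the 20 summary periods), and the timer tree underneath records wall-clock totals per call site. As a minimal sketch of how one might inspect it, the Python below loads the file and prints both parts; the path "run_logs/timers.json" is an assumption about where this file sits in the repo, so adjust it as needed.

# Minimal sketch, assuming the file is stored at run_logs/timers.json
import json

with open("run_logs/timers.json") as f:
    stats = json.load(f)

# Each gauge records the latest value plus the min/max/count seen during training.
for name, gauge in stats["gauges"].items():
    print(f'{name}: value={gauge["value"]:.4f} '
          f'min={gauge["min"]:.4f} max={gauge["max"]:.4f} (n={gauge["count"]})')

# Top-level timer blocks, e.g. TrainerController.start_learning, with wall-clock totals.
for block, timing in stats["children"].items():
    print(f'{block}: {timing["total"]:.1f} s over {timing["count"]} call(s)')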