{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.2870231866836548,
"min": 1.2870231866836548,
"max": 2.8589818477630615,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 12345.126953125,
"min": 12345.126953125,
"max": 29278.833984375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.226325035095215,
"min": 0.40360695123672485,
"max": 12.226325035095215,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2384.13330078125,
"min": 78.29975128173828,
"max": 2454.4501953125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07853997716972348,
"min": 0.06340615681113501,
"max": 0.07853997716972348,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.31415990867889393,
"min": 0.25362462724454005,
"max": 0.36586499466633826,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.311422145907201,
"min": 0.17737723324520915,
"max": 0.37785175255700654,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.245688583628804,
"min": 0.7095089329808366,
"max": 1.8892587627850328,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 9.429097306000008e-06,
"min": 9.429097306000008e-06,
"max": 0.0003405290027059999,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.771638922400003e-05,
"min": 3.771638922400003e-05,
"max": 0.00161602003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10215519999999999,
"min": 0.10215519999999999,
"max": 0.17783519999999997,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40862079999999995,
"min": 0.40862079999999995,
"max": 0.869376,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.977272727272727,
"min": 3.3863636363636362,
"max": 24.30909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1055.0,
"min": 149.0,
"max": 1337.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.977272727272727,
"min": 3.3863636363636362,
"max": 24.30909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1055.0,
"min": 149.0,
"max": 1337.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687985637",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687986097"
},
"total": 460.083861952,
"count": 1,
"self": 0.7487914550000028,
"children": {
"run_training.setup": {
"total": 0.05047031100002641,
"count": 1,
"self": 0.05047031100002641
},
"TrainerController.start_learning": {
"total": 459.284600186,
"count": 1,
"self": 0.5400151439947649,
"children": {
"TrainerController._reset_env": {
"total": 4.960689653000031,
"count": 1,
"self": 4.960689653000031
},
"TrainerController.advance": {
"total": 453.56413772800505,
"count": 18226,
"self": 0.2439246540084241,
"children": {
"env_step": {
"total": 453.3202130739966,
"count": 18226,
"self": 331.913896292992,
"children": {
"SubprocessEnvManager._take_step": {
"total": 121.15331122000265,
"count": 18226,
"self": 1.6521426480106811,
"children": {
"TorchPolicy.evaluate": {
"total": 119.50116857199197,
"count": 18226,
"self": 119.50116857199197
}
}
},
"workers": {
"total": 0.25300556100199856,
"count": 18226,
"self": 0.0,
"children": {
"worker_root": {
"total": 457.6707518440012,
"count": 18226,
"is_parallel": true,
"self": 215.0986567189982,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005449989999988247,
"count": 1,
"is_parallel": true,
"self": 0.0039680309999425845,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014819590000456628,
"count": 10,
"is_parallel": true,
"self": 0.0014819590000456628
}
}
},
"UnityEnvironment.step": {
"total": 0.06413202099997761,
"count": 1,
"is_parallel": true,
"self": 0.0005979630000183533,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041615399999272995,
"count": 1,
"is_parallel": true,
"self": 0.00041615399999272995
},
"communicator.exchange": {
"total": 0.060875344000010045,
"count": 1,
"is_parallel": true,
"self": 0.060875344000010045
},
"steps_from_proto": {
"total": 0.002242559999956484,
"count": 1,
"is_parallel": true,
"self": 0.00043483999991167366,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018077200000448101,
"count": 10,
"is_parallel": true,
"self": 0.0018077200000448101
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 242.572095125003,
"count": 18225,
"is_parallel": true,
"self": 10.0850628660055,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.087425390000533,
"count": 18225,
"is_parallel": true,
"self": 5.087425390000533
},
"communicator.exchange": {
"total": 193.76781355099467,
"count": 18225,
"is_parallel": true,
"self": 193.76781355099467
},
"steps_from_proto": {
"total": 33.63179331800228,
"count": 18225,
"is_parallel": true,
"self": 6.042327713987731,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.58946560401455,
"count": 182250,
"is_parallel": true,
"self": 27.58946560401455
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00014500200006750674,
"count": 1,
"self": 0.00014500200006750674,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 450.1530366360366,
"count": 431805,
"is_parallel": true,
"self": 9.381634067062407,
"children": {
"process_trajectory": {
"total": 246.1842357189737,
"count": 431805,
"is_parallel": true,
"self": 244.46956379397375,
"children": {
"RLTrainer._checkpoint": {
"total": 1.7146719249999478,
"count": 4,
"is_parallel": true,
"self": 1.7146719249999478
}
}
},
"_update_policy": {
"total": 194.58716685000047,
"count": 90,
"is_parallel": true,
"self": 75.7531142710032,
"children": {
"TorchPPOOptimizer.update": {
"total": 118.83405257899727,
"count": 4587,
"is_parallel": true,
"self": 118.83405257899727
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.2196126590000631,
"count": 1,
"self": 0.0011579890000348314,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21845467000002827,
"count": 1,
"self": 0.21845467000002827
}
}
}
}
}
}
}