{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.193587064743042,
"min": 1.193587064743042,
"max": 1.5066683292388916,
"count": 2
},
"Pyramids.Policy.Entropy.sum": {
"value": 35750.3203125,
"min": 35750.3203125,
"max": 45706.2890625,
"count": 2
},
"Pyramids.Step.mean": {
"value": 59940.0,
"min": 29935.0,
"max": 59940.0,
"count": 2
},
"Pyramids.Step.sum": {
"value": 59940.0,
"min": 29935.0,
"max": 59940.0,
"count": 2
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0320928730070591,
"min": -0.0320928730070591,
"max": 0.11832442134618759,
"count": 2
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -7.734382629394531,
"min": -7.734382629394531,
"max": 28.042888641357422,
"count": 2
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.30851003527641296,
"min": 0.19716422259807587,
"max": 0.30851003527641296,
"count": 2
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 74.35092163085938,
"min": 46.72792053222656,
"max": 74.35092163085938,
"count": 2
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06964125871025563,
"min": 0.06964125871025563,
"max": 0.07132725981879431,
"count": 2
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.766053845812812,
"min": 0.49929081873156017,
"max": 0.766053845812812,
"count": 2
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.00302388335087176,
"min": 0.00302388335087176,
"max": 0.012563372225533883,
"count": 2
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.033262716859589356,
"min": 0.033262716859589356,
"max": 0.08794360557873718,
"count": 2
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.0002955134378591576,
"min": 0.0002955134378591576,
"max": 0.00029841121481530953,
"count": 2
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.003250647816450733,
"min": 0.002088878503707167,
"max": 0.003250647816450733,
"count": 2
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1985044787878788,
"min": 0.1985044787878788,
"max": 0.19947040476190478,
"count": 2
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.1835492666666667,
"min": 1.3962928333333335,
"max": 2.1835492666666667,
"count": 2
},
"Pyramids.Policy.Beta.mean": {
"value": 0.009850597430909092,
"min": 0.009850597430909092,
"max": 0.009947093435714285,
"count": 2
},
"Pyramids.Policy.Beta.sum": {
"value": 0.10835657174000002,
"min": 0.06962965404999999,
"max": 0.10835657174000002,
"count": 2
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.16762539744377136,
"min": 0.16762539744377136,
"max": 0.39258456230163574,
"count": 2
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.8438793420791626,
"min": 1.8438793420791626,
"max": 2.74809193611145,
"count": 2
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 989.6969696969697,
"min": 984.3529411764706,
"max": 989.6969696969697,
"count": 2
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32660.0,
"min": 16734.0,
"max": 32660.0,
"count": 2
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9300364165595083,
"min": -0.9300364165595083,
"max": -0.8676471105393242,
"count": 2
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -30.691201746463776,
"min": -30.691201746463776,
"max": -14.75000087916851,
"count": 2
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9300364165595083,
"min": -0.9300364165595083,
"max": -0.8676471105393242,
"count": 2
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -30.691201746463776,
"min": -30.691201746463776,
"max": -14.75000087916851,
"count": 2
},
"Pyramids.Policy.RndReward.mean": {
"value": 2.1055288766369675,
"min": 2.1055288766369675,
"max": 7.645688619683771,
"count": 2
},
"Pyramids.Policy.RndReward.sum": {
"value": 69.48245292901993,
"min": 69.48245292901993,
"max": 129.9767065346241,
"count": 2
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 2
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688150937",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688151120"
},
"total": 182.8636017839999,
"count": 1,
"self": 0.7435107369992693,
"children": {
"run_training.setup": {
"total": 0.039019954000650614,
"count": 1,
"self": 0.039019954000650614
},
"TrainerController.start_learning": {
"total": 182.081071093,
"count": 1,
"self": 0.1162451240406881,
"children": {
"TrainerController._reset_env": {
"total": 4.069811587000004,
"count": 1,
"self": 4.069811587000004
},
"TrainerController.advance": {
"total": 177.5492924739592,
"count": 5566,
"self": 0.12853013498715882,
"children": {
"env_step": {
"total": 118.38686343394966,
"count": 5566,
"self": 107.95289480596693,
"children": {
"SubprocessEnvManager._take_step": {
"total": 10.364543268992747,
"count": 5566,
"self": 0.44195050896360044,
"children": {
"TorchPolicy.evaluate": {
"total": 9.922592760029147,
"count": 5558,
"self": 9.922592760029147
}
}
},
"workers": {
"total": 0.06942535898997448,
"count": 5565,
"self": 0.0,
"children": {
"worker_root": {
"total": 181.37807718399108,
"count": 5565,
"is_parallel": true,
"self": 84.12783427796694,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002023523999923782,
"count": 1,
"is_parallel": true,
"self": 0.0006658510019406094,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013576729979831725,
"count": 8,
"is_parallel": true,
"self": 0.0013576729979831725
}
}
},
"UnityEnvironment.step": {
"total": 0.05249964300037391,
"count": 1,
"is_parallel": true,
"self": 0.000581213000259595,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006046050002623815,
"count": 1,
"is_parallel": true,
"self": 0.0006046050002623815
},
"communicator.exchange": {
"total": 0.0492361719998371,
"count": 1,
"is_parallel": true,
"self": 0.0492361719998371
},
"steps_from_proto": {
"total": 0.002077653000014834,
"count": 1,
"is_parallel": true,
"self": 0.00046204699992813403,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016156060000867,
"count": 8,
"is_parallel": true,
"self": 0.0016156060000867
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 97.25024290602414,
"count": 5564,
"is_parallel": true,
"self": 2.9804475440232636,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.1862967480310544,
"count": 5564,
"is_parallel": true,
"self": 2.1862967480310544
},
"communicator.exchange": {
"total": 82.57591988700824,
"count": 5564,
"is_parallel": true,
"self": 82.57591988700824
},
"steps_from_proto": {
"total": 9.507578726961583,
"count": 5564,
"is_parallel": true,
"self": 1.882863248942158,
"children": {
"_process_rank_one_or_two_observation": {
"total": 7.624715478019425,
"count": 44512,
"is_parallel": true,
"self": 7.624715478019425
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 59.0338989050224,
"count": 5565,
"self": 0.15028441703907447,
"children": {
"process_trajectory": {
"total": 9.995568091980203,
"count": 5565,
"self": 9.995568091980203
},
"_update_policy": {
"total": 48.88804639600312,
"count": 29,
"self": 31.33825101398179,
"children": {
"TorchPPOOptimizer.update": {
"total": 17.54979538202133,
"count": 2001,
"self": 17.54979538202133
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5670002539991401e-06,
"count": 1,
"self": 1.5670002539991401e-06
},
"TrainerController._save_models": {
"total": 0.34572034099983284,
"count": 1,
"self": 0.0018535259996497189,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3438668150001831,
"count": 1,
"self": 0.3438668150001831
}
}
}
}
}
}
}