{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "global_step": 27162,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 4.53765871126403e-05,
      "loss": 0.0,
      "step": 500
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.569372557204213e-05,
      "loss": 0.0,
      "step": 1000
    },
    {
      "epoch": 0.06,
      "learning_rate": 5.458333490960582e-06,
      "loss": 0.0,
      "step": 1500
    },
    {
      "epoch": 0.07,
      "learning_rate": 7.872625093610281e-07,
      "loss": 0.0,
      "step": 2000
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.5400873355048252e-05,
      "loss": 0.016,
      "step": 2500
    },
    {
      "epoch": 0.11,
      "learning_rate": 3.765988585097408e-05,
      "loss": 0.0,
      "step": 3000
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.9835698514572227e-05,
      "loss": 0.0,
      "step": 3500
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.223066054130568e-05,
      "loss": 0.0,
      "step": 4000
    },
    {
      "epoch": 0.17,
      "learning_rate": 2.0901944840911743e-05,
      "loss": 0.0,
      "step": 4500
    },
    {
      "epoch": 0.18,
      "learning_rate": 2.837200836582399e-06,
      "loss": 0.0,
      "step": 5000
    },
    {
      "epoch": 0.2,
      "learning_rate": 2.4244274297043595e-06,
      "loss": 0.0,
      "step": 5500
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.9992385621444293e-05,
      "loss": 0.0,
      "step": 6000
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.154875074327748e-05,
      "loss": 0.0,
      "step": 6500
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.9924557787092216e-05,
      "loss": 0.0,
      "step": 7000
    },
    {
      "epoch": 0.28,
      "learning_rate": 3.844874058578692e-05,
      "loss": 0.0,
      "step": 7500
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.6261425633560656e-05,
      "loss": 0.0,
      "step": 8000
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.034109580509643e-06,
      "loss": 0.0,
      "step": 8500
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.89486944725813e-06,
      "loss": 0.0,
      "step": 9000
    },
    {
      "epoch": 0.35,
      "learning_rate": 2.4768731754636805e-05,
      "loss": 0.0,
      "step": 9500
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.482679192606569e-05,
      "loss": 0.0,
      "step": 10000
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.9093437595197226e-05,
      "loss": 0.0,
      "step": 10500
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.417042004146505e-05,
      "loss": 0.0,
      "step": 11000
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.1943452127283138e-05,
      "loss": 0.0,
      "step": 11500
    },
    {
      "epoch": 0.44,
      "learning_rate": 1.1561283643137799e-07,
      "loss": 0.0,
      "step": 12000
    },
    {
      "epoch": 0.46,
      "learning_rate": 8.107403155902372e-06,
      "loss": 0.0,
      "step": 12500
    },
    {
      "epoch": 0.48,
      "learning_rate": 2.9553614129006553e-05,
      "loss": 0.0,
      "step": 13000
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.737301505322301e-05,
      "loss": 0.0,
      "step": 13500
    },
    {
      "epoch": 0.52,
      "learning_rate": 4.737301505322307e-05,
      "loss": 0.0,
      "step": 14000
    },
    {
      "epoch": 0.53,
      "learning_rate": 2.95536141290066e-05,
      "loss": 0.0,
      "step": 14500
    },
    {
      "epoch": 0.55,
      "learning_rate": 8.107403155902407e-06,
      "loss": 0.0,
      "step": 15000
    },
    {
      "epoch": 0.57,
      "learning_rate": 1.1561283643137244e-07,
      "loss": 0.0,
      "step": 15500
    },
    {
      "epoch": 0.59,
      "learning_rate": 1.1943452127283176e-05,
      "loss": 0.0,
      "step": 16000
    },
    {
      "epoch": 0.61,
      "learning_rate": 3.4170420041465084e-05,
      "loss": 0.0,
      "step": 16500
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.9093437595197205e-05,
      "loss": 0.0,
      "step": 17000
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.482679192606572e-05,
      "loss": 0.0,
      "step": 17500
    },
    {
      "epoch": 0.66,
      "learning_rate": 2.4768731754636944e-05,
      "loss": 0.0,
      "step": 18000
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.8948694472581606e-06,
      "loss": 0.0,
      "step": 18500
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.034109580509668e-06,
      "loss": 0.0,
      "step": 19000
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.626142563356061e-05,
      "loss": 0.0,
      "step": 19500
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.84487405857868e-05,
      "loss": 0.0,
      "step": 20000
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.9924557787092216e-05,
      "loss": 0.0,
      "step": 20500
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.1548750743277545e-05,
      "loss": 0.0,
      "step": 21000
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.9992385621444296e-05,
      "loss": 0.0,
      "step": 21500
    },
    {
      "epoch": 0.81,
      "learning_rate": 2.4244274297043235e-06,
      "loss": 0.0,
      "step": 22000
    },
    {
      "epoch": 0.83,
      "learning_rate": 2.8372008365823154e-06,
      "loss": 0.0,
      "step": 22500
    },
    {
      "epoch": 0.85,
      "learning_rate": 2.090194484091165e-05,
      "loss": 0.0,
      "step": 23000
    },
    {
      "epoch": 0.87,
      "learning_rate": 4.2230660541305694e-05,
      "loss": 0.0,
      "step": 23500
    },
    {
      "epoch": 0.88,
      "learning_rate": 4.983569851457224e-05,
      "loss": 0.0,
      "step": 24000
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.765988585097404e-05,
      "loss": 0.0,
      "step": 24500
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.54008733550483e-05,
      "loss": 0.0,
      "step": 25000
    },
    {
      "epoch": 0.94,
      "learning_rate": 7.872625093610641e-07,
      "loss": 0.0,
      "step": 25500
    },
    {
      "epoch": 0.96,
      "learning_rate": 5.4583334909605515e-06,
      "loss": 0.0,
      "step": 26000
    },
    {
      "epoch": 0.98,
      "learning_rate": 2.5693725572042e-05,
      "loss": 0.0,
      "step": 26500
    },
    {
      "epoch": 0.99,
      "learning_rate": 4.537658711264028e-05,
      "loss": 0.0,
      "step": 27000
    },
    {
      "epoch": 1.0,
      "step": 27162,
      "total_flos": 1.9963859836602778e+17,
      "train_loss": 0.00029512168328687095,
      "train_runtime": 29734.639,
      "train_samples_per_second": 3.654,
      "train_steps_per_second": 0.913
    }
  ],
  "max_steps": 27162,
  "num_train_epochs": 1,
  "total_flos": 1.9963859836602778e+17,
  "trial_name": null,
  "trial_params": null
}