{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 30.0,
  "eval_steps": 500,
  "global_step": 120,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.25,
      "learning_rate": 2.493036906838148e-05,
      "loss": 1.6733,
      "step": 5
    },
    {
      "epoch": 2.5,
      "learning_rate": 2.4648822424126846e-05,
      "loss": 1.5341,
      "step": 10
    },
    {
      "epoch": 3.75,
      "learning_rate": 2.4155902867554447e-05,
      "loss": 1.5014,
      "step": 15
    },
    {
      "epoch": 5.0,
      "learning_rate": 2.3460186525516237e-05,
      "loss": 1.3282,
      "step": 20
    },
    {
      "epoch": 6.25,
      "learning_rate": 2.2573777911867937e-05,
      "loss": 1.2251,
      "step": 25
    },
    {
      "epoch": 7.5,
      "learning_rate": 2.1512099325465313e-05,
      "loss": 1.1738,
      "step": 30
    },
    {
      "epoch": 8.75,
      "learning_rate": 2.029362252323417e-05,
      "loss": 1.0707,
      "step": 35
    },
    {
      "epoch": 10.0,
      "learning_rate": 1.89395473368362e-05,
      "loss": 0.9962,
      "step": 40
    },
    {
      "epoch": 11.25,
      "learning_rate": 1.747343282456151e-05,
      "loss": 0.8998,
      "step": 45
    },
    {
      "epoch": 12.5,
      "learning_rate": 1.5920787375901038e-05,
      "loss": 0.8471,
      "step": 50
    },
    {
      "epoch": 13.75,
      "learning_rate": 1.4308624900419257e-05,
      "loss": 0.7282,
      "step": 55
    },
    {
      "epoch": 15.0,
      "learning_rate": 1.2664994822634489e-05,
      "loss": 0.6841,
      "step": 60
    },
    {
      "epoch": 16.25,
      "learning_rate": 1.1018494060353718e-05,
      "loss": 0.6456,
      "step": 65
    },
    {
      "epoch": 17.5,
      "learning_rate": 9.397769477372204e-06,
      "loss": 0.6238,
      "step": 70
    },
    {
      "epoch": 18.75,
      "learning_rate": 7.831019467181105e-06,
      "loss": 0.5803,
      "step": 75
    },
    {
      "epoch": 20.0,
      "learning_rate": 6.345503339445723e-06,
      "loss": 0.5901,
      "step": 80
    },
    {
      "epoch": 21.25,
      "learning_rate": 4.967067045259296e-06,
      "loss": 0.5654,
      "step": 85
    },
    {
      "epoch": 22.5,
      "learning_rate": 3.7196934929050185e-06,
      "loss": 0.5594,
      "step": 90
    },
    {
      "epoch": 23.75,
      "learning_rate": 2.625085278017651e-06,
      "loss": 0.5406,
      "step": 95
    },
    {
      "epoch": 25.0,
      "learning_rate": 1.7022870880697955e-06,
      "loss": 0.5569,
      "step": 100
    },
    {
      "epoch": 26.25,
      "learning_rate": 9.673543508289639e-07,
      "loss": 0.5415,
      "step": 105
    },
    {
      "epoch": 27.5,
      "learning_rate": 4.3307389184951696e-07,
      "loss": 0.5503,
      "step": 110
    },
    {
      "epoch": 28.75,
      "learning_rate": 1.087414611775403e-07,
      "loss": 0.5384,
      "step": 115
    },
    {
      "epoch": 30.0,
      "learning_rate": 0.0,
      "loss": 0.5373,
      "step": 120
    },
    {
      "epoch": 30.0,
      "step": 120,
      "total_flos": 2.36562943574016e+16,
      "train_loss": 0.8538130601247151,
      "train_runtime": 2104.0236,
      "train_samples_per_second": 1.141,
      "train_steps_per_second": 0.057
    }
  ],
  "logging_steps": 5,
  "max_steps": 120,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 30,
  "save_steps": 100,
  "total_flos": 2.36562943574016e+16,
  "train_batch_size": 5,
  "trial_name": null,
  "trial_params": null
}