{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9933774834437086,
  "eval_steps": 50,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06622516556291391,
      "grad_norm": 7.145169734954834,
      "learning_rate": 5e-05,
      "loss": 2.9784,
      "step": 5
    },
    {
      "epoch": 0.13245033112582782,
      "grad_norm": 2.2012343406677246,
      "learning_rate": 9.994504457428558e-05,
      "loss": 0.8648,
      "step": 10
    },
    {
      "epoch": 0.1986754966887417,
      "grad_norm": 5.701715469360352,
      "learning_rate": 9.8034259378842e-05,
      "loss": 0.677,
      "step": 15
    },
    {
      "epoch": 0.26490066225165565,
      "grad_norm": 0.7785886526107788,
      "learning_rate": 9.349531862043952e-05,
      "loss": 0.6025,
      "step": 20
    },
    {
      "epoch": 0.33112582781456956,
      "grad_norm": 0.4165729880332947,
      "learning_rate": 8.657656676318346e-05,
      "loss": 0.4445,
      "step": 25
    },
    {
      "epoch": 0.3973509933774834,
      "grad_norm": 1.1374096870422363,
      "learning_rate": 7.765655770625997e-05,
      "loss": 0.4018,
      "step": 30
    },
    {
      "epoch": 0.46357615894039733,
      "grad_norm": 0.6517850160598755,
      "learning_rate": 6.722334251421665e-05,
      "loss": 0.4919,
      "step": 35
    },
    {
      "epoch": 0.5298013245033113,
      "grad_norm": 0.3663589060306549,
      "learning_rate": 5.584776609860414e-05,
      "loss": 0.4198,
      "step": 40
    },
    {
      "epoch": 0.5960264900662252,
      "grad_norm": 0.34794583916664124,
      "learning_rate": 4.415223390139588e-05,
      "loss": 0.398,
      "step": 45
    },
    {
      "epoch": 0.6622516556291391,
      "grad_norm": 0.3982546031475067,
      "learning_rate": 3.277665748578336e-05,
      "loss": 0.4235,
      "step": 50
    },
    {
      "epoch": 0.7284768211920529,
      "grad_norm": 0.32591888308525085,
      "learning_rate": 2.234344229374003e-05,
      "loss": 0.4253,
      "step": 55
    },
    {
      "epoch": 0.7947019867549668,
      "grad_norm": 0.3768315017223358,
      "learning_rate": 1.3423433236816563e-05,
      "loss": 0.4094,
      "step": 60
    },
    {
      "epoch": 0.8609271523178808,
      "grad_norm": 0.4178403317928314,
      "learning_rate": 6.50468137956049e-06,
      "loss": 0.4191,
      "step": 65
    },
    {
      "epoch": 0.9271523178807947,
      "grad_norm": 0.3431091904640198,
      "learning_rate": 1.9657406211579966e-06,
      "loss": 0.4236,
      "step": 70
    },
    {
      "epoch": 0.9933774834437086,
      "grad_norm": 0.37446972727775574,
      "learning_rate": 5.4955425714431353e-08,
      "loss": 0.431,
      "step": 75
    },
    {
      "epoch": 0.9933774834437086,
      "step": 75,
      "total_flos": 1.0406942485905408e+16,
      "train_loss": 0.6540391174952189,
      "train_runtime": 272.6059,
      "train_samples_per_second": 1.108,
      "train_steps_per_second": 0.275
    }
  ],
  "logging_steps": 5,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0406942485905408e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}