{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9933774834437086,
  "eval_steps": 50,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06622516556291391,
      "grad_norm": 12.822922706604004,
      "learning_rate": 5e-05,
      "loss": 2.1299,
      "step": 5
    },
    {
      "epoch": 0.13245033112582782,
      "grad_norm": 2.5671944618225098,
      "learning_rate": 9.994504457428558e-05,
      "loss": 0.5159,
      "step": 10
    },
    {
      "epoch": 0.1986754966887417,
      "grad_norm": 1.499721884727478,
      "learning_rate": 9.8034259378842e-05,
      "loss": 0.4836,
      "step": 15
    },
    {
      "epoch": 0.26490066225165565,
      "grad_norm": 1.0522427558898926,
      "learning_rate": 9.349531862043952e-05,
      "loss": 0.4594,
      "step": 20
    },
    {
      "epoch": 0.33112582781456956,
      "grad_norm": 0.566138744354248,
      "learning_rate": 8.657656676318346e-05,
      "loss": 0.428,
      "step": 25
    },
    {
      "epoch": 0.3973509933774834,
      "grad_norm": 0.8398123979568481,
      "learning_rate": 7.765655770625997e-05,
      "loss": 0.3387,
      "step": 30
    },
    {
      "epoch": 0.46357615894039733,
      "grad_norm": 0.9298285245895386,
      "learning_rate": 6.722334251421665e-05,
      "loss": 0.4144,
      "step": 35
    },
    {
      "epoch": 0.5298013245033113,
      "grad_norm": 2.466564178466797,
      "learning_rate": 5.584776609860414e-05,
      "loss": 0.3108,
      "step": 40
    },
    {
      "epoch": 0.5960264900662252,
      "grad_norm": 1.3170844316482544,
      "learning_rate": 4.415223390139588e-05,
      "loss": 0.3296,
      "step": 45
    },
    {
      "epoch": 0.6622516556291391,
      "grad_norm": 4.341668605804443,
      "learning_rate": 3.277665748578336e-05,
      "loss": 0.3273,
      "step": 50
    },
    {
      "epoch": 0.7284768211920529,
      "grad_norm": 5.271987438201904,
      "learning_rate": 2.234344229374003e-05,
      "loss": 0.3517,
      "step": 55
    },
    {
      "epoch": 0.7947019867549668,
      "grad_norm": 2.4411556720733643,
      "learning_rate": 1.3423433236816563e-05,
      "loss": 0.3306,
      "step": 60
    },
    {
      "epoch": 0.8609271523178808,
      "grad_norm": 0.7362133264541626,
      "learning_rate": 6.50468137956049e-06,
      "loss": 0.3337,
      "step": 65
    },
    {
      "epoch": 0.9271523178807947,
      "grad_norm": 0.9481602907180786,
      "learning_rate": 1.9657406211579966e-06,
      "loss": 0.2901,
      "step": 70
    },
    {
      "epoch": 0.9933774834437086,
      "grad_norm": 0.6073076725006104,
      "learning_rate": 5.4955425714431353e-08,
      "loss": 0.2127,
      "step": 75
    },
    {
      "epoch": 0.9933774834437086,
      "step": 75,
      "total_flos": 1.0406942485905408e+16,
      "train_loss": 0.48374417622884114,
      "train_runtime": 146.0386,
      "train_samples_per_second": 2.068,
      "train_steps_per_second": 0.514
    }
  ],
  "logging_steps": 5,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0406942485905408e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}