{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 8842,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0565482922415743,
      "grad_norm": 6.099003791809082,
      "learning_rate": 4.7172585387921285e-05,
      "loss": 5.2788,
      "step": 500
    },
    {
      "epoch": 0.1130965844831486,
      "grad_norm": 4.438333988189697,
      "learning_rate": 4.434517077584257e-05,
      "loss": 4.8237,
      "step": 1000
    },
    {
      "epoch": 0.16964487672472292,
      "grad_norm": 5.307681083679199,
      "learning_rate": 4.1517756163763856e-05,
      "loss": 4.737,
      "step": 1500
    },
    {
      "epoch": 0.2261931689662972,
      "grad_norm": 5.611371040344238,
      "learning_rate": 3.869034155168514e-05,
      "loss": 4.6918,
      "step": 2000
    },
    {
      "epoch": 0.2827414612078715,
      "grad_norm": 5.551920413970947,
      "learning_rate": 3.586292693960643e-05,
      "loss": 4.6628,
      "step": 2500
    },
    {
      "epoch": 0.33928975344944584,
      "grad_norm": 5.272426605224609,
      "learning_rate": 3.303551232752771e-05,
      "loss": 4.6474,
      "step": 3000
    },
    {
      "epoch": 0.39583804569102016,
      "grad_norm": 5.137388229370117,
      "learning_rate": 3.0208097715448996e-05,
      "loss": 4.6272,
      "step": 3500
    },
    {
      "epoch": 0.4523863379325944,
      "grad_norm": 6.018069744110107,
      "learning_rate": 2.7380683103370282e-05,
      "loss": 4.6025,
      "step": 4000
    },
    {
      "epoch": 0.5089346301741687,
      "grad_norm": 5.987523555755615,
      "learning_rate": 2.4553268491291564e-05,
      "loss": 4.5791,
      "step": 4500
    },
    {
      "epoch": 0.565482922415743,
      "grad_norm": 5.519354820251465,
      "learning_rate": 2.172585387921285e-05,
      "loss": 4.5844,
      "step": 5000
    },
    {
      "epoch": 0.6220312146573174,
      "grad_norm": 5.536919593811035,
      "learning_rate": 1.8898439267134132e-05,
      "loss": 4.5647,
      "step": 5500
    },
    {
      "epoch": 0.6785795068988917,
      "grad_norm": 5.017457962036133,
      "learning_rate": 1.6071024655055418e-05,
      "loss": 4.5574,
      "step": 6000
    },
    {
      "epoch": 0.735127799140466,
      "grad_norm": 5.590214252471924,
      "learning_rate": 1.3243610042976704e-05,
      "loss": 4.548,
      "step": 6500
    },
    {
      "epoch": 0.7916760913820403,
      "grad_norm": 4.2981181144714355,
      "learning_rate": 1.0416195430897988e-05,
      "loss": 4.5641,
      "step": 7000
    },
    {
      "epoch": 0.8482243836236145,
      "grad_norm": 5.723196983337402,
      "learning_rate": 7.588780818819271e-06,
      "loss": 4.5633,
      "step": 7500
    },
    {
      "epoch": 0.9047726758651888,
      "grad_norm": 5.700985431671143,
      "learning_rate": 4.761366206740556e-06,
      "loss": 4.5486,
      "step": 8000
    },
    {
      "epoch": 0.9613209681067632,
      "grad_norm": 5.034333229064941,
      "learning_rate": 1.9339515946618415e-06,
      "loss": 4.5307,
      "step": 8500
    }
  ],
  "logging_steps": 500,
  "max_steps": 8842,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3333874049690624e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}