{
  "best_global_step": 4360,
  "best_metric": 1.0,
  "best_model_checkpoint": "gcbert_ckpt/checkpoint-4360",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 4360,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022938410368161487,
      "grad_norm": 29.002798080444336,
      "learning_rate": 2.809633027522936e-06,
      "loss": 1.1878,
      "step": 100
    },
    {
      "epoch": 0.045876820736322974,
      "grad_norm": 68.14753723144531,
      "learning_rate": 5.676605504587156e-06,
      "loss": 0.6556,
      "step": 200
    },
    {
      "epoch": 0.06881523110448445,
      "grad_norm": 54.466773986816406,
      "learning_rate": 8.543577981651376e-06,
      "loss": 0.288,
      "step": 300
    },
    {
      "epoch": 0.09175364147264595,
      "grad_norm": 0.0029387406539171934,
      "learning_rate": 1.1410550458715597e-05,
      "loss": 0.0993,
      "step": 400
    },
    {
      "epoch": 0.11469205184080743,
      "grad_norm": 0.00012727684224955738,
      "learning_rate": 1.4277522935779817e-05,
      "loss": 0.0,
      "step": 500
    },
    {
      "epoch": 0.1376304622089689,
      "grad_norm": 0.0007764559122733772,
      "learning_rate": 1.714449541284404e-05,
      "loss": 0.0,
      "step": 600
    },
    {
      "epoch": 0.1605688725771304,
      "grad_norm": 7.87975950515829e-05,
      "learning_rate": 2.0011467889908257e-05,
      "loss": 0.0,
      "step": 700
    },
    {
      "epoch": 0.1835072829452919,
      "grad_norm": 0.0004955148906446993,
      "learning_rate": 2.2878440366972478e-05,
      "loss": 0.0,
      "step": 800
    },
    {
      "epoch": 0.2064456933134534,
      "grad_norm": 0.00011743771756300703,
      "learning_rate": 2.57454128440367e-05,
      "loss": 0.0,
      "step": 900
    },
    {
      "epoch": 0.22938410368161485,
      "grad_norm": 0.0005010354216210544,
      "learning_rate": 2.861238532110092e-05,
      "loss": 0.0,
      "step": 1000
    },
    {
      "epoch": 0.25232251404977635,
      "grad_norm": 0.00021343142725527287,
      "learning_rate": 3.147935779816514e-05,
      "loss": 0.0,
      "step": 1100
    },
    {
      "epoch": 0.2752609244179378,
      "grad_norm": 0.00011905051360372454,
      "learning_rate": 3.434633027522936e-05,
      "loss": 0.0,
      "step": 1200
    },
    {
      "epoch": 0.29819933478609933,
      "grad_norm": 0.00038216536631807685,
      "learning_rate": 3.7213302752293576e-05,
      "loss": 0.0,
      "step": 1300
    },
    {
      "epoch": 0.3211377451542608,
      "grad_norm": 4.422829078976065e-05,
      "learning_rate": 4.00802752293578e-05,
      "loss": 0.0,
      "step": 1400
    },
    {
      "epoch": 0.3440761555224223,
      "grad_norm": 0.0002208059886470437,
      "learning_rate": 4.2947247706422025e-05,
      "loss": 0.0,
      "step": 1500
    },
    {
      "epoch": 0.3670145658905838,
      "grad_norm": 5.1115232054144144e-05,
      "learning_rate": 4.581422018348624e-05,
      "loss": 0.0,
      "step": 1600
    },
    {
      "epoch": 0.38995297625874525,
      "grad_norm": 7.042491051834077e-05,
      "learning_rate": 4.868119266055046e-05,
      "loss": 0.0,
      "step": 1700
    },
    {
      "epoch": 0.4128913866269068,
      "grad_norm": 3.786469824262895e-05,
      "learning_rate": 4.982798165137615e-05,
      "loss": 0.0,
      "step": 1800
    },
    {
      "epoch": 0.43582979699506824,
      "grad_norm": 1.468727168685291e-05,
      "learning_rate": 4.950942915392457e-05,
      "loss": 0.0,
      "step": 1900
    },
    {
      "epoch": 0.4587682073632297,
      "grad_norm": 3.0084795071161352e-05,
      "learning_rate": 4.919087665647299e-05,
      "loss": 0.0,
      "step": 2000
    },
    {
      "epoch": 0.48170661773139123,
      "grad_norm": 2.5331471988465637e-05,
      "learning_rate": 4.887232415902141e-05,
      "loss": 0.0,
      "step": 2100
    },
    {
      "epoch": 0.5046450280995527,
      "grad_norm": 2.618041617097333e-05,
      "learning_rate": 4.855377166156983e-05,
      "loss": 0.0,
      "step": 2200
    },
    {
      "epoch": 0.5275834384677142,
      "grad_norm": 2.4747334464336745e-05,
      "learning_rate": 4.823521916411825e-05,
      "loss": 0.0,
      "step": 2300
    },
    {
      "epoch": 0.5505218488358756,
      "grad_norm": 1.4968503819545731e-05,
      "learning_rate": 4.791666666666667e-05,
      "loss": 0.0,
      "step": 2400
    },
    {
      "epoch": 0.5734602592040372,
      "grad_norm": 0.00038964845589362085,
      "learning_rate": 4.759811416921509e-05,
      "loss": 0.0,
      "step": 2500
    },
    {
      "epoch": 0.5963986695721987,
      "grad_norm": 2.8935370210092515e-05,
      "learning_rate": 4.7279561671763515e-05,
      "loss": 0.0,
      "step": 2600
    },
    {
      "epoch": 0.6193370799403601,
      "grad_norm": 8.388165042561013e-06,
      "learning_rate": 4.6961009174311924e-05,
      "loss": 0.0,
      "step": 2700
    },
    {
      "epoch": 0.6422754903085216,
      "grad_norm": 1.768140464264434e-05,
      "learning_rate": 4.664245667686035e-05,
      "loss": 0.0,
      "step": 2800
    },
    {
      "epoch": 0.6652139006766831,
      "grad_norm": 1.36974013003055e-05,
      "learning_rate": 4.6323904179408764e-05,
      "loss": 0.0,
      "step": 2900
    },
    {
      "epoch": 0.6881523110448446,
      "grad_norm": 9.414131454832386e-06,
      "learning_rate": 4.600535168195719e-05,
      "loss": 0.0,
      "step": 3000
    },
    {
      "epoch": 0.7110907214130061,
      "grad_norm": 4.622045162250288e-05,
      "learning_rate": 4.568679918450561e-05,
      "loss": 0.0,
      "step": 3100
    },
    {
      "epoch": 0.7340291317811676,
      "grad_norm": 2.692650923563633e-05,
      "learning_rate": 4.536824668705403e-05,
      "loss": 0.0,
      "step": 3200
    },
    {
      "epoch": 0.756967542149329,
      "grad_norm": 1.0936350008705631e-05,
      "learning_rate": 4.504969418960245e-05,
      "loss": 0.0,
      "step": 3300
    },
    {
      "epoch": 0.7799059525174905,
      "grad_norm": 5.966558546788292e-06,
      "learning_rate": 4.473114169215087e-05,
      "loss": 0.0,
      "step": 3400
    },
    {
      "epoch": 0.802844362885652,
      "grad_norm": 9.507278264209162e-06,
      "learning_rate": 4.441258919469929e-05,
      "loss": 0.0,
      "step": 3500
    },
    {
      "epoch": 0.8257827732538136,
      "grad_norm": 1.0006630873249378e-05,
      "learning_rate": 4.409403669724771e-05,
      "loss": 0.0,
      "step": 3600
    },
    {
      "epoch": 0.848721183621975,
      "grad_norm": 5.595240509137511e-06,
      "learning_rate": 4.377548419979613e-05,
      "loss": 0.0,
      "step": 3700
    },
    {
      "epoch": 0.8716595939901365,
      "grad_norm": 8.78509945323458e-06,
      "learning_rate": 4.3456931702344547e-05,
      "loss": 0.0,
      "step": 3800
    },
    {
      "epoch": 0.894598004358298,
      "grad_norm": 1.4489376553683542e-05,
      "learning_rate": 4.313837920489297e-05,
      "loss": 0.0,
      "step": 3900
    },
    {
      "epoch": 0.9175364147264594,
      "grad_norm": 0.0006358507671393454,
      "learning_rate": 4.281982670744139e-05,
      "loss": 0.0,
      "step": 4000
    },
    {
      "epoch": 0.940474825094621,
      "grad_norm": 2.2141068257042207e-05,
      "learning_rate": 4.250127420998981e-05,
      "loss": 0.0,
      "step": 4100
    },
    {
      "epoch": 0.9634132354627825,
      "grad_norm": 2.3798056645318866e-06,
      "learning_rate": 4.2182721712538226e-05,
      "loss": 0.0,
      "step": 4200
    },
    {
      "epoch": 0.9863516458309439,
      "grad_norm": 2.4955927074188367e-05,
      "learning_rate": 4.186416921508665e-05,
      "loss": 0.0,
      "step": 4300
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 1.0,
      "eval_f1": 1.0,
      "eval_loss": 3.4182854463660206e-11,
      "eval_precision": 1.0,
      "eval_recall": 1.0,
      "eval_runtime": 342.7946,
      "eval_samples_per_second": 50.867,
      "eval_steps_per_second": 3.18,
      "step": 4360
    }
  ],
  "logging_steps": 100,
  "max_steps": 17440,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}