{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 7092,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.021150592216582064,
      "grad_norm": 8.332942008972168,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 11.4949,
      "step": 100
    },
    {
      "epoch": 0.04230118443316413,
      "grad_norm": 6.6360764503479,
      "learning_rate": 3.266666666666667e-05,
      "loss": 9.8459,
      "step": 200
    },
    {
      "epoch": 0.06345177664974619,
      "grad_norm": 8.05850601196289,
      "learning_rate": 4.933333333333334e-05,
      "loss": 8.5587,
      "step": 300
    },
    {
      "epoch": 0.08460236886632826,
      "grad_norm": 8.233019828796387,
      "learning_rate": 4.9654278305963706e-05,
      "loss": 7.976,
      "step": 400
    },
    {
      "epoch": 0.10575296108291032,
      "grad_norm": 7.412795066833496,
      "learning_rate": 4.929415154134255e-05,
      "loss": 7.5136,
      "step": 500
    },
    {
      "epoch": 0.12690355329949238,
      "grad_norm": 8.026376724243164,
      "learning_rate": 4.893402477672141e-05,
      "loss": 7.2849,
      "step": 600
    },
    {
      "epoch": 0.14805414551607446,
      "grad_norm": 13.290921211242676,
      "learning_rate": 4.857389801210026e-05,
      "loss": 7.1981,
      "step": 700
    },
    {
      "epoch": 0.1692047377326565,
      "grad_norm": 9.613779067993164,
      "learning_rate": 4.821377124747912e-05,
      "loss": 6.9762,
      "step": 800
    },
    {
      "epoch": 0.19035532994923857,
      "grad_norm": 10.849081993103027,
      "learning_rate": 4.785364448285797e-05,
      "loss": 6.986,
      "step": 900
    },
    {
      "epoch": 0.21150592216582065,
      "grad_norm": 9.172639846801758,
      "learning_rate": 4.7493517718236826e-05,
      "loss": 6.9641,
      "step": 1000
    },
    {
      "epoch": 0.2326565143824027,
      "grad_norm": 13.746626853942871,
      "learning_rate": 4.713339095361568e-05,
      "loss": 6.7526,
      "step": 1100
    },
    {
      "epoch": 0.25380710659898476,
      "grad_norm": 9.715788841247559,
      "learning_rate": 4.677326418899453e-05,
      "loss": 6.7382,
      "step": 1200
    },
    {
      "epoch": 0.2749576988155668,
      "grad_norm": 8.96385383605957,
      "learning_rate": 4.641313742437338e-05,
      "loss": 6.6046,
      "step": 1300
    },
    {
      "epoch": 0.2961082910321489,
      "grad_norm": 7.643514156341553,
      "learning_rate": 4.605301065975223e-05,
      "loss": 6.4929,
      "step": 1400
    },
    {
      "epoch": 0.31725888324873097,
      "grad_norm": 8.421730041503906,
      "learning_rate": 4.569288389513109e-05,
      "loss": 6.4243,
      "step": 1500
    },
    {
      "epoch": 0.338409475465313,
      "grad_norm": 21.085466384887695,
      "learning_rate": 4.533275713050994e-05,
      "loss": 6.3454,
      "step": 1600
    },
    {
      "epoch": 0.3595600676818951,
      "grad_norm": 9.470216751098633,
      "learning_rate": 4.4972630365888796e-05,
      "loss": 6.2785,
      "step": 1700
    },
    {
      "epoch": 0.38071065989847713,
      "grad_norm": 9.164347648620605,
      "learning_rate": 4.461250360126765e-05,
      "loss": 6.4042,
      "step": 1800
    },
    {
      "epoch": 0.40186125211505924,
      "grad_norm": 8.708257675170898,
      "learning_rate": 4.4252376836646505e-05,
      "loss": 6.1933,
      "step": 1900
    },
    {
      "epoch": 0.4230118443316413,
      "grad_norm": 10.921855926513672,
      "learning_rate": 4.3892250072025356e-05,
      "loss": 5.9611,
      "step": 2000
    },
    {
      "epoch": 0.8883248730964467,
      "grad_norm": 6.086740493774414,
      "learning_rate": 3.677856301531213e-05,
      "loss": 5.9772,
      "step": 2100
    },
    {
      "epoch": 0.9306260575296108,
      "grad_norm": 5.576613426208496,
      "learning_rate": 3.604240282685513e-05,
      "loss": 5.8948,
      "step": 2200
    },
    {
      "epoch": 0.9729272419627749,
      "grad_norm": 6.084731578826904,
      "learning_rate": 3.530624263839812e-05,
      "loss": 5.8356,
      "step": 2300
    },
    {
      "epoch": 1.015228426395939,
      "grad_norm": 6.4574198722839355,
      "learning_rate": 3.457008244994111e-05,
      "loss": 5.8376,
      "step": 2400
    },
    {
      "epoch": 1.0575296108291032,
      "grad_norm": 6.7739691734313965,
      "learning_rate": 3.38339222614841e-05,
      "loss": 5.7059,
      "step": 2500
    },
    {
      "epoch": 1.0998307952622675,
      "grad_norm": 6.258169174194336,
      "learning_rate": 3.30977620730271e-05,
      "loss": 5.7126,
      "step": 2600
    },
    {
      "epoch": 1.1421319796954315,
      "grad_norm": 6.656664848327637,
      "learning_rate": 3.236160188457009e-05,
      "loss": 5.6466,
      "step": 2700
    },
    {
      "epoch": 1.1844331641285957,
      "grad_norm": 9.155718803405762,
      "learning_rate": 3.162544169611308e-05,
      "loss": 5.5292,
      "step": 2800
    },
    {
      "epoch": 1.2267343485617597,
      "grad_norm": 6.5999345779418945,
      "learning_rate": 3.088928150765606e-05,
      "loss": 5.4244,
      "step": 2900
    },
    {
      "epoch": 1.2690355329949239,
      "grad_norm": 7.881080150604248,
      "learning_rate": 3.0153121319199055e-05,
      "loss": 5.4133,
      "step": 3000
    },
    {
      "epoch": 1.3113367174280879,
      "grad_norm": 6.419029235839844,
      "learning_rate": 2.941696113074205e-05,
      "loss": 5.42,
      "step": 3100
    },
    {
      "epoch": 1.353637901861252,
      "grad_norm": 7.26897668838501,
      "learning_rate": 2.868080094228504e-05,
      "loss": 5.3998,
      "step": 3200
    },
    {
      "epoch": 1.3959390862944163,
      "grad_norm": 7.908596515655518,
      "learning_rate": 2.7944640753828033e-05,
      "loss": 5.3602,
      "step": 3300
    },
    {
      "epoch": 1.4382402707275803,
      "grad_norm": 6.74896240234375,
      "learning_rate": 2.7208480565371023e-05,
      "loss": 5.341,
      "step": 3400
    },
    {
      "epoch": 1.4805414551607445,
      "grad_norm": 6.694516658782959,
      "learning_rate": 2.6472320376914017e-05,
      "loss": 5.3151,
      "step": 3500
    },
    {
      "epoch": 1.5228426395939088,
      "grad_norm": 8.561161994934082,
      "learning_rate": 2.5736160188457007e-05,
      "loss": 5.242,
      "step": 3600
    },
    {
      "epoch": 1.5651438240270727,
      "grad_norm": 6.73771333694458,
      "learning_rate": 2.5e-05,
      "loss": 5.3066,
      "step": 3700
    },
    {
      "epoch": 1.6074450084602367,
      "grad_norm": 5.139662265777588,
      "learning_rate": 2.4263839811542995e-05,
      "loss": 5.2239,
      "step": 3800
    },
    {
      "epoch": 1.649746192893401,
      "grad_norm": 9.075642585754395,
      "learning_rate": 2.3527679623085985e-05,
      "loss": 5.2235,
      "step": 3900
    },
    {
      "epoch": 1.6920473773265652,
      "grad_norm": 7.239570140838623,
      "learning_rate": 2.279151943462898e-05,
      "loss": 5.1589,
      "step": 4000
    },
    {
      "epoch": 1.7343485617597292,
      "grad_norm": 6.387134552001953,
      "learning_rate": 2.205535924617197e-05,
      "loss": 5.0827,
      "step": 4100
    },
    {
      "epoch": 1.7766497461928934,
      "grad_norm": 6.9777913093566895,
      "learning_rate": 2.131919905771496e-05,
      "loss": 5.0286,
      "step": 4200
    },
    {
      "epoch": 1.8189509306260576,
      "grad_norm": 6.586590766906738,
      "learning_rate": 2.058303886925795e-05,
      "loss": 5.0558,
      "step": 4300
    },
    {
      "epoch": 1.8612521150592216,
      "grad_norm": 8.922995567321777,
      "learning_rate": 1.9846878680800944e-05,
      "loss": 5.0571,
      "step": 4400
    },
    {
      "epoch": 1.9035532994923858,
      "grad_norm": 15.648545265197754,
      "learning_rate": 1.9110718492343934e-05,
      "loss": 5.0074,
      "step": 4500
    },
    {
      "epoch": 1.94585448392555,
      "grad_norm": 9.67465877532959,
      "learning_rate": 1.8374558303886928e-05,
      "loss": 4.9584,
      "step": 4600
    },
    {
      "epoch": 1.988155668358714,
      "grad_norm": 8.49885082244873,
      "learning_rate": 1.7638398115429918e-05,
      "loss": 4.9303,
      "step": 4700
    },
    {
      "epoch": 2.030456852791878,
      "grad_norm": 7.5250396728515625,
      "learning_rate": 1.690223792697291e-05,
      "loss": 4.8864,
      "step": 4800
    },
    {
      "epoch": 2.0727580372250425,
      "grad_norm": 6.662505626678467,
      "learning_rate": 1.6166077738515902e-05,
      "loss": 4.8241,
      "step": 4900
    },
    {
      "epoch": 2.1150592216582065,
      "grad_norm": 13.685736656188965,
      "learning_rate": 1.5429917550058892e-05,
      "loss": 4.8014,
      "step": 5000
    },
    {
      "epoch": 2.1573604060913705,
      "grad_norm": 6.445871353149414,
      "learning_rate": 1.4693757361601884e-05,
      "loss": 4.8768,
      "step": 5100
    },
    {
      "epoch": 2.199661590524535,
      "grad_norm": 6.524866580963135,
      "learning_rate": 1.3957597173144876e-05,
      "loss": 4.8566,
      "step": 5200
    },
    {
      "epoch": 2.241962774957699,
      "grad_norm": 8.739165306091309,
      "learning_rate": 1.3221436984687868e-05,
      "loss": 4.7029,
      "step": 5300
    },
    {
      "epoch": 2.284263959390863,
      "grad_norm": 10.373656272888184,
      "learning_rate": 1.248527679623086e-05,
      "loss": 4.7585,
      "step": 5400
    },
    {
      "epoch": 2.326565143824027,
      "grad_norm": 9.050317764282227,
      "learning_rate": 1.1749116607773852e-05,
      "loss": 4.7496,
      "step": 5500
    },
    {
      "epoch": 2.3688663282571913,
      "grad_norm": 7.840747356414795,
      "learning_rate": 1.1012956419316844e-05,
      "loss": 4.7871,
      "step": 5600
    },
    {
      "epoch": 2.4111675126903553,
      "grad_norm": 7.422430515289307,
      "learning_rate": 1.0276796230859835e-05,
      "loss": 4.7949,
      "step": 5700
    },
    {
      "epoch": 2.4534686971235193,
      "grad_norm": 6.53894567489624,
      "learning_rate": 9.540636042402827e-06,
      "loss": 4.7628,
      "step": 5800
    },
    {
      "epoch": 2.495769881556684,
      "grad_norm": 10.329708099365234,
      "learning_rate": 8.804475853945819e-06,
      "loss": 4.6616,
      "step": 5900
    },
    {
      "epoch": 2.5380710659898478,
      "grad_norm": 8.34196949005127,
      "learning_rate": 8.06831566548881e-06,
      "loss": 4.8091,
      "step": 6000
    },
    {
      "epoch": 2.5803722504230118,
      "grad_norm": 6.8171706199646,
      "learning_rate": 7.332155477031802e-06,
      "loss": 4.6833,
      "step": 6100
    },
    {
      "epoch": 2.6226734348561758,
      "grad_norm": 6.502234935760498,
      "learning_rate": 6.595995288574794e-06,
      "loss": 4.579,
      "step": 6200
    },
    {
      "epoch": 2.66497461928934,
      "grad_norm": 9.155757904052734,
      "learning_rate": 5.859835100117786e-06,
      "loss": 4.6477,
      "step": 6300
    },
    {
      "epoch": 2.707275803722504,
      "grad_norm": 6.970887660980225,
      "learning_rate": 5.123674911660778e-06,
      "loss": 4.6742,
      "step": 6400
    },
    {
      "epoch": 2.749576988155668,
      "grad_norm": 11.221705436706543,
      "learning_rate": 4.387514723203769e-06,
      "loss": 4.6385,
      "step": 6500
    },
    {
      "epoch": 2.7918781725888326,
      "grad_norm": 10.325379371643066,
      "learning_rate": 3.6513545347467615e-06,
      "loss": 4.7299,
      "step": 6600
    },
    {
      "epoch": 2.8341793570219966,
      "grad_norm": 6.159130573272705,
      "learning_rate": 2.9151943462897527e-06,
      "loss": 4.6718,
      "step": 6700
    },
    {
      "epoch": 2.8764805414551606,
      "grad_norm": 7.569215774536133,
      "learning_rate": 2.1790341578327443e-06,
      "loss": 4.6594,
      "step": 6800
    },
    {
      "epoch": 2.9187817258883246,
      "grad_norm": 7.764076232910156,
      "learning_rate": 1.4428739693757363e-06,
      "loss": 4.6538,
      "step": 6900
    },
    {
      "epoch": 2.961082910321489,
      "grad_norm": 7.675158977508545,
      "learning_rate": 7.067137809187279e-07,
      "loss": 4.6145,
      "step": 7000
    }
  ],
  "logging_steps": 100,
  "max_steps": 7092,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9190341574201800.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}