{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 958,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0020876826722338203,
      "grad_norm": 0.8805100452450126,
      "learning_rate": 1.3793103448275863e-05,
      "loss": 3.4375,
      "step": 1
    },
    {
      "epoch": 0.0041753653444676405,
      "grad_norm": 0.8962056177448029,
      "learning_rate": 2.7586206896551727e-05,
      "loss": 3.4531,
      "step": 2
    },
    {
      "epoch": 0.006263048016701462,
      "grad_norm": 0.8975533488683494,
      "learning_rate": 4.1379310344827587e-05,
      "loss": 3.375,
      "step": 3
    },
    {
      "epoch": 0.008350730688935281,
      "grad_norm": 0.8562154080719621,
      "learning_rate": 5.517241379310345e-05,
      "loss": 3.0312,
      "step": 4
    },
    {
      "epoch": 0.010438413361169102,
      "grad_norm": 0.7722844092130382,
      "learning_rate": 6.896551724137931e-05,
      "loss": 2.0938,
      "step": 5
    },
    {
      "epoch": 0.012526096033402923,
      "grad_norm": 0.5469423252625637,
      "learning_rate": 8.275862068965517e-05,
      "loss": 1.5391,
      "step": 6
    },
    {
      "epoch": 0.014613778705636743,
      "grad_norm": 0.269527877565377,
      "learning_rate": 9.655172413793105e-05,
      "loss": 1.3281,
      "step": 7
    },
    {
      "epoch": 0.016701461377870562,
      "grad_norm": 0.2129561647095634,
      "learning_rate": 0.0001103448275862069,
      "loss": 1.0547,
      "step": 8
    },
    {
      "epoch": 0.018789144050104383,
      "grad_norm": 0.209174835903919,
      "learning_rate": 0.00012413793103448277,
      "loss": 0.8672,
      "step": 9
    },
    {
      "epoch": 0.020876826722338204,
      "grad_norm": 0.16723166688980518,
      "learning_rate": 0.00013793103448275863,
      "loss": 0.7461,
      "step": 10
    },
    {
      "epoch": 0.022964509394572025,
      "grad_norm": 0.13523071310549678,
      "learning_rate": 0.00015172413793103449,
      "loss": 0.75,
      "step": 11
    },
    {
      "epoch": 0.025052192066805846,
      "grad_norm": 0.13783786374844356,
      "learning_rate": 0.00016551724137931035,
      "loss": 0.7578,
      "step": 12
    },
    {
      "epoch": 0.027139874739039668,
      "grad_norm": 0.11159117473743674,
      "learning_rate": 0.0001793103448275862,
      "loss": 0.6914,
      "step": 13
    },
    {
      "epoch": 0.029227557411273485,
      "grad_norm": 0.10486826556036052,
      "learning_rate": 0.0001931034482758621,
      "loss": 0.6484,
      "step": 14
    },
    {
      "epoch": 0.031315240083507306,
      "grad_norm": 0.1012538060823961,
      "learning_rate": 0.00020689655172413795,
      "loss": 0.6562,
      "step": 15
    },
    {
      "epoch": 0.033402922755741124,
      "grad_norm": 0.10310591282634196,
      "learning_rate": 0.0002206896551724138,
      "loss": 0.6602,
      "step": 16
    },
    {
      "epoch": 0.03549060542797495,
      "grad_norm": 0.07838991965173675,
      "learning_rate": 0.00023448275862068965,
      "loss": 0.5469,
      "step": 17
    },
    {
      "epoch": 0.037578288100208766,
      "grad_norm": 0.07804122247550843,
      "learning_rate": 0.00024827586206896553,
      "loss": 0.543,
      "step": 18
    },
    {
      "epoch": 0.03966597077244259,
      "grad_norm": 0.08565464666867292,
      "learning_rate": 0.00026206896551724137,
      "loss": 0.5625,
      "step": 19
    },
    {
      "epoch": 0.04175365344467641,
      "grad_norm": 0.07879295016594862,
      "learning_rate": 0.00027586206896551725,
      "loss": 0.4551,
      "step": 20
    },
    {
      "epoch": 0.04384133611691023,
      "grad_norm": 0.07549627256843491,
      "learning_rate": 0.00028965517241379314,
      "loss": 0.4863,
      "step": 21
    },
    {
      "epoch": 0.04592901878914405,
      "grad_norm": 0.07816061161931821,
      "learning_rate": 0.00030344827586206897,
      "loss": 0.459,
      "step": 22
    },
    {
      "epoch": 0.04801670146137787,
      "grad_norm": 0.07796400588496231,
      "learning_rate": 0.00031724137931034486,
      "loss": 0.5156,
      "step": 23
    },
    {
      "epoch": 0.05010438413361169,
      "grad_norm": 0.06699113066481945,
      "learning_rate": 0.0003310344827586207,
      "loss": 0.5508,
      "step": 24
    },
    {
      "epoch": 0.05219206680584551,
      "grad_norm": 0.07261576114010894,
      "learning_rate": 0.0003448275862068965,
      "loss": 0.5469,
      "step": 25
    },
    {
      "epoch": 0.054279749478079335,
      "grad_norm": 0.07464032217874282,
      "learning_rate": 0.0003586206896551724,
      "loss": 0.4805,
      "step": 26
    },
    {
      "epoch": 0.05636743215031315,
      "grad_norm": 0.06913377612077472,
      "learning_rate": 0.0003724137931034483,
      "loss": 0.5117,
      "step": 27
    },
    {
      "epoch": 0.05845511482254697,
      "grad_norm": 0.061903733751750956,
      "learning_rate": 0.0003862068965517242,
      "loss": 0.4844,
      "step": 28
    },
    {
      "epoch": 0.060542797494780795,
      "grad_norm": 0.06908123825325603,
      "learning_rate": 0.0004,
      "loss": 0.4434,
      "step": 29
    },
    {
      "epoch": 0.06263048016701461,
      "grad_norm": 0.05444037852658766,
      "learning_rate": 0.00039999885641643965,
      "loss": 0.4336,
      "step": 30
    },
    {
      "epoch": 0.06471816283924843,
      "grad_norm": 0.06329218054783699,
      "learning_rate": 0.00039999542567883625,
      "loss": 0.5078,
      "step": 31
    },
    {
      "epoch": 0.06680584551148225,
      "grad_norm": 0.05617311135086726,
      "learning_rate": 0.0003999897078264233,
      "loss": 0.3418,
      "step": 32
    },
    {
      "epoch": 0.06889352818371608,
      "grad_norm": 0.0631466065976358,
      "learning_rate": 0.00039998170292458913,
      "loss": 0.4512,
      "step": 33
    },
    {
      "epoch": 0.0709812108559499,
      "grad_norm": 0.05939776609982831,
      "learning_rate": 0.0003999714110648765,
      "loss": 0.4727,
      "step": 34
    },
    {
      "epoch": 0.07306889352818371,
      "grad_norm": 0.07015310870913878,
      "learning_rate": 0.0003999588323649815,
      "loss": 0.4316,
      "step": 35
    },
    {
      "epoch": 0.07515657620041753,
      "grad_norm": 0.05541574151493628,
      "learning_rate": 0.00039994396696875197,
      "loss": 0.3691,
      "step": 36
    },
    {
      "epoch": 0.07724425887265135,
      "grad_norm": 0.04781737127842468,
      "learning_rate": 0.0003999268150461862,
      "loss": 0.3711,
      "step": 37
    },
    {
      "epoch": 0.07933194154488518,
      "grad_norm": 0.052081481582784105,
      "learning_rate": 0.00039990737679343073,
      "loss": 0.3633,
      "step": 38
    },
    {
      "epoch": 0.081419624217119,
      "grad_norm": 0.04824424451620144,
      "learning_rate": 0.0003998856524327782,
      "loss": 0.3867,
      "step": 39
    },
    {
      "epoch": 0.08350730688935282,
      "grad_norm": 0.048331445388276904,
      "learning_rate": 0.0003998616422126649,
      "loss": 0.4199,
      "step": 40
    },
    {
      "epoch": 0.08559498956158663,
      "grad_norm": 0.06796823383551855,
      "learning_rate": 0.00039983534640766766,
      "loss": 0.4629,
      "step": 41
    },
    {
      "epoch": 0.08768267223382047,
      "grad_norm": 0.04720979824092969,
      "learning_rate": 0.0003998067653185011,
      "loss": 0.3594,
      "step": 42
    },
    {
      "epoch": 0.08977035490605428,
      "grad_norm": 0.05374730136315078,
      "learning_rate": 0.00039977589927201373,
      "loss": 0.4414,
      "step": 43
    },
    {
      "epoch": 0.0918580375782881,
      "grad_norm": 0.050894908072710146,
      "learning_rate": 0.0003997427486211847,
      "loss": 0.4043,
      "step": 44
    },
    {
      "epoch": 0.09394572025052192,
      "grad_norm": 0.04383506588833311,
      "learning_rate": 0.0003997073137451194,
      "loss": 0.3281,
      "step": 45
    },
    {
      "epoch": 0.09603340292275574,
      "grad_norm": 0.055686594769476794,
      "learning_rate": 0.00039966959504904515,
      "loss": 0.4062,
      "step": 46
    },
    {
      "epoch": 0.09812108559498957,
      "grad_norm": 0.05244693300757331,
      "learning_rate": 0.00039962959296430676,
      "loss": 0.3652,
      "step": 47
    },
    {
      "epoch": 0.10020876826722339,
      "grad_norm": 0.05926741920735249,
      "learning_rate": 0.0003995873079483616,
      "loss": 0.4844,
      "step": 48
    },
    {
      "epoch": 0.1022964509394572,
      "grad_norm": 0.04551719284385615,
      "learning_rate": 0.00039954274048477405,
      "loss": 0.3223,
      "step": 49
    },
    {
      "epoch": 0.10438413361169102,
      "grad_norm": 0.04385484814869444,
      "learning_rate": 0.0003994958910832104,
      "loss": 0.4004,
      "step": 50
    },
    {
      "epoch": 0.10647181628392484,
      "grad_norm": 0.04780597607586747,
      "learning_rate": 0.0003994467602794327,
      "loss": 0.3125,
      "step": 51
    },
    {
      "epoch": 0.10855949895615867,
      "grad_norm": 0.08970784709982177,
      "learning_rate": 0.00039939534863529265,
      "loss": 0.3359,
      "step": 52
    },
    {
      "epoch": 0.11064718162839249,
      "grad_norm": 0.055961833035488864,
      "learning_rate": 0.00039934165673872536,
      "loss": 0.4023,
      "step": 53
    },
    {
      "epoch": 0.1127348643006263,
      "grad_norm": 0.051294633986921585,
      "learning_rate": 0.00039928568520374263,
      "loss": 0.3848,
      "step": 54
    },
    {
      "epoch": 0.11482254697286012,
      "grad_norm": 0.05439699876212045,
      "learning_rate": 0.0003992274346704257,
      "loss": 0.4785,
      "step": 55
    },
    {
      "epoch": 0.11691022964509394,
      "grad_norm": 0.04990096696869215,
      "learning_rate": 0.00039916690580491806,
      "loss": 0.377,
      "step": 56
    },
    {
      "epoch": 0.11899791231732777,
      "grad_norm": 0.04880913842131259,
      "learning_rate": 0.00039910409929941787,
      "loss": 0.3809,
      "step": 57
    },
    {
      "epoch": 0.12108559498956159,
      "grad_norm": 0.048110591694635114,
      "learning_rate": 0.0003990390158721701,
      "loss": 0.375,
      "step": 58
    },
    {
      "epoch": 0.12317327766179541,
      "grad_norm": 0.05425382131888079,
      "learning_rate": 0.0003989716562674579,
      "loss": 0.4297,
      "step": 59
    },
    {
      "epoch": 0.12526096033402923,
      "grad_norm": 0.04140427495861459,
      "learning_rate": 0.0003989020212555948,
      "loss": 0.3164,
      "step": 60
    },
    {
      "epoch": 0.12734864300626306,
      "grad_norm": 0.055270560900777276,
      "learning_rate": 0.00039883011163291545,
      "loss": 0.418,
      "step": 61
    },
    {
      "epoch": 0.12943632567849686,
      "grad_norm": 0.04546442173482473,
      "learning_rate": 0.0003987559282217662,
      "loss": 0.3125,
      "step": 62
    },
    {
      "epoch": 0.1315240083507307,
      "grad_norm": 0.05835477454974449,
      "learning_rate": 0.00039867947187049654,
      "loss": 0.4199,
      "step": 63
    },
    {
      "epoch": 0.1336116910229645,
      "grad_norm": 0.06512707863408194,
      "learning_rate": 0.00039860074345344875,
      "loss": 0.4238,
      "step": 64
    },
    {
      "epoch": 0.13569937369519833,
      "grad_norm": 0.04896390157005381,
      "learning_rate": 0.0003985197438709479,
      "loss": 0.2598,
      "step": 65
    },
    {
      "epoch": 0.13778705636743216,
      "grad_norm": 0.048613271081049014,
      "learning_rate": 0.00039843647404929207,
      "loss": 0.3633,
      "step": 66
    },
    {
      "epoch": 0.13987473903966596,
      "grad_norm": 0.050233778334495785,
      "learning_rate": 0.0003983509349407412,
      "loss": 0.3926,
      "step": 67
    },
    {
      "epoch": 0.1419624217118998,
      "grad_norm": 0.051442044526522436,
      "learning_rate": 0.0003982631275235064,
      "loss": 0.4531,
      "step": 68
    },
    {
      "epoch": 0.1440501043841336,
      "grad_norm": 0.04624056126157752,
      "learning_rate": 0.00039817305280173893,
      "loss": 0.373,
      "step": 69
    },
    {
      "epoch": 0.14613778705636743,
      "grad_norm": 0.04350984771291989,
      "learning_rate": 0.0003980807118055185,
      "loss": 0.377,
      "step": 70
    },
    {
      "epoch": 0.14822546972860126,
      "grad_norm": 0.04395725697208839,
      "learning_rate": 0.0003979861055908415,
      "loss": 0.3633,
      "step": 71
    },
    {
      "epoch": 0.15031315240083507,
      "grad_norm": 0.04706297130636851,
      "learning_rate": 0.0003978892352396091,
      "loss": 0.4199,
      "step": 72
    },
    {
      "epoch": 0.1524008350730689,
      "grad_norm": 0.044497773696428565,
      "learning_rate": 0.00039779010185961473,
      "loss": 0.4707,
      "step": 73
    },
    {
      "epoch": 0.1544885177453027,
      "grad_norm": 0.0411155822363719,
      "learning_rate": 0.0003976887065845314,
      "loss": 0.3262,
      "step": 74
    },
    {
      "epoch": 0.15657620041753653,
      "grad_norm": 0.043812847498547546,
      "learning_rate": 0.0003975850505738988,
      "loss": 0.3359,
      "step": 75
    },
    {
      "epoch": 0.15866388308977036,
      "grad_norm": 0.0554940446413714,
      "learning_rate": 0.0003974791350131101,
      "loss": 0.3867,
      "step": 76
    },
    {
      "epoch": 0.16075156576200417,
      "grad_norm": 0.04898321080003146,
      "learning_rate": 0.00039737096111339804,
      "loss": 0.4648,
      "step": 77
    },
    {
      "epoch": 0.162839248434238,
      "grad_norm": 0.04703455829390756,
      "learning_rate": 0.00039726053011182176,
      "loss": 0.3945,
      "step": 78
    },
    {
      "epoch": 0.1649269311064718,
      "grad_norm": 0.048291736323549404,
      "learning_rate": 0.00039714784327125196,
      "loss": 0.3555,
      "step": 79
    },
    {
      "epoch": 0.16701461377870563,
      "grad_norm": 0.048158996103635526,
      "learning_rate": 0.00039703290188035687,
      "loss": 0.3848,
      "step": 80
    },
    {
      "epoch": 0.16910229645093947,
      "grad_norm": 0.041070665599436396,
      "learning_rate": 0.00039691570725358726,
      "loss": 0.3672,
      "step": 81
    },
    {
      "epoch": 0.17118997912317327,
      "grad_norm": 0.04514739709583794,
      "learning_rate": 0.0003967962607311617,
      "loss": 0.416,
      "step": 82
    },
    {
      "epoch": 0.1732776617954071,
      "grad_norm": 0.05018154319088866,
      "learning_rate": 0.0003966745636790509,
      "loss": 0.4219,
      "step": 83
    },
    {
      "epoch": 0.17536534446764093,
      "grad_norm": 0.04615841699661321,
      "learning_rate": 0.00039655061748896243,
      "loss": 0.3594,
      "step": 84
    },
    {
      "epoch": 0.17745302713987474,
      "grad_norm": 0.04770173243618234,
      "learning_rate": 0.00039642442357832447,
      "loss": 0.3789,
      "step": 85
    },
    {
      "epoch": 0.17954070981210857,
      "grad_norm": 0.0529492248920703,
      "learning_rate": 0.00039629598339026984,
      "loss": 0.4102,
      "step": 86
    },
    {
      "epoch": 0.18162839248434237,
      "grad_norm": 0.04108643805111346,
      "learning_rate": 0.00039616529839361946,
      "loss": 0.3691,
      "step": 87
    },
    {
      "epoch": 0.1837160751565762,
      "grad_norm": 0.04230844261748932,
      "learning_rate": 0.00039603237008286544,
      "loss": 0.3672,
      "step": 88
    },
    {
      "epoch": 0.18580375782881003,
      "grad_norm": 0.057536412046600406,
      "learning_rate": 0.00039589719997815413,
      "loss": 0.3574,
      "step": 89
    },
    {
      "epoch": 0.18789144050104384,
      "grad_norm": 0.03943187889859641,
      "learning_rate": 0.0003957597896252685,
      "loss": 0.332,
      "step": 90
    },
    {
      "epoch": 0.18997912317327767,
      "grad_norm": 0.04024451449784526,
      "learning_rate": 0.000395620140595611,
      "loss": 0.3438,
      "step": 91
    },
    {
      "epoch": 0.19206680584551147,
      "grad_norm": 0.04428176826646225,
      "learning_rate": 0.0003954782544861847,
      "loss": 0.3574,
      "step": 92
    },
    {
      "epoch": 0.1941544885177453,
      "grad_norm": 0.045692937481720604,
      "learning_rate": 0.00039533413291957596,
      "loss": 0.3574,
      "step": 93
    },
    {
      "epoch": 0.19624217118997914,
      "grad_norm": 0.04688683747789243,
      "learning_rate": 0.00039518777754393535,
      "loss": 0.3867,
      "step": 94
    },
    {
      "epoch": 0.19832985386221294,
      "grad_norm": 0.05536707488449957,
      "learning_rate": 0.0003950391900329588,
      "loss": 0.3223,
      "step": 95
    },
    {
      "epoch": 0.20041753653444677,
      "grad_norm": 0.04606231136571311,
      "learning_rate": 0.0003948883720858687,
      "loss": 0.3535,
      "step": 96
    },
    {
      "epoch": 0.20250521920668058,
      "grad_norm": 0.04099133317121465,
      "learning_rate": 0.00039473532542739435,
      "loss": 0.3066,
      "step": 97
    },
    {
      "epoch": 0.2045929018789144,
      "grad_norm": 0.055252229005040635,
      "learning_rate": 0.0003945800518077521,
      "loss": 0.3809,
      "step": 98
    },
    {
      "epoch": 0.20668058455114824,
      "grad_norm": 0.04490759353014913,
      "learning_rate": 0.00039442255300262553,
      "loss": 0.373,
      "step": 99
    },
    {
      "epoch": 0.20876826722338204,
      "grad_norm": 0.04889359614788617,
      "learning_rate": 0.0003942628308131452,
      "loss": 0.377,
      "step": 100
    },
    {
      "epoch": 0.21085594989561587,
      "grad_norm": 0.04331101172153666,
      "learning_rate": 0.0003941008870658677,
      "loss": 0.3926,
      "step": 101
    },
    {
      "epoch": 0.21294363256784968,
      "grad_norm": 0.039340556954521134,
      "learning_rate": 0.00039393672361275506,
      "loss": 0.3379,
      "step": 102
    },
    {
      "epoch": 0.2150313152400835,
      "grad_norm": 0.047692728640419506,
      "learning_rate": 0.0003937703423311537,
      "loss": 0.375,
      "step": 103
    },
    {
      "epoch": 0.21711899791231734,
      "grad_norm": 0.04193899617132502,
      "learning_rate": 0.00039360174512377246,
      "loss": 0.3203,
      "step": 104
    },
    {
      "epoch": 0.21920668058455114,
      "grad_norm": 0.04450971768791051,
      "learning_rate": 0.0003934309339186614,
      "loss": 0.3789,
      "step": 105
    },
    {
      "epoch": 0.22129436325678498,
      "grad_norm": 0.04610401641341007,
      "learning_rate": 0.00039325791066918925,
      "loss": 0.4004,
      "step": 106
    },
    {
      "epoch": 0.22338204592901878,
      "grad_norm": 0.0394316515696973,
      "learning_rate": 0.00039308267735402156,
      "loss": 0.2734,
      "step": 107
    },
    {
      "epoch": 0.2254697286012526,
      "grad_norm": 0.04552211598084851,
      "learning_rate": 0.00039290523597709765,
      "loss": 0.3555,
      "step": 108
    },
    {
      "epoch": 0.22755741127348644,
      "grad_norm": 0.044355074180719295,
      "learning_rate": 0.00039272558856760797,
      "loss": 0.3418,
      "step": 109
    },
    {
      "epoch": 0.22964509394572025,
      "grad_norm": 0.04664869616431499,
      "learning_rate": 0.0003925437371799708,
      "loss": 0.3301,
      "step": 110
    },
    {
      "epoch": 0.23173277661795408,
      "grad_norm": 0.04421582409863553,
      "learning_rate": 0.0003923596838938086,
      "loss": 0.3242,
      "step": 111
    },
    {
      "epoch": 0.23382045929018788,
      "grad_norm": 0.04914905461390972,
      "learning_rate": 0.00039217343081392463,
      "loss": 0.3027,
      "step": 112
    },
    {
      "epoch": 0.2359081419624217,
      "grad_norm": 0.04801403896872833,
      "learning_rate": 0.00039198498007027836,
      "loss": 0.377,
      "step": 113
    },
    {
      "epoch": 0.23799582463465555,
      "grad_norm": 0.039909379565673524,
      "learning_rate": 0.0003917943338179616,
      "loss": 0.2949,
      "step": 114
    },
    {
      "epoch": 0.24008350730688935,
      "grad_norm": 0.04562132710784955,
      "learning_rate": 0.00039160149423717353,
      "loss": 0.3828,
      "step": 115
    },
    {
      "epoch": 0.24217118997912318,
      "grad_norm": 0.04218187984586478,
      "learning_rate": 0.00039140646353319584,
      "loss": 0.3379,
      "step": 116
    },
    {
      "epoch": 0.24425887265135698,
      "grad_norm": 0.13995497079449562,
      "learning_rate": 0.0003912092439363677,
      "loss": 0.3887,
      "step": 117
    },
    {
      "epoch": 0.24634655532359082,
      "grad_norm": 0.03956892044992931,
      "learning_rate": 0.00039100983770205995,
      "loss": 0.3027,
      "step": 118
    },
    {
      "epoch": 0.24843423799582465,
      "grad_norm": 0.043965744196170775,
      "learning_rate": 0.00039080824711064946,
      "loss": 0.3223,
      "step": 119
    },
    {
      "epoch": 0.25052192066805845,
      "grad_norm": 0.044644460638659476,
      "learning_rate": 0.00039060447446749314,
      "loss": 0.3496,
      "step": 120
    },
    {
      "epoch": 0.25260960334029225,
      "grad_norm": 0.05275646603548191,
      "learning_rate": 0.00039039852210290147,
      "loss": 0.3438,
      "step": 121
    },
    {
      "epoch": 0.2546972860125261,
      "grad_norm": 0.04064451365674439,
      "learning_rate": 0.00039019039237211175,
      "loss": 0.3359,
      "step": 122
    },
    {
      "epoch": 0.2567849686847599,
      "grad_norm": 0.048323141931080164,
      "learning_rate": 0.00038998008765526136,
      "loss": 0.377,
      "step": 123
    },
    {
      "epoch": 0.2588726513569937,
      "grad_norm": 0.03725290930480903,
      "learning_rate": 0.00038976761035736064,
      "loss": 0.2988,
      "step": 124
    },
    {
      "epoch": 0.2609603340292276,
      "grad_norm": 0.04052181522164268,
      "learning_rate": 0.00038955296290826483,
      "loss": 0.3203,
      "step": 125
    },
    {
      "epoch": 0.2630480167014614,
      "grad_norm": 0.04516920645500193,
      "learning_rate": 0.00038933614776264695,
      "loss": 0.3496,
      "step": 126
    },
    {
      "epoch": 0.2651356993736952,
      "grad_norm": 0.04887108820007405,
      "learning_rate": 0.00038911716739996945,
      "loss": 0.2734,
      "step": 127
    },
    {
      "epoch": 0.267223382045929,
      "grad_norm": 0.04662438292740599,
      "learning_rate": 0.0003888960243244556,
      "loss": 0.2949,
      "step": 128
    },
    {
      "epoch": 0.26931106471816285,
      "grad_norm": 0.0462101738931956,
      "learning_rate": 0.0003886727210650613,
      "loss": 0.3711,
      "step": 129
    },
    {
      "epoch": 0.27139874739039666,
      "grad_norm": 0.045514397288015096,
      "learning_rate": 0.00038844726017544606,
      "loss": 0.3945,
      "step": 130
    },
    {
      "epoch": 0.27348643006263046,
      "grad_norm": 0.04564176989369504,
      "learning_rate": 0.00038821964423394335,
      "loss": 0.3262,
      "step": 131
    },
    {
      "epoch": 0.2755741127348643,
      "grad_norm": 0.0480634271209225,
      "learning_rate": 0.00038798987584353175,
      "loss": 0.4297,
      "step": 132
    },
    {
      "epoch": 0.2776617954070981,
      "grad_norm": 0.04064367156377542,
      "learning_rate": 0.0003877579576318048,
      "loss": 0.3066,
      "step": 133
    },
    {
      "epoch": 0.2797494780793319,
      "grad_norm": 0.06187073759126634,
      "learning_rate": 0.000387523892250941,
      "loss": 0.3828,
      "step": 134
    },
    {
      "epoch": 0.2818371607515658,
      "grad_norm": 0.047235774434653544,
      "learning_rate": 0.0003872876823776737,
      "loss": 0.4551,
      "step": 135
    },
    {
      "epoch": 0.2839248434237996,
      "grad_norm": 0.04613794323687757,
      "learning_rate": 0.00038704933071326004,
      "loss": 0.3555,
      "step": 136
    },
    {
      "epoch": 0.2860125260960334,
      "grad_norm": 0.04123661418763717,
      "learning_rate": 0.00038680883998345046,
      "loss": 0.3848,
      "step": 137
    },
    {
      "epoch": 0.2881002087682672,
      "grad_norm": 0.03990534224519547,
      "learning_rate": 0.0003865662129384575,
      "loss": 0.3223,
      "step": 138
    },
    {
      "epoch": 0.29018789144050106,
      "grad_norm": 0.03964108882883799,
      "learning_rate": 0.00038632145235292407,
      "loss": 0.3145,
      "step": 139
    },
    {
      "epoch": 0.29227557411273486,
      "grad_norm": 0.04771985690919652,
      "learning_rate": 0.00038607456102589217,
      "loss": 0.3555,
      "step": 140
    },
    {
      "epoch": 0.29436325678496866,
      "grad_norm": 0.041459959268485676,
      "learning_rate": 0.0003858255417807701,
      "loss": 0.3203,
      "step": 141
    },
    {
      "epoch": 0.2964509394572025,
      "grad_norm": 0.04488781487797441,
      "learning_rate": 0.0003855743974653014,
      "loss": 0.3516,
      "step": 142
    },
    {
      "epoch": 0.2985386221294363,
      "grad_norm": 0.045595447329589846,
      "learning_rate": 0.00038532113095153086,
      "loss": 0.4121,
      "step": 143
    },
    {
      "epoch": 0.30062630480167013,
      "grad_norm": 0.051789411309846184,
      "learning_rate": 0.0003850657451357729,
      "loss": 0.3945,
      "step": 144
    },
    {
      "epoch": 0.302713987473904,
      "grad_norm": 0.04655813617947128,
      "learning_rate": 0.00038480824293857756,
      "loss": 0.377,
      "step": 145
    },
    {
      "epoch": 0.3048016701461378,
      "grad_norm": 0.07648712725066147,
      "learning_rate": 0.00038454862730469773,
      "loss": 0.3438,
      "step": 146
    },
    {
      "epoch": 0.3068893528183716,
      "grad_norm": 0.05310195833383878,
      "learning_rate": 0.0003842869012030551,
      "loss": 0.3594,
      "step": 147
    },
    {
      "epoch": 0.3089770354906054,
      "grad_norm": 0.04391887584861711,
      "learning_rate": 0.00038402306762670636,
      "loss": 0.3555,
      "step": 148
    },
    {
      "epoch": 0.31106471816283926,
      "grad_norm": 0.038985625260836355,
      "learning_rate": 0.000383757129592809,
      "loss": 0.2969,
      "step": 149
    },
    {
      "epoch": 0.31315240083507306,
      "grad_norm": 0.050635143690619895,
      "learning_rate": 0.00038348909014258647,
      "loss": 0.3398,
      "step": 150
    },
    {
      "epoch": 0.31524008350730687,
      "grad_norm": 0.04475955558328409,
      "learning_rate": 0.00038321895234129394,
      "loss": 0.3594,
      "step": 151
    },
    {
      "epoch": 0.3173277661795407,
      "grad_norm": 0.046705792421880006,
      "learning_rate": 0.0003829467192781829,
      "loss": 0.3906,
      "step": 152
    },
    {
      "epoch": 0.31941544885177453,
      "grad_norm": 0.04135295822439203,
      "learning_rate": 0.00038267239406646595,
      "loss": 0.3203,
      "step": 153
    },
    {
      "epoch": 0.32150313152400833,
      "grad_norm": 0.03936648551856702,
      "learning_rate": 0.000382395979843281,
      "loss": 0.3086,
      "step": 154
    },
    {
      "epoch": 0.3235908141962422,
      "grad_norm": 0.04411342571947294,
      "learning_rate": 0.0003821174797696558,
      "loss": 0.3145,
      "step": 155
    },
    {
      "epoch": 0.325678496868476,
      "grad_norm": 0.0440989009123292,
      "learning_rate": 0.0003818368970304713,
      "loss": 0.3398,
      "step": 156
    },
    {
      "epoch": 0.3277661795407098,
      "grad_norm": 0.04623084733338215,
      "learning_rate": 0.00038155423483442565,
      "loss": 0.373,
      "step": 157
    },
    {
      "epoch": 0.3298538622129436,
      "grad_norm": 0.03851250195995776,
      "learning_rate": 0.00038126949641399725,
      "loss": 0.2773,
      "step": 158
    },
    {
      "epoch": 0.33194154488517746,
      "grad_norm": 0.04376944212115029,
      "learning_rate": 0.0003809826850254078,
      "loss": 0.3613,
      "step": 159
    },
    {
      "epoch": 0.33402922755741127,
      "grad_norm": 0.044952970087986426,
      "learning_rate": 0.00038069380394858526,
      "loss": 0.2734,
      "step": 160
    },
    {
      "epoch": 0.33611691022964507,
      "grad_norm": 0.04338817418637471,
      "learning_rate": 0.0003804028564871262,
      "loss": 0.334,
      "step": 161
    },
    {
      "epoch": 0.33820459290187893,
      "grad_norm": 0.04540609013390802,
      "learning_rate": 0.00038010984596825774,
      "loss": 0.3086,
      "step": 162
    },
    {
      "epoch": 0.34029227557411273,
      "grad_norm": 0.040675218084428574,
      "learning_rate": 0.00037981477574280015,
      "loss": 0.3105,
      "step": 163
    },
    {
      "epoch": 0.34237995824634654,
      "grad_norm": 0.04628740772476471,
      "learning_rate": 0.000379517649185128,
      "loss": 0.375,
      "step": 164
    },
    {
      "epoch": 0.3444676409185804,
      "grad_norm": 0.045320126800110626,
      "learning_rate": 0.0003792184696931318,
      "loss": 0.3926,
      "step": 165
    },
    {
      "epoch": 0.3465553235908142,
      "grad_norm": 0.047079880323708634,
      "learning_rate": 0.00037891724068817896,
      "loss": 0.3613,
      "step": 166
    },
    {
      "epoch": 0.348643006263048,
      "grad_norm": 0.042006341910680284,
      "learning_rate": 0.0003786139656150749,
      "loss": 0.3848,
      "step": 167
    },
    {
      "epoch": 0.35073068893528186,
      "grad_norm": 0.042457959635259945,
      "learning_rate": 0.0003783086479420235,
      "loss": 0.3594,
      "step": 168
    },
    {
      "epoch": 0.35281837160751567,
      "grad_norm": 0.04729445116359038,
      "learning_rate": 0.0003780012911605875,
      "loss": 0.4023,
      "step": 169
    },
    {
      "epoch": 0.35490605427974947,
      "grad_norm": 0.04490557924422057,
      "learning_rate": 0.00037769189878564843,
      "loss": 0.3184,
      "step": 170
    },
    {
      "epoch": 0.3569937369519833,
      "grad_norm": 0.04659805592024877,
      "learning_rate": 0.0003773804743553667,
      "loss": 0.3613,
      "step": 171
    },
    {
      "epoch": 0.35908141962421714,
      "grad_norm": 0.0457558395135587,
      "learning_rate": 0.00037706702143114085,
      "loss": 0.3848,
      "step": 172
    },
    {
      "epoch": 0.36116910229645094,
      "grad_norm": 0.042225345661087835,
      "learning_rate": 0.0003767515435975671,
      "loss": 0.3652,
      "step": 173
    },
    {
      "epoch": 0.36325678496868474,
      "grad_norm": 0.03862535581909019,
      "learning_rate": 0.00037643404446239805,
      "loss": 0.2578,
      "step": 174
    },
    {
      "epoch": 0.3653444676409186,
      "grad_norm": 0.04511245438490728,
      "learning_rate": 0.0003761145276565015,
      "loss": 0.3789,
      "step": 175
    },
    {
      "epoch": 0.3674321503131524,
      "grad_norm": 0.04418007608895449,
      "learning_rate": 0.0003757929968338193,
      "loss": 0.375,
      "step": 176
    },
    {
      "epoch": 0.3695198329853862,
      "grad_norm": 0.04131570246472954,
      "learning_rate": 0.00037546945567132486,
      "loss": 0.3262,
      "step": 177
    },
    {
      "epoch": 0.37160751565762007,
      "grad_norm": 0.04432885957928793,
      "learning_rate": 0.0003751439078689819,
      "loss": 0.3105,
      "step": 178
    },
    {
      "epoch": 0.3736951983298539,
      "grad_norm": 0.04130281228669527,
      "learning_rate": 0.0003748163571497015,
      "loss": 0.3145,
      "step": 179
    },
    {
      "epoch": 0.3757828810020877,
      "grad_norm": 0.041235332477716484,
      "learning_rate": 0.00037448680725929987,
      "loss": 0.3086,
      "step": 180
    },
    {
      "epoch": 0.3778705636743215,
      "grad_norm": 0.046591187622845885,
      "learning_rate": 0.00037415526196645535,
      "loss": 0.377,
      "step": 181
    },
    {
      "epoch": 0.37995824634655534,
      "grad_norm": 0.043131270347982746,
      "learning_rate": 0.00037382172506266544,
      "loss": 0.3223,
      "step": 182
    },
    {
      "epoch": 0.38204592901878914,
      "grad_norm": 0.04113286864493325,
      "learning_rate": 0.00037348620036220325,
      "loss": 0.3379,
      "step": 183
    },
    {
      "epoch": 0.38413361169102295,
      "grad_norm": 0.045126082835651934,
      "learning_rate": 0.00037314869170207427,
      "loss": 0.3906,
      "step": 184
    },
    {
      "epoch": 0.3862212943632568,
      "grad_norm": 0.0468308767634132,
      "learning_rate": 0.00037280920294197186,
      "loss": 0.418,
      "step": 185
    },
    {
      "epoch": 0.3883089770354906,
      "grad_norm": 0.041909756539268275,
      "learning_rate": 0.0003724677379642338,
      "loss": 0.3008,
      "step": 186
    },
    {
      "epoch": 0.3903966597077244,
      "grad_norm": 0.039249830774930286,
      "learning_rate": 0.0003721243006737973,
      "loss": 0.293,
      "step": 187
    },
    {
      "epoch": 0.3924843423799583,
      "grad_norm": 0.043933299859950536,
      "learning_rate": 0.0003717788949981549,
      "loss": 0.3281,
      "step": 188
    },
    {
      "epoch": 0.3945720250521921,
      "grad_norm": 0.045457197762689416,
      "learning_rate": 0.0003714315248873091,
      "loss": 0.416,
      "step": 189
    },
    {
      "epoch": 0.3966597077244259,
      "grad_norm": 0.038856886092809004,
      "learning_rate": 0.00037108219431372734,
      "loss": 0.3242,
      "step": 190
    },
    {
      "epoch": 0.3987473903966597,
      "grad_norm": 0.03887481602609131,
      "learning_rate": 0.0003707309072722966,
      "loss": 0.3047,
      "step": 191
    },
    {
      "epoch": 0.40083507306889354,
      "grad_norm": 0.04338847469385484,
      "learning_rate": 0.0003703776677802778,
      "loss": 0.3945,
      "step": 192
    },
    {
      "epoch": 0.40292275574112735,
      "grad_norm": 0.04874209932196092,
      "learning_rate": 0.0003700224798772597,
      "loss": 0.4316,
      "step": 193
    },
    {
      "epoch": 0.40501043841336115,
      "grad_norm": 0.0409033493154823,
      "learning_rate": 0.00036966534762511276,
      "loss": 0.3301,
      "step": 194
    },
    {
      "epoch": 0.407098121085595,
      "grad_norm": 0.040056909386716535,
      "learning_rate": 0.0003693062751079427,
      "loss": 0.3887,
      "step": 195
    },
    {
      "epoch": 0.4091858037578288,
      "grad_norm": 0.03764639519212107,
      "learning_rate": 0.0003689452664320437,
      "loss": 0.3125,
      "step": 196
    },
    {
      "epoch": 0.4112734864300626,
      "grad_norm": 0.04012224378405559,
      "learning_rate": 0.00036858232572585173,
      "loss": 0.3574,
      "step": 197
    },
    {
      "epoch": 0.4133611691022965,
      "grad_norm": 0.04149192047957985,
      "learning_rate": 0.00036821745713989706,
      "loss": 0.3242,
      "step": 198
    },
    {
      "epoch": 0.4154488517745303,
      "grad_norm": 0.05017075921477889,
      "learning_rate": 0.00036785066484675683,
      "loss": 0.3652,
      "step": 199
    },
    {
      "epoch": 0.4175365344467641,
      "grad_norm": 0.04227941718590804,
      "learning_rate": 0.00036748195304100735,
      "loss": 0.3242,
      "step": 200
    },
    {
      "epoch": 0.4196242171189979,
      "grad_norm": 0.03768968002704521,
      "learning_rate": 0.00036711132593917635,
      "loss": 0.3105,
      "step": 201
    },
    {
      "epoch": 0.42171189979123175,
      "grad_norm": 0.04194019808640771,
      "learning_rate": 0.00036673878777969426,
      "loss": 0.2891,
      "step": 202
    },
    {
      "epoch": 0.42379958246346555,
      "grad_norm": 0.04542209099244845,
      "learning_rate": 0.00036636434282284633,
      "loss": 0.3359,
      "step": 203
    },
    {
      "epoch": 0.42588726513569936,
      "grad_norm": 0.03795072966054866,
      "learning_rate": 0.0003659879953507235,
      "loss": 0.2178,
      "step": 204
    },
    {
      "epoch": 0.4279749478079332,
      "grad_norm": 0.0676312462445889,
      "learning_rate": 0.0003656097496671737,
      "loss": 0.4004,
      "step": 205
    },
    {
      "epoch": 0.430062630480167,
      "grad_norm": 0.06595187551394353,
      "learning_rate": 0.00036522961009775217,
      "loss": 0.3359,
      "step": 206
    },
    {
      "epoch": 0.4321503131524008,
      "grad_norm": 0.048238875202028735,
      "learning_rate": 0.0003648475809896727,
      "loss": 0.375,
      "step": 207
    },
    {
      "epoch": 0.4342379958246347,
      "grad_norm": 0.04774287102162698,
      "learning_rate": 0.00036446366671175735,
      "loss": 0.3613,
      "step": 208
    },
    {
      "epoch": 0.4363256784968685,
      "grad_norm": 0.04447429752378728,
      "learning_rate": 0.0003640778716543866,
      "loss": 0.3594,
      "step": 209
    },
    {
      "epoch": 0.4384133611691023,
      "grad_norm": 0.04069940254145397,
      "learning_rate": 0.00036369020022944946,
      "loss": 0.3105,
      "step": 210
    },
    {
      "epoch": 0.4405010438413361,
      "grad_norm": 0.03646181755503713,
      "learning_rate": 0.0003633006568702924,
      "loss": 0.3047,
      "step": 211
    },
    {
      "epoch": 0.44258872651356995,
      "grad_norm": 0.041622368664595316,
      "learning_rate": 0.00036290924603166945,
      "loss": 0.3418,
      "step": 212
    },
    {
      "epoch": 0.44467640918580376,
      "grad_norm": 0.04687265549200718,
      "learning_rate": 0.00036251597218969045,
      "loss": 0.3926,
      "step": 213
    },
    {
      "epoch": 0.44676409185803756,
      "grad_norm": 0.05629699568400707,
      "learning_rate": 0.0003621208398417705,
      "loss": 0.3848,
      "step": 214
    },
    {
      "epoch": 0.4488517745302714,
      "grad_norm": 0.04583925885988264,
      "learning_rate": 0.0003617238535065782,
      "loss": 0.334,
      "step": 215
    },
    {
      "epoch": 0.4509394572025052,
      "grad_norm": 0.04693126530215214,
      "learning_rate": 0.0003613250177239838,
      "loss": 0.3691,
      "step": 216
    },
    {
      "epoch": 0.453027139874739,
      "grad_norm": 0.05342936402981673,
      "learning_rate": 0.00036092433705500797,
      "loss": 0.3301,
      "step": 217
    },
    {
      "epoch": 0.4551148225469729,
      "grad_norm": 0.04089550326814837,
      "learning_rate": 0.000360521816081769,
      "loss": 0.2793,
      "step": 218
    },
    {
      "epoch": 0.4572025052192067,
      "grad_norm": 0.04725556069406023,
      "learning_rate": 0.00036011745940743036,
      "loss": 0.3887,
      "step": 219
    },
    {
      "epoch": 0.4592901878914405,
      "grad_norm": 0.04629637553798221,
      "learning_rate": 0.0003597112716561486,
      "loss": 0.3477,
      "step": 220
    },
    {
      "epoch": 0.4613778705636743,
      "grad_norm": 0.042829084723361624,
      "learning_rate": 0.0003593032574730201,
      "loss": 0.3379,
      "step": 221
    },
    {
      "epoch": 0.46346555323590816,
      "grad_norm": 0.04290003185482568,
      "learning_rate": 0.000358893421524028,
      "loss": 0.3828,
      "step": 222
    },
    {
      "epoch": 0.46555323590814196,
      "grad_norm": 0.03695634767631775,
      "learning_rate": 0.0003584817684959888,
      "loss": 0.2539,
      "step": 223
    },
    {
      "epoch": 0.46764091858037576,
      "grad_norm": 0.043249882204696076,
      "learning_rate": 0.00035806830309649887,
      "loss": 0.291,
      "step": 224
    },
    {
      "epoch": 0.4697286012526096,
      "grad_norm": 0.0426499927716365,
      "learning_rate": 0.0003576530300538805,
      "loss": 0.3262,
      "step": 225
    },
    {
      "epoch": 0.4718162839248434,
      "grad_norm": 0.038269992068262225,
      "learning_rate": 0.00035723595411712793,
      "loss": 0.2598,
      "step": 226
    },
    {
      "epoch": 0.47390396659707723,
      "grad_norm": 0.039227521535724244,
      "learning_rate": 0.0003568170800558531,
      "loss": 0.2812,
      "step": 227
    },
    {
      "epoch": 0.4759916492693111,
      "grad_norm": 0.0414687007362602,
      "learning_rate": 0.0003563964126602308,
      "loss": 0.3184,
      "step": 228
    },
    {
      "epoch": 0.4780793319415449,
      "grad_norm": 0.06563073201350628,
      "learning_rate": 0.0003559739567409443,
      "loss": 0.2969,
      "step": 229
    },
    {
      "epoch": 0.4801670146137787,
      "grad_norm": 0.04223079865704053,
      "learning_rate": 0.00035554971712913004,
      "loss": 0.334,
      "step": 230
    },
    {
      "epoch": 0.4822546972860125,
      "grad_norm": 0.042708614625140126,
      "learning_rate": 0.00035512369867632236,
      "loss": 0.3418,
      "step": 231
    },
    {
      "epoch": 0.48434237995824636,
      "grad_norm": 0.04063103807491919,
      "learning_rate": 0.00035469590625439843,
      "loss": 0.3477,
      "step": 232
    },
    {
      "epoch": 0.48643006263048016,
      "grad_norm": 0.045147549230686396,
      "learning_rate": 0.00035426634475552187,
      "loss": 0.3516,
      "step": 233
    },
    {
      "epoch": 0.48851774530271397,
      "grad_norm": 0.04106108808167586,
      "learning_rate": 0.00035383501909208755,
      "loss": 0.2812,
      "step": 234
    },
    {
      "epoch": 0.4906054279749478,
      "grad_norm": 0.0428728366141268,
      "learning_rate": 0.0003534019341966647,
      "loss": 0.3398,
      "step": 235
    },
    {
      "epoch": 0.49269311064718163,
      "grad_norm": 0.047772722106343216,
      "learning_rate": 0.00035296709502194115,
      "loss": 0.3848,
      "step": 236
    },
    {
      "epoch": 0.49478079331941544,
      "grad_norm": 0.04629142912114304,
      "learning_rate": 0.000352530506540666,
      "loss": 0.2715,
      "step": 237
    },
    {
      "epoch": 0.4968684759916493,
      "grad_norm": 0.05018937450568595,
      "learning_rate": 0.0003520921737455935,
      "loss": 0.3027,
      "step": 238
    },
    {
      "epoch": 0.4989561586638831,
      "grad_norm": 0.04431763914166162,
      "learning_rate": 0.00035165210164942535,
      "loss": 0.3477,
      "step": 239
    },
    {
      "epoch": 0.5010438413361169,
      "grad_norm": 0.04080914710858152,
      "learning_rate": 0.00035121029528475386,
      "loss": 0.3086,
      "step": 240
    },
    {
      "epoch": 0.5031315240083507,
      "grad_norm": 0.04222028074641329,
      "learning_rate": 0.0003507667597040038,
      "loss": 0.3672,
      "step": 241
    },
    {
      "epoch": 0.5052192066805845,
      "grad_norm": 0.04367534710869874,
      "learning_rate": 0.0003503214999793752,
      "loss": 0.3223,
      "step": 242
    },
    {
      "epoch": 0.5073068893528184,
      "grad_norm": 0.04813953280858226,
      "learning_rate": 0.00034987452120278505,
      "loss": 0.3984,
      "step": 243
    },
    {
      "epoch": 0.5093945720250522,
      "grad_norm": 0.046413128010797784,
      "learning_rate": 0.00034942582848580925,
      "loss": 0.3848,
      "step": 244
    },
    {
      "epoch": 0.511482254697286,
      "grad_norm": 0.036986531982911595,
      "learning_rate": 0.0003489754269596239,
      "loss": 0.3066,
      "step": 245
    },
    {
      "epoch": 0.5135699373695198,
      "grad_norm": 0.040001114155947204,
      "learning_rate": 0.00034852332177494685,
      "loss": 0.2852,
      "step": 246
    },
    {
      "epoch": 0.5156576200417536,
      "grad_norm": 0.04605884877382518,
      "learning_rate": 0.0003480695181019786,
      "loss": 0.3535,
      "step": 247
    },
    {
      "epoch": 0.5177453027139874,
      "grad_norm": 0.0406658437248984,
      "learning_rate": 0.0003476140211303433,
      "loss": 0.334,
      "step": 248
    },
    {
      "epoch": 0.5198329853862212,
      "grad_norm": 0.03909067687413047,
      "learning_rate": 0.0003471568360690296,
      "loss": 0.2871,
      "step": 249
    },
    {
      "epoch": 0.5219206680584552,
      "grad_norm": 0.04831389973643467,
      "learning_rate": 0.00034669796814633055,
      "loss": 0.3398,
      "step": 250
    },
    {
      "epoch": 0.524008350730689,
      "grad_norm": 0.04408464727595837,
      "learning_rate": 0.0003462374226097844,
      "loss": 0.3242,
      "step": 251
    },
    {
      "epoch": 0.5260960334029228,
      "grad_norm": 0.03798160874377182,
      "learning_rate": 0.00034577520472611414,
      "loss": 0.252,
      "step": 252
    },
    {
      "epoch": 0.5281837160751566,
      "grad_norm": 0.04625762493599334,
      "learning_rate": 0.0003453113197811675,
      "loss": 0.334,
      "step": 253
    },
    {
      "epoch": 0.5302713987473904,
      "grad_norm": 0.03757333278335248,
      "learning_rate": 0.0003448457730798564,
      "loss": 0.2734,
      "step": 254
    },
    {
      "epoch": 0.5323590814196242,
      "grad_norm": 0.038808091023111425,
      "learning_rate": 0.0003443785699460965,
      "loss": 0.3652,
      "step": 255
    },
    {
      "epoch": 0.534446764091858,
      "grad_norm": 0.03797009162534055,
      "learning_rate": 0.0003439097157227459,
      "loss": 0.3223,
      "step": 256
    },
    {
      "epoch": 0.5365344467640919,
      "grad_norm": 0.038601658815291935,
      "learning_rate": 0.0003434392157715445,
      "loss": 0.373,
      "step": 257
    },
    {
      "epoch": 0.5386221294363257,
      "grad_norm": 0.04680506137895324,
      "learning_rate": 0.00034296707547305235,
      "loss": 0.334,
      "step": 258
    },
    {
      "epoch": 0.5407098121085595,
      "grad_norm": 0.041499005757660336,
      "learning_rate": 0.0003424933002265883,
      "loss": 0.3457,
      "step": 259
    },
    {
      "epoch": 0.5427974947807933,
      "grad_norm": 0.04115196540841461,
      "learning_rate": 0.00034201789545016824,
      "loss": 0.3242,
      "step": 260
    },
    {
      "epoch": 0.5448851774530271,
      "grad_norm": 0.04340048389863411,
      "learning_rate": 0.00034154086658044297,
      "loss": 0.2949,
      "step": 261
    },
    {
      "epoch": 0.5469728601252609,
      "grad_norm": 0.04378711119505926,
      "learning_rate": 0.0003410622190726362,
      "loss": 0.3066,
      "step": 262
    },
    {
      "epoch": 0.5490605427974948,
      "grad_norm": 0.04643412727247773,
      "learning_rate": 0.00034058195840048225,
      "loss": 0.3652,
      "step": 263
    },
    {
      "epoch": 0.5511482254697286,
      "grad_norm": 0.042043153196747375,
      "learning_rate": 0.0003401000900561631,
      "loss": 0.3789,
      "step": 264
    },
    {
      "epoch": 0.5532359081419624,
      "grad_norm": 0.040040695052475275,
      "learning_rate": 0.00033961661955024595,
      "loss": 0.3145,
      "step": 265
    },
    {
      "epoch": 0.5553235908141962,
      "grad_norm": 0.038423365462145156,
      "learning_rate": 0.0003391315524116202,
      "loss": 0.3223,
      "step": 266
    },
    {
      "epoch": 0.55741127348643,
      "grad_norm": 0.038539884775496434,
      "learning_rate": 0.00033864489418743355,
      "loss": 0.3086,
      "step": 267
    },
    {
      "epoch": 0.5594989561586639,
      "grad_norm": 0.04422443806977669,
      "learning_rate": 0.0003381566504430298,
      "loss": 0.3945,
      "step": 268
    },
    {
      "epoch": 0.5615866388308977,
      "grad_norm": 0.0377661934221125,
      "learning_rate": 0.00033766682676188386,
      "loss": 0.3398,
      "step": 269
    },
    {
      "epoch": 0.5636743215031316,
      "grad_norm": 0.036534819703584714,
      "learning_rate": 0.00033717542874553896,
      "loss": 0.3418,
      "step": 270
    },
    {
      "epoch": 0.5657620041753654,
      "grad_norm": 0.04440346943789282,
      "learning_rate": 0.0003366824620135421,
      "loss": 0.3984,
      "step": 271
    },
    {
      "epoch": 0.5678496868475992,
      "grad_norm": 0.037034787962705526,
      "learning_rate": 0.00033618793220337965,
      "loss": 0.2812,
      "step": 272
    },
    {
      "epoch": 0.569937369519833,
      "grad_norm": 0.03927620752143223,
      "learning_rate": 0.0003356918449704133,
      "loss": 0.2695,
      "step": 273
    },
    {
      "epoch": 0.5720250521920668,
      "grad_norm": 0.04369797108825467,
      "learning_rate": 0.00033519420598781505,
      "loss": 0.3477,
      "step": 274
    },
    {
      "epoch": 0.5741127348643006,
      "grad_norm": 0.0379874901088521,
      "learning_rate": 0.0003346950209465025,
      "loss": 0.248,
      "step": 275
    },
    {
      "epoch": 0.5762004175365344,
      "grad_norm": 0.042755557159503056,
      "learning_rate": 0.0003341942955550738,
      "loss": 0.3242,
      "step": 276
    },
    {
      "epoch": 0.5782881002087683,
      "grad_norm": 0.039892604797522456,
      "learning_rate": 0.0003336920355397421,
      "loss": 0.3125,
      "step": 277
    },
    {
      "epoch": 0.5803757828810021,
      "grad_norm": 0.037020438684393274,
      "learning_rate": 0.0003331882466442704,
      "loss": 0.2832,
      "step": 278
    },
    {
      "epoch": 0.5824634655532359,
      "grad_norm": 0.0448544789689836,
      "learning_rate": 0.0003326829346299057,
      "loss": 0.3613,
      "step": 279
    },
    {
      "epoch": 0.5845511482254697,
      "grad_norm": 0.037923744133296695,
      "learning_rate": 0.00033217610527531315,
      "loss": 0.3262,
      "step": 280
    },
    {
      "epoch": 0.5866388308977035,
      "grad_norm": 0.039536585438320955,
      "learning_rate": 0.00033166776437650985,
      "loss": 0.3301,
      "step": 281
    },
    {
      "epoch": 0.5887265135699373,
      "grad_norm": 0.042309874793804436,
      "learning_rate": 0.0003311579177467988,
      "loss": 0.3281,
      "step": 282
    },
    {
      "epoch": 0.5908141962421712,
      "grad_norm": 0.04247524788442754,
      "learning_rate": 0.0003306465712167023,
      "loss": 0.3281,
      "step": 283
    },
    {
      "epoch": 0.592901878914405,
      "grad_norm": 0.038298773282535274,
      "learning_rate": 0.0003301337306338951,
      "loss": 0.3066,
      "step": 284
    },
    {
      "epoch": 0.5949895615866388,
      "grad_norm": 0.04845680190635201,
      "learning_rate": 0.00032961940186313784,
      "loss": 0.3867,
      "step": 285
    },
    {
      "epoch": 0.5970772442588727,
      "grad_norm": 0.04425999769437242,
      "learning_rate": 0.00032910359078620974,
      "loss": 0.3848,
      "step": 286
    },
    {
      "epoch": 0.5991649269311065,
      "grad_norm": 0.04350677333004508,
      "learning_rate": 0.0003285863033018416,
      "loss": 0.3359,
      "step": 287
    },
    {
      "epoch": 0.6012526096033403,
      "grad_norm": 0.04313248762187342,
      "learning_rate": 0.00032806754532564795,
      "loss": 0.3281,
      "step": 288
    },
    {
      "epoch": 0.6033402922755741,
      "grad_norm": 0.04172934704529679,
      "learning_rate": 0.00032754732279005974,
      "loss": 0.3887,
      "step": 289
    },
    {
      "epoch": 0.605427974947808,
      "grad_norm": 0.053530546182614214,
      "learning_rate": 0.0003270256416442564,
      "loss": 0.4512,
      "step": 290
    },
    {
      "epoch": 0.6075156576200418,
      "grad_norm": 0.0585588589735363,
      "learning_rate": 0.0003265025078540977,
      "loss": 0.4023,
      "step": 291
    },
    {
      "epoch": 0.6096033402922756,
      "grad_norm": 0.046794215765212366,
      "learning_rate": 0.00032597792740205573,
      "loss": 0.2871,
      "step": 292
    },
    {
      "epoch": 0.6116910229645094,
      "grad_norm": 0.04076085980117636,
      "learning_rate": 0.0003254519062871462,
      "loss": 0.3027,
      "step": 293
    },
    {
      "epoch": 0.6137787056367432,
      "grad_norm": 0.039930806408977594,
      "learning_rate": 0.0003249244505248603,
      "loss": 0.3086,
      "step": 294
    },
    {
      "epoch": 0.615866388308977,
      "grad_norm": 0.047339086645206876,
      "learning_rate": 0.0003243955661470952,
      "loss": 0.373,
      "step": 295
    },
    {
      "epoch": 0.6179540709812108,
      "grad_norm": 0.041035006122051215,
      "learning_rate": 0.00032386525920208574,
      "loss": 0.3281,
      "step": 296
    },
    {
      "epoch": 0.6200417536534447,
      "grad_norm": 0.05042533676976384,
      "learning_rate": 0.000323333535754335,
      "loss": 0.293,
      "step": 297
    },
    {
      "epoch": 0.6221294363256785,
      "grad_norm": 0.043959165085212505,
      "learning_rate": 0.00032280040188454494,
      "loss": 0.3125,
      "step": 298
    },
    {
      "epoch": 0.6242171189979123,
      "grad_norm": 0.04458635918949398,
      "learning_rate": 0.0003222658636895469,
      "loss": 0.3105,
      "step": 299
    },
    {
      "epoch": 0.6263048016701461,
      "grad_norm": 0.04146656432155535,
      "learning_rate": 0.0003217299272822316,
      "loss": 0.3672,
      "step": 300
    },
    {
      "epoch": 0.6283924843423799,
      "grad_norm": 0.04285146681080291,
      "learning_rate": 0.00032119259879147987,
      "loss": 0.2773,
      "step": 301
    },
    {
      "epoch": 0.6304801670146137,
      "grad_norm": 0.04012756626164825,
      "learning_rate": 0.0003206538843620919,
      "loss": 0.3145,
      "step": 302
    },
    {
      "epoch": 0.6325678496868476,
      "grad_norm": 0.03395547556102837,
      "learning_rate": 0.0003201137901547175,
      "loss": 0.2344,
      "step": 303
    },
    {
      "epoch": 0.6346555323590815,
      "grad_norm": 0.038474167468473125,
      "learning_rate": 0.00031957232234578506,
      "loss": 0.3105,
      "step": 304
    },
    {
      "epoch": 0.6367432150313153,
      "grad_norm": 0.04345850995580467,
      "learning_rate": 0.00031902948712743146,
      "loss": 0.3711,
      "step": 305
    },
    {
      "epoch": 0.6388308977035491,
      "grad_norm": 0.04561991595493648,
      "learning_rate": 0.00031848529070743113,
      "loss": 0.3281,
      "step": 306
    },
    {
      "epoch": 0.6409185803757829,
      "grad_norm": 0.043639261730465534,
      "learning_rate": 0.00031793973930912465,
      "loss": 0.3535,
      "step": 307
    },
    {
      "epoch": 0.6430062630480167,
      "grad_norm": 0.041614849807266444,
      "learning_rate": 0.00031739283917134835,
      "loss": 0.293,
      "step": 308
    },
    {
      "epoch": 0.6450939457202505,
      "grad_norm": 0.037829924843835225,
      "learning_rate": 0.00031684459654836224,
      "loss": 0.2793,
      "step": 309
    },
    {
      "epoch": 0.6471816283924844,
      "grad_norm": 0.043593747485495736,
      "learning_rate": 0.00031629501770977865,
      "loss": 0.2871,
      "step": 310
    },
    {
      "epoch": 0.6492693110647182,
      "grad_norm": 0.043504619259611736,
      "learning_rate": 0.0003157441089404911,
      "loss": 0.377,
      "step": 311
    },
    {
      "epoch": 0.651356993736952,
      "grad_norm": 0.042969570910675,
      "learning_rate": 0.00031519187654060153,
      "loss": 0.3535,
      "step": 312
    },
    {
      "epoch": 0.6534446764091858,
      "grad_norm": 0.04444146348618118,
      "learning_rate": 0.0003146383268253489,
      "loss": 0.3633,
      "step": 313
    },
    {
      "epoch": 0.6555323590814196,
      "grad_norm": 0.0579233541293356,
      "learning_rate": 0.00031408346612503684,
      "loss": 0.2852,
      "step": 314
    },
    {
      "epoch": 0.6576200417536534,
      "grad_norm": 0.04116612592402649,
      "learning_rate": 0.00031352730078496103,
      "loss": 0.3184,
      "step": 315
    },
    {
      "epoch": 0.6597077244258872,
      "grad_norm": 0.042619485954835536,
      "learning_rate": 0.0003129698371653368,
      "loss": 0.3555,
      "step": 316
    },
    {
      "epoch": 0.6617954070981211,
      "grad_norm": 0.03978568537420636,
      "learning_rate": 0.00031241108164122667,
      "loss": 0.3184,
      "step": 317
    },
    {
      "epoch": 0.6638830897703549,
      "grad_norm": 0.04398595731711137,
      "learning_rate": 0.0003118510406024667,
      "loss": 0.3262,
      "step": 318
    },
    {
      "epoch": 0.6659707724425887,
      "grad_norm": 0.03557319884714253,
      "learning_rate": 0.0003112897204535943,
      "loss": 0.2539,
      "step": 319
    },
    {
      "epoch": 0.6680584551148225,
      "grad_norm": 0.04098653755334443,
      "learning_rate": 0.0003107271276137745,
      "loss": 0.3711,
      "step": 320
    },
    {
      "epoch": 0.6701461377870563,
      "grad_norm": 0.0377819264468114,
      "learning_rate": 0.0003101632685167263,
      "loss": 0.2891,
      "step": 321
    },
    {
      "epoch": 0.6722338204592901,
      "grad_norm": 0.038417025071128603,
      "learning_rate": 0.00030959814961064973,
      "loss": 0.2891,
      "step": 322
    },
    {
      "epoch": 0.6743215031315241,
      "grad_norm": 0.038602243878265344,
      "learning_rate": 0.0003090317773581518,
      "loss": 0.2734,
      "step": 323
    },
    {
      "epoch": 0.6764091858037579,
      "grad_norm": 0.0487821791964808,
      "learning_rate": 0.0003084641582361723,
      "loss": 0.3574,
      "step": 324
    },
    {
      "epoch": 0.6784968684759917,
      "grad_norm": 0.04544872266451946,
      "learning_rate": 0.00030789529873591034,
      "loss": 0.3008,
      "step": 325
    },
    {
      "epoch": 0.6805845511482255,
      "grad_norm": 0.0373160417363858,
      "learning_rate": 0.0003073252053627496,
      "loss": 0.2871,
      "step": 326
    },
    {
      "epoch": 0.6826722338204593,
      "grad_norm": 0.04358868533626725,
      "learning_rate": 0.0003067538846361841,
      "loss": 0.3066,
      "step": 327
    },
    {
      "epoch": 0.6847599164926931,
      "grad_norm": 0.040357136071415334,
      "learning_rate": 0.0003061813430897439,
      "loss": 0.3516,
      "step": 328
    },
    {
      "epoch": 0.6868475991649269,
      "grad_norm": 0.04862770193509928,
      "learning_rate": 0.0003056075872709199,
      "loss": 0.4062,
      "step": 329
    },
    {
      "epoch": 0.6889352818371608,
      "grad_norm": 0.04688881879011172,
      "learning_rate": 0.00030503262374108927,
      "loss": 0.4434,
      "step": 330
    },
    {
      "epoch": 0.6910229645093946,
      "grad_norm": 0.04276287817278344,
      "learning_rate": 0.00030445645907544057,
      "loss": 0.3105,
      "step": 331
    },
    {
      "epoch": 0.6931106471816284,
      "grad_norm": 0.03644902974160357,
      "learning_rate": 0.0003038790998628981,
      "loss": 0.249,
      "step": 332
    },
    {
      "epoch": 0.6951983298538622,
      "grad_norm": 0.03602308126572102,
      "learning_rate": 0.0003033005527060469,
      "loss": 0.2891,
      "step": 333
    },
    {
      "epoch": 0.697286012526096,
      "grad_norm": 0.0397502232733115,
      "learning_rate": 0.0003027208242210571,
      "loss": 0.334,
      "step": 334
    },
    {
      "epoch": 0.6993736951983298,
      "grad_norm": 0.042614553937913115,
      "learning_rate": 0.0003021399210376084,
      "loss": 0.3281,
      "step": 335
    },
    {
      "epoch": 0.7014613778705637,
      "grad_norm": 0.040360864280806166,
      "learning_rate": 0.0003015578497988142,
      "loss": 0.3145,
      "step": 336
    },
    {
      "epoch": 0.7035490605427975,
      "grad_norm": 0.03697519450333998,
      "learning_rate": 0.0003009746171611454,
      "loss": 0.2852,
| "step": 337 | |
| }, | |
| { | |
| "epoch": 0.7056367432150313, | |
| "grad_norm": 0.03920168369252357, | |
| "learning_rate": 0.0003003902297943545, | |
| "loss": 0.2578, | |
| "step": 338 | |
| }, | |
| { | |
| "epoch": 0.7077244258872651, | |
| "grad_norm": 0.04596701530986532, | |
| "learning_rate": 0.00029980469438139947, | |
| "loss": 0.3066, | |
| "step": 339 | |
| }, | |
| { | |
| "epoch": 0.7098121085594989, | |
| "grad_norm": 0.03590053569859233, | |
| "learning_rate": 0.00029921801761836696, | |
| "loss": 0.2598, | |
| "step": 340 | |
| }, | |
| { | |
| "epoch": 0.7118997912317327, | |
| "grad_norm": 0.039137959610818826, | |
| "learning_rate": 0.0002986302062143961, | |
| "loss": 0.3398, | |
| "step": 341 | |
| }, | |
| { | |
| "epoch": 0.7139874739039666, | |
| "grad_norm": 0.04445491792602957, | |
| "learning_rate": 0.0002980412668916013, | |
| "loss": 0.3398, | |
| "step": 342 | |
| }, | |
| { | |
| "epoch": 0.7160751565762005, | |
| "grad_norm": 0.040340445047287156, | |
| "learning_rate": 0.00029745120638499596, | |
| "loss": 0.3516, | |
| "step": 343 | |
| }, | |
| { | |
| "epoch": 0.7181628392484343, | |
| "grad_norm": 0.04520397395484933, | |
| "learning_rate": 0.00029686003144241494, | |
| "loss": 0.3555, | |
| "step": 344 | |
| }, | |
| { | |
| "epoch": 0.7202505219206681, | |
| "grad_norm": 0.045695108134425295, | |
| "learning_rate": 0.0002962677488244378, | |
| "loss": 0.2871, | |
| "step": 345 | |
| }, | |
| { | |
| "epoch": 0.7223382045929019, | |
| "grad_norm": 0.0414553610811136, | |
| "learning_rate": 0.0002956743653043111, | |
| "loss": 0.3828, | |
| "step": 346 | |
| }, | |
| { | |
| "epoch": 0.7244258872651357, | |
| "grad_norm": 0.043267589906942225, | |
| "learning_rate": 0.00029507988766787135, | |
| "loss": 0.3262, | |
| "step": 347 | |
| }, | |
| { | |
| "epoch": 0.7265135699373695, | |
| "grad_norm": 0.03665181460868117, | |
| "learning_rate": 0.0002944843227134669, | |
| "loss": 0.3301, | |
| "step": 348 | |
| }, | |
| { | |
| "epoch": 0.7286012526096033, | |
| "grad_norm": 0.04220684212991339, | |
| "learning_rate": 0.00029388767725188077, | |
| "loss": 0.3223, | |
| "step": 349 | |
| }, | |
| { | |
| "epoch": 0.7306889352818372, | |
| "grad_norm": 0.04468007941423499, | |
| "learning_rate": 0.0002932899581062524, | |
| "loss": 0.2949, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.732776617954071, | |
| "grad_norm": 0.044433764048658723, | |
| "learning_rate": 0.00029269117211199955, | |
| "loss": 0.377, | |
| "step": 351 | |
| }, | |
| { | |
| "epoch": 0.7348643006263048, | |
| "grad_norm": 0.046209035174827495, | |
| "learning_rate": 0.0002920913261167405, | |
| "loss": 0.3691, | |
| "step": 352 | |
| }, | |
| { | |
| "epoch": 0.7369519832985386, | |
| "grad_norm": 0.040034133910176115, | |
| "learning_rate": 0.00029149042698021544, | |
| "loss": 0.3105, | |
| "step": 353 | |
| }, | |
| { | |
| "epoch": 0.7390396659707724, | |
| "grad_norm": 0.04609016944022695, | |
| "learning_rate": 0.00029088848157420813, | |
| "loss": 0.3984, | |
| "step": 354 | |
| }, | |
| { | |
| "epoch": 0.7411273486430062, | |
| "grad_norm": 0.04480775819697267, | |
| "learning_rate": 0.00029028549678246715, | |
| "loss": 0.332, | |
| "step": 355 | |
| }, | |
| { | |
| "epoch": 0.7432150313152401, | |
| "grad_norm": 0.04387465996335384, | |
| "learning_rate": 0.0002896814795006277, | |
| "loss": 0.3984, | |
| "step": 356 | |
| }, | |
| { | |
| "epoch": 0.7453027139874739, | |
| "grad_norm": 0.041864619992452806, | |
| "learning_rate": 0.0002890764366361318, | |
| "loss": 0.3633, | |
| "step": 357 | |
| }, | |
| { | |
| "epoch": 0.7473903966597077, | |
| "grad_norm": 0.03717012201708029, | |
| "learning_rate": 0.0002884703751081504, | |
| "loss": 0.2324, | |
| "step": 358 | |
| }, | |
| { | |
| "epoch": 0.7494780793319415, | |
| "grad_norm": 0.043251857860819434, | |
| "learning_rate": 0.0002878633018475035, | |
| "loss": 0.3066, | |
| "step": 359 | |
| }, | |
| { | |
| "epoch": 0.7515657620041754, | |
| "grad_norm": 0.03591327407860244, | |
| "learning_rate": 0.00028725522379658097, | |
| "loss": 0.2852, | |
| "step": 360 | |
| }, | |
| { | |
| "epoch": 0.7536534446764092, | |
| "grad_norm": 0.0381908896371197, | |
| "learning_rate": 0.00028664614790926355, | |
| "loss": 0.3477, | |
| "step": 361 | |
| }, | |
| { | |
| "epoch": 0.755741127348643, | |
| "grad_norm": 0.04392871145759733, | |
| "learning_rate": 0.000286036081150843, | |
| "loss": 0.4414, | |
| "step": 362 | |
| }, | |
| { | |
| "epoch": 0.7578288100208769, | |
| "grad_norm": 0.04392250979416697, | |
| "learning_rate": 0.0002854250304979423, | |
| "loss": 0.2832, | |
| "step": 363 | |
| }, | |
| { | |
| "epoch": 0.7599164926931107, | |
| "grad_norm": 0.03445036859255453, | |
| "learning_rate": 0.0002848130029384363, | |
| "loss": 0.3027, | |
| "step": 364 | |
| }, | |
| { | |
| "epoch": 0.7620041753653445, | |
| "grad_norm": 0.03588315051937214, | |
| "learning_rate": 0.00028420000547137175, | |
| "loss": 0.2793, | |
| "step": 365 | |
| }, | |
| { | |
| "epoch": 0.7640918580375783, | |
| "grad_norm": 0.043623631697390225, | |
| "learning_rate": 0.00028358604510688666, | |
| "loss": 0.3301, | |
| "step": 366 | |
| }, | |
| { | |
| "epoch": 0.7661795407098121, | |
| "grad_norm": 0.044899266192686, | |
| "learning_rate": 0.000282971128866131, | |
| "loss": 0.2988, | |
| "step": 367 | |
| }, | |
| { | |
| "epoch": 0.7682672233820459, | |
| "grad_norm": 0.044441044360934975, | |
| "learning_rate": 0.0002823552637811857, | |
| "loss": 0.3262, | |
| "step": 368 | |
| }, | |
| { | |
| "epoch": 0.7703549060542797, | |
| "grad_norm": 0.04617336800060674, | |
| "learning_rate": 0.00028173845689498286, | |
| "loss": 0.2812, | |
| "step": 369 | |
| }, | |
| { | |
| "epoch": 0.7724425887265136, | |
| "grad_norm": 0.03481084134760086, | |
| "learning_rate": 0.0002811207152612243, | |
| "loss": 0.2637, | |
| "step": 370 | |
| }, | |
| { | |
| "epoch": 0.7745302713987474, | |
| "grad_norm": 0.04359599728927467, | |
| "learning_rate": 0.000280502045944302, | |
| "loss": 0.3516, | |
| "step": 371 | |
| }, | |
| { | |
| "epoch": 0.7766179540709812, | |
| "grad_norm": 0.03910171209605865, | |
| "learning_rate": 0.0002798824560192164, | |
| "loss": 0.2793, | |
| "step": 372 | |
| }, | |
| { | |
| "epoch": 0.778705636743215, | |
| "grad_norm": 0.04498875604580853, | |
| "learning_rate": 0.0002792619525714962, | |
| "loss": 0.334, | |
| "step": 373 | |
| }, | |
| { | |
| "epoch": 0.7807933194154488, | |
| "grad_norm": 0.046945206399684386, | |
| "learning_rate": 0.00027864054269711666, | |
| "loss": 0.3262, | |
| "step": 374 | |
| }, | |
| { | |
| "epoch": 0.7828810020876826, | |
| "grad_norm": 0.03720210905131818, | |
| "learning_rate": 0.0002780182335024191, | |
| "loss": 0.252, | |
| "step": 375 | |
| }, | |
| { | |
| "epoch": 0.7849686847599165, | |
| "grad_norm": 0.03971464889707817, | |
| "learning_rate": 0.00027739503210402907, | |
| "loss": 0.334, | |
| "step": 376 | |
| }, | |
| { | |
| "epoch": 0.7870563674321504, | |
| "grad_norm": 0.047941221203412314, | |
| "learning_rate": 0.00027677094562877534, | |
| "loss": 0.3066, | |
| "step": 377 | |
| }, | |
| { | |
| "epoch": 0.7891440501043842, | |
| "grad_norm": 0.04116312254250454, | |
| "learning_rate": 0.0002761459812136083, | |
| "loss": 0.3652, | |
| "step": 378 | |
| }, | |
| { | |
| "epoch": 0.791231732776618, | |
| "grad_norm": 0.0408608818482939, | |
| "learning_rate": 0.00027552014600551813, | |
| "loss": 0.3359, | |
| "step": 379 | |
| }, | |
| { | |
| "epoch": 0.7933194154488518, | |
| "grad_norm": 0.039519399095894796, | |
| "learning_rate": 0.00027489344716145344, | |
| "loss": 0.3105, | |
| "step": 380 | |
| }, | |
| { | |
| "epoch": 0.7954070981210856, | |
| "grad_norm": 0.04468756187207934, | |
| "learning_rate": 0.0002742658918482392, | |
| "loss": 0.334, | |
| "step": 381 | |
| }, | |
| { | |
| "epoch": 0.7974947807933194, | |
| "grad_norm": 0.03799026650779495, | |
| "learning_rate": 0.00027363748724249485, | |
| "loss": 0.3125, | |
| "step": 382 | |
| }, | |
| { | |
| "epoch": 0.7995824634655533, | |
| "grad_norm": 0.05260052294591301, | |
| "learning_rate": 0.000273008240530552, | |
| "loss": 0.4316, | |
| "step": 383 | |
| }, | |
| { | |
| "epoch": 0.8016701461377871, | |
| "grad_norm": 0.04069771345832873, | |
| "learning_rate": 0.00027237815890837283, | |
| "loss": 0.2969, | |
| "step": 384 | |
| }, | |
| { | |
| "epoch": 0.8037578288100209, | |
| "grad_norm": 0.039739122060491815, | |
| "learning_rate": 0.000271747249581467, | |
| "loss": 0.3555, | |
| "step": 385 | |
| }, | |
| { | |
| "epoch": 0.8058455114822547, | |
| "grad_norm": 0.04610276901781518, | |
| "learning_rate": 0.00027111551976480996, | |
| "loss": 0.3574, | |
| "step": 386 | |
| }, | |
| { | |
| "epoch": 0.8079331941544885, | |
| "grad_norm": 0.04374672870028719, | |
| "learning_rate": 0.00027048297668276, | |
| "loss": 0.3047, | |
| "step": 387 | |
| }, | |
| { | |
| "epoch": 0.8100208768267223, | |
| "grad_norm": 0.04631564065384605, | |
| "learning_rate": 0.0002698496275689758, | |
| "loss": 0.2871, | |
| "step": 388 | |
| }, | |
| { | |
| "epoch": 0.8121085594989561, | |
| "grad_norm": 0.049176525271089545, | |
| "learning_rate": 0.00026921547966633373, | |
| "loss": 0.2871, | |
| "step": 389 | |
| }, | |
| { | |
| "epoch": 0.81419624217119, | |
| "grad_norm": 0.040234387326007176, | |
| "learning_rate": 0.0002685805402268449, | |
| "loss": 0.3301, | |
| "step": 390 | |
| }, | |
| { | |
| "epoch": 0.8162839248434238, | |
| "grad_norm": 0.044678234660364975, | |
| "learning_rate": 0.0002679448165115725, | |
| "loss": 0.2852, | |
| "step": 391 | |
| }, | |
| { | |
| "epoch": 0.8183716075156576, | |
| "grad_norm": 0.043749807508504976, | |
| "learning_rate": 0.00026730831579054823, | |
| "loss": 0.3418, | |
| "step": 392 | |
| }, | |
| { | |
| "epoch": 0.8204592901878914, | |
| "grad_norm": 0.03702597280717622, | |
| "learning_rate": 0.00026667104534268984, | |
| "loss": 0.334, | |
| "step": 393 | |
| }, | |
| { | |
| "epoch": 0.8225469728601252, | |
| "grad_norm": 0.04051649281576064, | |
| "learning_rate": 0.00026603301245571733, | |
| "loss": 0.3652, | |
| "step": 394 | |
| }, | |
| { | |
| "epoch": 0.824634655532359, | |
| "grad_norm": 0.04070349409408308, | |
| "learning_rate": 0.00026539422442607, | |
| "loss": 0.3164, | |
| "step": 395 | |
| }, | |
| { | |
| "epoch": 0.826722338204593, | |
| "grad_norm": 0.03685594277413145, | |
| "learning_rate": 0.0002647546885588227, | |
| "loss": 0.2695, | |
| "step": 396 | |
| }, | |
| { | |
| "epoch": 0.8288100208768268, | |
| "grad_norm": 0.033959522628423805, | |
| "learning_rate": 0.00026411441216760243, | |
| "loss": 0.2422, | |
| "step": 397 | |
| }, | |
| { | |
| "epoch": 0.8308977035490606, | |
| "grad_norm": 0.03649515909769935, | |
| "learning_rate": 0.00026347340257450473, | |
| "loss": 0.252, | |
| "step": 398 | |
| }, | |
| { | |
| "epoch": 0.8329853862212944, | |
| "grad_norm": 0.05036202800279952, | |
| "learning_rate": 0.00026283166711001, | |
| "loss": 0.3203, | |
| "step": 399 | |
| }, | |
| { | |
| "epoch": 0.8350730688935282, | |
| "grad_norm": 0.04724871652205951, | |
| "learning_rate": 0.0002621892131128994, | |
| "loss": 0.3555, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.837160751565762, | |
| "grad_norm": 0.04523467438521327, | |
| "learning_rate": 0.0002615460479301714, | |
| "loss": 0.3809, | |
| "step": 401 | |
| }, | |
| { | |
| "epoch": 0.8392484342379958, | |
| "grad_norm": 0.04240307801356352, | |
| "learning_rate": 0.0002609021789169571, | |
| "loss": 0.3574, | |
| "step": 402 | |
| }, | |
| { | |
| "epoch": 0.8413361169102297, | |
| "grad_norm": 0.03998645664485665, | |
| "learning_rate": 0.0002602576134364369, | |
| "loss": 0.3008, | |
| "step": 403 | |
| }, | |
| { | |
| "epoch": 0.8434237995824635, | |
| "grad_norm": 0.045774083398434376, | |
| "learning_rate": 0.0002596123588597555, | |
| "loss": 0.4043, | |
| "step": 404 | |
| }, | |
| { | |
| "epoch": 0.8455114822546973, | |
| "grad_norm": 0.043894774464113946, | |
| "learning_rate": 0.0002589664225659382, | |
| "loss": 0.3301, | |
| "step": 405 | |
| }, | |
| { | |
| "epoch": 0.8475991649269311, | |
| "grad_norm": 0.04133334314623751, | |
| "learning_rate": 0.0002583198119418063, | |
| "loss": 0.2637, | |
| "step": 406 | |
| }, | |
| { | |
| "epoch": 0.8496868475991649, | |
| "grad_norm": 0.037286338050560974, | |
| "learning_rate": 0.00025767253438189255, | |
| "loss": 0.3164, | |
| "step": 407 | |
| }, | |
| { | |
| "epoch": 0.8517745302713987, | |
| "grad_norm": 0.04288387043023777, | |
| "learning_rate": 0.0002570245972883568, | |
| "loss": 0.3164, | |
| "step": 408 | |
| }, | |
| { | |
| "epoch": 0.8538622129436325, | |
| "grad_norm": 0.04117605927774004, | |
| "learning_rate": 0.000256376008070901, | |
| "loss": 0.3457, | |
| "step": 409 | |
| }, | |
| { | |
| "epoch": 0.8559498956158664, | |
| "grad_norm": 0.040660730568059064, | |
| "learning_rate": 0.00025572677414668497, | |
| "loss": 0.3691, | |
| "step": 410 | |
| }, | |
| { | |
| "epoch": 0.8580375782881002, | |
| "grad_norm": 0.03473424285365033, | |
| "learning_rate": 0.00025507690294024105, | |
| "loss": 0.252, | |
| "step": 411 | |
| }, | |
| { | |
| "epoch": 0.860125260960334, | |
| "grad_norm": 0.04106073793621654, | |
| "learning_rate": 0.00025442640188338945, | |
| "loss": 0.2871, | |
| "step": 412 | |
| }, | |
| { | |
| "epoch": 0.8622129436325678, | |
| "grad_norm": 0.03780921487044816, | |
| "learning_rate": 0.00025377527841515343, | |
| "loss": 0.2969, | |
| "step": 413 | |
| }, | |
| { | |
| "epoch": 0.8643006263048016, | |
| "grad_norm": 0.03779696762506096, | |
| "learning_rate": 0.00025312353998167397, | |
| "loss": 0.291, | |
| "step": 414 | |
| }, | |
| { | |
| "epoch": 0.8663883089770354, | |
| "grad_norm": 0.04781732258675552, | |
| "learning_rate": 0.00025247119403612456, | |
| "loss": 0.3301, | |
| "step": 415 | |
| }, | |
| { | |
| "epoch": 0.8684759916492694, | |
| "grad_norm": 0.03912752520944136, | |
| "learning_rate": 0.0002518182480386261, | |
| "loss": 0.3477, | |
| "step": 416 | |
| }, | |
| { | |
| "epoch": 0.8705636743215032, | |
| "grad_norm": 0.043540503401128916, | |
| "learning_rate": 0.0002511647094561619, | |
| "loss": 0.3398, | |
| "step": 417 | |
| }, | |
| { | |
| "epoch": 0.872651356993737, | |
| "grad_norm": 0.04207586943763798, | |
| "learning_rate": 0.00025051058576249153, | |
| "loss": 0.3398, | |
| "step": 418 | |
| }, | |
| { | |
| "epoch": 0.8747390396659708, | |
| "grad_norm": 0.03596881591749821, | |
| "learning_rate": 0.00024985588443806624, | |
| "loss": 0.2715, | |
| "step": 419 | |
| }, | |
| { | |
| "epoch": 0.8768267223382046, | |
| "grad_norm": 0.042287007835322306, | |
| "learning_rate": 0.00024920061296994253, | |
| "loss": 0.3086, | |
| "step": 420 | |
| }, | |
| { | |
| "epoch": 0.8789144050104384, | |
| "grad_norm": 0.03856865323635185, | |
| "learning_rate": 0.00024854477885169734, | |
| "loss": 0.2871, | |
| "step": 421 | |
| }, | |
| { | |
| "epoch": 0.8810020876826722, | |
| "grad_norm": 0.03879769693027103, | |
| "learning_rate": 0.0002478883895833417, | |
| "loss": 0.2617, | |
| "step": 422 | |
| }, | |
| { | |
| "epoch": 0.8830897703549061, | |
| "grad_norm": 0.04500551833152804, | |
| "learning_rate": 0.00024723145267123556, | |
| "loss": 0.2793, | |
| "step": 423 | |
| }, | |
| { | |
| "epoch": 0.8851774530271399, | |
| "grad_norm": 0.03925914590146329, | |
| "learning_rate": 0.0002465739756280013, | |
| "loss": 0.3086, | |
| "step": 424 | |
| }, | |
| { | |
| "epoch": 0.8872651356993737, | |
| "grad_norm": 0.04390031842385643, | |
| "learning_rate": 0.0002459159659724383, | |
| "loss": 0.3594, | |
| "step": 425 | |
| }, | |
| { | |
| "epoch": 0.8893528183716075, | |
| "grad_norm": 0.04015458555102483, | |
| "learning_rate": 0.00024525743122943684, | |
| "loss": 0.2773, | |
| "step": 426 | |
| }, | |
| { | |
| "epoch": 0.8914405010438413, | |
| "grad_norm": 0.043251502572368115, | |
| "learning_rate": 0.000244598378929892, | |
| "loss": 0.3633, | |
| "step": 427 | |
| }, | |
| { | |
| "epoch": 0.8935281837160751, | |
| "grad_norm": 0.041718720849523466, | |
| "learning_rate": 0.00024393881661061747, | |
| "loss": 0.3418, | |
| "step": 428 | |
| }, | |
| { | |
| "epoch": 0.8956158663883089, | |
| "grad_norm": 0.03667112112306563, | |
| "learning_rate": 0.0002432787518142596, | |
| "loss": 0.2754, | |
| "step": 429 | |
| }, | |
| { | |
| "epoch": 0.8977035490605428, | |
| "grad_norm": 0.04034875824232465, | |
| "learning_rate": 0.0002426181920892108, | |
| "loss": 0.3242, | |
| "step": 430 | |
| }, | |
| { | |
| "epoch": 0.8997912317327766, | |
| "grad_norm": 0.0416779068349274, | |
| "learning_rate": 0.0002419571449895236, | |
| "loss": 0.2793, | |
| "step": 431 | |
| }, | |
| { | |
| "epoch": 0.9018789144050104, | |
| "grad_norm": 0.04492298155955886, | |
| "learning_rate": 0.00024129561807482378, | |
| "loss": 0.373, | |
| "step": 432 | |
| }, | |
| { | |
| "epoch": 0.9039665970772442, | |
| "grad_norm": 0.0358568800030184, | |
| "learning_rate": 0.00024063361891022455, | |
| "loss": 0.293, | |
| "step": 433 | |
| }, | |
| { | |
| "epoch": 0.906054279749478, | |
| "grad_norm": 0.041240679259523606, | |
| "learning_rate": 0.00023997115506623948, | |
| "loss": 0.3477, | |
| "step": 434 | |
| }, | |
| { | |
| "epoch": 0.9081419624217119, | |
| "grad_norm": 0.04776747282663658, | |
| "learning_rate": 0.00023930823411869617, | |
| "loss": 0.3633, | |
| "step": 435 | |
| }, | |
| { | |
| "epoch": 0.9102296450939458, | |
| "grad_norm": 0.03739632540656231, | |
| "learning_rate": 0.00023864486364864963, | |
| "loss": 0.3027, | |
| "step": 436 | |
| }, | |
| { | |
| "epoch": 0.9123173277661796, | |
| "grad_norm": 0.04013506353404083, | |
| "learning_rate": 0.00023798105124229536, | |
| "loss": 0.2793, | |
| "step": 437 | |
| }, | |
| { | |
| "epoch": 0.9144050104384134, | |
| "grad_norm": 0.044295244931196354, | |
| "learning_rate": 0.00023731680449088312, | |
| "loss": 0.377, | |
| "step": 438 | |
| }, | |
| { | |
| "epoch": 0.9164926931106472, | |
| "grad_norm": 0.03805031241226096, | |
| "learning_rate": 0.00023665213099062938, | |
| "loss": 0.2559, | |
| "step": 439 | |
| }, | |
| { | |
| "epoch": 0.918580375782881, | |
| "grad_norm": 0.03767845062036613, | |
| "learning_rate": 0.00023598703834263113, | |
| "loss": 0.3125, | |
| "step": 440 | |
| }, | |
| { | |
| "epoch": 0.9206680584551148, | |
| "grad_norm": 0.04968215089246422, | |
| "learning_rate": 0.0002353215341527785, | |
| "loss": 0.3223, | |
| "step": 441 | |
| }, | |
| { | |
| "epoch": 0.9227557411273486, | |
| "grad_norm": 0.036246340480605606, | |
| "learning_rate": 0.00023465562603166808, | |
| "loss": 0.2734, | |
| "step": 442 | |
| }, | |
| { | |
| "epoch": 0.9248434237995825, | |
| "grad_norm": 0.034358684978586476, | |
| "learning_rate": 0.00023398932159451557, | |
| "loss": 0.3457, | |
| "step": 443 | |
| }, | |
| { | |
| "epoch": 0.9269311064718163, | |
| "grad_norm": 0.03631746144581004, | |
| "learning_rate": 0.000233322628461069, | |
| "loss": 0.3672, | |
| "step": 444 | |
| }, | |
| { | |
| "epoch": 0.9290187891440501, | |
| "grad_norm": 0.037623854588027666, | |
| "learning_rate": 0.0002326555542555215, | |
| "loss": 0.3047, | |
| "step": 445 | |
| }, | |
| { | |
| "epoch": 0.9311064718162839, | |
| "grad_norm": 0.037886888034629966, | |
| "learning_rate": 0.0002319881066064239, | |
| "loss": 0.2812, | |
| "step": 446 | |
| }, | |
| { | |
| "epoch": 0.9331941544885177, | |
| "grad_norm": 0.039683782098775454, | |
| "learning_rate": 0.00023132029314659795, | |
| "loss": 0.2969, | |
| "step": 447 | |
| }, | |
| { | |
| "epoch": 0.9352818371607515, | |
| "grad_norm": 0.032859528564560474, | |
| "learning_rate": 0.00023065212151304843, | |
| "loss": 0.2617, | |
| "step": 448 | |
| }, | |
| { | |
| "epoch": 0.9373695198329853, | |
| "grad_norm": 0.032092006779359715, | |
| "learning_rate": 0.00022998359934687645, | |
| "loss": 0.2656, | |
| "step": 449 | |
| }, | |
| { | |
| "epoch": 0.9394572025052192, | |
| "grad_norm": 0.03973715339510534, | |
| "learning_rate": 0.00022931473429319143, | |
| "loss": 0.3242, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.941544885177453, | |
| "grad_norm": 0.040296933695116964, | |
| "learning_rate": 0.00022864553400102437, | |
| "loss": 0.3281, | |
| "step": 451 | |
| }, | |
| { | |
| "epoch": 0.9436325678496869, | |
| "grad_norm": 0.03972360152628702, | |
| "learning_rate": 0.00022797600612323965, | |
| "loss": 0.3301, | |
| "step": 452 | |
| }, | |
| { | |
| "epoch": 0.9457202505219207, | |
| "grad_norm": 0.03196535128099967, | |
| "learning_rate": 0.00022730615831644807, | |
| "loss": 0.2617, | |
| "step": 453 | |
| }, | |
| { | |
| "epoch": 0.9478079331941545, | |
| "grad_norm": 0.041518745854850714, | |
| "learning_rate": 0.00022663599824091892, | |
| "loss": 0.3535, | |
| "step": 454 | |
| }, | |
| { | |
| "epoch": 0.9498956158663883, | |
| "grad_norm": 0.03987544052335102, | |
| "learning_rate": 0.00022596553356049283, | |
| "loss": 0.3184, | |
| "step": 455 | |
| }, | |
| { | |
| "epoch": 0.9519832985386222, | |
| "grad_norm": 0.03790469940548919, | |
| "learning_rate": 0.0002252947719424935, | |
| "loss": 0.2988, | |
| "step": 456 | |
| }, | |
| { | |
| "epoch": 0.954070981210856, | |
| "grad_norm": 0.03494334490842234, | |
| "learning_rate": 0.00022462372105764065, | |
| "loss": 0.2793, | |
| "step": 457 | |
| }, | |
| { | |
| "epoch": 0.9561586638830898, | |
| "grad_norm": 0.03463196563976093, | |
| "learning_rate": 0.0002239523885799618, | |
| "loss": 0.2637, | |
| "step": 458 | |
| }, | |
| { | |
| "epoch": 0.9582463465553236, | |
| "grad_norm": 0.03593899157528181, | |
| "learning_rate": 0.0002232807821867048, | |
| "loss": 0.3184, | |
| "step": 459 | |
| }, | |
| { | |
| "epoch": 0.9603340292275574, | |
| "grad_norm": 0.03890353431042402, | |
| "learning_rate": 0.00022260890955825001, | |
| "loss": 0.3008, | |
| "step": 460 | |
| }, | |
| { | |
| "epoch": 0.9624217118997912, | |
| "grad_norm": 0.033343558090590815, | |
| "learning_rate": 0.00022193677837802235, | |
| "loss": 0.2891, | |
| "step": 461 | |
| }, | |
| { | |
| "epoch": 0.964509394572025, | |
| "grad_norm": 0.037945187747727475, | |
| "learning_rate": 0.0002212643963324035, | |
| "loss": 0.3574, | |
| "step": 462 | |
| }, | |
| { | |
| "epoch": 0.9665970772442589, | |
| "grad_norm": 0.03770022520852902, | |
| "learning_rate": 0.00022059177111064391, | |
| "loss": 0.3262, | |
| "step": 463 | |
| }, | |
| { | |
| "epoch": 0.9686847599164927, | |
| "grad_norm": 0.03875032196468757, | |
| "learning_rate": 0.00021991891040477516, | |
| "loss": 0.3145, | |
| "step": 464 | |
| }, | |
| { | |
| "epoch": 0.9707724425887265, | |
| "grad_norm": 0.03788954429707523, | |
| "learning_rate": 0.00021924582190952153, | |
| "loss": 0.3008, | |
| "step": 465 | |
| }, | |
| { | |
| "epoch": 0.9728601252609603, | |
| "grad_norm": 0.035023715952115834, | |
| "learning_rate": 0.00021857251332221256, | |
| "loss": 0.2412, | |
| "step": 466 | |
| }, | |
| { | |
| "epoch": 0.9749478079331941, | |
| "grad_norm": 0.037210987286251065, | |
| "learning_rate": 0.0002178989923426944, | |
| "loss": 0.3398, | |
| "step": 467 | |
| }, | |
| { | |
| "epoch": 0.9770354906054279, | |
| "grad_norm": 0.03207771649660305, | |
| "learning_rate": 0.0002172252666732424, | |
| "loss": 0.2578, | |
| "step": 468 | |
| }, | |
| { | |
| "epoch": 0.9791231732776617, | |
| "grad_norm": 0.031951869684034837, | |
| "learning_rate": 0.00021655134401847245, | |
| "loss": 0.2695, | |
| "step": 469 | |
| }, | |
| { | |
| "epoch": 0.9812108559498957, | |
| "grad_norm": 0.03674391020210522, | |
| "learning_rate": 0.00021587723208525334, | |
| "loss": 0.3066, | |
| "step": 470 | |
| }, | |
| { | |
| "epoch": 0.9832985386221295, | |
| "grad_norm": 0.035932078354231324, | |
| "learning_rate": 0.00021520293858261823, | |
| "loss": 0.3086, | |
| "step": 471 | |
| }, | |
| { | |
| "epoch": 0.9853862212943633, | |
| "grad_norm": 0.03876657300516048, | |
| "learning_rate": 0.00021452847122167677, | |
| "loss": 0.3008, | |
| "step": 472 | |
| }, | |
| { | |
| "epoch": 0.9874739039665971, | |
| "grad_norm": 0.032766584147543416, | |
| "learning_rate": 0.00021385383771552684, | |
| "loss": 0.3047, | |
| "step": 473 | |
| }, | |
| { | |
| "epoch": 0.9895615866388309, | |
| "grad_norm": 0.04086665364700512, | |
| "learning_rate": 0.00021317904577916635, | |
| "loss": 0.3223, | |
| "step": 474 | |
| }, | |
| { | |
| "epoch": 0.9916492693110647, | |
| "grad_norm": 0.043130532139654114, | |
| "learning_rate": 0.0002125041031294049, | |
| "loss": 0.3242, | |
| "step": 475 | |
| }, | |
| { | |
| "epoch": 0.9937369519832986, | |
| "grad_norm": 0.04273564060035245, | |
| "learning_rate": 0.00021182901748477564, | |
| "loss": 0.4023, | |
| "step": 476 | |
| }, | |
| { | |
| "epoch": 0.9958246346555324, | |
| "grad_norm": 0.03628393896960175, | |
| "learning_rate": 0.00021115379656544715, | |
| "loss": 0.2969, | |
| "step": 477 | |
| }, | |
| { | |
| "epoch": 0.9979123173277662, | |
| "grad_norm": 0.03800046175114251, | |
| "learning_rate": 0.00021047844809313467, | |
| "loss": 0.332, | |
| "step": 478 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "grad_norm": 0.038197115177252855, | |
| "learning_rate": 0.00020980297979101252, | |
| "loss": 0.2695, | |
| "step": 479 | |
| }, | |
| { | |
| "epoch": 1.0020876826722338, | |
| "grad_norm": 0.032423776045183476, | |
| "learning_rate": 0.000209127399383625, | |
| "loss": 0.2021, | |
| "step": 480 | |
| }, | |
| { | |
| "epoch": 1.0041753653444676, | |
| "grad_norm": 0.03224992988614844, | |
| "learning_rate": 0.00020845171459679866, | |
| "loss": 0.2139, | |
| "step": 481 | |
| }, | |
| { | |
| "epoch": 1.0062630480167014, | |
| "grad_norm": 0.03676757603527534, | |
| "learning_rate": 0.00020777593315755358, | |
| "loss": 0.249, | |
| "step": 482 | |
| }, | |
| { | |
| "epoch": 1.0083507306889352, | |
| "grad_norm": 0.03889498255916948, | |
| "learning_rate": 0.00020710006279401531, | |
| "loss": 0.3047, | |
| "step": 483 | |
| }, | |
| { | |
| "epoch": 1.010438413361169, | |
| "grad_norm": 0.03394341377626214, | |
| "learning_rate": 0.00020642411123532605, | |
| "loss": 0.2598, | |
| "step": 484 | |
| }, | |
| { | |
| "epoch": 1.0125260960334028, | |
| "grad_norm": 0.03195140878316827, | |
| "learning_rate": 0.0002057480862115569, | |
| "loss": 0.2266, | |
| "step": 485 | |
| }, | |
| { | |
| "epoch": 1.0146137787056368, | |
| "grad_norm": 0.03236779419185144, | |
| "learning_rate": 0.0002050719954536187, | |
| "loss": 0.2373, | |
| "step": 486 | |
| }, | |
| { | |
| "epoch": 1.0167014613778707, | |
| "grad_norm": 0.033762294027972366, | |
| "learning_rate": 0.0002043958466931744, | |
| "loss": 0.2324, | |
| "step": 487 | |
| }, | |
| { | |
| "epoch": 1.0187891440501045, | |
| "grad_norm": 0.03225364955535692, | |
| "learning_rate": 0.00020371964766254988, | |
| "loss": 0.2246, | |
| "step": 488 | |
| }, | |
| { | |
| "epoch": 1.0208768267223383, | |
| "grad_norm": 0.03549374570066917, | |
| "learning_rate": 0.00020304340609464627, | |
| "loss": 0.2559, | |
| "step": 489 | |
| }, | |
| { | |
| "epoch": 1.022964509394572, | |
| "grad_norm": 0.039376031742883684, | |
| "learning_rate": 0.00020236712972285088, | |
| "loss": 0.291, | |
| "step": 490 | |
| }, | |
| { | |
| "epoch": 1.0250521920668059, | |
| "grad_norm": 0.033112169954733074, | |
| "learning_rate": 0.0002016908262809491, | |
| "loss": 0.2188, | |
| "step": 491 | |
| }, | |
| { | |
| "epoch": 1.0271398747390397, | |
| "grad_norm": 0.03556499196371877, | |
| "learning_rate": 0.00020101450350303596, | |
| "loss": 0.2363, | |
| "step": 492 | |
| }, | |
| { | |
| "epoch": 1.0292275574112735, | |
| "grad_norm": 0.03747952002586393, | |
| "learning_rate": 0.0002003381691234275, | |
| "loss": 0.2734, | |
| "step": 493 | |
| }, | |
| { | |
| "epoch": 1.0313152400835073, | |
| "grad_norm": 0.039212245318626486, | |
| "learning_rate": 0.00019966183087657255, | |
| "loss": 0.2812, | |
| "step": 494 | |
| }, | |
| { | |
| "epoch": 1.033402922755741, | |
| "grad_norm": 0.038556823852449205, | |
| "learning_rate": 0.00019898549649696409, | |
| "loss": 0.252, | |
| "step": 495 | |
| }, | |
| { | |
| "epoch": 1.0354906054279749, | |
| "grad_norm": 0.03678903694409147, | |
| "learning_rate": 0.00019830917371905095, | |
| "loss": 0.2227, | |
| "step": 496 | |
| }, | |
| { | |
| "epoch": 1.0375782881002087, | |
| "grad_norm": 0.035536317646152164, | |
| "learning_rate": 0.00019763287027714917, | |
| "loss": 0.2451, | |
| "step": 497 | |
| }, | |
| { | |
| "epoch": 1.0396659707724425, | |
| "grad_norm": 0.03669268286478292, | |
| "learning_rate": 0.00019695659390535377, | |
| "loss": 0.2158, | |
| "step": 498 | |
| }, | |
| { | |
| "epoch": 1.0417536534446765, | |
| "grad_norm": 0.0365044069944666, | |
| "learning_rate": 0.0001962803523374501, | |
| "loss": 0.2334, | |
| "step": 499 | |
| }, | |
| { | |
| "epoch": 1.0438413361169103, | |
| "grad_norm": 0.04311308542212124, | |
| "learning_rate": 0.00019560415330682567, | |
| "loss": 0.2754, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 1.0459290187891441, | |
| "grad_norm": 0.03807877662781654, | |
| "learning_rate": 0.00019492800454638128, | |
| "loss": 0.2207, | |
| "step": 501 | |
| }, | |
| { | |
| "epoch": 1.048016701461378, | |
| "grad_norm": 0.037347719889777004, | |
| "learning_rate": 0.00019425191378844315, | |
| "loss": 0.2598, | |
| "step": 502 | |
| }, | |
| { | |
| "epoch": 1.0501043841336117, | |
| "grad_norm": 0.041720879231187474, | |
| "learning_rate": 0.00019357588876467403, | |
| "loss": 0.2832, | |
| "step": 503 | |
| }, | |
| { | |
| "epoch": 1.0521920668058455, | |
| "grad_norm": 0.03611878979996833, | |
| "learning_rate": 0.0001928999372059848, | |
| "loss": 0.2305, | |
| "step": 504 | |
| }, | |
| { | |
| "epoch": 1.0542797494780793, | |
| "grad_norm": 0.03914121207636423, | |
| "learning_rate": 0.00019222406684244652, | |
| "loss": 0.2715, | |
| "step": 505 | |
| }, | |
| { | |
| "epoch": 1.0563674321503131, | |
| "grad_norm": 0.045008260139354196, | |
| "learning_rate": 0.00019154828540320141, | |
| "loss": 0.3496, | |
| "step": 506 | |
| }, | |
| { | |
| "epoch": 1.058455114822547, | |
| "grad_norm": 0.06925075294932237, | |
| "learning_rate": 0.00019087260061637507, | |
| "loss": 0.21, | |
| "step": 507 | |
| }, | |
| { | |
| "epoch": 1.0605427974947808, | |
| "grad_norm": 0.042949891828889544, | |
| "learning_rate": 0.00019019702020898753, | |
| "loss": 0.3164, | |
| "step": 508 | |
| }, | |
| { | |
| "epoch": 1.0626304801670146, | |
| "grad_norm": 0.03336924307296428, | |
| "learning_rate": 0.00018952155190686535, | |
| "loss": 0.2334, | |
| "step": 509 | |
| }, | |
| { | |
| "epoch": 1.0647181628392484, | |
| "grad_norm": 0.037171672304770545, | |
| "learning_rate": 0.00018884620343455292, | |
| "loss": 0.2432, | |
| "step": 510 | |
| }, | |
| { | |
| "epoch": 1.0668058455114822, | |
| "grad_norm": 0.03845099190077751, | |
| "learning_rate": 0.0001881709825152244, | |
| "loss": 0.2793, | |
| "step": 511 | |
| }, | |
| { | |
| "epoch": 1.068893528183716, | |
| "grad_norm": 0.03411076783878538, | |
| "learning_rate": 0.00018749589687059513, | |
| "loss": 0.2041, | |
| "step": 512 | |
| }, | |
| { | |
| "epoch": 1.07098121085595, | |
| "grad_norm": 0.03701566955647055, | |
| "learning_rate": 0.00018682095422083373, | |
| "loss": 0.2305, | |
| "step": 513 | |
| }, | |
| { | |
| "epoch": 1.0730688935281838, | |
| "grad_norm": 0.03954320852756579, | |
| "learning_rate": 0.00018614616228447318, | |
| "loss": 0.2354, | |
| "step": 514 | |
| }, | |
| { | |
| "epoch": 1.0751565762004176, | |
| "grad_norm": 0.04313297344221612, | |
| "learning_rate": 0.00018547152877832328, | |
| "loss": 0.2656, | |
| "step": 515 | |
| }, | |
| { | |
| "epoch": 1.0772442588726514, | |
| "grad_norm": 0.03799814412465588, | |
| "learning_rate": 0.0001847970614173818, | |
| "loss": 0.2285, | |
| "step": 516 | |
| }, | |
| { | |
| "epoch": 1.0793319415448852, | |
| "grad_norm": 0.04096742193043013, | |
| "learning_rate": 0.0001841227679147467, | |
| "loss": 0.25, | |
| "step": 517 | |
| }, | |
| { | |
| "epoch": 1.081419624217119, | |
| "grad_norm": 0.03795069745138144, | |
| "learning_rate": 0.00018344865598152754, | |
| "loss": 0.2471, | |
| "step": 518 | |
| }, | |
| { | |
| "epoch": 1.0835073068893528, | |
| "grad_norm": 0.04325821353197005, | |
| "learning_rate": 0.00018277473332675758, | |
| "loss": 0.2754, | |
| "step": 519 | |
| }, | |
| { | |
| "epoch": 1.0855949895615866, | |
| "grad_norm": 0.04070425486872679, | |
| "learning_rate": 0.0001821010076573056, | |
| "loss": 0.2617, | |
| "step": 520 | |
| }, | |
| { | |
| "epoch": 1.0876826722338204, | |
| "grad_norm": 0.031587246842812465, | |
| "learning_rate": 0.00018142748667778746, | |
| "loss": 0.1777, | |
| "step": 521 | |
| }, | |
| { | |
| "epoch": 1.0897703549060542, | |
| "grad_norm": 0.036539737613616244, | |
| "learning_rate": 0.00018075417809047857, | |
| "loss": 0.2773, | |
| "step": 522 | |
| }, | |
| { | |
| "epoch": 1.091858037578288, | |
| "grad_norm": 0.041178092025696736, | |
| "learning_rate": 0.00018008108959522497, | |
| "loss": 0.2676, | |
| "step": 523 | |
| }, | |
| { | |
| "epoch": 1.0939457202505218, | |
| "grad_norm": 0.04046942553591543, | |
| "learning_rate": 0.00017940822888935616, | |
| "loss": 0.2217, | |
| "step": 524 | |
| }, | |
| { | |
| "epoch": 1.0960334029227556, | |
| "grad_norm": 0.03449629028853095, | |
| "learning_rate": 0.0001787356036675966, | |
| "loss": 0.2061, | |
| "step": 525 | |
| }, | |
| { | |
| "epoch": 1.0981210855949897, | |
| "grad_norm": 0.03986588393895996, | |
| "learning_rate": 0.0001780632216219777, | |
| "loss": 0.3125, | |
| "step": 526 | |
| }, | |
| { | |
| "epoch": 1.1002087682672235, | |
| "grad_norm": 0.0333875432706545, | |
| "learning_rate": 0.00017739109044175003, | |
| "loss": 0.1787, | |
| "step": 527 | |
| }, | |
| { | |
| "epoch": 1.1022964509394573, | |
| "grad_norm": 0.04046361146111627, | |
| "learning_rate": 0.00017671921781329522, | |
| "loss": 0.2715, | |
| "step": 528 | |
| }, | |
| { | |
| "epoch": 1.104384133611691, | |
| "grad_norm": 0.04262435868048576, | |
| "learning_rate": 0.00017604761142003827, | |
| "loss": 0.2559, | |
| "step": 529 | |
| }, | |
| { | |
| "epoch": 1.1064718162839249, | |
| "grad_norm": 0.03428115599372914, | |
| "learning_rate": 0.0001753762789423594, | |
| "loss": 0.2061, | |
| "step": 530 | |
| }, | |
| { | |
| "epoch": 1.1085594989561587, | |
| "grad_norm": 0.03868138089293832, | |
| "learning_rate": 0.00017470522805750654, | |
| "loss": 0.2236, | |
| "step": 531 | |
| }, | |
| { | |
| "epoch": 1.1106471816283925, | |
| "grad_norm": 0.03475622072699226, | |
| "learning_rate": 0.00017403446643950724, | |
| "loss": 0.248, | |
| "step": 532 | |
| }, | |
| { | |
| "epoch": 1.1127348643006263, | |
| "grad_norm": 0.03979226115172334, | |
| "learning_rate": 0.00017336400175908112, | |
| "loss": 0.2988, | |
| "step": 533 | |
| }, | |
| { | |
| "epoch": 1.11482254697286, | |
| "grad_norm": 0.036336277616070035, | |
| "learning_rate": 0.000172693841683552, | |
| "loss": 0.2236, | |
| "step": 534 | |
| }, | |
| { | |
| "epoch": 1.116910229645094, | |
| "grad_norm": 0.039263581746387936, | |
| "learning_rate": 0.0001720239938767604, | |
| "loss": 0.332, | |
| "step": 535 | |
| }, | |
| { | |
| "epoch": 1.1189979123173277, | |
| "grad_norm": 0.04395872414762282, | |
| "learning_rate": 0.00017135446599897565, | |
| "loss": 0.2734, | |
| "step": 536 | |
| }, | |
| { | |
| "epoch": 1.1210855949895615, | |
| "grad_norm": 0.03867598581621191, | |
| "learning_rate": 0.00017068526570680853, | |
| "loss": 0.249, | |
| "step": 537 | |
| }, | |
| { | |
| "epoch": 1.1231732776617953, | |
| "grad_norm": 0.035662617079617494, | |
| "learning_rate": 0.00017001640065312357, | |
| "loss": 0.2041, | |
| "step": 538 | |
| }, | |
| { | |
| "epoch": 1.1252609603340291, | |
| "grad_norm": 0.04078186211455469, | |
| "learning_rate": 0.00016934787848695156, | |
| "loss": 0.2119, | |
| "step": 539 | |
| }, | |
| { | |
| "epoch": 1.1273486430062631, | |
| "grad_norm": 0.03947326929596921, | |
| "learning_rate": 0.00016867970685340215, | |
| "loss": 0.2393, | |
| "step": 540 | |
| }, | |
| { | |
| "epoch": 1.129436325678497, | |
| "grad_norm": 0.04449645249301931, | |
| "learning_rate": 0.00016801189339357616, | |
| "loss": 0.3203, | |
| "step": 541 | |
| }, | |
| { | |
| "epoch": 1.1315240083507307, | |
| "grad_norm": 0.046340346999829446, | |
| "learning_rate": 0.00016734444574447862, | |
| "loss": 0.2598, | |
| "step": 542 | |
| }, | |
| { | |
| "epoch": 1.1336116910229646, | |
| "grad_norm": 0.03957931201804222, | |
| "learning_rate": 0.00016667737153893105, | |
| "loss": 0.2402, | |
| "step": 543 | |
| }, | |
| { | |
| "epoch": 1.1356993736951984, | |
| "grad_norm": 0.03703757920078097, | |
| "learning_rate": 0.00016601067840548453, | |
| "loss": 0.1924, | |
| "step": 544 | |
| }, | |
| { | |
| "epoch": 1.1377870563674322, | |
| "grad_norm": 0.03682994100461504, | |
| "learning_rate": 0.000165344373968332, | |
| "loss": 0.2383, | |
| "step": 545 | |
| }, | |
| { | |
| "epoch": 1.139874739039666, | |
| "grad_norm": 0.03920731523127231, | |
| "learning_rate": 0.00016467846584722153, | |
| "loss": 0.2734, | |
| "step": 546 | |
| }, | |
| { | |
| "epoch": 1.1419624217118998, | |
| "grad_norm": 0.04276682539134295, | |
| "learning_rate": 0.0001640129616573689, | |
| "loss": 0.2852, | |
| "step": 547 | |
| }, | |
| { | |
| "epoch": 1.1440501043841336, | |
| "grad_norm": 0.03614032190490994, | |
| "learning_rate": 0.00016334786900937067, | |
| "loss": 0.2354, | |
| "step": 548 | |
| }, | |
| { | |
| "epoch": 1.1461377870563674, | |
| "grad_norm": 0.041006439179447914, | |
| "learning_rate": 0.00016268319550911695, | |
| "loss": 0.2432, | |
| "step": 549 | |
| }, | |
| { | |
| "epoch": 1.1482254697286012, | |
| "grad_norm": 0.044517563506872825, | |
| "learning_rate": 0.00016201894875770466, | |
| "loss": 0.2217, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 1.150313152400835, | |
| "grad_norm": 0.03655321665381233, | |
| "learning_rate": 0.00016135513635135045, | |
| "loss": 0.2031, | |
| "step": 551 | |
| }, | |
| { | |
| "epoch": 1.152400835073069, | |
| "grad_norm": 0.04079189189547784, | |
| "learning_rate": 0.00016069176588130388, | |
| "loss": 0.3027, | |
| "step": 552 | |
| }, | |
| { | |
| "epoch": 1.1544885177453028, | |
| "grad_norm": 0.03485321031694839, | |
| "learning_rate": 0.00016002884493376054, | |
| "loss": 0.2275, | |
| "step": 553 | |
| }, | |
| { | |
| "epoch": 1.1565762004175366, | |
| "grad_norm": 0.03052141642274577, | |
| "learning_rate": 0.00015936638108977546, | |
| "loss": 0.1992, | |
| "step": 554 | |
| }, | |
| { | |
| "epoch": 1.1586638830897704, | |
| "grad_norm": 0.03724204209659217, | |
| "learning_rate": 0.0001587043819251762, | |
| "loss": 0.2207, | |
| "step": 555 | |
| }, | |
| { | |
| "epoch": 1.1607515657620042, | |
| "grad_norm": 0.03701626710699115, | |
| "learning_rate": 0.00015804285501047646, | |
| "loss": 0.2041, | |
| "step": 556 | |
| }, | |
| { | |
| "epoch": 1.162839248434238, | |
| "grad_norm": 0.04171363907307022, | |
| "learning_rate": 0.00015738180791078922, | |
| "loss": 0.2715, | |
| "step": 557 | |
| }, | |
| { | |
| "epoch": 1.1649269311064718, | |
| "grad_norm": 0.0392243815886844, | |
| "learning_rate": 0.00015672124818574042, | |
| "loss": 0.2734, | |
| "step": 558 | |
| }, | |
| { | |
| "epoch": 1.1670146137787056, | |
| "grad_norm": 0.034576244303318175, | |
| "learning_rate": 0.0001560611833893826, | |
| "loss": 0.207, | |
| "step": 559 | |
| }, | |
| { | |
| "epoch": 1.1691022964509394, | |
| "grad_norm": 0.03659195217316462, | |
| "learning_rate": 0.0001554016210701081, | |
| "loss": 0.2178, | |
| "step": 560 | |
| }, | |
| { | |
| "epoch": 1.1711899791231732, | |
| "grad_norm": 0.04227483515507797, | |
| "learning_rate": 0.00015474256877056326, | |
| "loss": 0.3086, | |
| "step": 561 | |
| }, | |
| { | |
| "epoch": 1.173277661795407, | |
| "grad_norm": 0.0359857035728927, | |
| "learning_rate": 0.00015408403402756177, | |
| "loss": 0.2617, | |
| "step": 562 | |
| }, | |
| { | |
| "epoch": 1.1753653444676408, | |
| "grad_norm": 0.03680442854270533, | |
| "learning_rate": 0.0001534260243719988, | |
| "loss": 0.209, | |
| "step": 563 | |
| }, | |
| { | |
| "epoch": 1.1774530271398747, | |
| "grad_norm": 0.03670404417452906, | |
| "learning_rate": 0.0001527685473287645, | |
| "loss": 0.2197, | |
| "step": 564 | |
| }, | |
| { | |
| "epoch": 1.1795407098121085, | |
| "grad_norm": 0.0393433887764882, | |
| "learning_rate": 0.00015211161041665832, | |
| "loss": 0.2324, | |
| "step": 565 | |
| }, | |
| { | |
| "epoch": 1.1816283924843423, | |
| "grad_norm": 0.03642939621217633, | |
| "learning_rate": 0.00015145522114830268, | |
| "loss": 0.2393, | |
| "step": 566 | |
| }, | |
| { | |
| "epoch": 1.1837160751565763, | |
| "grad_norm": 0.03878579783362816, | |
| "learning_rate": 0.00015079938703005752, | |
| "loss": 0.2559, | |
| "step": 567 | |
| }, | |
| { | |
| "epoch": 1.18580375782881, | |
| "grad_norm": 0.040970554736426454, | |
| "learning_rate": 0.0001501441155619338, | |
| "loss": 0.2139, | |
| "step": 568 | |
| }, | |
| { | |
| "epoch": 1.187891440501044, | |
| "grad_norm": 0.036820951922352504, | |
| "learning_rate": 0.0001494894142375085, | |
| "loss": 0.1719, | |
| "step": 569 | |
| }, | |
| { | |
| "epoch": 1.1899791231732777, | |
| "grad_norm": 0.04473872009108077, | |
| "learning_rate": 0.00014883529054383817, | |
| "loss": 0.2734, | |
| "step": 570 | |
| }, | |
| { | |
| "epoch": 1.1920668058455115, | |
| "grad_norm": 0.037460855453091094, | |
| "learning_rate": 0.00014818175196137392, | |
| "loss": 0.2334, | |
| "step": 571 | |
| }, | |
| { | |
| "epoch": 1.1941544885177453, | |
| "grad_norm": 0.03980728407908227, | |
| "learning_rate": 0.00014752880596387551, | |
| "loss": 0.2295, | |
| "step": 572 | |
| }, | |
| { | |
| "epoch": 1.196242171189979, | |
| "grad_norm": 0.03562870229391469, | |
| "learning_rate": 0.00014687646001832608, | |
| "loss": 0.1953, | |
| "step": 573 | |
| }, | |
| { | |
| "epoch": 1.198329853862213, | |
| "grad_norm": 0.0412827193725494, | |
| "learning_rate": 0.00014622472158484654, | |
| "loss": 0.25, | |
| "step": 574 | |
| }, | |
| { | |
| "epoch": 1.2004175365344467, | |
| "grad_norm": 0.03634424947703343, | |
| "learning_rate": 0.00014557359811661054, | |
| "loss": 0.207, | |
| "step": 575 | |
| }, | |
| { | |
| "epoch": 1.2025052192066805, | |
| "grad_norm": 0.036441371511458535, | |
| "learning_rate": 0.000144923097059759, | |
| "loss": 0.249, | |
| "step": 576 | |
| }, | |
| { | |
| "epoch": 1.2045929018789143, | |
| "grad_norm": 0.04200794927737242, | |
| "learning_rate": 0.00014427322585331505, | |
| "loss": 0.3027, | |
| "step": 577 | |
| }, | |
| { | |
| "epoch": 1.2066805845511483, | |
| "grad_norm": 0.03620959513479551, | |
| "learning_rate": 0.00014362399192909908, | |
| "loss": 0.2324, | |
| "step": 578 | |
| }, | |
| { | |
| "epoch": 1.2087682672233822, | |
| "grad_norm": 0.03930165573165628, | |
| "learning_rate": 0.0001429754027116433, | |
| "loss": 0.2812, | |
| "step": 579 | |
| }, | |
| { | |
| "epoch": 1.210855949895616, | |
| "grad_norm": 0.040322537436277076, | |
| "learning_rate": 0.00014232746561810747, | |
| "loss": 0.2773, | |
| "step": 580 | |
| }, | |
| { | |
| "epoch": 1.2129436325678498, | |
| "grad_norm": 0.03781024017014511, | |
| "learning_rate": 0.00014168018805819376, | |
| "loss": 0.2354, | |
| "step": 581 | |
| }, | |
| { | |
| "epoch": 1.2150313152400836, | |
| "grad_norm": 0.038595664424750396, | |
| "learning_rate": 0.00014103357743406182, | |
| "loss": 0.2715, | |
| "step": 582 | |
| }, | |
| { | |
| "epoch": 1.2171189979123174, | |
| "grad_norm": 0.034975311451833, | |
| "learning_rate": 0.00014038764114024456, | |
| "loss": 0.2041, | |
| "step": 583 | |
| }, | |
| { | |
| "epoch": 1.2192066805845512, | |
| "grad_norm": 0.03825635258528123, | |
| "learning_rate": 0.00013974238656356313, | |
| "loss": 0.2344, | |
| "step": 584 | |
| }, | |
| { | |
| "epoch": 1.221294363256785, | |
| "grad_norm": 0.03735073552974533, | |
| "learning_rate": 0.0001390978210830429, | |
| "loss": 0.2559, | |
| "step": 585 | |
| }, | |
| { | |
| "epoch": 1.2233820459290188, | |
| "grad_norm": 0.040138739862162154, | |
| "learning_rate": 0.00013845395206982863, | |
| "loss": 0.2656, | |
| "step": 586 | |
| }, | |
| { | |
| "epoch": 1.2254697286012526, | |
| "grad_norm": 0.04418658015359039, | |
| "learning_rate": 0.00013781078688710063, | |
| "loss": 0.291, | |
| "step": 587 | |
| }, | |
| { | |
| "epoch": 1.2275574112734864, | |
| "grad_norm": 0.04239030702531795, | |
| "learning_rate": 0.00013716833288999005, | |
| "loss": 0.2637, | |
| "step": 588 | |
| }, | |
| { | |
| "epoch": 1.2296450939457202, | |
| "grad_norm": 0.03818928317637622, | |
| "learning_rate": 0.00013652659742549532, | |
| "loss": 0.252, | |
| "step": 589 | |
| }, | |
| { | |
| "epoch": 1.231732776617954, | |
| "grad_norm": 0.033789428188020935, | |
| "learning_rate": 0.00013588558783239762, | |
| "loss": 0.1826, | |
| "step": 590 | |
| }, | |
| { | |
| "epoch": 1.2338204592901878, | |
| "grad_norm": 0.03479799715006895, | |
| "learning_rate": 0.00013524531144117736, | |
| "loss": 0.208, | |
| "step": 591 | |
| }, | |
| { | |
| "epoch": 1.2359081419624216, | |
| "grad_norm": 0.0407139048904091, | |
| "learning_rate": 0.00013460577557393, | |
| "loss": 0.2832, | |
| "step": 592 | |
| }, | |
| { | |
| "epoch": 1.2379958246346556, | |
| "grad_norm": 0.04211809721321409, | |
| "learning_rate": 0.00013396698754428266, | |
| "loss": 0.2402, | |
| "step": 593 | |
| }, | |
| { | |
| "epoch": 1.2400835073068894, | |
| "grad_norm": 0.038313063106902565, | |
| "learning_rate": 0.00013332895465731018, | |
| "loss": 0.2539, | |
| "step": 594 | |
| }, | |
| { | |
| "epoch": 1.2421711899791232, | |
| "grad_norm": 0.034451480337492464, | |
| "learning_rate": 0.00013269168420945178, | |
| "loss": 0.209, | |
| "step": 595 | |
| }, | |
| { | |
| "epoch": 1.244258872651357, | |
| "grad_norm": 0.042570134603052, | |
| "learning_rate": 0.0001320551834884276, | |
| "loss": 0.2734, | |
| "step": 596 | |
| }, | |
| { | |
| "epoch": 1.2463465553235908, | |
| "grad_norm": 0.041331289867428476, | |
| "learning_rate": 0.00013141945977315515, | |
| "loss": 0.3066, | |
| "step": 597 | |
| }, | |
| { | |
| "epoch": 1.2484342379958246, | |
| "grad_norm": 0.03567452834168155, | |
| "learning_rate": 0.00013078452033366635, | |
| "loss": 0.2129, | |
| "step": 598 | |
| }, | |
| { | |
| "epoch": 1.2505219206680585, | |
| "grad_norm": 0.03702357216857016, | |
| "learning_rate": 0.00013015037243102425, | |
| "loss": 0.2471, | |
| "step": 599 | |
| }, | |
| { | |
| "epoch": 1.2526096033402923, | |
| "grad_norm": 0.03423169810999704, | |
| "learning_rate": 0.00012951702331724007, | |
| "loss": 0.2236, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 1.254697286012526, | |
| "grad_norm": 0.04865070295457581, | |
| "learning_rate": 0.00012888448023519006, | |
| "loss": 0.3223, | |
| "step": 601 | |
| }, | |
| { | |
| "epoch": 1.2567849686847599, | |
| "grad_norm": 0.031921521655174964, | |
| "learning_rate": 0.000128252750418533, | |
| "loss": 0.1934, | |
| "step": 602 | |
| }, | |
| { | |
| "epoch": 1.2588726513569937, | |
| "grad_norm": 0.03207714489804328, | |
| "learning_rate": 0.0001276218410916272, | |
| "loss": 0.1768, | |
| "step": 603 | |
| }, | |
| { | |
| "epoch": 1.2609603340292277, | |
| "grad_norm": 0.03764702756962339, | |
| "learning_rate": 0.000126991759469448, | |
| "loss": 0.207, | |
| "step": 604 | |
| }, | |
| { | |
| "epoch": 1.2630480167014615, | |
| "grad_norm": 0.03494076286574369, | |
| "learning_rate": 0.0001263625127575052, | |
| "loss": 0.2129, | |
| "step": 605 | |
| }, | |
| { | |
| "epoch": 1.2651356993736953, | |
| "grad_norm": 0.03530986655804941, | |
| "learning_rate": 0.00012573410815176083, | |
| "loss": 0.2617, | |
| "step": 606 | |
| }, | |
| { | |
| "epoch": 1.267223382045929, | |
| "grad_norm": 0.03741233532866471, | |
| "learning_rate": 0.00012510655283854658, | |
| "loss": 0.1914, | |
| "step": 607 | |
| }, | |
| { | |
| "epoch": 1.269311064718163, | |
| "grad_norm": 0.03133581344814849, | |
| "learning_rate": 0.00012447985399448194, | |
| "loss": 0.1631, | |
| "step": 608 | |
| }, | |
| { | |
| "epoch": 1.2713987473903967, | |
| "grad_norm": 0.03819672658693745, | |
| "learning_rate": 0.00012385401878639175, | |
| "loss": 0.2891, | |
| "step": 609 | |
| }, | |
| { | |
| "epoch": 1.2734864300626305, | |
| "grad_norm": 0.04476513417542424, | |
| "learning_rate": 0.00012322905437122468, | |
| "loss": 0.3379, | |
| "step": 610 | |
| }, | |
| { | |
| "epoch": 1.2755741127348643, | |
| "grad_norm": 0.037096901415977844, | |
| "learning_rate": 0.00012260496789597093, | |
| "loss": 0.2637, | |
| "step": 611 | |
| }, | |
| { | |
| "epoch": 1.2776617954070981, | |
| "grad_norm": 0.033560968570005754, | |
| "learning_rate": 0.00012198176649758091, | |
| "loss": 0.1768, | |
| "step": 612 | |
| }, | |
| { | |
| "epoch": 1.279749478079332, | |
| "grad_norm": 0.03915286043749694, | |
| "learning_rate": 0.00012135945730288333, | |
| "loss": 0.2402, | |
| "step": 613 | |
| }, | |
| { | |
| "epoch": 1.2818371607515657, | |
| "grad_norm": 0.04243780465562693, | |
| "learning_rate": 0.00012073804742850385, | |
| "loss": 0.2266, | |
| "step": 614 | |
| }, | |
| { | |
| "epoch": 1.2839248434237995, | |
| "grad_norm": 0.035340613835254, | |
| "learning_rate": 0.00012011754398078369, | |
| "loss": 0.2021, | |
| "step": 615 | |
| }, | |
| { | |
| "epoch": 1.2860125260960333, | |
| "grad_norm": 0.038064762794890517, | |
| "learning_rate": 0.00011949795405569813, | |
| "loss": 0.2373, | |
| "step": 616 | |
| }, | |
| { | |
| "epoch": 1.2881002087682671, | |
| "grad_norm": 0.03189685893047627, | |
| "learning_rate": 0.00011887928473877581, | |
| "loss": 0.2178, | |
| "step": 617 | |
| }, | |
| { | |
| "epoch": 1.290187891440501, | |
| "grad_norm": 0.04119691632833074, | |
| "learning_rate": 0.00011826154310501723, | |
| "loss": 0.2832, | |
| "step": 618 | |
| }, | |
| { | |
| "epoch": 1.2922755741127347, | |
| "grad_norm": 0.035970451962649, | |
| "learning_rate": 0.00011764473621881427, | |
| "loss": 0.2295, | |
| "step": 619 | |
| }, | |
| { | |
| "epoch": 1.2943632567849686, | |
| "grad_norm": 0.03644862411137844, | |
| "learning_rate": 0.000117028871133869, | |
| "loss": 0.293, | |
| "step": 620 | |
| }, | |
| { | |
| "epoch": 1.2964509394572026, | |
| "grad_norm": 0.04232916111512329, | |
| "learning_rate": 0.00011641395489311334, | |
| "loss": 0.2432, | |
| "step": 621 | |
| }, | |
| { | |
| "epoch": 1.2985386221294364, | |
| "grad_norm": 0.03815713518197317, | |
| "learning_rate": 0.00011579999452862834, | |
| "loss": 0.2363, | |
| "step": 622 | |
| }, | |
| { | |
| "epoch": 1.3006263048016702, | |
| "grad_norm": 0.03522231596610688, | |
| "learning_rate": 0.00011518699706156373, | |
| "loss": 0.2275, | |
| "step": 623 | |
| }, | |
| { | |
| "epoch": 1.302713987473904, | |
| "grad_norm": 0.03763846715271956, | |
| "learning_rate": 0.00011457496950205784, | |
| "loss": 0.2139, | |
| "step": 624 | |
| }, | |
| { | |
| "epoch": 1.3048016701461378, | |
| "grad_norm": 0.03937303225722648, | |
| "learning_rate": 0.00011396391884915707, | |
| "loss": 0.2246, | |
| "step": 625 | |
| }, | |
| { | |
| "epoch": 1.3068893528183716, | |
| "grad_norm": 0.03259155185424707, | |
| "learning_rate": 0.00011335385209073645, | |
| "loss": 0.2148, | |
| "step": 626 | |
| }, | |
| { | |
| "epoch": 1.3089770354906054, | |
| "grad_norm": 0.036437191147767146, | |
| "learning_rate": 0.00011274477620341906, | |
| "loss": 0.2168, | |
| "step": 627 | |
| }, | |
| { | |
| "epoch": 1.3110647181628392, | |
| "grad_norm": 0.03676944565811173, | |
| "learning_rate": 0.00011213669815249659, | |
| "loss": 0.21, | |
| "step": 628 | |
| }, | |
| { | |
| "epoch": 1.313152400835073, | |
| "grad_norm": 0.03659844810941453, | |
| "learning_rate": 0.00011152962489184955, | |
| "loss": 0.2139, | |
| "step": 629 | |
| }, | |
| { | |
| "epoch": 1.3152400835073068, | |
| "grad_norm": 0.03855990494361526, | |
| "learning_rate": 0.0001109235633638682, | |
| "loss": 0.2373, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 1.3173277661795408, | |
| "grad_norm": 0.034719164079528235, | |
| "learning_rate": 0.00011031852049937237, | |
| "loss": 0.2236, | |
| "step": 631 | |
| }, | |
| { | |
| "epoch": 1.3194154488517746, | |
| "grad_norm": 0.03650236752350518, | |
| "learning_rate": 0.00010971450321753276, | |
| "loss": 0.2197, | |
| "step": 632 | |
| }, | |
| { | |
| "epoch": 1.3215031315240084, | |
| "grad_norm": 0.035674494077417296, | |
| "learning_rate": 0.00010911151842579195, | |
| "loss": 0.25, | |
| "step": 633 | |
| }, | |
| { | |
| "epoch": 1.3235908141962422, | |
| "grad_norm": 0.040631215715819284, | |
| "learning_rate": 0.00010850957301978462, | |
| "loss": 0.2891, | |
| "step": 634 | |
| }, | |
| { | |
| "epoch": 1.325678496868476, | |
| "grad_norm": 0.03124048788618308, | |
| "learning_rate": 0.00010790867388325951, | |
| "loss": 0.208, | |
| "step": 635 | |
| }, | |
| { | |
| "epoch": 1.3277661795407099, | |
| "grad_norm": 0.04271594640915922, | |
| "learning_rate": 0.0001073088278880005, | |
| "loss": 0.3184, | |
| "step": 636 | |
| }, | |
| { | |
| "epoch": 1.3298538622129437, | |
| "grad_norm": 0.03507030432270385, | |
| "learning_rate": 0.0001067100418937477, | |
| "loss": 0.208, | |
| "step": 637 | |
| }, | |
| { | |
| "epoch": 1.3319415448851775, | |
| "grad_norm": 0.04020676296142397, | |
| "learning_rate": 0.00010611232274811929, | |
| "loss": 0.2988, | |
| "step": 638 | |
| }, | |
| { | |
| "epoch": 1.3340292275574113, | |
| "grad_norm": 0.034794700042634165, | |
| "learning_rate": 0.00010551567728653311, | |
| "loss": 0.2061, | |
| "step": 639 | |
| }, | |
| { | |
| "epoch": 1.336116910229645, | |
| "grad_norm": 0.04251059198573156, | |
| "learning_rate": 0.00010492011233212871, | |
| "loss": 0.2578, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 1.3382045929018789, | |
| "grad_norm": 0.031789932037755535, | |
| "learning_rate": 0.0001043256346956889, | |
| "loss": 0.2109, | |
| "step": 641 | |
| }, | |
| { | |
| "epoch": 1.3402922755741127, | |
| "grad_norm": 0.039155457576247846, | |
| "learning_rate": 0.00010373225117556224, | |
| "loss": 0.2422, | |
| "step": 642 | |
| }, | |
| { | |
| "epoch": 1.3423799582463465, | |
| "grad_norm": 0.035388665066062634, | |
| "learning_rate": 0.00010313996855758504, | |
| "loss": 0.2002, | |
| "step": 643 | |
| }, | |
| { | |
| "epoch": 1.3444676409185803, | |
| "grad_norm": 0.0385841616358797, | |
| "learning_rate": 0.00010254879361500407, | |
| "loss": 0.2041, | |
| "step": 644 | |
| }, | |
| { | |
| "epoch": 1.346555323590814, | |
| "grad_norm": 0.03971686342813934, | |
| "learning_rate": 0.00010195873310839874, | |
| "loss": 0.2256, | |
| "step": 645 | |
| }, | |
| { | |
| "epoch": 1.348643006263048, | |
| "grad_norm": 0.04152102958645101, | |
| "learning_rate": 0.00010136979378560398, | |
| "loss": 0.2734, | |
| "step": 646 | |
| }, | |
| { | |
| "epoch": 1.350730688935282, | |
| "grad_norm": 0.036651673265288884, | |
| "learning_rate": 0.00010078198238163299, | |
| "loss": 0.2148, | |
| "step": 647 | |
| }, | |
| { | |
| "epoch": 1.3528183716075157, | |
| "grad_norm": 0.032824107867213306, | |
| "learning_rate": 0.00010019530561860051, | |
| "loss": 0.208, | |
| "step": 648 | |
| }, | |
| { | |
| "epoch": 1.3549060542797495, | |
| "grad_norm": 0.03503308809553616, | |
| "learning_rate": 9.96097702056455e-05, | |
| "loss": 0.2041, | |
| "step": 649 | |
| }, | |
| { | |
| "epoch": 1.3569937369519833, | |
| "grad_norm": 0.0404355664516907, | |
| "learning_rate": 9.902538283885465e-05, | |
| "loss": 0.2021, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 1.3590814196242171, | |
| "grad_norm": 0.047056225295053385, | |
| "learning_rate": 9.844215020118576e-05, | |
| "loss": 0.3125, | |
| "step": 651 | |
| }, | |
| { | |
| "epoch": 1.361169102296451, | |
| "grad_norm": 0.0328129234031199, | |
| "learning_rate": 9.78600789623916e-05, | |
| "loss": 0.1973, | |
| "step": 652 | |
| }, | |
| { | |
| "epoch": 1.3632567849686847, | |
| "grad_norm": 0.03175285628250321, | |
| "learning_rate": 9.727917577894297e-05, | |
| "loss": 0.1914, | |
| "step": 653 | |
| }, | |
| { | |
| "epoch": 1.3653444676409185, | |
| "grad_norm": 0.04520466671433204, | |
| "learning_rate": 9.669944729395316e-05, | |
| "loss": 0.3125, | |
| "step": 654 | |
| }, | |
| { | |
| "epoch": 1.3674321503131524, | |
| "grad_norm": 0.03901825127204986, | |
| "learning_rate": 9.612090013710195e-05, | |
| "loss": 0.2412, | |
| "step": 655 | |
| }, | |
| { | |
| "epoch": 1.3695198329853862, | |
| "grad_norm": 0.037647523878022705, | |
| "learning_rate": 9.554354092455949e-05, | |
| "loss": 0.2227, | |
| "step": 656 | |
| }, | |
| { | |
| "epoch": 1.3716075156576202, | |
| "grad_norm": 0.03796904304768066, | |
| "learning_rate": 9.496737625891076e-05, | |
| "loss": 0.2402, | |
| "step": 657 | |
| }, | |
| { | |
| "epoch": 1.373695198329854, | |
| "grad_norm": 0.036620990435136655, | |
| "learning_rate": 9.439241272908012e-05, | |
| "loss": 0.21, | |
| "step": 658 | |
| }, | |
| { | |
| "epoch": 1.3757828810020878, | |
| "grad_norm": 0.03753406391892656, | |
| "learning_rate": 9.381865691025613e-05, | |
| "loss": 0.2471, | |
| "step": 659 | |
| }, | |
| { | |
| "epoch": 1.3778705636743216, | |
| "grad_norm": 0.03675743788954959, | |
| "learning_rate": 9.324611536381591e-05, | |
| "loss": 0.2256, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 1.3799582463465554, | |
| "grad_norm": 0.04379732725831618, | |
| "learning_rate": 9.267479463725048e-05, | |
| "loss": 0.3184, | |
| "step": 661 | |
| }, | |
| { | |
| "epoch": 1.3820459290187892, | |
| "grad_norm": 0.04108282244231379, | |
| "learning_rate": 9.210470126408966e-05, | |
| "loss": 0.3145, | |
| "step": 662 | |
| }, | |
| { | |
| "epoch": 1.384133611691023, | |
| "grad_norm": 0.033523837380523475, | |
| "learning_rate": 9.15358417638277e-05, | |
| "loss": 0.2021, | |
| "step": 663 | |
| }, | |
| { | |
| "epoch": 1.3862212943632568, | |
| "grad_norm": 0.03998207042720944, | |
| "learning_rate": 9.096822264184825e-05, | |
| "loss": 0.2441, | |
| "step": 664 | |
| }, | |
| { | |
| "epoch": 1.3883089770354906, | |
| "grad_norm": 0.037635714292856164, | |
| "learning_rate": 9.040185038935029e-05, | |
| "loss": 0.1934, | |
| "step": 665 | |
| }, | |
| { | |
| "epoch": 1.3903966597077244, | |
| "grad_norm": 0.04063187756459359, | |
| "learning_rate": 8.983673148327369e-05, | |
| "loss": 0.2812, | |
| "step": 666 | |
| }, | |
| { | |
| "epoch": 1.3924843423799582, | |
| "grad_norm": 0.03568609472936473, | |
| "learning_rate": 8.927287238622555e-05, | |
| "loss": 0.2578, | |
| "step": 667 | |
| }, | |
| { | |
| "epoch": 1.394572025052192, | |
| "grad_norm": 0.044075873922673904, | |
| "learning_rate": 8.871027954640567e-05, | |
| "loss": 0.2949, | |
| "step": 668 | |
| }, | |
| { | |
| "epoch": 1.3966597077244258, | |
| "grad_norm": 0.04018365813830774, | |
| "learning_rate": 8.814895939753331e-05, | |
| "loss": 0.3203, | |
| "step": 669 | |
| }, | |
| { | |
| "epoch": 1.3987473903966596, | |
| "grad_norm": 0.038371138538880856, | |
| "learning_rate": 8.758891835877335e-05, | |
| "loss": 0.2451, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 1.4008350730688934, | |
| "grad_norm": 0.03644115907045684, | |
| "learning_rate": 8.703016283466323e-05, | |
| "loss": 0.2109, | |
| "step": 671 | |
| }, | |
| { | |
| "epoch": 1.4029227557411272, | |
| "grad_norm": 0.0355184313530291, | |
| "learning_rate": 8.64726992150391e-05, | |
| "loss": 0.2256, | |
| "step": 672 | |
| }, | |
| { | |
| "epoch": 1.405010438413361, | |
| "grad_norm": 0.032473676946887235, | |
| "learning_rate": 8.59165338749632e-05, | |
| "loss": 0.2266, | |
| "step": 673 | |
| }, | |
| { | |
| "epoch": 1.407098121085595, | |
| "grad_norm": 0.03821259918901156, | |
| "learning_rate": 8.536167317465114e-05, | |
| "loss": 0.2305, | |
| "step": 674 | |
| }, | |
| { | |
| "epoch": 1.4091858037578289, | |
| "grad_norm": 0.035842354722894494, | |
| "learning_rate": 8.480812345939855e-05, | |
| "loss": 0.2363, | |
| "step": 675 | |
| }, | |
| { | |
| "epoch": 1.4112734864300627, | |
| "grad_norm": 0.034936854456220405, | |
| "learning_rate": 8.4255891059509e-05, | |
| "loss": 0.2344, | |
| "step": 676 | |
| }, | |
| { | |
| "epoch": 1.4133611691022965, | |
| "grad_norm": 0.0327536953633252, | |
| "learning_rate": 8.370498229022133e-05, | |
| "loss": 0.2354, | |
| "step": 677 | |
| }, | |
| { | |
| "epoch": 1.4154488517745303, | |
| "grad_norm": 0.03332967518468726, | |
| "learning_rate": 8.315540345163783e-05, | |
| "loss": 0.21, | |
| "step": 678 | |
| }, | |
| { | |
| "epoch": 1.417536534446764, | |
| "grad_norm": 0.039757185578627195, | |
| "learning_rate": 8.26071608286517e-05, | |
| "loss": 0.2451, | |
| "step": 679 | |
| }, | |
| { | |
| "epoch": 1.4196242171189979, | |
| "grad_norm": 0.033351639676043855, | |
| "learning_rate": 8.206026069087538e-05, | |
| "loss": 0.1963, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 1.4217118997912317, | |
| "grad_norm": 0.036626791510792185, | |
| "learning_rate": 8.151470929256893e-05, | |
| "loss": 0.2471, | |
| "step": 681 | |
| }, | |
| { | |
| "epoch": 1.4237995824634655, | |
| "grad_norm": 0.03663750757496257, | |
| "learning_rate": 8.097051287256854e-05, | |
| "loss": 0.2559, | |
| "step": 682 | |
| }, | |
| { | |
| "epoch": 1.4258872651356993, | |
| "grad_norm": 0.03533804105379715, | |
| "learning_rate": 8.042767765421499e-05, | |
| "loss": 0.2334, | |
| "step": 683 | |
| }, | |
| { | |
| "epoch": 1.4279749478079333, | |
| "grad_norm": 0.03548267765082115, | |
| "learning_rate": 7.988620984528248e-05, | |
| "loss": 0.1992, | |
| "step": 684 | |
| }, | |
| { | |
| "epoch": 1.4300626304801671, | |
| "grad_norm": 0.03365970412354974, | |
| "learning_rate": 7.934611563790803e-05, | |
| "loss": 0.2129, | |
| "step": 685 | |
| }, | |
| { | |
| "epoch": 1.432150313152401, | |
| "grad_norm": 0.036222173404344804, | |
| "learning_rate": 7.880740120852012e-05, | |
| "loss": 0.2441, | |
| "step": 686 | |
| }, | |
| { | |
| "epoch": 1.4342379958246347, | |
| "grad_norm": 0.03545069244831471, | |
| "learning_rate": 7.827007271776843e-05, | |
| "loss": 0.2246, | |
| "step": 687 | |
| }, | |
| { | |
| "epoch": 1.4363256784968685, | |
| "grad_norm": 0.036141104648819485, | |
| "learning_rate": 7.773413631045314e-05, | |
| "loss": 0.2334, | |
| "step": 688 | |
| }, | |
| { | |
| "epoch": 1.4384133611691023, | |
| "grad_norm": 0.03552223808507066, | |
| "learning_rate": 7.719959811545512e-05, | |
| "loss": 0.207, | |
| "step": 689 | |
| }, | |
| { | |
| "epoch": 1.4405010438413361, | |
| "grad_norm": 0.040531388046511914, | |
| "learning_rate": 7.666646424566508e-05, | |
| "loss": 0.25, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 1.44258872651357, | |
| "grad_norm": 0.03642114412895087, | |
| "learning_rate": 7.613474079791432e-05, | |
| "loss": 0.2275, | |
| "step": 691 | |
| }, | |
| { | |
| "epoch": 1.4446764091858038, | |
| "grad_norm": 0.03334766371093395, | |
| "learning_rate": 7.56044338529049e-05, | |
| "loss": 0.2246, | |
| "step": 692 | |
| }, | |
| { | |
| "epoch": 1.4467640918580376, | |
| "grad_norm": 0.038435220776801955, | |
| "learning_rate": 7.50755494751398e-05, | |
| "loss": 0.2871, | |
| "step": 693 | |
| }, | |
| { | |
| "epoch": 1.4488517745302714, | |
| "grad_norm": 0.03639976375126217, | |
| "learning_rate": 7.454809371285381e-05, | |
| "loss": 0.2236, | |
| "step": 694 | |
| }, | |
| { | |
| "epoch": 1.4509394572025052, | |
| "grad_norm": 0.04139491524391803, | |
| "learning_rate": 7.402207259794428e-05, | |
| "loss": 0.2617, | |
| "step": 695 | |
| }, | |
| { | |
| "epoch": 1.453027139874739, | |
| "grad_norm": 0.031147759796875986, | |
| "learning_rate": 7.34974921459023e-05, | |
| "loss": 0.1904, | |
| "step": 696 | |
| }, | |
| { | |
| "epoch": 1.4551148225469728, | |
| "grad_norm": 0.04026433329347746, | |
| "learning_rate": 7.297435835574362e-05, | |
| "loss": 0.2393, | |
| "step": 697 | |
| }, | |
| { | |
| "epoch": 1.4572025052192066, | |
| "grad_norm": 0.03965005806725716, | |
| "learning_rate": 7.24526772099403e-05, | |
| "loss": 0.2715, | |
| "step": 698 | |
| }, | |
| { | |
| "epoch": 1.4592901878914404, | |
| "grad_norm": 0.040893406654203844, | |
| "learning_rate": 7.193245467435206e-05, | |
| "loss": 0.2471, | |
| "step": 699 | |
| }, | |
| { | |
| "epoch": 1.4613778705636742, | |
| "grad_norm": 0.03636456124885245, | |
| "learning_rate": 7.141369669815841e-05, | |
| "loss": 0.2168, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 1.4634655532359082, | |
| "grad_norm": 0.03647208734905981, | |
| "learning_rate": 7.089640921379026e-05, | |
| "loss": 0.2178, | |
| "step": 701 | |
| }, | |
| { | |
| "epoch": 1.465553235908142, | |
| "grad_norm": 0.0398294556445215, | |
| "learning_rate": 7.038059813686224e-05, | |
| "loss": 0.2695, | |
| "step": 702 | |
| }, | |
| { | |
| "epoch": 1.4676409185803758, | |
| "grad_norm": 0.03444283718095077, | |
| "learning_rate": 6.986626936610491e-05, | |
| "loss": 0.2109, | |
| "step": 703 | |
| }, | |
| { | |
| "epoch": 1.4697286012526096, | |
| "grad_norm": 0.036117772890174496, | |
| "learning_rate": 6.935342878329774e-05, | |
| "loss": 0.1855, | |
| "step": 704 | |
| }, | |
| { | |
| "epoch": 1.4718162839248434, | |
| "grad_norm": 0.03485285291792577, | |
| "learning_rate": 6.884208225320121e-05, | |
| "loss": 0.1953, | |
| "step": 705 | |
| }, | |
| { | |
| "epoch": 1.4739039665970772, | |
| "grad_norm": 0.03913119058204658, | |
| "learning_rate": 6.833223562349018e-05, | |
| "loss": 0.291, | |
| "step": 706 | |
| }, | |
| { | |
| "epoch": 1.475991649269311, | |
| "grad_norm": 0.03619007457603297, | |
| "learning_rate": 6.782389472468687e-05, | |
| "loss": 0.2256, | |
| "step": 707 | |
| }, | |
| { | |
| "epoch": 1.4780793319415448, | |
| "grad_norm": 0.03954136112257214, | |
| "learning_rate": 6.731706537009437e-05, | |
| "loss": 0.2148, | |
| "step": 708 | |
| }, | |
| { | |
| "epoch": 1.4801670146137786, | |
| "grad_norm": 0.039048253382048426, | |
| "learning_rate": 6.68117533557297e-05, | |
| "loss": 0.2119, | |
| "step": 709 | |
| }, | |
| { | |
| "epoch": 1.4822546972860124, | |
| "grad_norm": 0.03633979845664376, | |
| "learning_rate": 6.630796446025793e-05, | |
| "loss": 0.207, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 1.4843423799582465, | |
| "grad_norm": 0.04042749139792089, | |
| "learning_rate": 6.580570444492626e-05, | |
| "loss": 0.2617, | |
| "step": 711 | |
| }, | |
| { | |
| "epoch": 1.4864300626304803, | |
| "grad_norm": 0.04112986991098296, | |
| "learning_rate": 6.530497905349753e-05, | |
| "loss": 0.2578, | |
| "step": 712 | |
| }, | |
| { | |
| "epoch": 1.488517745302714, | |
| "grad_norm": 0.04014939278609065, | |
| "learning_rate": 6.480579401218502e-05, | |
| "loss": 0.2324, | |
| "step": 713 | |
| }, | |
| { | |
| "epoch": 1.4906054279749479, | |
| "grad_norm": 0.03662270065596823, | |
| "learning_rate": 6.430815502958674e-05, | |
| "loss": 0.1689, | |
| "step": 714 | |
| }, | |
| { | |
| "epoch": 1.4926931106471817, | |
| "grad_norm": 0.033001164537146806, | |
| "learning_rate": 6.381206779662039e-05, | |
| "loss": 0.1826, | |
| "step": 715 | |
| }, | |
| { | |
| "epoch": 1.4947807933194155, | |
| "grad_norm": 0.04093484928652971, | |
| "learning_rate": 6.331753798645796e-05, | |
| "loss": 0.249, | |
| "step": 716 | |
| }, | |
| { | |
| "epoch": 1.4968684759916493, | |
| "grad_norm": 0.03400770412940897, | |
| "learning_rate": 6.282457125446109e-05, | |
| "loss": 0.1709, | |
| "step": 717 | |
| }, | |
| { | |
| "epoch": 1.498956158663883, | |
| "grad_norm": 0.03449610130369155, | |
| "learning_rate": 6.233317323811615e-05, | |
| "loss": 0.2168, | |
| "step": 718 | |
| }, | |
| { | |
| "epoch": 1.501043841336117, | |
| "grad_norm": 0.03943007646918304, | |
| "learning_rate": 6.184334955697028e-05, | |
| "loss": 0.2617, | |
| "step": 719 | |
| }, | |
| { | |
| "epoch": 1.5031315240083507, | |
| "grad_norm": 0.04046270086854637, | |
| "learning_rate": 6.135510581256647e-05, | |
| "loss": 0.2715, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 1.5052192066805845, | |
| "grad_norm": 0.036042076960770504, | |
| "learning_rate": 6.086844758837991e-05, | |
| "loss": 0.2275, | |
| "step": 721 | |
| }, | |
| { | |
| "epoch": 1.5073068893528183, | |
| "grad_norm": 0.038478916176100005, | |
| "learning_rate": 6.0383380449754004e-05, | |
| "loss": 0.1924, | |
| "step": 722 | |
| }, | |
| { | |
| "epoch": 1.5093945720250521, | |
| "grad_norm": 0.0578411545976244, | |
| "learning_rate": 5.98999099438369e-05, | |
| "loss": 0.2852, | |
| "step": 723 | |
| }, | |
| { | |
| "epoch": 1.511482254697286, | |
| "grad_norm": 0.03739145944754586, | |
| "learning_rate": 5.941804159951778e-05, | |
| "loss": 0.2197, | |
| "step": 724 | |
| }, | |
| { | |
| "epoch": 1.5135699373695197, | |
| "grad_norm": 0.036832588153257496, | |
| "learning_rate": 5.893778092736382e-05, | |
| "loss": 0.2422, | |
| "step": 725 | |
| }, | |
| { | |
| "epoch": 1.5156576200417535, | |
| "grad_norm": 0.03671021108564163, | |
| "learning_rate": 5.845913341955711e-05, | |
| "loss": 0.1572, | |
| "step": 726 | |
| }, | |
| { | |
| "epoch": 1.5177453027139873, | |
| "grad_norm": 0.03573073453794959, | |
| "learning_rate": 5.798210454983184e-05, | |
| "loss": 0.2168, | |
| "step": 727 | |
| }, | |
| { | |
| "epoch": 1.5198329853862211, | |
| "grad_norm": 0.035959632617295005, | |
| "learning_rate": 5.7506699773411764e-05, | |
| "loss": 0.2451, | |
| "step": 728 | |
| }, | |
| { | |
| "epoch": 1.5219206680584552, | |
| "grad_norm": 0.038728578675763334, | |
| "learning_rate": 5.703292452694771e-05, | |
| "loss": 0.2139, | |
| "step": 729 | |
| }, | |
| { | |
| "epoch": 1.524008350730689, | |
| "grad_norm": 0.035308915371426546, | |
| "learning_rate": 5.6560784228455586e-05, | |
| "loss": 0.2021, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 1.5260960334029228, | |
| "grad_norm": 0.039840996287124834, | |
| "learning_rate": 5.609028427725418e-05, | |
| "loss": 0.1953, | |
| "step": 731 | |
| }, | |
| { | |
| "epoch": 1.5281837160751566, | |
| "grad_norm": 0.04087634507351788, | |
| "learning_rate": 5.562143005390361e-05, | |
| "loss": 0.2617, | |
| "step": 732 | |
| }, | |
| { | |
| "epoch": 1.5302713987473904, | |
| "grad_norm": 0.03746689779610756, | |
| "learning_rate": 5.5154226920143626e-05, | |
| "loss": 0.2197, | |
| "step": 733 | |
| }, | |
| { | |
| "epoch": 1.5323590814196242, | |
| "grad_norm": 0.04226091666932121, | |
| "learning_rate": 5.468868021883256e-05, | |
| "loss": 0.2773, | |
| "step": 734 | |
| }, | |
| { | |
| "epoch": 1.534446764091858, | |
| "grad_norm": 0.03255698564490662, | |
| "learning_rate": 5.422479527388591e-05, | |
| "loss": 0.1816, | |
| "step": 735 | |
| }, | |
| { | |
| "epoch": 1.536534446764092, | |
| "grad_norm": 0.03534998758506692, | |
| "learning_rate": 5.376257739021564e-05, | |
| "loss": 0.2158, | |
| "step": 736 | |
| }, | |
| { | |
| "epoch": 1.5386221294363258, | |
| "grad_norm": 0.04286917750609741, | |
| "learning_rate": 5.330203185366942e-05, | |
| "loss": 0.2559, | |
| "step": 737 | |
| }, | |
| { | |
| "epoch": 1.5407098121085596, | |
| "grad_norm": 0.03987124501813775, | |
| "learning_rate": 5.284316393097042e-05, | |
| "loss": 0.2197, | |
| "step": 738 | |
| }, | |
| { | |
| "epoch": 1.5427974947807934, | |
| "grad_norm": 0.0357279862009252, | |
| "learning_rate": 5.2385978869656705e-05, | |
| "loss": 0.209, | |
| "step": 739 | |
| }, | |
| { | |
| "epoch": 1.5448851774530272, | |
| "grad_norm": 0.03917856556644758, | |
| "learning_rate": 5.193048189802143e-05, | |
| "loss": 0.2402, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 1.546972860125261, | |
| "grad_norm": 0.05340411374410496, | |
| "learning_rate": 5.147667822505318e-05, | |
| "loss": 0.2695, | |
| "step": 741 | |
| }, | |
| { | |
| "epoch": 1.5490605427974948, | |
| "grad_norm": 0.041333285407966595, | |
| "learning_rate": 5.1024573040376115e-05, | |
| "loss": 0.2969, | |
| "step": 742 | |
| }, | |
| { | |
| "epoch": 1.5511482254697286, | |
| "grad_norm": 0.039402130485741114, | |
| "learning_rate": 5.057417151419077e-05, | |
| "loss": 0.2402, | |
| "step": 743 | |
| }, | |
| { | |
| "epoch": 1.5532359081419624, | |
| "grad_norm": 0.035671385682246794, | |
| "learning_rate": 5.012547879721494e-05, | |
| "loss": 0.2158, | |
| "step": 744 | |
| }, | |
| { | |
| "epoch": 1.5553235908141962, | |
| "grad_norm": 0.038417929606667435, | |
| "learning_rate": 4.967850002062491e-05, | |
| "loss": 0.2236, | |
| "step": 745 | |
| }, | |
| { | |
| "epoch": 1.55741127348643, | |
| "grad_norm": 0.03949905474465816, | |
| "learning_rate": 4.923324029599632e-05, | |
| "loss": 0.2715, | |
| "step": 746 | |
| }, | |
| { | |
| "epoch": 1.5594989561586639, | |
| "grad_norm": 0.0352045032702957, | |
| "learning_rate": 4.878970471524622e-05, | |
| "loss": 0.2275, | |
| "step": 747 | |
| }, | |
| { | |
| "epoch": 1.5615866388308977, | |
| "grad_norm": 0.036303248719977955, | |
| "learning_rate": 4.834789835057465e-05, | |
| "loss": 0.2021, | |
| "step": 748 | |
| }, | |
| { | |
| "epoch": 1.5636743215031315, | |
| "grad_norm": 0.03443800350271572, | |
| "learning_rate": 4.790782625440655e-05, | |
| "loss": 0.1992, | |
| "step": 749 | |
| }, | |
| { | |
| "epoch": 1.5657620041753653, | |
| "grad_norm": 0.04231633067945213, | |
| "learning_rate": 4.7469493459334066e-05, | |
| "loss": 0.2617, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 1.567849686847599, | |
| "grad_norm": 0.037830454141616246, | |
| "learning_rate": 4.70329049780589e-05, | |
| "loss": 0.2217, | |
| "step": 751 | |
| }, | |
| { | |
| "epoch": 1.5699373695198329, | |
| "grad_norm": 0.037662635732679015, | |
| "learning_rate": 4.65980658033353e-05, | |
| "loss": 0.2188, | |
| "step": 752 | |
| }, | |
| { | |
| "epoch": 1.5720250521920667, | |
| "grad_norm": 0.04067293838178101, | |
| "learning_rate": 4.616498090791248e-05, | |
| "loss": 0.293, | |
| "step": 753 | |
| }, | |
| { | |
| "epoch": 1.5741127348643005, | |
| "grad_norm": 0.03901581955251283, | |
| "learning_rate": 4.5733655244478146e-05, | |
| "loss": 0.2266, | |
| "step": 754 | |
| }, | |
| { | |
| "epoch": 1.5762004175365343, | |
| "grad_norm": 0.03554948290229258, | |
| "learning_rate": 4.5304093745601605e-05, | |
| "loss": 0.1943, | |
| "step": 755 | |
| }, | |
| { | |
| "epoch": 1.5782881002087683, | |
| "grad_norm": 0.0405280460733433, | |
| "learning_rate": 4.487630132367764e-05, | |
| "loss": 0.2617, | |
| "step": 756 | |
| }, | |
| { | |
| "epoch": 1.5803757828810021, | |
| "grad_norm": 0.03823926366289705, | |
| "learning_rate": 4.445028287087003e-05, | |
| "loss": 0.252, | |
| "step": 757 | |
| }, | |
| { | |
| "epoch": 1.582463465553236, | |
| "grad_norm": 0.03721532941881517, | |
| "learning_rate": 4.402604325905575e-05, | |
| "loss": 0.2129, | |
| "step": 758 | |
| }, | |
| { | |
| "epoch": 1.5845511482254697, | |
| "grad_norm": 0.029782665453401677, | |
| "learning_rate": 4.360358733976919e-05, | |
| "loss": 0.1826, | |
| "step": 759 | |
| }, | |
| { | |
| "epoch": 1.5866388308977035, | |
| "grad_norm": 0.03989589641175334, | |
| "learning_rate": 4.3182919944146936e-05, | |
| "loss": 0.2734, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 1.5887265135699373, | |
| "grad_norm": 0.04383942244265336, | |
| "learning_rate": 4.2764045882872086e-05, | |
| "loss": 0.3008, | |
| "step": 761 | |
| }, | |
| { | |
| "epoch": 1.5908141962421714, | |
| "grad_norm": 0.034818289814213685, | |
| "learning_rate": 4.2346969946119574e-05, | |
| "loss": 0.2188, | |
| "step": 762 | |
| }, | |
| { | |
| "epoch": 1.5929018789144052, | |
| "grad_norm": 0.03770186022731002, | |
| "learning_rate": 4.1931696903501204e-05, | |
| "loss": 0.2314, | |
| "step": 763 | |
| }, | |
| { | |
| "epoch": 1.594989561586639, | |
| "grad_norm": 0.03973578816213652, | |
| "learning_rate": 4.151823150401124e-05, | |
| "loss": 0.2402, | |
| "step": 764 | |
| }, | |
| { | |
| "epoch": 1.5970772442588728, | |
| "grad_norm": 0.03236007667945323, | |
| "learning_rate": 4.1106578475972035e-05, | |
| "loss": 0.1924, | |
| "step": 765 | |
| }, | |
| { | |
| "epoch": 1.5991649269311066, | |
| "grad_norm": 0.03764508247239107, | |
| "learning_rate": 4.069674252697988e-05, | |
| "loss": 0.1748, | |
| "step": 766 | |
| }, | |
| { | |
| "epoch": 1.6012526096033404, | |
| "grad_norm": 0.03731281581422325, | |
| "learning_rate": 4.02887283438514e-05, | |
| "loss": 0.2559, | |
| "step": 767 | |
| }, | |
| { | |
| "epoch": 1.6033402922755742, | |
| "grad_norm": 0.0372624393342072, | |
| "learning_rate": 3.988254059256971e-05, | |
| "loss": 0.2188, | |
| "step": 768 | |
| }, | |
| { | |
| "epoch": 1.605427974947808, | |
| "grad_norm": 0.039361210964991795, | |
| "learning_rate": 3.9478183918231106e-05, | |
| "loss": 0.2578, | |
| "step": 769 | |
| }, | |
| { | |
| "epoch": 1.6075156576200418, | |
| "grad_norm": 0.03750381742110697, | |
| "learning_rate": 3.907566294499201e-05, | |
| "loss": 0.1982, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 1.6096033402922756, | |
| "grad_norm": 0.03771991482783373, | |
| "learning_rate": 3.8674982276016205e-05, | |
| "loss": 0.2412, | |
| "step": 771 | |
| }, | |
| { | |
| "epoch": 1.6116910229645094, | |
| "grad_norm": 0.04091075279365844, | |
| "learning_rate": 3.82761464934219e-05, | |
| "loss": 0.2793, | |
| "step": 772 | |
| }, | |
| { | |
| "epoch": 1.6137787056367432, | |
| "grad_norm": 0.03859340968976196, | |
| "learning_rate": 3.787916015822954e-05, | |
| "loss": 0.2471, | |
| "step": 773 | |
| }, | |
| { | |
| "epoch": 1.615866388308977, | |
| "grad_norm": 0.03200544736921389, | |
| "learning_rate": 3.748402781030955e-05, | |
| "loss": 0.1826, | |
| "step": 774 | |
| }, | |
| { | |
| "epoch": 1.6179540709812108, | |
| "grad_norm": 0.0340269199931104, | |
| "learning_rate": 3.709075396833057e-05, | |
| "loss": 0.1855, | |
| "step": 775 | |
| }, | |
| { | |
| "epoch": 1.6200417536534446, | |
| "grad_norm": 0.038072978162411425, | |
| "learning_rate": 3.66993431297076e-05, | |
| "loss": 0.2432, | |
| "step": 776 | |
| }, | |
| { | |
| "epoch": 1.6221294363256784, | |
| "grad_norm": 0.04102889623967079, | |
| "learning_rate": 3.63097997705506e-05, | |
| "loss": 0.3145, | |
| "step": 777 | |
| }, | |
| { | |
| "epoch": 1.6242171189979122, | |
| "grad_norm": 0.04007477882751308, | |
| "learning_rate": 3.592212834561337e-05, | |
| "loss": 0.2432, | |
| "step": 778 | |
| }, | |
| { | |
| "epoch": 1.626304801670146, | |
| "grad_norm": 0.03819036990721501, | |
| "learning_rate": 3.553633328824266e-05, | |
| "loss": 0.1973, | |
| "step": 779 | |
| }, | |
| { | |
| "epoch": 1.6283924843423798, | |
| "grad_norm": 0.038350798236554316, | |
| "learning_rate": 3.515241901032731e-05, | |
| "loss": 0.2676, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 1.6304801670146136, | |
| "grad_norm": 0.03548951603993466, | |
| "learning_rate": 3.477038990224786e-05, | |
| "loss": 0.2002, | |
| "step": 781 | |
| }, | |
| { | |
| "epoch": 1.6325678496868476, | |
| "grad_norm": 0.034615335672358714, | |
| "learning_rate": 3.439025033282639e-05, | |
| "loss": 0.1963, | |
| "step": 782 | |
| }, | |
| { | |
| "epoch": 1.6346555323590815, | |
| "grad_norm": 0.04345337713073096, | |
| "learning_rate": 3.401200464927654e-05, | |
| "loss": 0.2578, | |
| "step": 783 | |
| }, | |
| { | |
| "epoch": 1.6367432150313153, | |
| "grad_norm": 0.03791537054424221, | |
| "learning_rate": 3.363565717715373e-05, | |
| "loss": 0.2227, | |
| "step": 784 | |
| }, | |
| { | |
| "epoch": 1.638830897703549, | |
| "grad_norm": 0.04113195871715081, | |
| "learning_rate": 3.326121222030578e-05, | |
| "loss": 0.2012, | |
| "step": 785 | |
| }, | |
| { | |
| "epoch": 1.6409185803757829, | |
| "grad_norm": 0.041884239195247946, | |
| "learning_rate": 3.288867406082372e-05, | |
| "loss": 0.2236, | |
| "step": 786 | |
| }, | |
| { | |
| "epoch": 1.6430062630480167, | |
| "grad_norm": 0.03652594444269533, | |
| "learning_rate": 3.251804695899267e-05, | |
| "loss": 0.2139, | |
| "step": 787 | |
| }, | |
| { | |
| "epoch": 1.6450939457202505, | |
| "grad_norm": 0.05643896674418451, | |
| "learning_rate": 3.214933515324323e-05, | |
| "loss": 0.2422, | |
| "step": 788 | |
| }, | |
| { | |
| "epoch": 1.6471816283924845, | |
| "grad_norm": 0.04098991740754866, | |
| "learning_rate": 3.178254286010296e-05, | |
| "loss": 0.2422, | |
| "step": 789 | |
| }, | |
| { | |
| "epoch": 1.6492693110647183, | |
| "grad_norm": 0.04098022159134268, | |
| "learning_rate": 3.1417674274148276e-05, | |
| "loss": 0.2402, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 1.651356993736952, | |
| "grad_norm": 0.037087942509587524, | |
| "learning_rate": 3.105473356795634e-05, | |
| "loss": 0.2295, | |
| "step": 791 | |
| }, | |
| { | |
| "epoch": 1.653444676409186, | |
| "grad_norm": 0.03775161487801411, | |
| "learning_rate": 3.0693724892057396e-05, | |
| "loss": 0.207, | |
| "step": 792 | |
| }, | |
| { | |
| "epoch": 1.6555323590814197, | |
| "grad_norm": 0.04001811834655189, | |
| "learning_rate": 3.0334652374887263e-05, | |
| "loss": 0.2773, | |
| "step": 793 | |
| }, | |
| { | |
| "epoch": 1.6576200417536535, | |
| "grad_norm": 0.03971800362165568, | |
| "learning_rate": 2.997752012274031e-05, | |
| "loss": 0.2139, | |
| "step": 794 | |
| }, | |
| { | |
| "epoch": 1.6597077244258873, | |
| "grad_norm": 0.03971183328914648, | |
| "learning_rate": 2.9622332219722193e-05, | |
| "loss": 0.2422, | |
| "step": 795 | |
| }, | |
| { | |
| "epoch": 1.6617954070981211, | |
| "grad_norm": 0.03937647373747184, | |
| "learning_rate": 2.9269092727703375e-05, | |
| "loss": 0.252, | |
| "step": 796 | |
| }, | |
| { | |
| "epoch": 1.663883089770355, | |
| "grad_norm": 0.03693074285981622, | |
| "learning_rate": 2.891780568627267e-05, | |
| "loss": 0.1992, | |
| "step": 797 | |
| }, | |
| { | |
| "epoch": 1.6659707724425887, | |
| "grad_norm": 0.03819446049329271, | |
| "learning_rate": 2.8568475112690918e-05, | |
| "loss": 0.2383, | |
| "step": 798 | |
| }, | |
| { | |
| "epoch": 1.6680584551148225, | |
| "grad_norm": 0.03385681858741079, | |
| "learning_rate": 2.8221105001845095e-05, | |
| "loss": 0.1914, | |
| "step": 799 | |
| }, | |
| { | |
| "epoch": 1.6701461377870563, | |
| "grad_norm": 0.03481284448136041, | |
| "learning_rate": 2.7875699326202663e-05, | |
| "loss": 0.1943, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 1.6722338204592901, | |
| "grad_norm": 0.038887635380198286, | |
| "learning_rate": 2.7532262035766288e-05, | |
| "loss": 0.2236, | |
| "step": 801 | |
| }, | |
| { | |
| "epoch": 1.674321503131524, | |
| "grad_norm": 0.039268882753983984, | |
| "learning_rate": 2.7190797058028207e-05, | |
| "loss": 0.2402, | |
| "step": 802 | |
| }, | |
| { | |
| "epoch": 1.6764091858037578, | |
| "grad_norm": 0.03561285649555011, | |
| "learning_rate": 2.685130829792577e-05, | |
| "loss": 0.2041, | |
| "step": 803 | |
| }, | |
| { | |
| "epoch": 1.6784968684759916, | |
| "grad_norm": 0.03548955540319579, | |
| "learning_rate": 2.6513799637796745e-05, | |
| "loss": 0.2188, | |
| "step": 804 | |
| }, | |
| { | |
| "epoch": 1.6805845511482254, | |
| "grad_norm": 0.03987654406934741, | |
| "learning_rate": 2.61782749373346e-05, | |
| "loss": 0.2334, | |
| "step": 805 | |
| }, | |
| { | |
| "epoch": 1.6826722338204592, | |
| "grad_norm": 0.03591235670312746, | |
| "learning_rate": 2.584473803354468e-05, | |
| "loss": 0.2217, | |
| "step": 806 | |
| }, | |
| { | |
| "epoch": 1.684759916492693, | |
| "grad_norm": 0.03563754130765487, | |
| "learning_rate": 2.5513192740700144e-05, | |
| "loss": 0.2061, | |
| "step": 807 | |
| }, | |
| { | |
| "epoch": 1.6868475991649268, | |
| "grad_norm": 0.03453991302292082, | |
| "learning_rate": 2.5183642850298505e-05, | |
| "loss": 0.207, | |
| "step": 808 | |
| }, | |
| { | |
| "epoch": 1.6889352818371608, | |
| "grad_norm": 0.040646656045912735, | |
| "learning_rate": 2.4856092131018137e-05, | |
| "loss": 0.293, | |
| "step": 809 | |
| }, | |
| { | |
| "epoch": 1.6910229645093946, | |
| "grad_norm": 0.03639422717002704, | |
| "learning_rate": 2.4530544328675186e-05, | |
| "loss": 0.2236, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 1.6931106471816284, | |
| "grad_norm": 0.04124184507078617, | |
| "learning_rate": 2.4207003166180765e-05, | |
| "loss": 0.2344, | |
| "step": 811 | |
| }, | |
| { | |
| "epoch": 1.6951983298538622, | |
| "grad_norm": 0.04821861121500109, | |
| "learning_rate": 2.3885472343498515e-05, | |
| "loss": 0.2227, | |
| "step": 812 | |
| }, | |
| { | |
| "epoch": 1.697286012526096, | |
| "grad_norm": 0.036261622126831795, | |
| "learning_rate": 2.3565955537602014e-05, | |
| "loss": 0.2266, | |
| "step": 813 | |
| }, | |
| { | |
| "epoch": 1.6993736951983298, | |
| "grad_norm": 0.03833360109114772, | |
| "learning_rate": 2.3248456402432918e-05, | |
| "loss": 0.2598, | |
| "step": 814 | |
| }, | |
| { | |
| "epoch": 1.7014613778705638, | |
| "grad_norm": 0.03811210564749943, | |
| "learning_rate": 2.293297856885912e-05, | |
| "loss": 0.2197, | |
| "step": 815 | |
| }, | |
| { | |
| "epoch": 1.7035490605427976, | |
| "grad_norm": 0.03440881800931742, | |
| "learning_rate": 2.261952564463332e-05, | |
| "loss": 0.1865, | |
| "step": 816 | |
| }, | |
| { | |
| "epoch": 1.7056367432150314, | |
| "grad_norm": 0.03784666030148996, | |
| "learning_rate": 2.2308101214351628e-05, | |
| "loss": 0.2324, | |
| "step": 817 | |
| }, | |
| { | |
| "epoch": 1.7077244258872653, | |
| "grad_norm": 0.03706179096561306, | |
| "learning_rate": 2.1998708839412597e-05, | |
| "loss": 0.21, | |
| "step": 818 | |
| }, | |
| { | |
| "epoch": 1.709812108559499, | |
| "grad_norm": 0.036230331032483534, | |
| "learning_rate": 2.1691352057976565e-05, | |
| "loss": 0.2471, | |
| "step": 819 | |
| }, | |
| { | |
| "epoch": 1.7118997912317329, | |
| "grad_norm": 0.04090973690460557, | |
| "learning_rate": 2.138603438492517e-05, | |
| "loss": 0.2637, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 1.7139874739039667, | |
| "grad_norm": 0.051662701812649245, | |
| "learning_rate": 2.108275931182111e-05, | |
| "loss": 0.1953, | |
| "step": 821 | |
| }, | |
| { | |
| "epoch": 1.7160751565762005, | |
| "grad_norm": 0.038240986750146325, | |
| "learning_rate": 2.0781530306868246e-05, | |
| "loss": 0.2393, | |
| "step": 822 | |
| }, | |
| { | |
| "epoch": 1.7181628392484343, | |
| "grad_norm": 0.04106815130974131, | |
| "learning_rate": 2.048235081487202e-05, | |
| "loss": 0.2637, | |
| "step": 823 | |
| }, | |
| { | |
| "epoch": 1.720250521920668, | |
| "grad_norm": 0.03673610429332063, | |
| "learning_rate": 2.0185224257199908e-05, | |
| "loss": 0.2207, | |
| "step": 824 | |
| }, | |
| { | |
| "epoch": 1.7223382045929019, | |
| "grad_norm": 0.04334868227291063, | |
| "learning_rate": 1.9890154031742325e-05, | |
| "loss": 0.3223, | |
| "step": 825 | |
| }, | |
| { | |
| "epoch": 1.7244258872651357, | |
| "grad_norm": 0.04017653068637208, | |
| "learning_rate": 1.9597143512873872e-05, | |
| "loss": 0.2656, | |
| "step": 826 | |
| }, | |
| { | |
| "epoch": 1.7265135699373695, | |
| "grad_norm": 0.04143141277717955, | |
| "learning_rate": 1.9306196051414728e-05, | |
| "loss": 0.252, | |
| "step": 827 | |
| }, | |
| { | |
| "epoch": 1.7286012526096033, | |
| "grad_norm": 0.030689616701832047, | |
| "learning_rate": 1.9017314974592203e-05, | |
| "loss": 0.1982, | |
| "step": 828 | |
| }, | |
| { | |
| "epoch": 1.730688935281837, | |
| "grad_norm": 0.04149364456774439, | |
| "learning_rate": 1.873050358600279e-05, | |
| "loss": 0.2451, | |
| "step": 829 | |
| }, | |
| { | |
| "epoch": 1.732776617954071, | |
| "grad_norm": 0.037822515436121175, | |
| "learning_rate": 1.8445765165574324e-05, | |
| "loss": 0.2246, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 1.7348643006263047, | |
| "grad_norm": 0.036300250288018185, | |
| "learning_rate": 1.8163102969528702e-05, | |
| "loss": 0.1709, | |
| "step": 831 | |
| }, | |
| { | |
| "epoch": 1.7369519832985385, | |
| "grad_norm": 0.03813579853432033, | |
| "learning_rate": 1.7882520230344225e-05, | |
| "loss": 0.1982, | |
| "step": 832 | |
| }, | |
| { | |
| "epoch": 1.7390396659707723, | |
| "grad_norm": 0.03490543301641405, | |
| "learning_rate": 1.7604020156719026e-05, | |
| "loss": 0.2256, | |
| "step": 833 | |
| }, | |
| { | |
| "epoch": 1.7411273486430061, | |
| "grad_norm": 0.03767801651422133, | |
| "learning_rate": 1.7327605933534087e-05, | |
| "loss": 0.248, | |
| "step": 834 | |
| }, | |
| { | |
| "epoch": 1.7432150313152401, | |
| "grad_norm": 0.03559965744869205, | |
| "learning_rate": 1.705328072181711e-05, | |
| "loss": 0.1963, | |
| "step": 835 | |
| }, | |
| { | |
| "epoch": 1.745302713987474, | |
| "grad_norm": 0.03620103246900227, | |
| "learning_rate": 1.6781047658706094e-05, | |
| "loss": 0.2363, | |
| "step": 836 | |
| }, | |
| { | |
| "epoch": 1.7473903966597077, | |
| "grad_norm": 0.03363208108698153, | |
| "learning_rate": 1.6510909857413593e-05, | |
| "loss": 0.208, | |
| "step": 837 | |
| }, | |
| { | |
| "epoch": 1.7494780793319415, | |
| "grad_norm": 0.038864402335742426, | |
| "learning_rate": 1.624287040719108e-05, | |
| "loss": 0.2217, | |
| "step": 838 | |
| }, | |
| { | |
| "epoch": 1.7515657620041754, | |
| "grad_norm": 0.03604996210348334, | |
| "learning_rate": 1.597693237329363e-05, | |
| "loss": 0.2363, | |
| "step": 839 | |
| }, | |
| { | |
| "epoch": 1.7536534446764092, | |
| "grad_norm": 0.03870805026122142, | |
| "learning_rate": 1.5713098796944913e-05, | |
| "loss": 0.2363, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 1.755741127348643, | |
| "grad_norm": 0.037967280022889786, | |
| "learning_rate": 1.545137269530228e-05, | |
| "loss": 0.2031, | |
| "step": 841 | |
| }, | |
| { | |
| "epoch": 1.757828810020877, | |
| "grad_norm": 0.032100601105533956, | |
| "learning_rate": 1.5191757061422484e-05, | |
| "loss": 0.1963, | |
| "step": 842 | |
| }, | |
| { | |
| "epoch": 1.7599164926931108, | |
| "grad_norm": 0.03677180115256404, | |
| "learning_rate": 1.493425486422717e-05, | |
| "loss": 0.2324, | |
| "step": 843 | |
| }, | |
| { | |
| "epoch": 1.7620041753653446, | |
| "grad_norm": 0.04179333438984321, | |
| "learning_rate": 1.467886904846918e-05, | |
| "loss": 0.2197, | |
| "step": 844 | |
| }, | |
| { | |
| "epoch": 1.7640918580375784, | |
| "grad_norm": 0.03330069993588093, | |
| "learning_rate": 1.4425602534698645e-05, | |
| "loss": 0.1992, | |
| "step": 845 | |
| }, | |
| { | |
| "epoch": 1.7661795407098122, | |
| "grad_norm": 0.03921593097418753, | |
| "learning_rate": 1.4174458219229868e-05, | |
| "loss": 0.2734, | |
| "step": 846 | |
| }, | |
| { | |
| "epoch": 1.768267223382046, | |
| "grad_norm": 0.0384869660070795, | |
| "learning_rate": 1.3925438974107918e-05, | |
| "loss": 0.2285, | |
| "step": 847 | |
| }, | |
| { | |
| "epoch": 1.7703549060542798, | |
| "grad_norm": 0.03235612278723638, | |
| "learning_rate": 1.3678547647075946e-05, | |
| "loss": 0.2021, | |
| "step": 848 | |
| }, | |
| { | |
| "epoch": 1.7724425887265136, | |
| "grad_norm": 0.034557637083813396, | |
| "learning_rate": 1.3433787061542525e-05, | |
| "loss": 0.1982, | |
| "step": 849 | |
| }, | |
| { | |
| "epoch": 1.7745302713987474, | |
| "grad_norm": 0.036691512115311166, | |
| "learning_rate": 1.3191160016549564e-05, | |
| "loss": 0.2695, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 1.7766179540709812, | |
| "grad_norm": 0.036890816812044876, | |
| "learning_rate": 1.2950669286740024e-05, | |
| "loss": 0.2119, | |
| "step": 851 | |
| }, | |
| { | |
| "epoch": 1.778705636743215, | |
| "grad_norm": 0.03801656481599902, | |
| "learning_rate": 1.271231762232632e-05, | |
| "loss": 0.2354, | |
| "step": 852 | |
| }, | |
| { | |
| "epoch": 1.7807933194154488, | |
| "grad_norm": 0.0431132757880389, | |
| "learning_rate": 1.2476107749058986e-05, | |
| "loss": 0.2305, | |
| "step": 853 | |
| }, | |
| { | |
| "epoch": 1.7828810020876826, | |
| "grad_norm": 0.034619924337342727, | |
| "learning_rate": 1.2242042368195218e-05, | |
| "loss": 0.1914, | |
| "step": 854 | |
| }, | |
| { | |
| "epoch": 1.7849686847599164, | |
| "grad_norm": 0.034941322602160714, | |
| "learning_rate": 1.2010124156468294e-05, | |
| "loss": 0.2051, | |
| "step": 855 | |
| }, | |
| { | |
| "epoch": 1.7870563674321502, | |
| "grad_norm": 0.03577282594283845, | |
| "learning_rate": 1.1780355766056694e-05, | |
| "loss": 0.2334, | |
| "step": 856 | |
| }, | |
| { | |
| "epoch": 1.789144050104384, | |
| "grad_norm": 0.03923352027261708, | |
| "learning_rate": 1.1552739824554026e-05, | |
| "loss": 0.2217, | |
| "step": 857 | |
| }, | |
| { | |
| "epoch": 1.7912317327766178, | |
| "grad_norm": 0.03464671275803716, | |
| "learning_rate": 1.1327278934938723e-05, | |
| "loss": 0.1992, | |
| "step": 858 | |
| }, | |
| { | |
| "epoch": 1.7933194154488517, | |
| "grad_norm": 0.04157357259303964, | |
| "learning_rate": 1.1103975675544443e-05, | |
| "loss": 0.2461, | |
| "step": 859 | |
| }, | |
| { | |
| "epoch": 1.7954070981210855, | |
| "grad_norm": 0.040227594429237123, | |
| "learning_rate": 1.0882832600030624e-05, | |
| "loss": 0.2441, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 1.7974947807933193, | |
| "grad_norm": 0.03488250123821703, | |
| "learning_rate": 1.0663852237353067e-05, | |
| "loss": 0.1963, | |
| "step": 861 | |
| }, | |
| { | |
| "epoch": 1.7995824634655533, | |
| "grad_norm": 0.03979475512791435, | |
| "learning_rate": 1.0447037091735223e-05, | |
| "loss": 0.2471, | |
| "step": 862 | |
| }, | |
| { | |
| "epoch": 1.801670146137787, | |
| "grad_norm": 0.03997223499064563, | |
| "learning_rate": 1.0232389642639395e-05, | |
| "loss": 0.2471, | |
| "step": 863 | |
| }, | |
| { | |
| "epoch": 1.803757828810021, | |
| "grad_norm": 0.04038899286733726, | |
| "learning_rate": 1.0019912344738625e-05, | |
| "loss": 0.2598, | |
| "step": 864 | |
| }, | |
| { | |
| "epoch": 1.8058455114822547, | |
| "grad_norm": 0.03748451896837008, | |
| "learning_rate": 9.809607627888296e-06, | |
| "loss": 0.2002, | |
| "step": 865 | |
| }, | |
| { | |
| "epoch": 1.8079331941544885, | |
| "grad_norm": 0.03427034514456546, | |
| "learning_rate": 9.601477897098576e-06, | |
| "loss": 0.1729, | |
| "step": 866 | |
| }, | |
| { | |
| "epoch": 1.8100208768267223, | |
| "grad_norm": 0.04471055633543089, | |
| "learning_rate": 9.395525532506866e-06, | |
| "loss": 0.2256, | |
| "step": 867 | |
| }, | |
| { | |
| "epoch": 1.812108559498956, | |
| "grad_norm": 0.03475782341698621, | |
| "learning_rate": 9.191752889350547e-06, | |
| "loss": 0.2207, | |
| "step": 868 | |
| }, | |
| { | |
| "epoch": 1.8141962421711901, | |
| "grad_norm": 0.03828798248246293, | |
| "learning_rate": 8.990162297940097e-06, | |
| "loss": 0.2109, | |
| "step": 869 | |
| }, | |
| { | |
| "epoch": 1.816283924843424, | |
| "grad_norm": 0.04180601763110725, | |
| "learning_rate": 8.79075606363231e-06, | |
| "loss": 0.2676, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 1.8183716075156577, | |
| "grad_norm": 0.03646735664376503, | |
| "learning_rate": 8.593536466804142e-06, | |
| "loss": 0.207, | |
| "step": 871 | |
| }, | |
| { | |
| "epoch": 1.8204592901878915, | |
| "grad_norm": 0.03218572550634406, | |
| "learning_rate": 8.398505762826503e-06, | |
| "loss": 0.1797, | |
| "step": 872 | |
| }, | |
| { | |
| "epoch": 1.8225469728601253, | |
| "grad_norm": 0.03982709976184709, | |
| "learning_rate": 8.205666182038418e-06, | |
| "loss": 0.2432, | |
| "step": 873 | |
| }, | |
| { | |
| "epoch": 1.8246346555323592, | |
| "grad_norm": 0.037982327489911684, | |
| "learning_rate": 8.015019929721668e-06, | |
| "loss": 0.2344, | |
| "step": 874 | |
| }, | |
| { | |
| "epoch": 1.826722338204593, | |
| "grad_norm": 0.03580893689768604, | |
| "learning_rate": 7.826569186075428e-06, | |
| "loss": 0.252, | |
| "step": 875 | |
| }, | |
| { | |
| "epoch": 1.8288100208768268, | |
| "grad_norm": 0.035854237119299116, | |
| "learning_rate": 7.640316106191403e-06, | |
| "loss": 0.1963, | |
| "step": 876 | |
| }, | |
| { | |
| "epoch": 1.8308977035490606, | |
| "grad_norm": 0.03548915356785303, | |
| "learning_rate": 7.4562628200292475e-06, | |
| "loss": 0.2148, | |
| "step": 877 | |
| }, | |
| { | |
| "epoch": 1.8329853862212944, | |
| "grad_norm": 0.03888900644009877, | |
| "learning_rate": 7.274411432392026e-06, | |
| "loss": 0.2812, | |
| "step": 878 | |
| }, | |
| { | |
| "epoch": 1.8350730688935282, | |
| "grad_norm": 0.036848786139394796, | |
| "learning_rate": 7.0947640229023675e-06, | |
| "loss": 0.1904, | |
| "step": 879 | |
| }, | |
| { | |
| "epoch": 1.837160751565762, | |
| "grad_norm": 0.032594039620939604, | |
| "learning_rate": 6.917322645978463e-06, | |
| "loss": 0.1904, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 1.8392484342379958, | |
| "grad_norm": 0.038807798544498055, | |
| "learning_rate": 6.742089330810774e-06, | |
| "loss": 0.2383, | |
| "step": 881 | |
| }, | |
| { | |
| "epoch": 1.8413361169102296, | |
| "grad_norm": 0.042279119809553053, | |
| "learning_rate": 6.5690660813386484e-06, | |
| "loss": 0.3184, | |
| "step": 882 | |
| }, | |
| { | |
| "epoch": 1.8434237995824634, | |
| "grad_norm": 0.03575523378084652, | |
| "learning_rate": 6.398254876227561e-06, | |
| "loss": 0.1875, | |
| "step": 883 | |
| }, | |
| { | |
| "epoch": 1.8455114822546972, | |
| "grad_norm": 0.03438492023581685, | |
| "learning_rate": 6.229657668846333e-06, | |
| "loss": 0.21, | |
| "step": 884 | |
| }, | |
| { | |
| "epoch": 1.847599164926931, | |
| "grad_norm": 0.04355612584591555, | |
| "learning_rate": 6.0632763872449535e-06, | |
| "loss": 0.2754, | |
| "step": 885 | |
| }, | |
| { | |
| "epoch": 1.8496868475991648, | |
| "grad_norm": 0.03533181091838774, | |
| "learning_rate": 5.899112934132345e-06, | |
| "loss": 0.1689, | |
| "step": 886 | |
| }, | |
| { | |
| "epoch": 1.8517745302713986, | |
| "grad_norm": 0.040660080641486476, | |
| "learning_rate": 5.73716918685483e-06, | |
| "loss": 0.2471, | |
| "step": 887 | |
| }, | |
| { | |
| "epoch": 1.8538622129436324, | |
| "grad_norm": 0.03235795768355878, | |
| "learning_rate": 5.577446997374458e-06, | |
| "loss": 0.1826, | |
| "step": 888 | |
| }, | |
| { | |
| "epoch": 1.8559498956158664, | |
| "grad_norm": 0.03609407610302125, | |
| "learning_rate": 5.4199481922479146e-06, | |
| "loss": 0.1816, | |
| "step": 889 | |
| }, | |
| { | |
| "epoch": 1.8580375782881002, | |
| "grad_norm": 0.03647085365814291, | |
| "learning_rate": 5.264674572605665e-06, | |
| "loss": 0.2393, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 1.860125260960334, | |
| "grad_norm": 0.03892481200719314, | |
| "learning_rate": 5.11162791413129e-06, | |
| "loss": 0.2021, | |
| "step": 891 | |
| }, | |
| { | |
| "epoch": 1.8622129436325678, | |
| "grad_norm": 0.03859819801659933, | |
| "learning_rate": 4.9608099670412025e-06, | |
| "loss": 0.2402, | |
| "step": 892 | |
| }, | |
| { | |
| "epoch": 1.8643006263048016, | |
| "grad_norm": 0.03638768037937149, | |
| "learning_rate": 4.812222456064697e-06, | |
| "loss": 0.1729, | |
| "step": 893 | |
| }, | |
| { | |
| "epoch": 1.8663883089770354, | |
| "grad_norm": 0.03578385304837865, | |
| "learning_rate": 4.665867080424046e-06, | |
| "loss": 0.2217, | |
| "step": 894 | |
| }, | |
| { | |
| "epoch": 1.8684759916492695, | |
| "grad_norm": 0.03899470496061886, | |
| "learning_rate": 4.521745513815345e-06, | |
| "loss": 0.2236, | |
| "step": 895 | |
| }, | |
| { | |
| "epoch": 1.8705636743215033, | |
| "grad_norm": 0.03565199656237529, | |
| "learning_rate": 4.379859404389075e-06, | |
| "loss": 0.2158, | |
| "step": 896 | |
| }, | |
| { | |
| "epoch": 1.872651356993737, | |
| "grad_norm": 0.03700782502510176, | |
| "learning_rate": 4.240210374731479e-06, | |
| "loss": 0.2441, | |
| "step": 897 | |
| }, | |
| { | |
| "epoch": 1.8747390396659709, | |
| "grad_norm": 0.034012775228610695, | |
| "learning_rate": 4.10280002184591e-06, | |
| "loss": 0.1934, | |
| "step": 898 | |
| }, | |
| { | |
| "epoch": 1.8768267223382047, | |
| "grad_norm": 0.032231872294556166, | |
| "learning_rate": 3.967629917134574e-06, | |
| "loss": 0.1689, | |
| "step": 899 | |
| }, | |
| { | |
| "epoch": 1.8789144050104385, | |
| "grad_norm": 0.039125390388194826, | |
| "learning_rate": 3.834701606380575e-06, | |
| "loss": 0.2285, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 1.8810020876826723, | |
| "grad_norm": 0.04416962700457997, | |
| "learning_rate": 3.7040166097301877e-06, | |
| "loss": 0.2656, | |
| "step": 901 | |
| }, | |
| { | |
| "epoch": 1.883089770354906, | |
| "grad_norm": 0.04111412261545973, | |
| "learning_rate": 3.575576421675586e-06, | |
| "loss": 0.248, | |
| "step": 902 | |
| }, | |
| { | |
| "epoch": 1.88517745302714, | |
| "grad_norm": 0.03397805823890809, | |
| "learning_rate": 3.449382511037613e-06, | |
| "loss": 0.1904, | |
| "step": 903 | |
| }, | |
| { | |
| "epoch": 1.8872651356993737, | |
| "grad_norm": 0.041722415283545866, | |
| "learning_rate": 3.3254363209491046e-06, | |
| "loss": 0.3301, | |
| "step": 904 | |
| }, | |
| { | |
| "epoch": 1.8893528183716075, | |
| "grad_norm": 0.047301660349510964, | |
| "learning_rate": 3.203739268838324e-06, | |
| "loss": 0.2988, | |
| "step": 905 | |
| }, | |
| { | |
| "epoch": 1.8914405010438413, | |
| "grad_norm": 0.03091322302919706, | |
| "learning_rate": 3.0842927464127537e-06, | |
| "loss": 0.1592, | |
| "step": 906 | |
| }, | |
| { | |
| "epoch": 1.8935281837160751, | |
| "grad_norm": 0.038250787995771175, | |
| "learning_rate": 2.9670981196431525e-06, | |
| "loss": 0.248, | |
| "step": 907 | |
| }, | |
| { | |
| "epoch": 1.895615866388309, | |
| "grad_norm": 0.04184624767282992, | |
| "learning_rate": 2.8521567287480344e-06, | |
| "loss": 0.2461, | |
| "step": 908 | |
| }, | |
| { | |
| "epoch": 1.8977035490605427, | |
| "grad_norm": 0.03669678971086905, | |
| "learning_rate": 2.7394698881782367e-06, | |
| "loss": 0.1895, | |
| "step": 909 | |
| }, | |
| { | |
| "epoch": 1.8997912317327765, | |
| "grad_norm": 0.03332356389562326, | |
| "learning_rate": 2.6290388866019533e-06, | |
| "loss": 0.1934, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 1.9018789144050103, | |
| "grad_norm": 0.03567454628961636, | |
| "learning_rate": 2.5208649868899704e-06, | |
| "loss": 0.2207, | |
| "step": 911 | |
| }, | |
| { | |
| "epoch": 1.9039665970772441, | |
| "grad_norm": 0.03723478614951914, | |
| "learning_rate": 2.4149494261012097e-06, | |
| "loss": 0.2002, | |
| "step": 912 | |
| }, | |
| { | |
| "epoch": 1.906054279749478, | |
| "grad_norm": 0.03840886510699952, | |
| "learning_rate": 2.3112934154686296e-06, | |
| "loss": 0.1982, | |
| "step": 913 | |
| }, | |
| { | |
| "epoch": 1.9081419624217117, | |
| "grad_norm": 0.03658052263392586, | |
| "learning_rate": 2.2098981403852804e-06, | |
| "loss": 0.2295, | |
| "step": 914 | |
| }, | |
| { | |
| "epoch": 1.9102296450939458, | |
| "grad_norm": 0.042943855139820596, | |
| "learning_rate": 2.1107647603908933e-06, | |
| "loss": 0.2422, | |
| "step": 915 | |
| }, | |
| { | |
| "epoch": 1.9123173277661796, | |
| "grad_norm": 0.038181568479852754, | |
| "learning_rate": 2.0138944091585123e-06, | |
| "loss": 0.1768, | |
| "step": 916 | |
| }, | |
| { | |
| "epoch": 1.9144050104384134, | |
| "grad_norm": 0.04044402128707661, | |
| "learning_rate": 1.91928819448155e-06, | |
| "loss": 0.249, | |
| "step": 917 | |
| }, | |
| { | |
| "epoch": 1.9164926931106472, | |
| "grad_norm": 0.04021345081714543, | |
| "learning_rate": 1.8269471982611086e-06, | |
| "loss": 0.2578, | |
| "step": 918 | |
| }, | |
| { | |
| "epoch": 1.918580375782881, | |
| "grad_norm": 0.03765027742041835, | |
| "learning_rate": 1.7368724764936118e-06, | |
| "loss": 0.2275, | |
| "step": 919 | |
| }, | |
| { | |
| "epoch": 1.9206680584551148, | |
| "grad_norm": 0.03276750255084186, | |
| "learning_rate": 1.6490650592588363e-06, | |
| "loss": 0.1797, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 1.9227557411273486, | |
| "grad_norm": 0.03836166999423338, | |
| "learning_rate": 1.5635259507079224e-06, | |
| "loss": 0.2656, | |
| "step": 921 | |
| }, | |
| { | |
| "epoch": 1.9248434237995826, | |
| "grad_norm": 0.040716455351115934, | |
| "learning_rate": 1.4802561290520934e-06, | |
| "loss": 0.2373, | |
| "step": 922 | |
| }, | |
| { | |
| "epoch": 1.9269311064718164, | |
| "grad_norm": 0.044722992742083724, | |
| "learning_rate": 1.3992565465512863e-06, | |
| "loss": 0.2832, | |
| "step": 923 | |
| }, | |
| { | |
| "epoch": 1.9290187891440502, | |
| "grad_norm": 0.035718501429723326, | |
| "learning_rate": 1.3205281295034512e-06, | |
| "loss": 0.2109, | |
| "step": 924 | |
| }, | |
| { | |
| "epoch": 1.931106471816284, | |
| "grad_norm": 0.040897036162254374, | |
| "learning_rate": 1.2440717782338018e-06, | |
| "loss": 0.248, | |
| "step": 925 | |
| }, | |
| { | |
| "epoch": 1.9331941544885178, | |
| "grad_norm": 0.037701179364082175, | |
| "learning_rate": 1.169888367084626e-06, | |
| "loss": 0.2021, | |
| "step": 926 | |
| }, | |
| { | |
| "epoch": 1.9352818371607516, | |
| "grad_norm": 0.03800126744938023, | |
| "learning_rate": 1.0979787444051815e-06, | |
| "loss": 0.2051, | |
| "step": 927 | |
| }, | |
| { | |
| "epoch": 1.9373695198329854, | |
| "grad_norm": 0.0376053096363303, | |
| "learning_rate": 1.0283437325421252e-06, | |
| "loss": 0.25, | |
| "step": 928 | |
| }, | |
| { | |
| "epoch": 1.9394572025052192, | |
| "grad_norm": 0.03852373604309701, | |
| "learning_rate": 9.60984127829989e-07, | |
| "loss": 0.1895, | |
| "step": 929 | |
| }, | |
| { | |
| "epoch": 1.941544885177453, | |
| "grad_norm": 0.05141719488909021, | |
| "learning_rate": 8.95900700582164e-07, | |
| "loss": 0.2969, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 1.9436325678496869, | |
| "grad_norm": 0.0422557444402279, | |
| "learning_rate": 8.330941950819738e-07, | |
| "loss": 0.2793, | |
| "step": 931 | |
| }, | |
| { | |
| "epoch": 1.9457202505219207, | |
| "grad_norm": 0.03878673278759856, | |
| "learning_rate": 7.725653295743484e-07, | |
| "loss": 0.2412, | |
| "step": 932 | |
| }, | |
| { | |
| "epoch": 1.9478079331941545, | |
| "grad_norm": 0.04515324231545769, | |
| "learning_rate": 7.14314796257387e-07, | |
| "loss": 0.2715, | |
| "step": 933 | |
| }, | |
| { | |
| "epoch": 1.9498956158663883, | |
| "grad_norm": 0.04060496658163304, | |
| "learning_rate": 6.58343261274652e-07, | |
| "loss": 0.2559, | |
| "step": 934 | |
| }, | |
| { | |
| "epoch": 1.951983298538622, | |
| "grad_norm": 0.0410883973174675, | |
| "learning_rate": 6.046513647074203e-07, | |
| "loss": 0.2676, | |
| "step": 935 | |
| }, | |
| { | |
| "epoch": 1.9540709812108559, | |
| "grad_norm": 0.037425826708713576, | |
| "learning_rate": 5.532397205673556e-07, | |
| "loss": 0.208, | |
| "step": 936 | |
| }, | |
| { | |
| "epoch": 1.9561586638830897, | |
| "grad_norm": 0.03711360512013879, | |
| "learning_rate": 5.04108916789603e-07, | |
| "loss": 0.2246, | |
| "step": 937 | |
| }, | |
| { | |
| "epoch": 1.9582463465553235, | |
| "grad_norm": 0.04129502522262085, | |
| "learning_rate": 4.5725951522592734e-07, | |
| "loss": 0.2812, | |
| "step": 938 | |
| }, | |
| { | |
| "epoch": 1.9603340292275573, | |
| "grad_norm": 0.042768062565203456, | |
| "learning_rate": 4.126920516384303e-07, | |
| "loss": 0.2734, | |
| "step": 939 | |
| }, | |
| { | |
| "epoch": 1.962421711899791, | |
| "grad_norm": 0.03636718305173902, | |
| "learning_rate": 3.704070356932432e-07, | |
| "loss": 0.2275, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 1.964509394572025, | |
| "grad_norm": 0.03914519289291409, | |
| "learning_rate": 3.3040495095491006e-07, | |
| "loss": 0.2051, | |
| "step": 941 | |
| }, | |
| { | |
| "epoch": 1.966597077244259, | |
| "grad_norm": 0.036435492987632524, | |
| "learning_rate": 2.926862548806364e-07, | |
| "loss": 0.21, | |
| "step": 942 | |
| }, | |
| { | |
| "epoch": 1.9686847599164927, | |
| "grad_norm": 0.04520438858168889, | |
| "learning_rate": 2.5725137881529306e-07, | |
| "loss": 0.2852, | |
| "step": 943 | |
| }, | |
| { | |
| "epoch": 1.9707724425887265, | |
| "grad_norm": 0.036719387383711895, | |
| "learning_rate": 2.2410072798624283e-07, | |
| "loss": 0.2422, | |
| "step": 944 | |
| }, | |
| { | |
| "epoch": 1.9728601252609603, | |
| "grad_norm": 0.03890839218462144, | |
| "learning_rate": 1.9323468149892165e-07, | |
| "loss": 0.2617, | |
| "step": 945 | |
| }, | |
| { | |
| "epoch": 1.9749478079331941, | |
| "grad_norm": 0.04401800316091134, | |
| "learning_rate": 1.6465359233233114e-07, | |
| "loss": 0.2314, | |
| "step": 946 | |
| }, | |
| { | |
| "epoch": 1.977035490605428, | |
| "grad_norm": 0.042526472328545395, | |
| "learning_rate": 1.383577873351305e-07, | |
| "loss": 0.2734, | |
| "step": 947 | |
| }, | |
| { | |
| "epoch": 1.9791231732776617, | |
| "grad_norm": 0.04123640104260365, | |
| "learning_rate": 1.143475672218175e-07, | |
| "loss": 0.2715, | |
| "step": 948 | |
| }, | |
| { | |
| "epoch": 1.9812108559498958, | |
| "grad_norm": 0.041143122691388076, | |
| "learning_rate": 9.26232065693089e-08, | |
| "loss": 0.2578, | |
| "step": 949 | |
| }, | |
| { | |
| "epoch": 1.9832985386221296, | |
| "grad_norm": 0.03646932624947557, | |
| "learning_rate": 7.318495381383184e-08, | |
| "loss": 0.1953, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 1.9853862212943634, | |
| "grad_norm": 0.042625916797236235, | |
| "learning_rate": 5.603303124803727e-08, | |
| "loss": 0.2344, | |
| "step": 951 | |
| }, | |
| { | |
| "epoch": 1.9874739039665972, | |
| "grad_norm": 0.039620902455050594, | |
| "learning_rate": 4.1167635018513064e-08, | |
| "loss": 0.2598, | |
| "step": 952 | |
| }, | |
| { | |
| "epoch": 1.989561586638831, | |
| "grad_norm": 0.0329100227494744, | |
| "learning_rate": 2.8588935123474714e-08, | |
| "loss": 0.1758, | |
| "step": 953 | |
| }, | |
| { | |
| "epoch": 1.9916492693110648, | |
| "grad_norm": 0.03829557846462901, | |
| "learning_rate": 1.8297075410877997e-08, | |
| "loss": 0.2676, | |
| "step": 954 | |
| }, | |
| { | |
| "epoch": 1.9937369519832986, | |
| "grad_norm": 0.037593692719248624, | |
| "learning_rate": 1.02921735767314e-08, | |
| "loss": 0.1943, | |
| "step": 955 | |
| }, | |
| { | |
| "epoch": 1.9958246346555324, | |
| "grad_norm": 0.03806855037144764, | |
| "learning_rate": 4.574321163763884e-09, | |
| "loss": 0.2383, | |
| "step": 956 | |
| }, | |
| { | |
| "epoch": 1.9979123173277662, | |
| "grad_norm": 0.03559582629136701, | |
| "learning_rate": 1.1435835604034495e-09, | |
| "loss": 0.1787, | |
| "step": 957 | |
| }, | |
| { | |
| "epoch": 2.0, | |
| "grad_norm": 0.03239360059033573, | |
| "learning_rate": 0.0, | |
| "loss": 0.167, | |
| "step": 958 | |
| }, | |
| { | |
| "epoch": 2.0, | |
| "step": 958, | |
| "total_flos": 5.650597015940235e+18, | |
| "train_loss": 0.3088689816022965, | |
| "train_runtime": 6060.4121, | |
| "train_samples_per_second": 90.951, | |
| "train_steps_per_second": 0.158 | |
| } | |
| ], | |
| "logging_steps": 1.0, | |
| "max_steps": 958, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 2, | |
| "save_steps": 500, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 5.650597015940235e+18, | |
| "train_batch_size": 72, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
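
The file above is a Hugging Face Trainer state log (`trainer_state.json`): each `log_history` entry records the epoch, gradient norm, learning rate, and training loss at one optimizer step, and the final entry plus the trailing fields summarize the whole run (958 steps over 2 epochs, mean train loss ≈ 0.309). As a minimal sketch of how such a log can be consumed, the Python below loads it and prints a few summary figures. The path `trainer_state.json` is an assumption of this example, and the file must be plain JSON (the `|` table characters in the rendering above stripped out first).

```python
# Minimal sketch: load a Hugging Face trainer_state.json and summarize the run.
# Assumes the log above is saved as plain JSON at "trainer_state.json"
# (both the path and the pipe-stripped form are assumptions of this example).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step entries carry "loss"; the final summary entry carries "train_loss".
steps = [e for e in state["log_history"] if "loss" in e]
summary = next((e for e in state["log_history"] if "train_loss" in e), None)

print(f"logged steps: {len(steps)} / max_steps: {state['max_steps']}")
print(f"first loss: {steps[0]['loss']:.4f}  last loss: {steps[-1]['loss']:.4f}")
if summary:
    print(f"mean train loss: {summary['train_loss']:.4f}  "
          f"runtime: {summary['train_runtime']:.0f}s")
```

In this tail of the log, the per-step losses settle around 0.2, and the learning rate decays smoothly to 0.0 exactly at step 958 (`max_steps`), consistent with a schedule annealed to zero over the full run.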