{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1473,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00679001867255135,
"grad_norm": 1.8175065517425537,
"learning_rate": 6.081081081081082e-06,
"loss": 0.1857,
"step": 10
},
{
"epoch": 0.0135800373451027,
"grad_norm": 0.9356499910354614,
"learning_rate": 1.2837837837837838e-05,
"loss": 0.1512,
"step": 20
},
{
"epoch": 0.02037005601765405,
"grad_norm": 0.19530220329761505,
"learning_rate": 1.9594594594594595e-05,
"loss": 0.1038,
"step": 30
},
{
"epoch": 0.0271600746902054,
"grad_norm": 0.1939963847398758,
"learning_rate": 2.635135135135135e-05,
"loss": 0.082,
"step": 40
},
{
"epoch": 0.03395009336275675,
"grad_norm": 0.25467729568481445,
"learning_rate": 3.310810810810811e-05,
"loss": 0.0825,
"step": 50
},
{
"epoch": 0.0407401120353081,
"grad_norm": 0.24394609034061432,
"learning_rate": 3.986486486486487e-05,
"loss": 0.0841,
"step": 60
},
{
"epoch": 0.04753013070785945,
"grad_norm": 0.18351706862449646,
"learning_rate": 4.662162162162162e-05,
"loss": 0.0795,
"step": 70
},
{
"epoch": 0.0543201493804108,
"grad_norm": 0.20120970904827118,
"learning_rate": 5.337837837837838e-05,
"loss": 0.072,
"step": 80
},
{
"epoch": 0.06111016805296215,
"grad_norm": 0.30063867568969727,
"learning_rate": 6.013513513513514e-05,
"loss": 0.0752,
"step": 90
},
{
"epoch": 0.0679001867255135,
"grad_norm": 0.35980555415153503,
"learning_rate": 6.68918918918919e-05,
"loss": 0.0673,
"step": 100
},
{
"epoch": 0.07469020539806484,
"grad_norm": 0.37400639057159424,
"learning_rate": 7.364864864864865e-05,
"loss": 0.069,
"step": 110
},
{
"epoch": 0.0814802240706162,
"grad_norm": 0.27199000120162964,
"learning_rate": 8.040540540540541e-05,
"loss": 0.0601,
"step": 120
},
{
"epoch": 0.08827024274316754,
"grad_norm": 0.3759700655937195,
"learning_rate": 8.716216216216216e-05,
"loss": 0.0655,
"step": 130
},
{
"epoch": 0.0950602614157189,
"grad_norm": 0.4591917395591736,
"learning_rate": 9.391891891891892e-05,
"loss": 0.0461,
"step": 140
},
{
"epoch": 0.10185028008827024,
"grad_norm": 0.2534618675708771,
"learning_rate": 9.999985945746134e-05,
"loss": 0.0515,
"step": 150
},
{
"epoch": 0.1086402987608216,
"grad_norm": 0.23862995207309723,
"learning_rate": 9.998299530880787e-05,
"loss": 0.054,
"step": 160
},
{
"epoch": 0.11543031743337294,
"grad_norm": 0.3287966847419739,
"learning_rate": 9.993803351513094e-05,
"loss": 0.041,
"step": 170
},
{
"epoch": 0.1222203361059243,
"grad_norm": 0.3776935636997223,
"learning_rate": 9.986499935143679e-05,
"loss": 0.0355,
"step": 180
},
{
"epoch": 0.12901035477847564,
"grad_norm": 0.503021776676178,
"learning_rate": 9.976393387344834e-05,
"loss": 0.0417,
"step": 190
},
{
"epoch": 0.135800373451027,
"grad_norm": 0.27203965187072754,
"learning_rate": 9.963489389452596e-05,
"loss": 0.0312,
"step": 200
},
{
"epoch": 0.14259039212357835,
"grad_norm": 0.37624090909957886,
"learning_rate": 9.947795195373016e-05,
"loss": 0.0379,
"step": 210
},
{
"epoch": 0.14938041079612968,
"grad_norm": 0.45090368390083313,
"learning_rate": 9.92931962750442e-05,
"loss": 0.0354,
"step": 220
},
{
"epoch": 0.15617042946868104,
"grad_norm": 0.26543551683425903,
"learning_rate": 9.908073071777954e-05,
"loss": 0.0274,
"step": 230
},
{
"epoch": 0.1629604481412324,
"grad_norm": 0.5471091270446777,
"learning_rate": 9.884067471819184e-05,
"loss": 0.0226,
"step": 240
},
{
"epoch": 0.16975046681378375,
"grad_norm": 0.5838286876678467,
"learning_rate": 9.857316322234067e-05,
"loss": 0.0288,
"step": 250
},
{
"epoch": 0.17654048548633508,
"grad_norm": 0.16055133938789368,
"learning_rate": 9.827834661023034e-05,
"loss": 0.0262,
"step": 260
},
{
"epoch": 0.18333050415888644,
"grad_norm": 0.33942335844039917,
"learning_rate": 9.795639061127468e-05,
"loss": 0.0233,
"step": 270
},
{
"epoch": 0.1901205228314378,
"grad_norm": 0.5522304177284241,
"learning_rate": 9.760747621113325e-05,
"loss": 0.0168,
"step": 280
},
{
"epoch": 0.19691054150398912,
"grad_norm": 0.6882764101028442,
"learning_rate": 9.723179954997125e-05,
"loss": 0.0184,
"step": 290
},
{
"epoch": 0.20370056017654048,
"grad_norm": 0.30410072207450867,
"learning_rate": 9.682957181220062e-05,
"loss": 0.0203,
"step": 300
},
{
"epoch": 0.21049057884909184,
"grad_norm": 0.30174189805984497,
"learning_rate": 9.640101910776381e-05,
"loss": 0.0191,
"step": 310
},
{
"epoch": 0.2172805975216432,
"grad_norm": 0.14357490837574005,
"learning_rate": 9.594638234502753e-05,
"loss": 0.0268,
"step": 320
},
{
"epoch": 0.22407061619419452,
"grad_norm": 0.19478002190589905,
"learning_rate": 9.546591709535751e-05,
"loss": 0.0218,
"step": 330
},
{
"epoch": 0.23086063486674588,
"grad_norm": 0.22178731858730316,
"learning_rate": 9.495989344945056e-05,
"loss": 0.0121,
"step": 340
},
{
"epoch": 0.23765065353929724,
"grad_norm": 0.5312681794166565,
"learning_rate": 9.442859586550468e-05,
"loss": 0.0096,
"step": 350
},
{
"epoch": 0.2444406722118486,
"grad_norm": 0.43574365973472595,
"learning_rate": 9.387232300931255e-05,
"loss": 0.0129,
"step": 360
},
{
"epoch": 0.2512306908843999,
"grad_norm": 0.029588760808110237,
"learning_rate": 9.32913875863684e-05,
"loss": 0.0193,
"step": 370
},
{
"epoch": 0.2580207095569513,
"grad_norm": 0.41019147634506226,
"learning_rate": 9.268611616608236e-05,
"loss": 0.0143,
"step": 380
},
{
"epoch": 0.26481072822950263,
"grad_norm": 0.13079501688480377,
"learning_rate": 9.205684899820147e-05,
"loss": 0.0197,
"step": 390
},
{
"epoch": 0.271600746902054,
"grad_norm": 0.04413610324263573,
"learning_rate": 9.140393982154026e-05,
"loss": 0.0163,
"step": 400
},
{
"epoch": 0.27839076557460535,
"grad_norm": 0.33774223923683167,
"learning_rate": 9.072775566512863e-05,
"loss": 0.0126,
"step": 410
},
{
"epoch": 0.2851807842471567,
"grad_norm": 0.11442425101995468,
"learning_rate": 9.002867664188858e-05,
"loss": 0.0049,
"step": 420
},
{
"epoch": 0.291970802919708,
"grad_norm": 0.24148213863372803,
"learning_rate": 8.930709573495602e-05,
"loss": 0.0061,
"step": 430
},
{
"epoch": 0.29876082159225936,
"grad_norm": 0.49706923961639404,
"learning_rate": 8.856341857676758e-05,
"loss": 0.0069,
"step": 440
},
{
"epoch": 0.3055508402648107,
"grad_norm": 0.8686341047286987,
"learning_rate": 8.779806322103682e-05,
"loss": 0.0056,
"step": 450
},
{
"epoch": 0.3123408589373621,
"grad_norm": 0.3294643461704254,
"learning_rate": 8.701145990774775e-05,
"loss": 0.0074,
"step": 460
},
{
"epoch": 0.31913087760991343,
"grad_norm": 0.14896813035011292,
"learning_rate": 8.620405082129798e-05,
"loss": 0.0098,
"step": 470
},
{
"epoch": 0.3259208962824648,
"grad_norm": 0.009663172997534275,
"learning_rate": 8.537628984192736e-05,
"loss": 0.0058,
"step": 480
},
{
"epoch": 0.33271091495501615,
"grad_norm": 0.03118247166275978,
"learning_rate": 8.452864229057191e-05,
"loss": 0.0066,
"step": 490
},
{
"epoch": 0.3395009336275675,
"grad_norm": 0.2592410743236542,
"learning_rate": 8.366158466728644e-05,
"loss": 0.0038,
"step": 500
},
{
"epoch": 0.3462909523001188,
"grad_norm": 0.006336142774671316,
"learning_rate": 8.277560438338278e-05,
"loss": 0.0063,
"step": 510
},
{
"epoch": 0.35308097097267016,
"grad_norm": 0.9991777539253235,
"learning_rate": 8.18711994874345e-05,
"loss": 0.008,
"step": 520
},
{
"epoch": 0.3598709896452215,
"grad_norm": 0.03354354575276375,
"learning_rate": 8.094887838530174e-05,
"loss": 0.0023,
"step": 530
},
{
"epoch": 0.3666610083177729,
"grad_norm": 0.1517736166715622,
"learning_rate": 8.000915955433396e-05,
"loss": 0.0072,
"step": 540
},
{
"epoch": 0.37345102699032423,
"grad_norm": 0.3716999888420105,
"learning_rate": 7.905257125191094e-05,
"loss": 0.0072,
"step": 550
},
{
"epoch": 0.3802410456628756,
"grad_norm": 0.15485776960849762,
"learning_rate": 7.807965121848597e-05,
"loss": 0.0061,
"step": 560
},
{
"epoch": 0.38703106433542694,
"grad_norm": 0.015828760340809822,
"learning_rate": 7.70909463752983e-05,
"loss": 0.0031,
"step": 570
},
{
"epoch": 0.39382108300797825,
"grad_norm": 0.472591370344162,
"learning_rate": 7.608701251692457e-05,
"loss": 0.0061,
"step": 580
},
{
"epoch": 0.4006111016805296,
"grad_norm": 0.49039226770401,
"learning_rate": 7.506841399884217e-05,
"loss": 0.0032,
"step": 590
},
{
"epoch": 0.40740112035308096,
"grad_norm": 0.013108485378324986,
"learning_rate": 7.403572342018021e-05,
"loss": 0.0083,
"step": 600
},
{
"epoch": 0.4141911390256323,
"grad_norm": 0.014069960452616215,
"learning_rate": 7.29895213018362e-05,
"loss": 0.0076,
"step": 610
},
{
"epoch": 0.4209811576981837,
"grad_norm": 0.016907596960663795,
"learning_rate": 7.193039576013976e-05,
"loss": 0.0035,
"step": 620
},
{
"epoch": 0.42777117637073503,
"grad_norm": 0.02205067314207554,
"learning_rate": 7.085894217624645e-05,
"loss": 0.0035,
"step": 630
},
{
"epoch": 0.4345611950432864,
"grad_norm": 0.0153163131326437,
"learning_rate": 6.977576286144784e-05,
"loss": 0.0008,
"step": 640
},
{
"epoch": 0.44135121371583774,
"grad_norm": 0.0036254250444471836,
"learning_rate": 6.868146671858567e-05,
"loss": 0.0061,
"step": 650
},
{
"epoch": 0.44814123238838904,
"grad_norm": 0.026128802448511124,
"learning_rate": 6.757666889976085e-05,
"loss": 0.0004,
"step": 660
},
{
"epoch": 0.4549312510609404,
"grad_norm": 0.009841466322541237,
"learning_rate": 6.646199046052921e-05,
"loss": 0.0054,
"step": 670
},
{
"epoch": 0.46172126973349176,
"grad_norm": 0.1286161094903946,
"learning_rate": 6.533805801077899e-05,
"loss": 0.0083,
"step": 680
},
{
"epoch": 0.4685112884060431,
"grad_norm": 0.1420389711856842,
"learning_rate": 6.420550336248558e-05,
"loss": 0.0011,
"step": 690
},
{
"epoch": 0.47530130707859447,
"grad_norm": 0.019207162782549858,
"learning_rate": 6.306496317454227e-05,
"loss": 0.0014,
"step": 700
},
{
"epoch": 0.4820913257511458,
"grad_norm": 0.02628934569656849,
"learning_rate": 6.191707859486622e-05,
"loss": 0.0071,
"step": 710
},
{
"epoch": 0.4888813444236972,
"grad_norm": 0.07600024342536926,
"learning_rate": 6.076249489998097e-05,
"loss": 0.0007,
"step": 720
},
{
"epoch": 0.49567136309624854,
"grad_norm": 0.057020459324121475,
"learning_rate": 5.9601861132278e-05,
"loss": 0.0029,
"step": 730
},
{
"epoch": 0.5024613817687998,
"grad_norm": 0.18588444590568542,
"learning_rate": 5.8435829735161416e-05,
"loss": 0.0015,
"step": 740
},
{
"epoch": 0.5092514004413512,
"grad_norm": 0.08587218075990677,
"learning_rate": 5.7265056186280754e-05,
"loss": 0.0064,
"step": 750
},
{
"epoch": 0.5160414191139026,
"grad_norm": 0.027101216837763786,
"learning_rate": 5.6090198629058134e-05,
"loss": 0.0002,
"step": 760
},
{
"epoch": 0.5228314377864539,
"grad_norm": 0.008036206476390362,
"learning_rate": 5.491191750271677e-05,
"loss": 0.0096,
"step": 770
},
{
"epoch": 0.5296214564590053,
"grad_norm": 0.02794210985302925,
"learning_rate": 5.373087517101899e-05,
"loss": 0.0005,
"step": 780
},
{
"epoch": 0.5364114751315566,
"grad_norm": 0.015519050881266594,
"learning_rate": 5.254773554992242e-05,
"loss": 0.0012,
"step": 790
},
{
"epoch": 0.543201493804108,
"grad_norm": 0.5680004954338074,
"learning_rate": 5.136316373436343e-05,
"loss": 0.0011,
"step": 800
},
{
"epoch": 0.5499915124766593,
"grad_norm": 0.01568940281867981,
"learning_rate": 5.0177825624378114e-05,
"loss": 0.0006,
"step": 810
},
{
"epoch": 0.5567815311492107,
"grad_norm": 0.15695148706436157,
"learning_rate": 4.8992387550770373e-05,
"loss": 0.0064,
"step": 820
},
{
"epoch": 0.563571549821762,
"grad_norm": 0.24050813913345337,
"learning_rate": 4.780751590053813e-05,
"loss": 0.0054,
"step": 830
},
{
"epoch": 0.5703615684943134,
"grad_norm": 0.008626952767372131,
"learning_rate": 4.662387674226771e-05,
"loss": 0.0032,
"step": 840
},
{
"epoch": 0.5771515871668647,
"grad_norm": 0.007616691756993532,
"learning_rate": 4.544213545170741e-05,
"loss": 0.0039,
"step": 850
},
{
"epoch": 0.583941605839416,
"grad_norm": 0.014656963758170605,
"learning_rate": 4.4262956337730385e-05,
"loss": 0.0012,
"step": 860
},
{
"epoch": 0.5907316245119674,
"grad_norm": 0.2970845401287079,
"learning_rate": 4.3087002268897434e-05,
"loss": 0.0011,
"step": 870
},
{
"epoch": 0.5975216431845187,
"grad_norm": 0.010836818255484104,
"learning_rate": 4.191493430082929e-05,
"loss": 0.0059,
"step": 880
},
{
"epoch": 0.6043116618570701,
"grad_norm": 0.016124485060572624,
"learning_rate": 4.074741130459813e-05,
"loss": 0.0051,
"step": 890
},
{
"epoch": 0.6111016805296214,
"grad_norm": 0.008550368249416351,
"learning_rate": 3.9585089596347094e-05,
"loss": 0.0006,
"step": 900
},
{
"epoch": 0.6178916992021728,
"grad_norm": 0.007996040396392345,
"learning_rate": 3.842862256834613e-05,
"loss": 0.0013,
"step": 910
},
{
"epoch": 0.6246817178747242,
"grad_norm": 0.01987188495695591,
"learning_rate": 3.727866032169127e-05,
"loss": 0.002,
"step": 920
},
{
"epoch": 0.6314717365472755,
"grad_norm": 0.006365388166159391,
"learning_rate": 3.6135849300854225e-05,
"loss": 0.0003,
"step": 930
},
{
"epoch": 0.6382617552198269,
"grad_norm": 0.029804030433297157,
"learning_rate": 3.500083193028741e-05,
"loss": 0.0052,
"step": 940
},
{
"epoch": 0.6450517738923782,
"grad_norm": 0.009540177881717682,
"learning_rate": 3.387424625328892e-05,
"loss": 0.0004,
"step": 950
},
{
"epoch": 0.6518417925649296,
"grad_norm": 0.007210243493318558,
"learning_rate": 3.275672557333015e-05,
"loss": 0.0038,
"step": 960
},
{
"epoch": 0.6586318112374809,
"grad_norm": 0.024687010794878006,
"learning_rate": 3.164889809804808e-05,
"loss": 0.0022,
"step": 970
},
{
"epoch": 0.6654218299100323,
"grad_norm": 0.005847941618412733,
"learning_rate": 3.055138658610202e-05,
"loss": 0.0035,
"step": 980
},
{
"epoch": 0.6722118485825836,
"grad_norm": 0.005948640406131744,
"learning_rate": 2.946480799709358e-05,
"loss": 0.0015,
"step": 990
},
{
"epoch": 0.679001867255135,
"grad_norm": 0.018232271075248718,
"learning_rate": 2.83897731447464e-05,
"loss": 0.0056,
"step": 1000
},
{
"epoch": 0.6857918859276863,
"grad_norm": 0.4337656795978546,
"learning_rate": 2.7326886353540916e-05,
"loss": 0.0027,
"step": 1010
},
{
"epoch": 0.6925819046002376,
"grad_norm": 0.4810737371444702,
"learning_rate": 2.627674511899678e-05,
"loss": 0.0042,
"step": 1020
},
{
"epoch": 0.699371923272789,
"grad_norm": 0.024683628231287003,
"learning_rate": 2.5239939771794563e-05,
"loss": 0.0006,
"step": 1030
},
{
"epoch": 0.7061619419453403,
"grad_norm": 0.3206782639026642,
"learning_rate": 2.421705314592452e-05,
"loss": 0.0038,
"step": 1040
},
{
"epoch": 0.7129519606178917,
"grad_norm": 0.194807767868042,
"learning_rate": 2.3208660251050158e-05,
"loss": 0.0012,
"step": 1050
},
{
"epoch": 0.719741979290443,
"grad_norm": 0.009538485668599606,
"learning_rate": 2.2215327949269886e-05,
"loss": 0.0004,
"step": 1060
},
{
"epoch": 0.7265319979629944,
"grad_norm": 0.00484565319493413,
"learning_rate": 2.123761463645878e-05,
"loss": 0.0003,
"step": 1070
},
{
"epoch": 0.7333220166355457,
"grad_norm": 0.0036626686342060566,
"learning_rate": 2.0276069928369746e-05,
"loss": 0.0002,
"step": 1080
},
{
"epoch": 0.7401120353080971,
"grad_norm": 0.006346094887703657,
"learning_rate": 1.933123435167018e-05,
"loss": 0.0008,
"step": 1090
},
{
"epoch": 0.7469020539806485,
"grad_norm": 0.0050748297944664955,
"learning_rate": 1.840363904008819e-05,
"loss": 0.0002,
"step": 1100
},
{
"epoch": 0.7536920726531998,
"grad_norm": 0.0035511874593794346,
"learning_rate": 1.7493805435838874e-05,
"loss": 0.0002,
"step": 1110
},
{
"epoch": 0.7604820913257512,
"grad_norm": 0.01719609834253788,
"learning_rate": 1.660224499649861e-05,
"loss": 0.0012,
"step": 1120
},
{
"epoch": 0.7672721099983025,
"grad_norm": 0.5908659100532532,
"learning_rate": 1.5729458907492327e-05,
"loss": 0.0021,
"step": 1130
},
{
"epoch": 0.7740621286708539,
"grad_norm": 0.0023723470512777567,
"learning_rate": 1.4875937800354988e-05,
"loss": 0.0002,
"step": 1140
},
{
"epoch": 0.7808521473434052,
"grad_norm": 0.003473122837021947,
"learning_rate": 1.404216147692598e-05,
"loss": 0.0001,
"step": 1150
},
{
"epoch": 0.7876421660159565,
"grad_norm": 0.0032885069958865643,
"learning_rate": 1.3228598639631418e-05,
"loss": 0.0046,
"step": 1160
},
{
"epoch": 0.7944321846885078,
"grad_norm": 0.004905693233013153,
"learning_rate": 1.2435706628005767e-05,
"loss": 0.0016,
"step": 1170
},
{
"epoch": 0.8012222033610592,
"grad_norm": 0.0058155064471066,
"learning_rate": 1.1663931161601188e-05,
"loss": 0.0027,
"step": 1180
},
{
"epoch": 0.8080122220336106,
"grad_norm": 0.007166674826294184,
"learning_rate": 1.0913706089428932e-05,
"loss": 0.003,
"step": 1190
},
{
"epoch": 0.8148022407061619,
"grad_norm": 0.002422675024718046,
"learning_rate": 1.0185453146073604e-05,
"loss": 0.0002,
"step": 1200
},
{
"epoch": 0.8215922593787133,
"grad_norm": 0.5606914758682251,
"learning_rate": 9.479581714617668e-06,
"loss": 0.0043,
"step": 1210
},
{
"epoch": 0.8283822780512646,
"grad_norm": 0.018740450963377953,
"learning_rate": 8.796488596509133e-06,
"loss": 0.0011,
"step": 1220
},
{
"epoch": 0.835172296723816,
"grad_norm": 0.0064006103202700615,
"learning_rate": 8.136557788501903e-06,
"loss": 0.0023,
"step": 1230
},
{
"epoch": 0.8419623153963673,
"grad_norm": 0.01389048807322979,
"learning_rate": 7.500160266794371e-06,
"loss": 0.0003,
"step": 1240
},
{
"epoch": 0.8487523340689187,
"grad_norm": 0.0191491711884737,
"learning_rate": 6.887653778487307e-06,
"loss": 0.0037,
"step": 1250
},
{
"epoch": 0.8555423527414701,
"grad_norm": 0.010205433703958988,
"learning_rate": 6.2993826404783965e-06,
"loss": 0.0004,
"step": 1260
},
{
"epoch": 0.8623323714140214,
"grad_norm": 0.005938360001891851,
"learning_rate": 5.735677545906626e-06,
"loss": 0.0002,
"step": 1270
},
{
"epoch": 0.8691223900865728,
"grad_norm": 0.42641371488571167,
"learning_rate": 5.196855378254989e-06,
"loss": 0.0019,
"step": 1280
},
{
"epoch": 0.8759124087591241,
"grad_norm": 0.008787781931459904,
"learning_rate": 4.683219033216402e-06,
"loss": 0.0006,
"step": 1290
},
{
"epoch": 0.8827024274316755,
"grad_norm": 0.001734372926875949,
"learning_rate": 4.1950572484226345e-06,
"loss": 0.0002,
"step": 1300
},
{
"epoch": 0.8894924461042268,
"grad_norm": 0.13424113392829895,
"learning_rate": 3.7326444411321547e-06,
"loss": 0.0003,
"step": 1310
},
{
"epoch": 0.8962824647767781,
"grad_norm": 0.0027601660694926977,
"learning_rate": 3.2962405539681217e-06,
"loss": 0.0002,
"step": 1320
},
{
"epoch": 0.9030724834493294,
"grad_norm": 0.011034502647817135,
"learning_rate": 2.8860909087931543e-06,
"loss": 0.0002,
"step": 1330
},
{
"epoch": 0.9098625021218808,
"grad_norm": 0.00510817626491189,
"learning_rate": 2.5024260688030987e-06,
"loss": 0.0002,
"step": 1340
},
{
"epoch": 0.9166525207944322,
"grad_norm": 0.006623192224651575,
"learning_rate": 2.145461708917312e-06,
"loss": 0.0019,
"step": 1350
},
{
"epoch": 0.9234425394669835,
"grad_norm": 0.008591300807893276,
"learning_rate": 1.8153984945382452e-06,
"loss": 0.0002,
"step": 1360
},
{
"epoch": 0.9302325581395349,
"grad_norm": 0.0064789485186338425,
"learning_rate": 1.512421968748623e-06,
"loss": 0.0006,
"step": 1370
},
{
"epoch": 0.9370225768120862,
"grad_norm": 0.009375182911753654,
"learning_rate": 1.2367024480094691e-06,
"loss": 0.0026,
"step": 1380
},
{
"epoch": 0.9438125954846376,
"grad_norm": 0.005188928451389074,
"learning_rate": 9.883949264176962e-07,
"loss": 0.0003,
"step": 1390
},
{
"epoch": 0.9506026141571889,
"grad_norm": 0.006225943099707365,
"learning_rate": 7.676389885771518e-07,
"loss": 0.0015,
"step": 1400
},
{
"epoch": 0.9573926328297403,
"grad_norm": 0.0153355086222291,
"learning_rate": 5.745587311318968e-07,
"loss": 0.0002,
"step": 1410
},
{
"epoch": 0.9641826515022917,
"grad_norm": 0.0042914156801998615,
"learning_rate": 4.0926269300603503e-07,
"loss": 0.0002,
"step": 1420
},
{
"epoch": 0.970972670174843,
"grad_norm": 0.011045188643038273,
"learning_rate": 2.71843794389226e-07,
"loss": 0.0002,
"step": 1430
},
{
"epoch": 0.9777626888473944,
"grad_norm": 0.00516318716108799,
"learning_rate": 1.623792845020955e-07,
"loss": 0.0004,
"step": 1440
},
{
"epoch": 0.9845527075199457,
"grad_norm": 0.008642218075692654,
"learning_rate": 8.093069817109445e-08,
"loss": 0.0002,
"step": 1450
},
{
"epoch": 0.9913427261924971,
"grad_norm": 0.010795416310429573,
"learning_rate": 2.7543821237030475e-08,
"loss": 0.0002,
"step": 1460
},
{
"epoch": 0.9981327448650483,
"grad_norm": 0.012065191753208637,
"learning_rate": 2.2486648168396075e-09,
"loss": 0.0002,
"step": 1470
}
],
"logging_steps": 10,
"max_steps": 1473,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.075096721816289e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}