{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 138,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 0.39333338141441343,
      "epoch": 0.03640776699029126,
      "grad_norm": 5.541357291512188,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 0.7481,
      "mean_token_accuracy": 0.8530021011829376,
      "num_tokens": 634119.0,
      "step": 5
    },
    {
      "entropy": 0.46669652859369914,
      "epoch": 0.07281553398058252,
      "grad_norm": 3.55874761602301,
      "learning_rate": 6.4285714285714295e-06,
      "loss": 0.4894,
      "mean_token_accuracy": 0.8847795287768047,
      "num_tokens": 1273023.0,
      "step": 10
    },
    {
      "entropy": 0.2333444893360138,
      "epoch": 0.10922330097087378,
      "grad_norm": 0.683356185791462,
      "learning_rate": 1e-05,
      "loss": 0.2045,
      "mean_token_accuracy": 0.9458525717258454,
      "num_tokens": 1901638.0,
      "step": 15
    },
    {
      "entropy": 0.14950522979100545,
      "epoch": 0.14563106796116504,
      "grad_norm": 0.4345966321170585,
      "learning_rate": 9.959935885253715e-06,
      "loss": 0.1443,
      "mean_token_accuracy": 0.9594493408997854,
      "num_tokens": 2543415.0,
      "step": 20
    },
    {
      "entropy": 0.1172327217956384,
      "epoch": 0.1820388349514563,
      "grad_norm": 0.4083364127472746,
      "learning_rate": 9.840385594331022e-06,
      "loss": 0.1135,
      "mean_token_accuracy": 0.9674430509408315,
      "num_tokens": 3177702.0,
      "step": 25
    },
    {
      "entropy": 0.10821170260508856,
      "epoch": 0.21844660194174756,
      "grad_norm": 0.3274112578074671,
      "learning_rate": 9.643264997861312e-06,
      "loss": 0.1053,
      "mean_token_accuracy": 0.969423896074295,
      "num_tokens": 3817129.0,
      "step": 30
    },
    {
      "entropy": 0.10363089988629023,
      "epoch": 0.25485436893203883,
      "grad_norm": 0.2651750049561702,
      "learning_rate": 9.371733080722911e-06,
      "loss": 0.101,
      "mean_token_accuracy": 0.9701913992563883,
      "num_tokens": 4450713.0,
      "step": 35
    },
    {
      "entropy": 0.09188476577401161,
      "epoch": 0.2912621359223301,
      "grad_norm": 0.3322177757328344,
      "learning_rate": 9.030141317270026e-06,
      "loss": 0.0896,
      "mean_token_accuracy": 0.9733767688274384,
      "num_tokens": 5099094.0,
      "step": 40
    },
    {
      "entropy": 0.09189487385253112,
      "epoch": 0.3276699029126214,
      "grad_norm": 0.27499471047406965,
      "learning_rate": 8.6239639361456e-06,
      "loss": 0.0892,
      "mean_token_accuracy": 0.9734424968560537,
      "num_tokens": 5746298.0,
      "step": 45
    },
    {
      "entropy": 0.09149620061119397,
      "epoch": 0.3640776699029126,
      "grad_norm": 0.2426196466051672,
      "learning_rate": 8.15971019223152e-06,
      "loss": 0.0888,
      "mean_token_accuracy": 0.9731526096661886,
      "num_tokens": 6396438.0,
      "step": 50
    },
    {
      "entropy": 0.08535861944158872,
      "epoch": 0.40048543689320387,
      "grad_norm": 0.25072345756012987,
      "learning_rate": 7.644820051634813e-06,
      "loss": 0.0833,
      "mean_token_accuracy": 0.9750010808308919,
      "num_tokens": 7045524.0,
      "step": 55
    },
    {
      "entropy": 0.08259956675271193,
      "epoch": 0.4368932038834951,
      "grad_norm": 0.2328355084383256,
      "learning_rate": 7.087544961425317e-06,
      "loss": 0.079,
      "mean_token_accuracy": 0.9756735742092133,
      "num_tokens": 7696316.0,
      "step": 60
    },
    {
      "entropy": 0.08420657614866893,
      "epoch": 0.4733009708737864,
      "grad_norm": 0.2627786396905739,
      "learning_rate": 6.496815614866792e-06,
      "loss": 0.0834,
      "mean_token_accuracy": 0.9745731969674428,
      "num_tokens": 8335632.0,
      "step": 65
    },
    {
      "entropy": 0.07922674703101316,
      "epoch": 0.5097087378640777,
      "grad_norm": 0.23198033388844116,
      "learning_rate": 5.882098831289044e-06,
      "loss": 0.0764,
      "mean_token_accuracy": 0.9763782580693563,
      "num_tokens": 8980788.0,
      "step": 70
    },
    {
      "entropy": 0.08260756817956766,
      "epoch": 0.5461165048543689,
      "grad_norm": 0.23246807001208797,
      "learning_rate": 5.253245844193564e-06,
      "loss": 0.0784,
      "mean_token_accuracy": 0.975836310784022,
      "num_tokens": 9615133.0,
      "step": 75
    },
    {
      "entropy": 0.0775965532908837,
      "epoch": 0.5825242718446602,
      "grad_norm": 0.23509755618479103,
      "learning_rate": 4.62033442887377e-06,
      "loss": 0.075,
      "mean_token_accuracy": 0.9768483221530915,
      "num_tokens": 10272323.0,
      "step": 80
    },
    {
      "entropy": 0.0796258964886268,
      "epoch": 0.6189320388349514,
      "grad_norm": 0.2398016429212428,
      "learning_rate": 3.993507399556699e-06,
      "loss": 0.0769,
      "mean_token_accuracy": 0.9760827680428823,
      "num_tokens": 10909479.0,
      "step": 85
    },
    {
      "entropy": 0.08220818725725015,
      "epoch": 0.6553398058252428,
      "grad_norm": 0.22572998880022216,
      "learning_rate": 3.3828100642538097e-06,
      "loss": 0.0779,
      "mean_token_accuracy": 0.9757848223050435,
      "num_tokens": 11539056.0,
      "step": 90
    },
    {
      "entropy": 0.07774231346944968,
      "epoch": 0.691747572815534,
      "grad_norm": 0.22123308445803502,
      "learning_rate": 2.7980292422118282e-06,
      "loss": 0.0739,
      "mean_token_accuracy": 0.9770070095856984,
      "num_tokens": 12175621.0,
      "step": 95
    },
    {
      "entropy": 0.07790198549628258,
      "epoch": 0.7281553398058253,
      "grad_norm": 0.22549132644894587,
      "learning_rate": 2.2485364238130435e-06,
      "loss": 0.0743,
      "mean_token_accuracy": 0.9770615140597025,
      "num_tokens": 12820187.0,
      "step": 100
    },
    {
      "entropy": 0.07972427035371463,
      "epoch": 0.7645631067961165,
      "grad_norm": 0.22395855789852193,
      "learning_rate": 1.74313758638889e-06,
      "loss": 0.0753,
      "mean_token_accuracy": 0.9765656272570292,
      "num_tokens": 13471214.0,
      "step": 105
    },
    {
      "entropy": 0.07861521604160468,
      "epoch": 0.8009708737864077,
      "grad_norm": 0.21707494419816833,
      "learning_rate": 1.2899320727454472e-06,
      "loss": 0.0758,
      "mean_token_accuracy": 0.9762462894121806,
      "num_tokens": 14104842.0,
      "step": 110
    },
    {
      "entropy": 0.07681530776123206,
      "epoch": 0.837378640776699,
      "grad_norm": 0.2048280957572369,
      "learning_rate": 8.961827939636198e-07,
      "loss": 0.0752,
      "mean_token_accuracy": 0.9767355759938557,
      "num_tokens": 14754145.0,
      "step": 115
    },
    {
      "entropy": 0.07755378099779288,
      "epoch": 0.8737864077669902,
      "grad_norm": 0.21592550407174824,
      "learning_rate": 5.681998365579594e-07,
      "loss": 0.0743,
      "mean_token_accuracy": 0.9771900673707327,
      "num_tokens": 15399256.0,
      "step": 120
    },
    {
      "entropy": 0.07576616617540519,
      "epoch": 0.9101941747572816,
      "grad_norm": 0.19062427853278816,
      "learning_rate": 3.112393392645985e-07,
      "loss": 0.0727,
      "mean_token_accuracy": 0.9775316794713338,
      "num_tokens": 16043949.0,
      "step": 125
    },
    {
      "entropy": 0.07121096017460028,
      "epoch": 0.9466019417475728,
      "grad_norm": 0.20849501542469207,
      "learning_rate": 1.2941926002306536e-07,
      "loss": 0.068,
      "mean_token_accuracy": 0.9787502566973368,
      "num_tokens": 16696773.0,
      "step": 130
    },
    {
      "entropy": 0.07767875120043755,
      "epoch": 0.9830097087378641,
      "grad_norm": 0.19245780653274294,
      "learning_rate": 2.5653383040524228e-08,
      "loss": 0.0731,
      "mean_token_accuracy": 0.9773090521494547,
      "num_tokens": 17351570.0,
      "step": 135
    },
    {
      "entropy": 0.07084919246179718,
      "epoch": 1.0,
      "mean_token_accuracy": 0.9790157548018864,
      "num_tokens": 17657223.0,
      "step": 138,
      "total_flos": 39414093316096.0,
      "train_loss": 0.1270361747862636,
      "train_runtime": 1974.4991,
      "train_samples_per_second": 3.338,
      "train_steps_per_second": 0.07
    }
  ],
  "logging_steps": 5,
  "max_steps": 138,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 39414093316096.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}