{
  "best_global_step": 120,
  "best_metric": 0.7616215348243713,
  "best_model_checkpoint": "/dss/dssfs05/pn39qo/pn39qo-dss-0001/tong/efficient_reasoning/extraction-vs-summary-efficient-cot-reasoning-perspective---Experiment-main/output/lora/Limo_llama/checkpoint-120",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 120,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.16842105263157894,
      "grad_norm": 0.1806342601776123,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.981,
      "step": 2
    },
    {
      "epoch": 0.3368421052631579,
      "grad_norm": 0.18056336045265198,
      "learning_rate": 4e-05,
      "loss": 0.9863,
      "step": 4
    },
    {
      "epoch": 0.5052631578947369,
      "grad_norm": 0.16754654049873352,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.9869,
      "step": 6
    },
    {
      "epoch": 0.6736842105263158,
      "grad_norm": 0.24061395227909088,
      "learning_rate": 7.998481228099806e-05,
      "loss": 0.9226,
      "step": 8
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 0.23444652557373047,
      "learning_rate": 7.98633797202668e-05,
      "loss": 0.9103,
      "step": 10
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.18950825929641724,
      "learning_rate": 7.962088338550013e-05,
      "loss": 0.9047,
      "step": 12
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.9275959730148315,
      "eval_runtime": 14.0544,
      "eval_samples_per_second": 2.846,
      "eval_steps_per_second": 0.712,
      "step": 12
    },
    {
      "epoch": 1.168421052631579,
      "grad_norm": 0.15563638508319855,
      "learning_rate": 7.925805973009672e-05,
      "loss": 0.8801,
      "step": 14
    },
    {
      "epoch": 1.3368421052631578,
      "grad_norm": 0.13302995264530182,
      "learning_rate": 7.877601063757323e-05,
      "loss": 0.8665,
      "step": 16
    },
    {
      "epoch": 1.5052631578947369,
      "grad_norm": 0.11213305592536926,
      "learning_rate": 7.81762000751803e-05,
      "loss": 0.86,
      "step": 18
    },
    {
      "epoch": 1.6736842105263157,
      "grad_norm": 0.11819213628768921,
      "learning_rate": 7.74604496478822e-05,
      "loss": 0.8542,
      "step": 20
    },
    {
      "epoch": 1.8421052631578947,
      "grad_norm": 0.09719839692115784,
      "learning_rate": 7.663093306620231e-05,
      "loss": 0.8334,
      "step": 22
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.1100865826010704,
      "learning_rate": 7.569016954473577e-05,
      "loss": 0.8453,
      "step": 24
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.8575334548950195,
      "eval_runtime": 14.0872,
      "eval_samples_per_second": 2.839,
      "eval_steps_per_second": 0.71,
      "step": 24
    },
    {
      "epoch": 2.168421052631579,
      "grad_norm": 0.10792635381221771,
      "learning_rate": 7.464101615137756e-05,
      "loss": 0.8199,
      "step": 26
    },
    {
      "epoch": 2.336842105263158,
      "grad_norm": 0.07824651896953583,
      "learning_rate": 7.348665913050115e-05,
      "loss": 0.8192,
      "step": 28
    },
    {
      "epoch": 2.5052631578947366,
      "grad_norm": 0.0888928472995758,
      "learning_rate": 7.223060422643914e-05,
      "loss": 0.8278,
      "step": 30
    },
    {
      "epoch": 2.6736842105263157,
      "grad_norm": 0.08335104584693909,
      "learning_rate": 7.087666603665284e-05,
      "loss": 0.7898,
      "step": 32
    },
    {
      "epoch": 2.8421052631578947,
      "grad_norm": 0.07229428738355637,
      "learning_rate": 6.942895642692527e-05,
      "loss": 0.7708,
      "step": 34
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.0911983922123909,
      "learning_rate": 6.789187204375981e-05,
      "loss": 0.7935,
      "step": 36
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.8196868896484375,
      "eval_runtime": 14.0476,
      "eval_samples_per_second": 2.847,
      "eval_steps_per_second": 0.712,
      "step": 36
    },
    {
      "epoch": 3.168421052631579,
      "grad_norm": 0.07855521142482758,
      "learning_rate": 6.627008096190938e-05,
      "loss": 0.7758,
      "step": 38
    },
    {
      "epoch": 3.336842105263158,
      "grad_norm": 0.07116546481847763,
      "learning_rate": 6.456850850758673e-05,
      "loss": 0.7676,
      "step": 40
    },
    {
      "epoch": 3.5052631578947366,
      "grad_norm": 0.07479118555784225,
      "learning_rate": 6.279232230041065e-05,
      "loss": 0.7809,
      "step": 42
    },
    {
      "epoch": 3.6736842105263157,
      "grad_norm": 0.07439759373664856,
      "learning_rate": 6.094691655951512e-05,
      "loss": 0.7603,
      "step": 44
    },
    {
      "epoch": 3.8421052631578947,
      "grad_norm": 0.07488042116165161,
      "learning_rate": 5.903789572148295e-05,
      "loss": 0.7592,
      "step": 46
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.081581249833107,
      "learning_rate": 5.707105741985615e-05,
      "loss": 0.7744,
      "step": 48
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.7952843308448792,
      "eval_runtime": 14.0551,
      "eval_samples_per_second": 2.846,
      "eval_steps_per_second": 0.711,
      "step": 48
    },
    {
      "epoch": 4.168421052631579,
      "grad_norm": 0.07737040519714355,
      "learning_rate": 5.505237487791343e-05,
      "loss": 0.7539,
      "step": 50
    },
    {
      "epoch": 4.336842105263158,
      "grad_norm": 0.0744493380188942,
      "learning_rate": 5.298797876818735e-05,
      "loss": 0.7744,
      "step": 52
    },
    {
      "epoch": 4.505263157894737,
      "grad_norm": 0.07815603166818619,
      "learning_rate": 5.088413859381341e-05,
      "loss": 0.7403,
      "step": 54
    },
    {
      "epoch": 4.673684210526316,
      "grad_norm": 0.07466059178113937,
      "learning_rate": 4.874724364825504e-05,
      "loss": 0.7633,
      "step": 56
    },
    {
      "epoch": 4.842105263157895,
      "grad_norm": 0.07749485224485397,
      "learning_rate": 4.658378361122936e-05,
      "loss": 0.7238,
      "step": 58
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.07556112110614777,
      "learning_rate": 4.440032883976318e-05,
      "loss": 0.7254,
      "step": 60
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.7805129289627075,
      "eval_runtime": 14.04,
      "eval_samples_per_second": 2.849,
      "eval_steps_per_second": 0.712,
      "step": 60
    },
    {
      "epoch": 5.168421052631579,
      "grad_norm": 0.08062199503183365,
      "learning_rate": 4.220351041423462e-05,
      "loss": 0.7326,
      "step": 62
    },
    {
      "epoch": 5.336842105263158,
      "grad_norm": 0.07008747011423111,
      "learning_rate": 4e-05,
      "loss": 0.7352,
      "step": 64
    },
    {
      "epoch": 5.505263157894737,
      "grad_norm": 0.07470979541540146,
      "learning_rate": 3.779648958576538e-05,
      "loss": 0.7231,
      "step": 66
    },
    {
      "epoch": 5.673684210526316,
      "grad_norm": 0.0774485394358635,
      "learning_rate": 3.559967116023683e-05,
      "loss": 0.7434,
      "step": 68
    },
    {
      "epoch": 5.842105263157895,
      "grad_norm": 0.07653193175792694,
      "learning_rate": 3.341621638877064e-05,
      "loss": 0.7203,
      "step": 70
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.08636533468961716,
      "learning_rate": 3.125275635174497e-05,
      "loss": 0.7332,
      "step": 72
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.770425021648407,
      "eval_runtime": 14.0425,
      "eval_samples_per_second": 2.848,
      "eval_steps_per_second": 0.712,
      "step": 72
    },
    {
      "epoch": 6.168421052631579,
      "grad_norm": 0.0719999447464943,
      "learning_rate": 2.9115861406186593e-05,
      "loss": 0.7091,
      "step": 74
    },
    {
      "epoch": 6.336842105263158,
      "grad_norm": 0.08508865535259247,
      "learning_rate": 2.7012021231812666e-05,
      "loss": 0.7064,
      "step": 76
    },
    {
      "epoch": 6.505263157894737,
      "grad_norm": 0.08324179798364639,
      "learning_rate": 2.4947625122086585e-05,
      "loss": 0.7275,
      "step": 78
    },
    {
      "epoch": 6.673684210526316,
      "grad_norm": 0.08268845826387405,
      "learning_rate": 2.2928942580143855e-05,
      "loss": 0.7246,
      "step": 80
    },
    {
      "epoch": 6.842105263157895,
      "grad_norm": 0.07947085797786713,
      "learning_rate": 2.096210427851706e-05,
      "loss": 0.7414,
      "step": 82
    },
    {
      "epoch": 7.0,
      "grad_norm": 0.08708203583955765,
      "learning_rate": 1.9053083440484887e-05,
      "loss": 0.7149,
      "step": 84
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.7655454874038696,
      "eval_runtime": 14.044,
      "eval_samples_per_second": 2.848,
      "eval_steps_per_second": 0.712,
      "step": 84
    },
    {
      "epoch": 7.168421052631579,
      "grad_norm": 0.07903850823640823,
      "learning_rate": 1.7207677699589355e-05,
      "loss": 0.7176,
      "step": 86
    },
    {
      "epoch": 7.336842105263158,
      "grad_norm": 0.07560527324676514,
      "learning_rate": 1.5431491492413288e-05,
      "loss": 0.6999,
      "step": 88
    },
    {
      "epoch": 7.505263157894737,
      "grad_norm": 0.07428433746099472,
      "learning_rate": 1.3729919038090627e-05,
      "loss": 0.7122,
      "step": 90
    },
    {
      "epoch": 7.673684210526316,
      "grad_norm": 0.07619287818670273,
      "learning_rate": 1.2108127956240186e-05,
      "loss": 0.7084,
      "step": 92
    },
    {
      "epoch": 7.842105263157895,
      "grad_norm": 0.07437249273061752,
      "learning_rate": 1.0571043573074737e-05,
      "loss": 0.7195,
      "step": 94
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.08423606306314468,
      "learning_rate": 9.123333963347166e-06,
      "loss": 0.7298,
      "step": 96
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.7627403140068054,
      "eval_runtime": 14.0437,
      "eval_samples_per_second": 2.848,
      "eval_steps_per_second": 0.712,
      "step": 96
    },
    {
      "epoch": 8.16842105263158,
      "grad_norm": 0.07223664224147797,
      "learning_rate": 7.769395773560874e-06,
      "loss": 0.7038,
      "step": 98
    },
    {
      "epoch": 8.336842105263157,
      "grad_norm": 0.07732164114713669,
      "learning_rate": 6.513340869498859e-06,
      "loss": 0.7244,
      "step": 100
    },
    {
      "epoch": 8.505263157894737,
      "grad_norm": 0.07888870686292648,
      "learning_rate": 5.358983848622452e-06,
      "loss": 0.7174,
      "step": 102
    },
    {
      "epoch": 8.673684210526316,
      "grad_norm": 0.07529887557029724,
      "learning_rate": 4.3098304552642385e-06,
      "loss": 0.7,
      "step": 104
    },
    {
      "epoch": 8.842105263157894,
      "grad_norm": 0.07611861079931259,
      "learning_rate": 3.3690669337977e-06,
      "loss": 0.7021,
      "step": 106
    },
    {
      "epoch": 9.0,
      "grad_norm": 0.07626490294933319,
      "learning_rate": 2.5395503521178143e-06,
      "loss": 0.7228,
      "step": 108
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.7619258165359497,
      "eval_runtime": 14.0325,
      "eval_samples_per_second": 2.851,
      "eval_steps_per_second": 0.713,
      "step": 108
    },
    {
      "epoch": 9.16842105263158,
      "grad_norm": 0.07464896142482758,
      "learning_rate": 1.8237999248197002e-06,
      "loss": 0.7182,
      "step": 110
    },
    {
      "epoch": 9.336842105263157,
      "grad_norm": 0.07674363255500793,
      "learning_rate": 1.2239893624267852e-06,
      "loss": 0.7328,
      "step": 112
    },
    {
      "epoch": 9.505263157894737,
      "grad_norm": 0.0755220353603363,
      "learning_rate": 7.419402699032852e-07,
      "loss": 0.6907,
      "step": 114
    },
    {
      "epoch": 9.673684210526316,
      "grad_norm": 0.07859135419130325,
      "learning_rate": 3.791166144998704e-07,
      "loss": 0.6981,
      "step": 116
    },
    {
      "epoch": 9.842105263157894,
      "grad_norm": 0.07493507862091064,
      "learning_rate": 1.3662027973320614e-07,
      "loss": 0.7124,
      "step": 118
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.07166945189237595,
      "learning_rate": 1.5187719001943378e-08,
      "loss": 0.7103,
      "step": 120
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.7616215348243713,
      "eval_runtime": 14.0416,
      "eval_samples_per_second": 2.849,
      "eval_steps_per_second": 0.712,
      "step": 120
    },
    {
      "epoch": 10.0,
      "step": 120,
      "total_flos": 3.853307493895635e+18,
      "train_loss": 0.7713966806729634,
      "train_runtime": 10792.2617,
      "train_samples_per_second": 0.704,
      "train_steps_per_second": 0.011
    }
  ],
  "logging_steps": 2,
  "max_steps": 120,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.853307493895635e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}