{
  "best_metric": 0.3995174765586853,
  "best_model_checkpoint": "./speecht5_tts_mabama_es/checkpoint-1200",
  "epoch": 25.53191489361702,
  "eval_steps": 1200,
  "global_step": 1200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.53,
      "grad_norm": 12.126326560974121,
      "learning_rate": 2.2e-07,
      "loss": 1.0931,
      "step": 25
    },
    {
      "epoch": 1.06,
      "grad_norm": 9.78133487701416,
      "learning_rate": 4.7000000000000005e-07,
      "loss": 1.0628,
      "step": 50
    },
    {
      "epoch": 1.6,
      "grad_norm": 24.999067306518555,
      "learning_rate": 7.2e-07,
      "loss": 0.947,
      "step": 75
    },
    {
      "epoch": 2.13,
      "grad_norm": 5.398886680603027,
      "learning_rate": 9.7e-07,
      "loss": 0.898,
      "step": 100
    },
    {
      "epoch": 2.66,
      "grad_norm": 12.817495346069336,
      "learning_rate": 1.2200000000000002e-06,
      "loss": 0.8029,
      "step": 125
    },
    {
      "epoch": 3.19,
      "grad_norm": 7.071120738983154,
      "learning_rate": 1.4700000000000001e-06,
      "loss": 0.7889,
      "step": 150
    },
    {
      "epoch": 3.72,
      "grad_norm": 4.3572163581848145,
      "learning_rate": 1.72e-06,
      "loss": 0.7719,
      "step": 175
    },
    {
      "epoch": 4.26,
      "grad_norm": 5.61962366104126,
      "learning_rate": 1.97e-06,
      "loss": 0.764,
      "step": 200
    },
    {
      "epoch": 4.79,
      "grad_norm": 6.79171085357666,
      "learning_rate": 2.2200000000000003e-06,
      "loss": 0.7441,
      "step": 225
    },
    {
      "epoch": 5.32,
      "grad_norm": 6.525346755981445,
      "learning_rate": 2.47e-06,
      "loss": 0.711,
      "step": 250
    },
    {
      "epoch": 5.85,
      "grad_norm": 1.7001874446868896,
      "learning_rate": 2.7200000000000002e-06,
      "loss": 0.7059,
      "step": 275
    },
    {
      "epoch": 6.38,
      "grad_norm": 4.468942642211914,
      "learning_rate": 2.97e-06,
      "loss": 0.7021,
      "step": 300
    },
    {
      "epoch": 6.91,
      "grad_norm": 7.033669948577881,
      "learning_rate": 3.2200000000000005e-06,
      "loss": 0.6266,
      "step": 325
    },
    {
      "epoch": 7.45,
      "grad_norm": 3.8984785079956055,
      "learning_rate": 3.4700000000000002e-06,
      "loss": 0.6133,
      "step": 350
    },
    {
      "epoch": 7.98,
      "grad_norm": 3.643061876296997,
      "learning_rate": 3.7200000000000004e-06,
      "loss": 0.58,
      "step": 375
    },
    {
      "epoch": 8.51,
      "grad_norm": 6.500895023345947,
      "learning_rate": 3.97e-06,
      "loss": 0.5389,
      "step": 400
    },
    {
      "epoch": 9.04,
      "grad_norm": 3.851013422012329,
      "learning_rate": 4.22e-06,
      "loss": 0.5578,
      "step": 425
    },
    {
      "epoch": 9.57,
      "grad_norm": 5.976474285125732,
      "learning_rate": 4.47e-06,
      "loss": 0.5266,
      "step": 450
    },
    {
      "epoch": 10.11,
      "grad_norm": 4.052149772644043,
      "learning_rate": 4.7200000000000005e-06,
      "loss": 0.5102,
      "step": 475
    },
    {
      "epoch": 10.64,
      "grad_norm": 6.552683353424072,
      "learning_rate": 4.970000000000001e-06,
      "loss": 0.5382,
      "step": 500
    },
    {
      "epoch": 11.17,
      "grad_norm": 2.714641571044922,
      "learning_rate": 5.220000000000001e-06,
      "loss": 0.5167,
      "step": 525
    },
    {
      "epoch": 11.7,
      "grad_norm": 3.426229953765869,
      "learning_rate": 5.470000000000001e-06,
      "loss": 0.5106,
      "step": 550
    },
    {
      "epoch": 12.23,
      "grad_norm": 5.78665018081665,
      "learning_rate": 5.72e-06,
      "loss": 0.5155,
      "step": 575
    },
    {
      "epoch": 12.77,
      "grad_norm": 1.958343744277954,
      "learning_rate": 5.9700000000000004e-06,
      "loss": 0.4872,
      "step": 600
    },
    {
      "epoch": 13.3,
      "grad_norm": 2.1352875232696533,
      "learning_rate": 6.220000000000001e-06,
      "loss": 0.4866,
      "step": 625
    },
    {
      "epoch": 13.83,
      "grad_norm": 3.6809630393981934,
      "learning_rate": 6.470000000000001e-06,
      "loss": 0.5014,
      "step": 650
    },
    {
      "epoch": 14.36,
      "grad_norm": 2.484553098678589,
      "learning_rate": 6.720000000000001e-06,
      "loss": 0.4892,
      "step": 675
    },
    {
      "epoch": 14.89,
      "grad_norm": 3.656264305114746,
      "learning_rate": 6.97e-06,
      "loss": 0.4712,
      "step": 700
    },
    {
      "epoch": 15.43,
      "grad_norm": 3.709062337875366,
      "learning_rate": 7.22e-06,
      "loss": 0.4843,
      "step": 725
    },
    {
      "epoch": 15.96,
      "grad_norm": 2.776505708694458,
      "learning_rate": 7.4700000000000005e-06,
      "loss": 0.4743,
      "step": 750
    },
    {
      "epoch": 16.49,
      "grad_norm": 3.913029909133911,
      "learning_rate": 7.72e-06,
      "loss": 0.4841,
      "step": 775
    },
    {
      "epoch": 17.02,
      "grad_norm": 3.667405366897583,
      "learning_rate": 7.970000000000002e-06,
      "loss": 0.4853,
      "step": 800
    },
    {
      "epoch": 17.55,
      "grad_norm": 2.917257070541382,
      "learning_rate": 8.220000000000001e-06,
      "loss": 0.4773,
      "step": 825
    },
    {
      "epoch": 18.09,
      "grad_norm": 3.051287889480591,
      "learning_rate": 8.47e-06,
      "loss": 0.4772,
      "step": 850
    },
    {
      "epoch": 18.62,
      "grad_norm": 3.4974617958068848,
      "learning_rate": 8.720000000000001e-06,
      "loss": 0.4599,
      "step": 875
    },
    {
      "epoch": 19.15,
      "grad_norm": 2.259305238723755,
      "learning_rate": 8.97e-06,
      "loss": 0.4694,
      "step": 900
    },
    {
      "epoch": 19.68,
      "grad_norm": 3.4457366466522217,
      "learning_rate": 9.220000000000002e-06,
      "loss": 0.4616,
      "step": 925
    },
    {
      "epoch": 20.21,
      "grad_norm": 7.221055030822754,
      "learning_rate": 9.47e-06,
      "loss": 0.4613,
      "step": 950
    },
    {
      "epoch": 20.74,
      "grad_norm": 9.327741622924805,
      "learning_rate": 9.72e-06,
      "loss": 0.4634,
      "step": 975
    },
    {
      "epoch": 21.28,
      "grad_norm": 3.205131769180298,
      "learning_rate": 9.970000000000001e-06,
      "loss": 0.4691,
      "step": 1000
    },
    {
      "epoch": 21.81,
      "grad_norm": 5.403283596038818,
      "learning_rate": 8.900000000000001e-06,
      "loss": 0.46,
      "step": 1025
    },
    {
      "epoch": 22.34,
      "grad_norm": 2.766523838043213,
      "learning_rate": 7.650000000000001e-06,
      "loss": 0.4462,
      "step": 1050
    },
    {
      "epoch": 22.87,
      "grad_norm": 3.033956289291382,
      "learning_rate": 6.4000000000000006e-06,
      "loss": 0.4491,
      "step": 1075
    },
    {
      "epoch": 23.4,
      "grad_norm": 3.4086856842041016,
      "learning_rate": 5.150000000000001e-06,
      "loss": 0.4452,
      "step": 1100
    },
    {
      "epoch": 23.94,
      "grad_norm": 2.144127607345581,
      "learning_rate": 3.900000000000001e-06,
      "loss": 0.4508,
      "step": 1125
    },
    {
      "epoch": 24.47,
      "grad_norm": 2.2593584060668945,
      "learning_rate": 2.6500000000000005e-06,
      "loss": 0.4432,
      "step": 1150
    },
    {
      "epoch": 25.0,
      "grad_norm": 3.3258821964263916,
      "learning_rate": 1.4000000000000001e-06,
      "loss": 0.4526,
      "step": 1175
    },
    {
      "epoch": 25.53,
      "grad_norm": 1.783236026763916,
      "learning_rate": 1.5000000000000002e-07,
      "loss": 0.4516,
      "step": 1200
    },
    {
      "epoch": 25.53,
      "eval_loss": 0.3995174765586853,
      "eval_runtime": 4.5311,
      "eval_samples_per_second": 36.857,
      "eval_steps_per_second": 4.635,
      "step": 1200
    }
  ],
  "logging_steps": 25,
  "max_steps": 1200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 26,
  "save_steps": 1200,
  "total_flos": 3336935371995456.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}