AmirMohseni committed on
Commit 34a5949 · verified · 1 Parent(s): 277d981

Model save

Files changed (5)
  1. README.md +75 -0
  2. all_results.json +9 -0
  3. model.safetensors +1 -1
  4. train_results.json +9 -0
  5. trainer_state.json +396 -0
README.md ADDED
@@ -0,0 +1,75 @@
+ ---
+ library_name: transformers
+ license: mit
+ base_model: jhu-clsp/mmBERT-base
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ - precision
+ - recall
+ - f1
+ model-index:
+ - name: router-mmBERT-base-text-only-v3
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # router-mmBERT-base-text-only-v3
+
+ This model is a fine-tuned version of [jhu-clsp/mmBERT-base](https://huggingface.co/jhu-clsp/mmBERT-base) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.5245
+ - Accuracy: 0.7443
+ - Precision: 0.7442
+ - Recall: 0.7443
+ - F1: 0.7261
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0001
+ - train_batch_size: 16
+ - eval_batch_size: 32
+ - seed: 42
+ - optimizer: ADAMW_TORCH_FUSED (fused AdamW) with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+ - lr_scheduler_type: cosine
+ - num_epochs: 2
+
+ ### Training results
+
+ | Training Loss | Epoch  | Step | Validation Loss | Accuracy | Precision | Recall | F1     |
+ |:-------------:|:------:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|
+ | No log        | 0      | 0    | 1.0763          | 0.6193   | 0.5068    | 0.6193 | 0.5051 |
+ | 0.7067        | 0.2273 | 20   | 0.6497          | 0.6818   | 0.6798    | 0.6818 | 0.6807 |
+ | 0.7588        | 0.4545 | 40   | 0.8423          | 0.5114   | 0.7484    | 0.5114 | 0.4657 |
+ | 0.5581        | 0.6818 | 60   | 0.6621          | 0.6818   | 0.7433    | 0.6818 | 0.6867 |
+ | 0.641         | 0.9091 | 80   | 0.6033          | 0.6477   | 0.6849    | 0.6477 | 0.6544 |
+ | 0.5344        | 1.1364 | 100  | 0.6142          | 0.7216   | 0.7176    | 0.7216 | 0.7190 |
+ | 0.5484        | 1.3636 | 120  | 0.6226          | 0.6818   | 0.7328    | 0.6818 | 0.6874 |
+ | 0.5466        | 1.5909 | 140  | 0.5224          | 0.75     | 0.7525    | 0.75   | 0.7311 |
+ | 0.5092        | 1.8182 | 160  | 0.5245          | 0.7443   | 0.7442    | 0.7443 | 0.7261 |
+
+
+ ### Framework versions
+
+ - Transformers 4.57.1
+ - Pytorch 2.8.0+cu128
+ - Datasets 4.2.0
+ - Tokenizers 0.22.1
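
The generated card stops short of a usage snippet, so a minimal inference sketch follows. It assumes the checkpoint keeps a standard sequence-classification head and that the repo id is `AmirMohseni/router-mmBERT-base-text-only-v3` (inferred from the committer and model name, not stated in the diff); the predicted labels depend on whatever `id2label` mapping was saved with the model.

```python
# Minimal sketch: run the fine-tuned router checkpoint for inference.
# Assumption: the repo id below matches the committer/model name; adjust if needed.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="AmirMohseni/router-mmBERT-base-text-only-v3",
)

# Labels come from the checkpoint's saved id2label mapping (not documented in the card).
print(classifier("Write a short poem about the ocean."))
```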
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 2.0,
+     "total_flos": 827712122198220.0,
+     "train_loss": 0.6994981775906953,
+     "train_runtime": 127.8433,
+     "train_samples": 1407,
+     "train_samples_per_second": 22.011,
+     "train_steps_per_second": 1.377
+ }
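
The aggregate numbers in all_results.json are internally consistent: train_samples × epochs ÷ train_runtime reproduces the reported throughput (1407 × 2.0 / 127.8433 ≈ 22.01 samples/s). A minimal check, assuming the file has been downloaded locally:

```python
# Sanity-check the aggregate training stats in all_results.json.
# Assumption: the file sits next to this script.
import json

with open("all_results.json") as f:
    results = json.load(f)

implied = results["train_samples"] * results["epoch"] / results["train_runtime"]
print(f"reported: {results['train_samples_per_second']:.3f} samples/s")
print(f"implied:  {implied:.3f} samples/s")  # 1407 * 2.0 / 127.8433 ≈ 22.01
```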
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b6d82cc300943a1ac0fec3c61ed0ae4168764bfafcf388a226159324cbcc15d5
+ oid sha256:230d06c7857f483c3990389a77f450b240f0eb648be0b35f93647f763afcc484
  size 1230141424
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 2.0,
+     "total_flos": 827712122198220.0,
+     "train_loss": 0.6994981775906953,
+     "train_runtime": 127.8433,
+     "train_samples": 1407,
+     "train_samples_per_second": 22.011,
+     "train_steps_per_second": 1.377
+ }
trainer_state.json ADDED
@@ -0,0 +1,396 @@
+ {
+   "best_global_step": 140,
+   "best_metric": 0.7311372549019608,
+   "best_model_checkpoint": "runs/router-mmBERT-base-text-only-v3/checkpoint-140",
+   "epoch": 2.0,
+   "eval_steps": 20,
+   "global_step": 176,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0,
+       "eval_accuracy": 0.6193181818181818,
+       "eval_f1": 0.5051193606153257,
+       "eval_loss": 1.0763274431228638,
+       "eval_precision": 0.506800891416276,
+       "eval_recall": 0.6193181818181818,
+       "eval_runtime": 2.2756,
+       "eval_samples_per_second": 77.343,
+       "eval_steps_per_second": 2.637,
+       "step": 0
+     },
+     {
+       "epoch": 0.056818181818181816,
+       "grad_norm": 42.762916564941406,
+       "learning_rate": 9.987260573051269e-05,
+       "loss": 3.6412,
+       "step": 5
+     },
+     {
+       "epoch": 0.11363636363636363,
+       "grad_norm": 16.814544677734375,
+       "learning_rate": 9.935617890443557e-05,
+       "loss": 1.1579,
+       "step": 10
+     },
+     {
+       "epoch": 0.17045454545454544,
+       "grad_norm": 5.188882350921631,
+       "learning_rate": 9.844686508907537e-05,
+       "loss": 0.6466,
+       "step": 15
+     },
+     {
+       "epoch": 0.22727272727272727,
+       "grad_norm": 21.053104400634766,
+       "learning_rate": 9.715190263989561e-05,
+       "loss": 0.7067,
+       "step": 20
+     },
+     {
+       "epoch": 0.22727272727272727,
+       "eval_accuracy": 0.6818181818181818,
+       "eval_f1": 0.6807008134441763,
+       "eval_loss": 0.6497310400009155,
+       "eval_precision": 0.6797602510675517,
+       "eval_recall": 0.6818181818181818,
+       "eval_runtime": 0.2876,
+       "eval_samples_per_second": 612.036,
+       "eval_steps_per_second": 20.865,
+       "step": 20
+     },
+     {
+       "epoch": 0.2840909090909091,
+       "grad_norm": 6.076605796813965,
+       "learning_rate": 9.548159976772592e-05,
+       "loss": 0.6684,
+       "step": 25
+     },
+     {
+       "epoch": 0.3409090909090909,
+       "grad_norm": 3.4750635623931885,
+       "learning_rate": 9.344925248293837e-05,
+       "loss": 0.7077,
+       "step": 30
+     },
+     {
+       "epoch": 0.3977272727272727,
+       "grad_norm": 7.481023788452148,
+       "learning_rate": 9.107103875602459e-05,
+       "loss": 0.5079,
+       "step": 35
+     },
+     {
+       "epoch": 0.45454545454545453,
+       "grad_norm": 23.338926315307617,
+       "learning_rate": 8.836588973708129e-05,
+       "loss": 0.7588,
+       "step": 40
+     },
+     {
+       "epoch": 0.45454545454545453,
+       "eval_accuracy": 0.5113636363636364,
+       "eval_f1": 0.4656789220169502,
+       "eval_loss": 0.8422594666481018,
+       "eval_precision": 0.7483603154836032,
+       "eval_recall": 0.5113636363636364,
+       "eval_runtime": 0.3049,
+       "eval_samples_per_second": 577.205,
+       "eval_steps_per_second": 19.677,
+       "step": 40
+     },
+     {
+       "epoch": 0.5113636363636364,
+       "grad_norm": 6.128546237945557,
+       "learning_rate": 8.535533905932738e-05,
+       "loss": 0.6416,
+       "step": 45
+     },
+     {
+       "epoch": 0.5681818181818182,
+       "grad_norm": 28.102933883666992,
+       "learning_rate": 8.206335142623305e-05,
+       "loss": 0.7067,
+       "step": 50
+     },
+     {
+       "epoch": 0.625,
+       "grad_norm": 12.337993621826172,
+       "learning_rate": 7.85161318467482e-05,
+       "loss": 0.6988,
+       "step": 55
+     },
+     {
+       "epoch": 0.6818181818181818,
+       "grad_norm": 12.026542663574219,
+       "learning_rate": 7.474191703716339e-05,
+       "loss": 0.5581,
+       "step": 60
+     },
+     {
+       "epoch": 0.6818181818181818,
+       "eval_accuracy": 0.6818181818181818,
+       "eval_f1": 0.6867387323527674,
+       "eval_loss": 0.6620599627494812,
+       "eval_precision": 0.7432805289948147,
+       "eval_recall": 0.6818181818181818,
+       "eval_runtime": 0.3332,
+       "eval_samples_per_second": 528.222,
+       "eval_steps_per_second": 18.008,
+       "step": 60
+     },
+     {
+       "epoch": 0.7386363636363636,
+       "grad_norm": 2.0800154209136963,
+       "learning_rate": 7.077075065009433e-05,
+       "loss": 0.6468,
+       "step": 65
+     },
+     {
+       "epoch": 0.7954545454545454,
+       "grad_norm": 1.9258697032928467,
+       "learning_rate": 6.663424411982121e-05,
+       "loss": 0.6727,
+       "step": 70
+     },
+     {
+       "epoch": 0.8522727272727273,
+       "grad_norm": 7.611169338226318,
+       "learning_rate": 6.236532502771078e-05,
+       "loss": 0.613,
+       "step": 75
+     },
+     {
+       "epoch": 0.9090909090909091,
+       "grad_norm": 4.331361293792725,
+       "learning_rate": 5.799797499079301e-05,
+       "loss": 0.641,
+       "step": 80
+     },
+     {
+       "epoch": 0.9090909090909091,
+       "eval_accuracy": 0.6477272727272727,
+       "eval_f1": 0.6544019138755981,
+       "eval_loss": 0.6033048033714294,
+       "eval_precision": 0.6849173553719008,
+       "eval_recall": 0.6477272727272727,
+       "eval_runtime": 0.3552,
+       "eval_samples_per_second": 495.499,
+       "eval_steps_per_second": 16.892,
+       "step": 80
+     },
+     {
+       "epoch": 0.9659090909090909,
+       "grad_norm": 6.545480728149414,
+       "learning_rate": 5.3566959159961615e-05,
+       "loss": 0.7836,
+       "step": 85
+     },
+     {
+       "epoch": 1.0227272727272727,
+       "grad_norm": 3.841913938522339,
+       "learning_rate": 4.9107549481057696e-05,
+       "loss": 0.6128,
+       "step": 90
+     },
+     {
+       "epoch": 1.0795454545454546,
+       "grad_norm": 9.448281288146973,
+       "learning_rate": 4.4655243921744374e-05,
+       "loss": 0.5637,
+       "step": 95
+     },
+     {
+       "epoch": 1.1363636363636362,
+       "grad_norm": 3.761906623840332,
+       "learning_rate": 4.0245483899193595e-05,
+       "loss": 0.5344,
+       "step": 100
+     },
+     {
+       "epoch": 1.1363636363636362,
+       "eval_accuracy": 0.7215909090909091,
+       "eval_f1": 0.7189715882867443,
+       "eval_loss": 0.6141549944877625,
+       "eval_precision": 0.7175536328078702,
+       "eval_recall": 0.7215909090909091,
+       "eval_runtime": 0.3339,
+       "eval_samples_per_second": 527.098,
+       "eval_steps_per_second": 17.969,
+       "step": 100
+     },
+     {
+       "epoch": 1.1931818181818181,
+       "grad_norm": 2.5487172603607178,
+       "learning_rate": 3.591337215792852e-05,
+       "loss": 0.4794,
+       "step": 105
+     },
+     {
+       "epoch": 1.25,
+       "grad_norm": 4.464008808135986,
+       "learning_rate": 3.1693393343581044e-05,
+       "loss": 0.4742,
+       "step": 110
+     },
+     {
+       "epoch": 1.3068181818181819,
+       "grad_norm": 5.295810222625732,
+       "learning_rate": 2.7619139496864378e-05,
+       "loss": 0.614,
+       "step": 115
+     },
+     {
+       "epoch": 1.3636363636363638,
+       "grad_norm": 6.302103042602539,
+       "learning_rate": 2.3723042652894362e-05,
+       "loss": 0.5484,
+       "step": 120
+     },
+     {
+       "epoch": 1.3636363636363638,
+       "eval_accuracy": 0.6818181818181818,
+       "eval_f1": 0.6874236294828752,
+       "eval_loss": 0.622637152671814,
+       "eval_precision": 0.7327687880360428,
+       "eval_recall": 0.6818181818181818,
+       "eval_runtime": 0.3682,
+       "eval_samples_per_second": 478.012,
+       "eval_steps_per_second": 16.296,
+       "step": 120
+     },
+     {
+       "epoch": 1.4204545454545454,
+       "grad_norm": 2.723407030105591,
+       "learning_rate": 2.0036116674432654e-05,
+       "loss": 0.5711,
+       "step": 125
+     },
+     {
+       "epoch": 1.4772727272727273,
+       "grad_norm": 2.9563422203063965,
+       "learning_rate": 1.6587710374121203e-05,
+       "loss": 0.4586,
+       "step": 130
+     },
+     {
+       "epoch": 1.5340909090909092,
+       "grad_norm": 5.308709621429443,
+       "learning_rate": 1.340527389091374e-05,
+       "loss": 0.7452,
+       "step": 135
+     },
+     {
+       "epoch": 1.5909090909090908,
+       "grad_norm": 3.0803794860839844,
+       "learning_rate": 1.0514140180404204e-05,
+       "loss": 0.5466,
+       "step": 140
+     },
+     {
+       "epoch": 1.5909090909090908,
+       "eval_accuracy": 0.75,
+       "eval_f1": 0.7311372549019608,
+       "eval_loss": 0.5224220752716064,
+       "eval_precision": 0.7524790236460717,
+       "eval_recall": 0.75,
+       "eval_runtime": 0.3221,
+       "eval_samples_per_second": 546.448,
+       "eval_steps_per_second": 18.629,
+       "step": 140
+     },
+     {
+       "epoch": 1.6477272727272727,
+       "grad_norm": 8.732233047485352,
+       "learning_rate": 7.937323358440935e-06,
+       "loss": 0.5014,
+       "step": 145
+     },
+     {
+       "epoch": 1.7045454545454546,
+       "grad_norm": 1.7209876775741577,
+       "learning_rate": 5.69533550325988e-06,
+       "loss": 0.4659,
+       "step": 150
+     },
+     {
+       "epoch": 1.7613636363636362,
+       "grad_norm": 6.374065399169922,
+       "learning_rate": 3.8060233744356633e-06,
+       "loss": 0.5324,
+       "step": 155
+     },
+     {
+       "epoch": 1.8181818181818183,
+       "grad_norm": 9.672929763793945,
+       "learning_rate": 2.2844263484068096e-06,
+       "loss": 0.5092,
+       "step": 160
+     },
+     {
+       "epoch": 1.8181818181818183,
+       "eval_accuracy": 0.7443181818181818,
+       "eval_f1": 0.7261242675911058,
+       "eval_loss": 0.5245174169540405,
+       "eval_precision": 0.7441852551341603,
+       "eval_recall": 0.7443181818181818,
+       "eval_runtime": 0.3788,
+       "eval_samples_per_second": 464.594,
+       "eval_steps_per_second": 15.838,
+       "step": 160
+     },
+     {
+       "epoch": 1.875,
+       "grad_norm": 10.658951759338379,
+       "learning_rate": 1.1426567014420297e-06,
+       "loss": 0.6107,
+       "step": 165
+     },
+     {
+       "epoch": 1.9318181818181817,
+       "grad_norm": 6.122763156890869,
+       "learning_rate": 3.8980319302407977e-07,
+       "loss": 0.4857,
+       "step": 170
+     },
+     {
+       "epoch": 1.9886363636363638,
+       "grad_norm": 5.741733074188232,
+       "learning_rate": 3.185871715041255e-08,
+       "loss": 0.541,
+       "step": 175
+     },
+     {
+       "epoch": 2.0,
+       "step": 176,
+       "total_flos": 827712122198220.0,
+       "train_loss": 0.6994981775906953,
+       "train_runtime": 127.8433,
+       "train_samples_per_second": 22.011,
+       "train_steps_per_second": 1.377
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 176,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 2,
+   "save_steps": 20,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 827712122198220.0,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
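
trainer_state.json carries the per-step log behind the README table, so the evaluation curve and the recorded best checkpoint (step 140, F1 ≈ 0.7311) can be recovered directly from `log_history`. A minimal parsing sketch, assuming a local copy of the file:

```python
# Sketch: pull the evaluation curve out of trainer_state.json and confirm
# the best checkpoint recorded above. Assumption: the file is local.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries are the log_history rows that carry eval_* metrics.
eval_rows = [entry for entry in state["log_history"] if "eval_f1" in entry]
for row in eval_rows:
    print(f"step {row['step']:>3}  loss {row['eval_loss']:.4f}  f1 {row['eval_f1']:.4f}")

best = max(eval_rows, key=lambda r: r["eval_f1"])
assert best["step"] == state["best_global_step"]  # 140
print("best F1:", round(best["eval_f1"], 4))      # 0.7311
```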