peining7003 committed
Commit 2475d51 · verified · 1 Parent(s): b9971ee

End of training

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. README.md +10 -11
  2. model.safetensors +1 -1
  3. run-0/checkpoint-1070/config.json +24 -0
  4. run-0/checkpoint-1070/model.safetensors +3 -0
  5. run-0/checkpoint-1070/optimizer.pt +3 -0
  6. run-0/checkpoint-1070/rng_state.pth +3 -0
  7. run-0/checkpoint-1070/scheduler.pt +3 -0
  8. run-0/checkpoint-1070/special_tokens_map.json +7 -0
  9. run-0/checkpoint-1070/tokenizer.json +0 -0
  10. run-0/checkpoint-1070/tokenizer_config.json +56 -0
  11. run-0/checkpoint-1070/trainer_state.json +71 -0
  12. run-0/checkpoint-1070/training_args.bin +3 -0
  13. run-0/checkpoint-1070/vocab.txt +0 -0
  14. run-0/checkpoint-535/config.json +24 -0
  15. run-0/checkpoint-535/model.safetensors +3 -0
  16. run-0/checkpoint-535/optimizer.pt +3 -0
  17. run-0/checkpoint-535/rng_state.pth +3 -0
  18. run-0/checkpoint-535/scheduler.pt +3 -0
  19. run-0/checkpoint-535/special_tokens_map.json +7 -0
  20. run-0/checkpoint-535/tokenizer.json +0 -0
  21. run-0/checkpoint-535/tokenizer_config.json +56 -0
  22. run-0/checkpoint-535/trainer_state.json +55 -0
  23. run-0/checkpoint-535/training_args.bin +3 -0
  24. run-0/checkpoint-535/vocab.txt +0 -0
  25. run-1/checkpoint-2138/config.json +24 -0
  26. run-1/checkpoint-2138/model.safetensors +3 -0
  27. run-1/checkpoint-2138/optimizer.pt +3 -0
  28. run-1/checkpoint-2138/rng_state.pth +3 -0
  29. run-1/checkpoint-2138/scheduler.pt +3 -0
  30. run-1/checkpoint-2138/special_tokens_map.json +7 -0
  31. run-1/checkpoint-2138/tokenizer.json +0 -0
  32. run-1/checkpoint-2138/tokenizer_config.json +56 -0
  33. run-1/checkpoint-2138/trainer_state.json +76 -0
  34. run-1/checkpoint-2138/training_args.bin +3 -0
  35. run-1/checkpoint-2138/vocab.txt +0 -0
  36. run-2/checkpoint-134/config.json +24 -0
  37. run-2/checkpoint-134/model.safetensors +3 -0
  38. run-2/checkpoint-134/optimizer.pt +3 -0
  39. run-2/checkpoint-134/rng_state.pth +3 -0
  40. run-2/checkpoint-134/scheduler.pt +3 -0
  41. run-2/checkpoint-134/special_tokens_map.json +7 -0
  42. run-2/checkpoint-134/tokenizer.json +0 -0
  43. run-2/checkpoint-134/tokenizer_config.json +56 -0
  44. run-2/checkpoint-134/trainer_state.json +48 -0
  45. run-2/checkpoint-134/training_args.bin +3 -0
  46. run-2/checkpoint-134/vocab.txt +0 -0
  47. run-2/checkpoint-268/config.json +24 -0
  48. run-2/checkpoint-268/model.safetensors +3 -0
  49. run-2/checkpoint-268/optimizer.pt +3 -0
  50. run-2/checkpoint-268/rng_state.pth +3 -0
README.md CHANGED
@@ -18,8 +18,8 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.7641
- - Matthews Correlation: 0.5567
+ - Loss: 0.6691
+ - Matthews Correlation: 0.5171
 
  ## Model description
 
@@ -38,23 +38,22 @@ More information needed
  ### Training hyperparameters
 
  The following hyperparameters were used during training:
- - learning_rate: 2e-05
- - train_batch_size: 16
+ - learning_rate: 5.6294991226703914e-05
+ - train_batch_size: 64
  - eval_batch_size: 16
- - seed: 42
+ - seed: 16
  - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
  - lr_scheduler_type: linear
- - num_epochs: 5
+ - num_epochs: 4
 
  ### Training results
 
  | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
  |:-------------:|:-----:|:----:|:---------------:|:--------------------:|
- | 0.5213 | 1.0 | 535 | 0.4669 | 0.4528 |
- | 0.349 | 2.0 | 1070 | 0.4800 | 0.5375 |
- | 0.234 | 3.0 | 1605 | 0.6216 | 0.5264 |
- | 0.1691 | 4.0 | 2140 | 0.7641 | 0.5567 |
- | 0.1261 | 5.0 | 2675 | 0.8071 | 0.5445 |
+ | No log | 1.0 | 134 | 0.4796 | 0.4210 |
+ | No log | 2.0 | 268 | 0.4766 | 0.5101 |
+ | No log | 3.0 | 402 | 0.5300 | 0.5083 |
+ | 0.3001 | 4.0 | 536 | 0.6691 | 0.5171 |
 
 
  ### Framework versions
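
The updated README records the hyperparameters of a single trial from a `Trainer.hyperparameter_search` run (the `run-0/`, `run-1/`, `run-2/` checkpoints below all carry `"is_hyper_param_search": true` and `trial_params`). As a rough orientation only, here is a minimal sketch of how such a search could be launched with an Optuna backend; the dataset, metric wiring, and search ranges are assumptions, not taken from this repository.

```python
# Hypothetical sketch of a Trainer hyperparameter search that would produce
# run-<n>/checkpoint-<step>/ trials like the ones committed here.
import numpy as np
import evaluate
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

base = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(base)
cola = load_dataset("glue", "cola").map(
    lambda batch: tokenizer(batch["sentence"], truncation=True), batched=True)
matthews = evaluate.load("matthews_correlation")

def model_init(trial=None):
    # Fresh weights per trial so runs do not leak into each other.
    return AutoModelForSequenceClassification.from_pretrained(base, num_labels=2)

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    return matthews.compute(predictions=np.argmax(logits, axis=-1), references=labels)

args = TrainingArguments("distilbert-base-uncased-finetuned-cola",
                         eval_strategy="epoch", save_strategy="epoch")
trainer = Trainer(model_init=model_init, args=args,
                  train_dataset=cola["train"], eval_dataset=cola["validation"],
                  processing_class=tokenizer, compute_metrics=compute_metrics)

# Each Optuna trial is saved under run-<n>/ inside the output directory.
best = trainer.hyperparameter_search(
    direction="maximize", backend="optuna", n_trials=5,
    hp_space=lambda trial: {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
        "per_device_train_batch_size": trial.suggest_categorical(
            "per_device_train_batch_size", [4, 16, 32, 64]),
        "seed": trial.suggest_int("seed", 1, 40),
    })
print(best)
```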
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e44db60df6193e112b628003731c5e194714df558d5cb7c00404045314b10a1e
+ oid sha256:4813655a293b4779a7bf40361c15fe7aa7d6e41cd4cacea21aefbb0872d6b3d6
  size 267832560
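
The weights file is tracked with Git LFS, so the diff above only changes the pointer's content hash; the size stays at 267832560 bytes. A small sketch, assuming the file has already been fetched locally, of checking the downloaded file against the new sha256 and loading it:

```python
# Sketch: verify a fetched model.safetensors against the LFS pointer above,
# then load the tensors. The local path is an assumption.
import hashlib
from safetensors.torch import load_file

expected = "4813655a293b4779a7bf40361c15fe7aa7d6e41cd4cacea21aefbb0872d6b3d6"
path = "model.safetensors"

with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == expected, f"checksum mismatch: {digest}"

state_dict = load_file(path)  # dict of tensor name -> torch.Tensor
print(len(state_dict), "tensors loaded")
```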
run-0/checkpoint-1070/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "activation": "gelu",
+ "architectures": [
+ "DistilBertForSequenceClassification"
+ ],
+ "attention_dropout": 0.1,
+ "dim": 768,
+ "dropout": 0.1,
+ "hidden_dim": 3072,
+ "initializer_range": 0.02,
+ "max_position_embeddings": 512,
+ "model_type": "distilbert",
+ "n_heads": 12,
+ "n_layers": 6,
+ "pad_token_id": 0,
+ "problem_type": "single_label_classification",
+ "qa_dropout": 0.1,
+ "seq_classif_dropout": 0.2,
+ "sinusoidal_pos_embds": false,
+ "tie_weights_": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.51.3",
+ "vocab_size": 30522
+ }
run-0/checkpoint-1070/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a45718d416fa19873ea237bd2dc370655211409d5029e24610067794425c9f8
+ size 267832560
run-0/checkpoint-1070/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d998c68bc655f5e85c6c9dfa01b152ae54abefe6eb9307834ba0d8b8665b3a33
+ size 535727290
run-0/checkpoint-1070/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a848d86a853966ff755b2fd299514e0e274e93ac37127e213438a577eabdc6af
+ size 14308
run-0/checkpoint-1070/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:466d26137087ced32f7ee304d01c63e916fe12b1f7864d70ae3b2952fa5a6f09
+ size 1064
run-0/checkpoint-1070/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
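
Each trial checkpoint directory bundles the model config, weights, optimizer/scheduler/RNG state, and tokenizer files, so a trial can be reloaded directly. A minimal sketch, assuming the repository has been cloned and the LFS-tracked files fetched:

```python
# Sketch: reload the model saved in this trial checkpoint.
from transformers import AutoModelForSequenceClassification

ckpt_dir = "run-0/checkpoint-1070"
model = AutoModelForSequenceClassification.from_pretrained(ckpt_dir)
model.eval()

# The config.json above drives the architecture: 6 layers, 12 heads, dim 768.
print(model.config.n_layers, model.config.n_heads, model.config.dim)
```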
run-0/checkpoint-1070/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-0/checkpoint-1070/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "101": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "102": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "103": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": false,
+ "cls_token": "[CLS]",
+ "do_lower_case": true,
+ "extra_special_tokens": {},
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "DistilBertTokenizer",
+ "unk_token": "[UNK]"
+ }
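
The tokenizer files saved alongside the checkpoint (tokenizer_config.json, tokenizer.json, special_tokens_map.json, vocab.txt) are enough to rebuild the tokenizer. A short sketch with a made-up example sentence:

```python
# Sketch: load the tokenizer committed with this checkpoint and encode text.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("run-0/checkpoint-1070")
enc = tok("The cat sat on the mat.", truncation=True, max_length=512)
print(tok.convert_ids_to_tokens(enc["input_ids"]))
```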
run-0/checkpoint-1070/trainer_state.json ADDED
@@ -0,0 +1,71 @@
+ {
+ "best_global_step": 535,
+ "best_metric": 0.0,
+ "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-0/checkpoint-535",
+ "epoch": 2.0,
+ "eval_steps": 500,
+ "global_step": 1070,
+ "is_hyper_param_search": true,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.9345794392523364,
+ "grad_norm": 2.0295701026916504,
+ "learning_rate": 9.702944307989348e-07,
+ "loss": 0.6076,
+ "step": 500
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 0.5909003615379333,
+ "eval_matthews_correlation": 0.0,
+ "eval_runtime": 0.7078,
+ "eval_samples_per_second": 1473.606,
+ "eval_steps_per_second": 93.248,
+ "step": 535
+ },
+ {
+ "epoch": 1.8691588785046729,
+ "grad_norm": 2.802403211593628,
+ "learning_rate": 1.2064957020442099e-07,
+ "loss": 0.5716,
+ "step": 1000
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 0.5756940245628357,
+ "eval_matthews_correlation": 0.0,
+ "eval_runtime": 0.7224,
+ "eval_samples_per_second": 1443.807,
+ "eval_steps_per_second": 91.363,
+ "step": 1070
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 1070,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 85425431159064.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": {
+ "learning_rate": 1.8182400016722597e-06,
+ "num_train_epochs": 2,
+ "per_device_train_batch_size": 16,
+ "seed": 26
+ }
+ }
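
trainer_state.json records the per-step log history, the evaluation results, and the best checkpoint for the trial. A minimal sketch for pulling the evaluation entries out of it:

```python
# Sketch: read a trial's trainer_state.json and list its evaluation results.
import json

with open("run-0/checkpoint-1070/trainer_state.json") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"], "metric:", state["best_metric"])
for entry in state["log_history"]:
    if "eval_matthews_correlation" in entry:
        print(f'epoch {entry["epoch"]}: loss={entry["eval_loss"]:.4f} '
              f'mcc={entry["eval_matthews_correlation"]:.4f}')
```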
run-0/checkpoint-1070/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe770f7319fb6cea80b1fb375c1ba065aa50b18047d09d3111902d6cc94f85f0
+ size 5368
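
training_args.bin is a pickled `TrainingArguments` object rather than readable JSON. A sketch, assuming the file has been fetched and comes from a trusted source, of inspecting it:

```python
# Sketch: inspect the pickled TrainingArguments saved with the checkpoint.
# Recent torch versions default to weights_only=True, which rejects arbitrary
# pickles, so it is disabled here; only do this for files you trust.
import torch

args = torch.load("run-0/checkpoint-1070/training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size,
      args.num_train_epochs, args.seed)
```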
run-0/checkpoint-1070/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
run-0/checkpoint-535/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "activation": "gelu",
+ "architectures": [
+ "DistilBertForSequenceClassification"
+ ],
+ "attention_dropout": 0.1,
+ "dim": 768,
+ "dropout": 0.1,
+ "hidden_dim": 3072,
+ "initializer_range": 0.02,
+ "max_position_embeddings": 512,
+ "model_type": "distilbert",
+ "n_heads": 12,
+ "n_layers": 6,
+ "pad_token_id": 0,
+ "problem_type": "single_label_classification",
+ "qa_dropout": 0.1,
+ "seq_classif_dropout": 0.2,
+ "sinusoidal_pos_embds": false,
+ "tie_weights_": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.51.3",
+ "vocab_size": 30522
+ }
run-0/checkpoint-535/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:198fe3a56b6ef7e6ba77bd6b964f031352c5c2d06a4faf9c806ef50e9c137cae
+ size 267832560
run-0/checkpoint-535/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39c2317972aca2b5880e4e406eff88e0e998bab8910c3eb0aa1f0642390eb5a5
+ size 535727290
run-0/checkpoint-535/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ffb3f60df81b69d11b676e27e5d2b594801fe8b95b6f48470b8cbbcd625ea52
+ size 14308
run-0/checkpoint-535/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f6620632a6c1d53d63fc7e4ad0a2d171e69a77994a95830271f5ad0b4273615
+ size 1064
run-0/checkpoint-535/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
run-0/checkpoint-535/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-0/checkpoint-535/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "101": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "102": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "103": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": false,
+ "cls_token": "[CLS]",
+ "do_lower_case": true,
+ "extra_special_tokens": {},
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "DistilBertTokenizer",
+ "unk_token": "[UNK]"
+ }
run-0/checkpoint-535/trainer_state.json ADDED
@@ -0,0 +1,55 @@
+ {
+ "best_global_step": 535,
+ "best_metric": 0.0,
+ "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-0/checkpoint-535",
+ "epoch": 1.0,
+ "eval_steps": 500,
+ "global_step": 535,
+ "is_hyper_param_search": true,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.9345794392523364,
+ "grad_norm": 2.0295701026916504,
+ "learning_rate": 9.702944307989348e-07,
+ "loss": 0.6076,
+ "step": 500
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 0.5909003615379333,
+ "eval_matthews_correlation": 0.0,
+ "eval_runtime": 0.7078,
+ "eval_samples_per_second": 1473.606,
+ "eval_steps_per_second": 93.248,
+ "step": 535
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 1070,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 2,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 42861482677632.0,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": {
+ "learning_rate": 1.8182400016722597e-06,
+ "num_train_epochs": 2,
+ "per_device_train_batch_size": 16,
+ "seed": 26
+ }
+ }
run-0/checkpoint-535/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe770f7319fb6cea80b1fb375c1ba065aa50b18047d09d3111902d6cc94f85f0
+ size 5368
run-0/checkpoint-535/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
run-1/checkpoint-2138/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "activation": "gelu",
+ "architectures": [
+ "DistilBertForSequenceClassification"
+ ],
+ "attention_dropout": 0.1,
+ "dim": 768,
+ "dropout": 0.1,
+ "hidden_dim": 3072,
+ "initializer_range": 0.02,
+ "max_position_embeddings": 512,
+ "model_type": "distilbert",
+ "n_heads": 12,
+ "n_layers": 6,
+ "pad_token_id": 0,
+ "problem_type": "single_label_classification",
+ "qa_dropout": 0.1,
+ "seq_classif_dropout": 0.2,
+ "sinusoidal_pos_embds": false,
+ "tie_weights_": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.51.3",
+ "vocab_size": 30522
+ }
run-1/checkpoint-2138/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f310f57ab80f0592c9160ce756727f7650399410d638aae3179a85f74c99b45
+ size 267832560
run-1/checkpoint-2138/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48cc5bbc02e27eca1b952b123c1ce7d3dd7b6ebee078b698802cc241b4a6ab4e
+ size 535727290
run-1/checkpoint-2138/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76183bf835b34d1a4d7fb545593de762f7872bdf7b623461e23a8160c4fda7d8
+ size 14244
run-1/checkpoint-2138/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a43137230bbe1d1bcd45fd698c4587830dd6ec07e62e7522d69e270a3f976760
+ size 1064
run-1/checkpoint-2138/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
run-1/checkpoint-2138/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-1/checkpoint-2138/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "101": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "102": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "103": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": false,
+ "cls_token": "[CLS]",
+ "do_lower_case": true,
+ "extra_special_tokens": {},
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "DistilBertTokenizer",
+ "unk_token": "[UNK]"
+ }
run-1/checkpoint-2138/trainer_state.json ADDED
@@ -0,0 +1,76 @@
+ {
+ "best_global_step": 2138,
+ "best_metric": 0.45017936365298244,
+ "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-1/checkpoint-2138",
+ "epoch": 1.0,
+ "eval_steps": 500,
+ "global_step": 2138,
+ "is_hyper_param_search": true,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.23386342376052385,
+ "grad_norm": 1.3968665599822998,
+ "learning_rate": 1.978201203334086e-05,
+ "loss": 0.5843,
+ "step": 500
+ },
+ {
+ "epoch": 0.4677268475210477,
+ "grad_norm": 6.756799221038818,
+ "learning_rate": 1.374723105916732e-05,
+ "loss": 0.5895,
+ "step": 1000
+ },
+ {
+ "epoch": 0.7015902712815716,
+ "grad_norm": 2.7195584774017334,
+ "learning_rate": 7.712450084993782e-06,
+ "loss": 0.569,
+ "step": 1500
+ },
+ {
+ "epoch": 0.9354536950420954,
+ "grad_norm": 18.30387306213379,
+ "learning_rate": 1.6776691108202439e-06,
+ "loss": 0.5309,
+ "step": 2000
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 0.5654900670051575,
+ "eval_matthews_correlation": 0.45017936365298244,
+ "eval_runtime": 0.726,
+ "eval_samples_per_second": 1436.625,
+ "eval_steps_per_second": 90.908,
+ "step": 2138
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 2138,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 32530060484016.0,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": {
+ "learning_rate": 2.5804723445566052e-05,
+ "num_train_epochs": 1,
+ "per_device_train_batch_size": 4,
+ "seed": 16
+ }
+ }
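
The eval_matthews_correlation values logged above are Matthews correlation coefficients, the standard CoLA metric. The repository most likely computes it through the evaluate library; as an illustration only, scikit-learn gives the same statistic from label/prediction pairs (the arrays below are made up):

```python
# Sketch: Matthews correlation from predictions, as a point of reference.
from sklearn.metrics import matthews_corrcoef

y_true = [1, 1, 0, 1, 0, 1, 1, 0]   # made-up gold labels
y_pred = [1, 1, 0, 0, 0, 1, 1, 1]   # made-up model predictions
print(matthews_corrcoef(y_true, y_pred))  # 1.0 = perfect, 0.0 = chance-level
```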
run-1/checkpoint-2138/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:057ddd6314ab10f9fc3363b60135771e3c9585e697ca06a7ff738418ddfdd174
+ size 5368
run-1/checkpoint-2138/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
run-2/checkpoint-134/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "activation": "gelu",
+ "architectures": [
+ "DistilBertForSequenceClassification"
+ ],
+ "attention_dropout": 0.1,
+ "dim": 768,
+ "dropout": 0.1,
+ "hidden_dim": 3072,
+ "initializer_range": 0.02,
+ "max_position_embeddings": 512,
+ "model_type": "distilbert",
+ "n_heads": 12,
+ "n_layers": 6,
+ "pad_token_id": 0,
+ "problem_type": "single_label_classification",
+ "qa_dropout": 0.1,
+ "seq_classif_dropout": 0.2,
+ "sinusoidal_pos_embds": false,
+ "tie_weights_": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.51.3",
+ "vocab_size": 30522
+ }
run-2/checkpoint-134/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb9197ea4d627a76797f87305b21c936204a2252a68cf6cc7f4843344b02aca3
+ size 267832560
run-2/checkpoint-134/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7311fd5fd0f4f3b1611ed4faea72b4963cce64f7a8c450ab2249d33ee5d400b
+ size 535727290
run-2/checkpoint-134/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ccf97e2344782e268544b5e3a3ea7969bdf01707b6b1f33b78edac2b591b7df
+ size 14244
run-2/checkpoint-134/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f49736a3d6cfd41d425ce6b13408cd9351b3e3e3b2c0e8ec5f54b4a4b9ffe3de
+ size 1064
run-2/checkpoint-134/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "cls_token": "[CLS]",
+ "mask_token": "[MASK]",
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "unk_token": "[UNK]"
+ }
run-2/checkpoint-134/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-2/checkpoint-134/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "100": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "101": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "102": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "103": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "clean_up_tokenization_spaces": false,
+ "cls_token": "[CLS]",
+ "do_lower_case": true,
+ "extra_special_tokens": {},
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "DistilBertTokenizer",
+ "unk_token": "[UNK]"
+ }
run-2/checkpoint-134/trainer_state.json ADDED
@@ -0,0 +1,48 @@
+ {
+ "best_global_step": 134,
+ "best_metric": 0.42096010214901264,
+ "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-2/checkpoint-134",
+ "epoch": 1.0,
+ "eval_steps": 500,
+ "global_step": 134,
+ "is_hyper_param_search": true,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "eval_loss": 0.4796466827392578,
+ "eval_matthews_correlation": 0.42096010214901264,
+ "eval_runtime": 0.7337,
+ "eval_samples_per_second": 1421.482,
+ "eval_steps_per_second": 89.95,
+ "step": 134
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 536,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 0,
+ "train_batch_size": 64,
+ "trial_name": null,
+ "trial_params": {
+ "learning_rate": 5.6294991226703914e-05,
+ "num_train_epochs": 4,
+ "per_device_train_batch_size": 64,
+ "seed": 16
+ }
+ }
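
With several trials committed under run-0/, run-1/, and run-2/, comparing them comes down to scanning each checkpoint's trainer_state.json. A small sketch, assuming the repository layout shown in this diff:

```python
# Sketch: tabulate all hyperparameter-search trials by reading each
# run-*/checkpoint-*/trainer_state.json in the working copy.
import glob
import json

rows = []
for path in sorted(glob.glob("run-*/checkpoint-*/trainer_state.json")):
    with open(path) as f:
        state = json.load(f)
    rows.append((path, state["global_step"], state["best_metric"], state["trial_params"]))

# Best Matthews correlation first.
for path, step, metric, params in sorted(rows, key=lambda r: r[2], reverse=True):
    print(f"{metric:.4f}  step={step:<5} {path}  {params}")
```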
run-2/checkpoint-134/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:85d7d1bbb527f5c32039612c95457770255214469244ccce27b04604e0894a14
+ size 5368
run-2/checkpoint-134/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
run-2/checkpoint-268/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "activation": "gelu",
+ "architectures": [
+ "DistilBertForSequenceClassification"
+ ],
+ "attention_dropout": 0.1,
+ "dim": 768,
+ "dropout": 0.1,
+ "hidden_dim": 3072,
+ "initializer_range": 0.02,
+ "max_position_embeddings": 512,
+ "model_type": "distilbert",
+ "n_heads": 12,
+ "n_layers": 6,
+ "pad_token_id": 0,
+ "problem_type": "single_label_classification",
+ "qa_dropout": 0.1,
+ "seq_classif_dropout": 0.2,
+ "sinusoidal_pos_embds": false,
+ "tie_weights_": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.51.3",
+ "vocab_size": 30522
+ }
run-2/checkpoint-268/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ed4a53ba41e461516cb3bc56f24a2bfea31dbd629376fdd80e626f783f89a53
+ size 267832560
run-2/checkpoint-268/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff39d18eda8398ab31d703f0e5ad15d7c2c3e793f05097f92a2017a699f07364
+ size 535727290
run-2/checkpoint-268/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79c1bebd1ab58c5f8b31eb965e16dfcaf5d403bab5e7474b85bc09e4a11e0ee5
+ size 14244