Priyansu19 committed
Commit 8fbb652 · verified · 1 Parent(s): 6995756

Initial upload of custom EirGrid PatchTST model
checkpoint-1000/config.json ADDED
@@ -0,0 +1,51 @@
+{
+  "activation_function": "gelu",
+  "architectures": [
+    "PatchTSTForPrediction"
+  ],
+  "attention_dropout": 0.0,
+  "bias": true,
+  "channel_attention": false,
+  "channel_consistent_masking": false,
+  "context_length": 512,
+  "d_model": 128,
+  "distribution_output": "student_t",
+  "do_mask_input": null,
+  "dropout": 0.2,
+  "dtype": "float32",
+  "ff_dropout": 0.0,
+  "ffn_dim": 512,
+  "head_dropout": 0.0,
+  "init_std": 0.02,
+  "loss": "mse",
+  "mask_type": "random",
+  "mask_value": 0,
+  "model_type": "patchtst",
+  "norm_eps": 1e-05,
+  "norm_type": "batchnorm",
+  "num_attention_heads": 4,
+  "num_forecast_mask_patches": [
+    2
+  ],
+  "num_hidden_layers": 3,
+  "num_input_channels": 1,
+  "num_parallel_samples": 100,
+  "num_targets": 1,
+  "output_range": null,
+  "patch_length": 16,
+  "patch_stride": 1,
+  "path_dropout": 0.0,
+  "pooling_type": "mean",
+  "positional_dropout": 0.0,
+  "positional_encoding_type": "sincos",
+  "pre_norm": true,
+  "prediction_length": 96,
+  "random_mask_ratio": 0.5,
+  "scaling": "std",
+  "share_embedding": true,
+  "share_projection": true,
+  "stride": 8,
+  "transformers_version": "4.57.2",
+  "unmasked_channel_indices": null,
+  "use_cls_token": false
+}
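
The config above fully specifies the PatchTST forecaster: a 512-step context window, a 96-step prediction horizon, 3 encoder layers with d_model 128, and standard scaling of the single input channel. A minimal sketch of loading it with the transformers library, assuming this repository is cloned locally so that ./checkpoint-1000 contains the files added in this commit:

```python
# Minimal sketch: inspect the uploaded config and restore the checkpoint weights.
# Assumes a local clone of this repo; "./checkpoint-1000" is the directory
# whose config.json and model.safetensors appear in this commit.
from transformers import PatchTSTConfig, PatchTSTForPrediction

config = PatchTSTConfig.from_pretrained("./checkpoint-1000")
print(config.context_length, config.prediction_length)  # 512 96

# from_pretrained rebuilds the architecture and loads model.safetensors
# (~2.7 MB of float32 weights, i.e. roughly 0.68M parameters).
model = PatchTSTForPrediction.from_pretrained("./checkpoint-1000")
print(sum(p.numel() for p in model.parameters()))
```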
checkpoint-1000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7783aaa156b697cdb78eabbaa2a26d0d28a950a00dd764730d39163ad101d481
+size 2706232
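
The .safetensors, .pt, .pth and .bin entries in this commit are Git LFS pointer files rather than the binaries themselves; the actual payload is addressed by the sha256 oid and size above. A small sketch (the file paths are placeholders, not part of this commit) of how such a pointer can be parsed and a downloaded blob checked against it:

```python
# Sketch: parse a Git LFS pointer file ("key value" lines per the LFS spec)
# and verify a locally downloaded blob against its oid and size.
import hashlib
from pathlib import Path

def parse_pointer(pointer_path: str) -> tuple[str, int]:
    # Lines look like: "oid sha256:<hex>" and "size <bytes>"
    fields = dict(line.split(" ", 1) for line in Path(pointer_path).read_text().splitlines())
    return fields["oid"].removeprefix("sha256:"), int(fields["size"])

def verify_blob(blob_path: str, pointer_path: str) -> bool:
    oid, size = parse_pointer(pointer_path)
    data = Path(blob_path).read_bytes()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid
```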
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c439cc54b1a36d311f8fadb5da32ac3e5db22d2a64c3c4a087093a26936c38c8
+size 4919755
checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01f9a0f7843a37be87edd23f4e88aa93b38b95cc2c07503eeb1cf2e4632453a2
+size 14645
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5aba71615288bb81f9b9c6331534f2597fe07aa26167e1a3fa22a9f6842cf66
+size 1465
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,104 @@
+{
+  "best_global_step": null,
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 5.882352941176471,
+  "eval_steps": 500,
+  "global_step": 1000,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.5882352941176471,
+      "grad_norm": 0.6052117347717285,
+      "learning_rate": 0.000941764705882353,
+      "loss": 0.4992,
+      "step": 100
+    },
+    {
+      "epoch": 1.1764705882352942,
+      "grad_norm": 0.9228031635284424,
+      "learning_rate": 0.0008829411764705883,
+      "loss": 0.3017,
+      "step": 200
+    },
+    {
+      "epoch": 1.7647058823529411,
+      "grad_norm": 0.9054152965545654,
+      "learning_rate": 0.0008241176470588235,
+      "loss": 0.2262,
+      "step": 300
+    },
+    {
+      "epoch": 2.3529411764705883,
+      "grad_norm": 1.4709264039993286,
+      "learning_rate": 0.0007652941176470588,
+      "loss": 0.2054,
+      "step": 400
+    },
+    {
+      "epoch": 2.9411764705882355,
+      "grad_norm": 0.9707149267196655,
+      "learning_rate": 0.0007064705882352941,
+      "loss": 0.189,
+      "step": 500
+    },
+    {
+      "epoch": 3.5294117647058822,
+      "grad_norm": 1.0779001712799072,
+      "learning_rate": 0.0006476470588235295,
+      "loss": 0.1946,
+      "step": 600
+    },
+    {
+      "epoch": 4.117647058823529,
+      "grad_norm": 0.618778645992279,
+      "learning_rate": 0.0005888235294117648,
+      "loss": 0.1707,
+      "step": 700
+    },
+    {
+      "epoch": 4.705882352941177,
+      "grad_norm": 0.7503094673156738,
+      "learning_rate": 0.0005300000000000001,
+      "loss": 0.1713,
+      "step": 800
+    },
+    {
+      "epoch": 5.294117647058823,
+      "grad_norm": 0.9175742268562317,
+      "learning_rate": 0.00047117647058823533,
+      "loss": 0.1562,
+      "step": 900
+    },
+    {
+      "epoch": 5.882352941176471,
+      "grad_norm": 1.0091431140899658,
+      "learning_rate": 0.0004123529411764706,
+      "loss": 0.1596,
+      "step": 1000
+    }
+  ],
+  "logging_steps": 100,
+  "max_steps": 1700,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 10,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 66137131253760.0,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": null
+}
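
trainer_state.json records one log_history entry every 100 optimizer steps (logging_steps). The logged learning rates are consistent with a linear decay from 1e-3 towards 0 over max_steps = 1700, and the training loss falls from about 0.50 to 0.16 by step 1000. A short sketch (assuming the same local clone as above) that replays this log:

```python
# Sketch: read the saved trainer state and print the logged loss / LR curve.
import json

with open("checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(f"step {entry['step']:>4}  epoch {entry['epoch']:5.2f}  "
          f"loss {entry['loss']:.4f}  lr {entry['learning_rate']:.3e}")

# The logged values match a linear schedule starting at 1e-3:
#   lr(step) ≈ 1e-3 * (max_steps - step + 1) / max_steps, with max_steps = 1700
```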
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32774ff877551baa776c41f2f9ab000017cfa88cf56832d20d86e8ff5e86bb0b
+size 5841
checkpoint-1500/config.json ADDED
@@ -0,0 +1,51 @@
+{
+  "activation_function": "gelu",
+  "architectures": [
+    "PatchTSTForPrediction"
+  ],
+  "attention_dropout": 0.0,
+  "bias": true,
+  "channel_attention": false,
+  "channel_consistent_masking": false,
+  "context_length": 512,
+  "d_model": 128,
+  "distribution_output": "student_t",
+  "do_mask_input": null,
+  "dropout": 0.2,
+  "dtype": "float32",
+  "ff_dropout": 0.0,
+  "ffn_dim": 512,
+  "head_dropout": 0.0,
+  "init_std": 0.02,
+  "loss": "mse",
+  "mask_type": "random",
+  "mask_value": 0,
+  "model_type": "patchtst",
+  "norm_eps": 1e-05,
+  "norm_type": "batchnorm",
+  "num_attention_heads": 4,
+  "num_forecast_mask_patches": [
+    2
+  ],
+  "num_hidden_layers": 3,
+  "num_input_channels": 1,
+  "num_parallel_samples": 100,
+  "num_targets": 1,
+  "output_range": null,
+  "patch_length": 16,
+  "patch_stride": 1,
+  "path_dropout": 0.0,
+  "pooling_type": "mean",
+  "positional_dropout": 0.0,
+  "positional_encoding_type": "sincos",
+  "pre_norm": true,
+  "prediction_length": 96,
+  "random_mask_ratio": 0.5,
+  "scaling": "std",
+  "share_embedding": true,
+  "share_projection": true,
+  "stride": 8,
+  "transformers_version": "4.57.2",
+  "unmasked_channel_indices": null,
+  "use_cls_token": false
+}
checkpoint-1500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4147fcbbd8d86e98e56356a4eb8c3333f9186b73fdae01fa4fa644becca8d093
+size 2706232
checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef63cec4e1f261f1d7cb317bbd0e54d94b5db5a929aa61da6bf62e65005fd884
+size 4919755
checkpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:363c5df1543d2c82b2f13164f35bdd0367ceb32e7fa1b2f67c19df073a08b17b
+size 14645
checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0fc5e55673b02706cf235bd18ab925b394c852bf28a456273fa32aa1352cec0f
+size 1465
checkpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,139 @@
+{
+  "best_global_step": null,
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 8.823529411764707,
+  "eval_steps": 500,
+  "global_step": 1500,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.5882352941176471,
+      "grad_norm": 0.6052117347717285,
+      "learning_rate": 0.000941764705882353,
+      "loss": 0.4992,
+      "step": 100
+    },
+    {
+      "epoch": 1.1764705882352942,
+      "grad_norm": 0.9228031635284424,
+      "learning_rate": 0.0008829411764705883,
+      "loss": 0.3017,
+      "step": 200
+    },
+    {
+      "epoch": 1.7647058823529411,
+      "grad_norm": 0.9054152965545654,
+      "learning_rate": 0.0008241176470588235,
+      "loss": 0.2262,
+      "step": 300
+    },
+    {
+      "epoch": 2.3529411764705883,
+      "grad_norm": 1.4709264039993286,
+      "learning_rate": 0.0007652941176470588,
+      "loss": 0.2054,
+      "step": 400
+    },
+    {
+      "epoch": 2.9411764705882355,
+      "grad_norm": 0.9707149267196655,
+      "learning_rate": 0.0007064705882352941,
+      "loss": 0.189,
+      "step": 500
+    },
+    {
+      "epoch": 3.5294117647058822,
+      "grad_norm": 1.0779001712799072,
+      "learning_rate": 0.0006476470588235295,
+      "loss": 0.1946,
+      "step": 600
+    },
+    {
+      "epoch": 4.117647058823529,
+      "grad_norm": 0.618778645992279,
+      "learning_rate": 0.0005888235294117648,
+      "loss": 0.1707,
+      "step": 700
+    },
+    {
+      "epoch": 4.705882352941177,
+      "grad_norm": 0.7503094673156738,
+      "learning_rate": 0.0005300000000000001,
+      "loss": 0.1713,
+      "step": 800
+    },
+    {
+      "epoch": 5.294117647058823,
+      "grad_norm": 0.9175742268562317,
+      "learning_rate": 0.00047117647058823533,
+      "loss": 0.1562,
+      "step": 900
+    },
+    {
+      "epoch": 5.882352941176471,
+      "grad_norm": 1.0091431140899658,
+      "learning_rate": 0.0004123529411764706,
+      "loss": 0.1596,
+      "step": 1000
+    },
+    {
+      "epoch": 6.470588235294118,
+      "grad_norm": 0.6883541941642761,
+      "learning_rate": 0.0003535294117647059,
+      "loss": 0.1435,
+      "step": 1100
+    },
+    {
+      "epoch": 7.0588235294117645,
+      "grad_norm": 0.7159141898155212,
+      "learning_rate": 0.0002947058823529412,
+      "loss": 0.1385,
+      "step": 1200
+    },
+    {
+      "epoch": 7.647058823529412,
+      "grad_norm": 0.5263181924819946,
+      "learning_rate": 0.00023588235294117648,
+      "loss": 0.1278,
+      "step": 1300
+    },
+    {
+      "epoch": 8.235294117647058,
+      "grad_norm": 0.7766987681388855,
+      "learning_rate": 0.00017705882352941178,
+      "loss": 0.1137,
+      "step": 1400
+    },
+    {
+      "epoch": 8.823529411764707,
+      "grad_norm": 0.5680007338523865,
+      "learning_rate": 0.00011823529411764706,
+      "loss": 0.109,
+      "step": 1500
+    }
+  ],
+  "logging_steps": 100,
+  "max_steps": 1700,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 10,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 99203629449216.0,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32774ff877551baa776c41f2f9ab000017cfa88cf56832d20d86e8ff5e86bb0b
+size 5841
checkpoint-1700/config.json ADDED
@@ -0,0 +1,51 @@
+{
+  "activation_function": "gelu",
+  "architectures": [
+    "PatchTSTForPrediction"
+  ],
+  "attention_dropout": 0.0,
+  "bias": true,
+  "channel_attention": false,
+  "channel_consistent_masking": false,
+  "context_length": 512,
+  "d_model": 128,
+  "distribution_output": "student_t",
+  "do_mask_input": null,
+  "dropout": 0.2,
+  "dtype": "float32",
+  "ff_dropout": 0.0,
+  "ffn_dim": 512,
+  "head_dropout": 0.0,
+  "init_std": 0.02,
+  "loss": "mse",
+  "mask_type": "random",
+  "mask_value": 0,
+  "model_type": "patchtst",
+  "norm_eps": 1e-05,
+  "norm_type": "batchnorm",
+  "num_attention_heads": 4,
+  "num_forecast_mask_patches": [
+    2
+  ],
+  "num_hidden_layers": 3,
+  "num_input_channels": 1,
+  "num_parallel_samples": 100,
+  "num_targets": 1,
+  "output_range": null,
+  "patch_length": 16,
+  "patch_stride": 1,
+  "path_dropout": 0.0,
+  "pooling_type": "mean",
+  "positional_dropout": 0.0,
+  "positional_encoding_type": "sincos",
+  "pre_norm": true,
+  "prediction_length": 96,
+  "random_mask_ratio": 0.5,
+  "scaling": "std",
+  "share_embedding": true,
+  "share_projection": true,
+  "stride": 8,
+  "transformers_version": "4.57.2",
+  "unmasked_channel_indices": null,
+  "use_cls_token": false
+}
checkpoint-1700/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21dc4879ece6a29c81f0a7f8d8308fc4abebdb09a65c0a1a3bcdcb183e3f3c3d
+size 2706232
checkpoint-1700/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4276747baa7d90ecde3827ea14aaa1aa4bd718694e6e32c90ab0470464454585
+size 4919755
checkpoint-1700/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:17cd930da9783ca70bad4b9cdeee6a06c0acea8f34645a333c93341f487f66a3
+size 14645
checkpoint-1700/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:831b1bc8606cc8d89268945da8ee1eae567fffaf4b2bc45f4bc9b1b5eaa02c13
+size 1465
checkpoint-1700/trainer_state.json ADDED
@@ -0,0 +1,153 @@
+{
+  "best_global_step": null,
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 10.0,
+  "eval_steps": 500,
+  "global_step": 1700,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.5882352941176471,
+      "grad_norm": 0.6052117347717285,
+      "learning_rate": 0.000941764705882353,
+      "loss": 0.4992,
+      "step": 100
+    },
+    {
+      "epoch": 1.1764705882352942,
+      "grad_norm": 0.9228031635284424,
+      "learning_rate": 0.0008829411764705883,
+      "loss": 0.3017,
+      "step": 200
+    },
+    {
+      "epoch": 1.7647058823529411,
+      "grad_norm": 0.9054152965545654,
+      "learning_rate": 0.0008241176470588235,
+      "loss": 0.2262,
+      "step": 300
+    },
+    {
+      "epoch": 2.3529411764705883,
+      "grad_norm": 1.4709264039993286,
+      "learning_rate": 0.0007652941176470588,
+      "loss": 0.2054,
+      "step": 400
+    },
+    {
+      "epoch": 2.9411764705882355,
+      "grad_norm": 0.9707149267196655,
+      "learning_rate": 0.0007064705882352941,
+      "loss": 0.189,
+      "step": 500
+    },
+    {
+      "epoch": 3.5294117647058822,
+      "grad_norm": 1.0779001712799072,
+      "learning_rate": 0.0006476470588235295,
+      "loss": 0.1946,
+      "step": 600
+    },
+    {
+      "epoch": 4.117647058823529,
+      "grad_norm": 0.618778645992279,
+      "learning_rate": 0.0005888235294117648,
+      "loss": 0.1707,
+      "step": 700
+    },
+    {
+      "epoch": 4.705882352941177,
+      "grad_norm": 0.7503094673156738,
+      "learning_rate": 0.0005300000000000001,
+      "loss": 0.1713,
+      "step": 800
+    },
+    {
+      "epoch": 5.294117647058823,
+      "grad_norm": 0.9175742268562317,
+      "learning_rate": 0.00047117647058823533,
+      "loss": 0.1562,
+      "step": 900
+    },
+    {
+      "epoch": 5.882352941176471,
+      "grad_norm": 1.0091431140899658,
+      "learning_rate": 0.0004123529411764706,
+      "loss": 0.1596,
+      "step": 1000
+    },
+    {
+      "epoch": 6.470588235294118,
+      "grad_norm": 0.6883541941642761,
+      "learning_rate": 0.0003535294117647059,
+      "loss": 0.1435,
+      "step": 1100
+    },
+    {
+      "epoch": 7.0588235294117645,
+      "grad_norm": 0.7159141898155212,
+      "learning_rate": 0.0002947058823529412,
+      "loss": 0.1385,
+      "step": 1200
+    },
+    {
+      "epoch": 7.647058823529412,
+      "grad_norm": 0.5263181924819946,
+      "learning_rate": 0.00023588235294117648,
+      "loss": 0.1278,
+      "step": 1300
+    },
+    {
+      "epoch": 8.235294117647058,
+      "grad_norm": 0.7766987681388855,
+      "learning_rate": 0.00017705882352941178,
+      "loss": 0.1137,
+      "step": 1400
+    },
+    {
+      "epoch": 8.823529411764707,
+      "grad_norm": 0.5680007338523865,
+      "learning_rate": 0.00011823529411764706,
+      "loss": 0.109,
+      "step": 1500
+    },
+    {
+      "epoch": 9.411764705882353,
+      "grad_norm": 0.6185672879219055,
+      "learning_rate": 5.9411764705882355e-05,
+      "loss": 0.1049,
+      "step": 1600
+    },
+    {
+      "epoch": 10.0,
+      "grad_norm": 0.800317108631134,
+      "learning_rate": 5.88235294117647e-07,
+      "loss": 0.1046,
+      "step": 1700
+    }
+  ],
+  "logging_steps": 100,
+  "max_steps": 1700,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 10,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 112426920837120.0,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": null
+}
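
checkpoint-1700 is the final state of the run: training stopped at epoch 10.0 (should_training_stop is true) with the logged loss down to about 0.10. A minimal inference sketch, assuming this checkpoint is available locally and using random noise as a stand-in for a real 512-point EirGrid demand window:

```python
# Sketch: 96-step forecast from the final checkpoint. The input below is random
# noise standing in for a real (batch, context_length, channels) = (1, 512, 1)
# window of EirGrid data; scaling is handled internally (scaling="std").
import torch
from transformers import PatchTSTForPrediction

model = PatchTSTForPrediction.from_pretrained("./checkpoint-1700")
model.eval()

past_values = torch.randn(1, 512, 1)  # 512 past observations, 1 channel
with torch.no_grad():
    outputs = model(past_values=past_values)

print(outputs.prediction_outputs.shape)  # torch.Size([1, 96, 1])
```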
checkpoint-1700/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32774ff877551baa776c41f2f9ab000017cfa88cf56832d20d86e8ff5e86bb0b
+size 5841
checkpoint-500/config.json ADDED
@@ -0,0 +1,51 @@
+{
+  "activation_function": "gelu",
+  "architectures": [
+    "PatchTSTForPrediction"
+  ],
+  "attention_dropout": 0.0,
+  "bias": true,
+  "channel_attention": false,
+  "channel_consistent_masking": false,
+  "context_length": 512,
+  "d_model": 128,
+  "distribution_output": "student_t",
+  "do_mask_input": null,
+  "dropout": 0.2,
+  "dtype": "float32",
+  "ff_dropout": 0.0,
+  "ffn_dim": 512,
+  "head_dropout": 0.0,
+  "init_std": 0.02,
+  "loss": "mse",
+  "mask_type": "random",
+  "mask_value": 0,
+  "model_type": "patchtst",
+  "norm_eps": 1e-05,
+  "norm_type": "batchnorm",
+  "num_attention_heads": 4,
+  "num_forecast_mask_patches": [
+    2
+  ],
+  "num_hidden_layers": 3,
+  "num_input_channels": 1,
+  "num_parallel_samples": 100,
+  "num_targets": 1,
+  "output_range": null,
+  "patch_length": 16,
+  "patch_stride": 1,
+  "path_dropout": 0.0,
+  "pooling_type": "mean",
+  "positional_dropout": 0.0,
+  "positional_encoding_type": "sincos",
+  "pre_norm": true,
+  "prediction_length": 96,
+  "random_mask_ratio": 0.5,
+  "scaling": "std",
+  "share_embedding": true,
+  "share_projection": true,
+  "stride": 8,
+  "transformers_version": "4.57.2",
+  "unmasked_channel_indices": null,
+  "use_cls_token": false
+}
checkpoint-500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f2a4df6a37059d8e9667493de8891843cbb4f04f005ae81c9bab11957dd97e8
+size 2706232
checkpoint-500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d2823881ef847728d49306e0178c242950001ced89b5bc58ea56983f2dde9e8
+size 4919755
checkpoint-500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4a9f217e852f439efa6bd32fde98d6867f11aa6ea13ddc021ba10af6a0b0934
+size 14645
checkpoint-500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d890428ddf5f25f5d14a33c2b07b254aeced1824515a0802d8099c5d1c1f9dc
+size 1465
checkpoint-500/trainer_state.json ADDED
@@ -0,0 +1,69 @@
+{
+  "best_global_step": null,
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.9411764705882355,
+  "eval_steps": 500,
+  "global_step": 500,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.5882352941176471,
+      "grad_norm": 0.6052117347717285,
+      "learning_rate": 0.000941764705882353,
+      "loss": 0.4992,
+      "step": 100
+    },
+    {
+      "epoch": 1.1764705882352942,
+      "grad_norm": 0.9228031635284424,
+      "learning_rate": 0.0008829411764705883,
+      "loss": 0.3017,
+      "step": 200
+    },
+    {
+      "epoch": 1.7647058823529411,
+      "grad_norm": 0.9054152965545654,
+      "learning_rate": 0.0008241176470588235,
+      "loss": 0.2262,
+      "step": 300
+    },
+    {
+      "epoch": 2.3529411764705883,
+      "grad_norm": 1.4709264039993286,
+      "learning_rate": 0.0007652941176470588,
+      "loss": 0.2054,
+      "step": 400
+    },
+    {
+      "epoch": 2.9411764705882355,
+      "grad_norm": 0.9707149267196655,
+      "learning_rate": 0.0007064705882352941,
+      "loss": 0.189,
+      "step": 500
+    }
+  ],
+  "logging_steps": 100,
+  "max_steps": 1700,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 10,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 33070633058304.0,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32774ff877551baa776c41f2f9ab000017cfa88cf56832d20d86e8ff5e86bb0b
+size 5841