lhallee committed
Commit e19c8d7 · verified · 1 Parent(s): 8c1cafd

Training in progress, step 3000, checkpoint

checkpoint-3000/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "annotation_transformer": true,
+   "architectures": [
+     "CAMP"
+   ],
+   "diff": false,
+   "dropout": 0.05,
+   "hidden_dim": 640,
+   "input_dim": 384,
+   "intermediate_dim": 2560,
+   "kernel_size": 11,
+   "latent": false,
+   "mlm": false,
+   "mnr": false,
+   "model_type": "CAMP",
+   "nhead": 8,
+   "nlp_path": "lhallee/annotation_transformer_uniref90",
+   "num_hidden_layers": 1,
+   "out_dim": 512,
+   "plm_path": "facebook/esm2_t33_650m_UR50D",
+   "pooling": "avg",
+   "token": null,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.41.2"
+ }
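The config above describes a custom CAMP model (model_type "CAMP" is not a built-in transformers architecture) that pairs an ESM2-650M protein backbone (plm_path) with an annotation transformer (nlp_path). A minimal sketch for inspecting the checkpoint config, assuming the checkpoint directory has been downloaded locally; it only reads the JSON and does not use the repo's own model code:

```python
import json

# Minimal sketch: read the checkpoint config directly (local path assumed).
# "CAMP" is a custom model_type, so AutoConfig/AutoModel from transformers
# would need the repository's own model code rather than this snippet.
with open("checkpoint-3000/config.json") as f:
    cfg = json.load(f)

print(cfg["model_type"])   # "CAMP"
print(cfg["plm_path"])     # protein LM backbone: facebook/esm2_t33_650m_UR50D
print(cfg["nlp_path"])     # annotation transformer: lhallee/annotation_transformer_uniref90
print(cfg["hidden_dim"], cfg["input_dim"], cfg["out_dim"])  # 640, 384, 512
```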
checkpoint-3000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89864b0c3d04fa0b4d87847b254d7ce6192e0183d6e6f6c458865662fc517d11
+ size 1384137156
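The diff stores only a Git LFS pointer; the ~1.4 GB weights themselves live in LFS. A sketch, assuming the file is fetched from the hosting repository (REPO_ID is a placeholder, not taken from this page), for verifying the download against the recorded size and sha256 and listing the stored tensors without materializing them:

```python
import hashlib
import os
from huggingface_hub import hf_hub_download
from safetensors import safe_open

REPO_ID = "<user>/<repo>"  # placeholder: substitute the actual model repository
path = hf_hub_download(REPO_ID, "checkpoint-3000/model.safetensors")

# Check the download against the size and sha256 recorded in the LFS pointer.
assert os.path.getsize(path) == 1384137156
h = hashlib.sha256()
with open(path, "rb") as fh:
    for chunk in iter(lambda: fh.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == "89864b0c3d04fa0b4d87847b254d7ce6192e0183d6e6f6c458865662fc517d11"

# List tensor names without loading the full state dict into memory.
with safe_open(path, framework="pt") as f:
    print(len(list(f.keys())), "tensors")
```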
checkpoint-3000/trainer_state.json ADDED
@@ -0,0 +1,276 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.18615040953090098,
+   "eval_steps": 1000,
+   "global_step": 3000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0062050136510300325,
+       "grad_norm": 101.88961029052734,
+       "learning_rate": 0.0001,
+       "loss": 4.5088,
+       "step": 100
+     },
+     {
+       "epoch": 0.012410027302060065,
+       "grad_norm": 99.7363510131836,
+       "learning_rate": 9.999038127056248e-05,
+       "loss": 0.8644,
+       "step": 200
+     },
+     {
+       "epoch": 0.018615040953090096,
+       "grad_norm": 59.41786575317383,
+       "learning_rate": 9.996152878304816e-05,
+       "loss": 0.7189,
+       "step": 300
+     },
+     {
+       "epoch": 0.02482005460412013,
+       "grad_norm": 61.05416488647461,
+       "learning_rate": 9.991345363842789e-05,
+       "loss": 0.6253,
+       "step": 400
+     },
+     {
+       "epoch": 0.03102506825515016,
+       "grad_norm": 53.87808609008789,
+       "learning_rate": 9.9846174333574e-05,
+       "loss": 0.5863,
+       "step": 500
+     },
+     {
+       "epoch": 0.03723008190618019,
+       "grad_norm": 48.58293914794922,
+       "learning_rate": 9.975971675414371e-05,
+       "loss": 0.5555,
+       "step": 600
+     },
+     {
+       "epoch": 0.04343509555721023,
+       "grad_norm": 42.353153228759766,
+       "learning_rate": 9.965411416461959e-05,
+       "loss": 0.5566,
+       "step": 700
+     },
+     {
+       "epoch": 0.04964010920824026,
+       "grad_norm": 46.06748580932617,
+       "learning_rate": 9.952940719551112e-05,
+       "loss": 0.5425,
+       "step": 800
+     },
+     {
+       "epoch": 0.05584512285927029,
+       "grad_norm": 46.486637115478516,
+       "learning_rate": 9.938564382772205e-05,
+       "loss": 0.5004,
+       "step": 900
+     },
+     {
+       "epoch": 0.06205013651030032,
+       "grad_norm": 43.99421691894531,
+       "learning_rate": 9.922287937408994e-05,
+       "loss": 0.4722,
+       "step": 1000
+     },
+     {
+       "epoch": 0.06205013651030032,
+       "eval_avg_non_pair_similarity": -0.00011276310920446573,
+       "eval_avg_pair_similarity": 0.0020758428336121143,
+       "eval_loss": 0.44835320115089417,
+       "eval_runtime": 52.6112,
+       "eval_samples_per_second": 9.504,
+       "eval_similarity_ratio": -18.408882552609725,
+       "eval_steps_per_second": 0.304,
+       "step": 1000
+     },
+     {
+       "epoch": 0.06825515016133035,
+       "grad_norm": 38.14698791503906,
+       "learning_rate": 9.904117645810441e-05,
+       "loss": 0.4444,
+       "step": 1100
+     },
+     {
+       "epoch": 0.07446016381236038,
+       "grad_norm": 29.508113861083984,
+       "learning_rate": 9.884060498981296e-05,
+       "loss": 0.4615,
+       "step": 1200
+     },
+     {
+       "epoch": 0.08066517746339041,
+       "grad_norm": 34.64622497558594,
+       "learning_rate": 9.862124213892304e-05,
+       "loss": 0.4665,
+       "step": 1300
+     },
+     {
+       "epoch": 0.08687019111442046,
+       "grad_norm": 40.75010299682617,
+       "learning_rate": 9.838317230511112e-05,
+       "loss": 0.4275,
+       "step": 1400
+     },
+     {
+       "epoch": 0.09307520476545049,
+       "grad_norm": 28.284589767456055,
+       "learning_rate": 9.81264870855499e-05,
+       "loss": 0.4129,
+       "step": 1500
+     },
+     {
+       "epoch": 0.09928021841648052,
+       "grad_norm": 34.2197151184082,
+       "learning_rate": 9.785128523966653e-05,
+       "loss": 0.4389,
+       "step": 1600
+     },
+     {
+       "epoch": 0.10548523206751055,
+       "grad_norm": 27.808895111083984,
+       "learning_rate": 9.755767265114484e-05,
+       "loss": 0.4184,
+       "step": 1700
+     },
+     {
+       "epoch": 0.11169024571854058,
+       "grad_norm": 35.00907897949219,
+       "learning_rate": 9.724576228718678e-05,
+       "loss": 0.3995,
+       "step": 1800
+     },
+     {
+       "epoch": 0.11789525936957061,
+       "grad_norm": 30.57769203186035,
+       "learning_rate": 9.691567415504832e-05,
+       "loss": 0.415,
+       "step": 1900
+     },
+     {
+       "epoch": 0.12410027302060064,
+       "grad_norm": 26.989404678344727,
+       "learning_rate": 9.656753525586681e-05,
+       "loss": 0.4052,
+       "step": 2000
+     },
+     {
+       "epoch": 0.12410027302060064,
+       "eval_avg_non_pair_similarity": 0.0006865022985091086,
+       "eval_avg_pair_similarity": 0.007665629971772433,
+       "eval_loss": 0.3601702153682709,
+       "eval_runtime": 52.565,
+       "eval_samples_per_second": 9.512,
+       "eval_similarity_ratio": 11.166211662247951,
+       "eval_steps_per_second": 0.304,
+       "step": 2000
+     },
+     {
+       "epoch": 0.1303052866716307,
+       "grad_norm": 32.37137222290039,
+       "learning_rate": 9.620147953579737e-05,
+       "loss": 0.3979,
+       "step": 2100
+     },
+     {
+       "epoch": 0.1365103003226607,
+       "grad_norm": 22.6475772857666,
+       "learning_rate": 9.581764783447719e-05,
+       "loss": 0.3807,
+       "step": 2200
+     },
+     {
+       "epoch": 0.14271531397369075,
+       "grad_norm": 22.612077713012695,
+       "learning_rate": 9.54161878308377e-05,
+       "loss": 0.3711,
+       "step": 2300
+     },
+     {
+       "epoch": 0.14892032762472077,
+       "grad_norm": 30.973310470581055,
+       "learning_rate": 9.499725398628507e-05,
+       "loss": 0.3658,
+       "step": 2400
+     },
+     {
+       "epoch": 0.1551253412757508,
+       "grad_norm": 24.443492889404297,
+       "learning_rate": 9.456100748527143e-05,
+       "loss": 0.37,
+       "step": 2500
+     },
+     {
+       "epoch": 0.16133035492678083,
+       "grad_norm": 33.75666809082031,
+       "learning_rate": 9.410761617327921e-05,
+       "loss": 0.3512,
+       "step": 2600
+     },
+     {
+       "epoch": 0.16753536857781087,
+       "grad_norm": 23.727365493774414,
+       "learning_rate": 9.363725449224282e-05,
+       "loss": 0.3318,
+       "step": 2700
+     },
+     {
+       "epoch": 0.17374038222884092,
+       "grad_norm": 28.48720359802246,
+       "learning_rate": 9.315010341343213e-05,
+       "loss": 0.3729,
+       "step": 2800
+     },
+     {
+       "epoch": 0.17994539587987093,
+       "grad_norm": 28.53714370727539,
+       "learning_rate": 9.264635036782405e-05,
+       "loss": 0.3369,
+       "step": 2900
+     },
+     {
+       "epoch": 0.18615040953090098,
+       "grad_norm": 21.766740798950195,
+       "learning_rate": 9.212618917398855e-05,
+       "loss": 0.317,
+       "step": 3000
+     },
+     {
+       "epoch": 0.18615040953090098,
+       "eval_avg_non_pair_similarity": 0.00025785160030682527,
+       "eval_avg_pair_similarity": 0.0073178326906636355,
+       "eval_loss": 0.323689728975296,
+       "eval_runtime": 52.8381,
+       "eval_samples_per_second": 9.463,
+       "eval_similarity_ratio": 28.38001657525464,
+       "eval_steps_per_second": 0.303,
+       "step": 3000
+     }
+   ],
+   "logging_steps": 100,
+   "max_steps": 16116,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 1000,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 0.0,
+   "train_batch_size": 32,
+   "trial_name": null,
+   "trial_params": null
+ }
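trainer_state.json carries the step-3000 training log: the loss falls from 4.5088 to 0.317 over 3000 of 16116 steps, and each logged eval_similarity_ratio matches eval_avg_pair_similarity divided by eval_avg_non_pair_similarity. A small sketch that summarizes the log, assuming a local copy of the checkpoint:

```python
import json

# Sketch: summarize the training log above (local checkpoint path assumed).
with open("checkpoint-3000/trainer_state.json") as f:
    state = json.load(f)

train = [e for e in state["log_history"] if "loss" in e]
evals = [e for e in state["log_history"] if "eval_loss" in e]

print(f"progress: step {state['global_step']}/{state['max_steps']} "
      f"(epoch {state['epoch']:.4f})")
print(f"train loss: {train[0]['loss']} -> {train[-1]['loss']}")

# The logged eval_similarity_ratio equals pair / non-pair average similarity.
for e in evals:
    ratio = e["eval_avg_pair_similarity"] / e["eval_avg_non_pair_similarity"]
    print(e["step"], round(e["eval_loss"], 4), round(ratio, 2), e["eval_similarity_ratio"])
```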
checkpoint-3000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45f6f301aa93fd04cb88245f1be6b5d49fae9be8dd0abe1d8a346a6aadd21663
+ size 5112
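training_args.bin is the pickled TrainingArguments object the Trainer saves alongside each checkpoint. A sketch for inspecting it, assuming a local copy; weights_only=False is needed on recent PyTorch because the file is a full pickle rather than a tensor file:

```python
import torch

# Sketch: load the pickled TrainingArguments saved with the checkpoint
# (local path assumed; requires transformers to be installed for unpickling).
args = torch.load("checkpoint-3000/training_args.bin", weights_only=False)
print(args.per_device_train_batch_size, args.learning_rate, args.max_steps)
```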