panikos committed
Commit d08b211 · verified · 1 Parent(s): 7425d22

Upload train_sapbert_extended_fixed.py with huggingface_hub

Files changed (1)
  1. train_sapbert_extended_fixed.py +390 -0
train_sapbert_extended_fixed.py ADDED
@@ -0,0 +1,390 @@
+ # /// script
+ # dependencies = [
+ #     "transformers>=4.38.0",
+ #     "datasets>=2.16.0",
+ #     "torch>=2.1.0",
+ #     "scikit-learn>=1.3.0",
+ #     "accelerate>=0.26.0",
+ # ]
+ # ///
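+ # The block above is PEP 723 inline script metadata; a PEP 723-aware runner
+ # (e.g. `uv run train_sapbert_extended_fixed.py`) can install these
+ # dependencies before executing the script.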
+
+ """
+ SAPBERT Training on Extended FDA LOINC2SDTM Dataset
+ Multi-label classification for 8 SDTM fields
+ FIXED VERSION with better error handling and logging
+ """
+
+ import os
+ import sys
+ import json
+ import traceback
+ from datasets import load_dataset
+ from transformers import (
+     AutoTokenizer,
+     AutoModel,
+     TrainingArguments,
+     Trainer,
+ )
+ import torch
+ import torch.nn as nn
+
+ def log(msg):
+     """Print with flush to ensure immediate output"""
+     print(msg, flush=True)
+
+ try:
+     log("=" * 80)
+     log("SAPBERT TRAINING - Extended FDA Dataset (8 SDTM Fields)")
+     log("FIXED VERSION - Enhanced error handling and logging")
+     log("=" * 80)
+
+     # Configuration
+     BASE_MODEL = "cambridgeltl/SapBERT-from-PubMedBERT-fulltext"
+     DATASET_NAME = "panikos/loinc2sdtm-fda-extended"
+     OUTPUT_DIR = "loinc2sdtm-sapbert-extended-model"
+     HF_USERNAME = "panikos"
+
+     # Fields to train on (using only the 8 core SDTM fields)
+     TRAIN_FIELDS = [
+         'lbtestcd',
+         'lbtest',
+         'lbspec',
+         'lbstresu',
+         'lbmethod',
+         'lbptfl',
+         'lbrestyp',
+         'lbresscl',
+     ]
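+     # Each field above gets its own classification head; a field's label space
+     # is the set of distinct values observed for it in the dataset (built below).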
+
+     log("\n[1/7] Loading extended FDA structured dataset...")
+     log(f"  Dataset: {DATASET_NAME}")
+
+     try:
+         dataset = load_dataset(DATASET_NAME, split="train")
+         log(f"  ✓ Loaded {len(dataset)} examples from FDA source")
+         log(f"  ✓ Training on {len(TRAIN_FIELDS)} SDTM fields")
+         log(f"  ✓ Dataset features: {list(dataset.features.keys())}")
+     except Exception as e:
+         log("  ✗ FAILED to load dataset!")
+         log(f"  Error: {str(e)}")
+         traceback.print_exc()
+         sys.exit(1)
+
+     # Build vocabularies
+     log("\n[2/7] Building field vocabularies...")
+     vocabularies = {field: set() for field in TRAIN_FIELDS}
+
+     try:
+         for i, example in enumerate(dataset):
+             if i % 500 == 0:
+                 log(f"  Processing example {i}/{len(dataset)}...")
+             for field in TRAIN_FIELDS:
+                 value = example.get(field, '')
+                 if value and value.strip():
+                     vocabularies[field].add(value.upper().strip())
+
+         vocabularies = {k: sorted(list(v)) for k, v in vocabularies.items()}
+         log("  ✓ Vocabulary sizes:")
+         for field, vocab in vocabularies.items():
+             log(f"    {field.upper()}: {len(vocab)} unique values")
+     except Exception as e:
+         log("  ✗ FAILED to build vocabularies!")
+         log(f"  Error: {str(e)}")
+         traceback.print_exc()
+         sys.exit(1)
+
+     # Create label mappings
+     try:
+         label2id = {
+             field: {label: idx for idx, label in enumerate(vocab)}
+             for field, vocab in vocabularies.items()
+         }
+         id2label = {
+             field: {idx: label for label, idx in mapping.items()}
+             for field, mapping in label2id.items()
+         }
+         log("  ✓ Label mappings created")
+     except Exception as e:
+         log("  ✗ FAILED to create label mappings!")
+         log(f"  Error: {str(e)}")
+         traceback.print_exc()
+         sys.exit(1)
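+
+     # label2id/id2label are persisted to vocabularies.json below so inference
+     # code can map classifier-head indices back to SDTM values. Note that
+     # json.dump converts the integer keys of id2label to strings on save.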
+
+     log("\n[3/7] Loading SAPBERT model...")
+     log(f"  Base model: {BASE_MODEL}")
+
+     try:
+         tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
+         log("  ✓ Tokenizer loaded")
+         base_model = AutoModel.from_pretrained(BASE_MODEL)
+         log("  ✓ Base model loaded successfully!")
+     except Exception as e:
+         log("  ✗ FAILED to load SAPBERT model!")
+         log(f"  Error: {str(e)}")
+         traceback.print_exc()
+         sys.exit(1)
+
+     # Multi-label classifier with LOINC metadata as input
+     class LOINC2SDTMClassifier(nn.Module):
+         def __init__(self, base_model, num_classes_dict):
+             super().__init__()
+             self.encoder = base_model
+             self.config = base_model.config
+             self.hidden_size = base_model.config.hidden_size
+
+             self.classifiers = nn.ModuleDict({
+                 field: nn.Sequential(
+                     nn.Linear(self.hidden_size, self.hidden_size // 2),
+                     nn.ReLU(),
+                     nn.Dropout(0.1),
+                     nn.Linear(self.hidden_size // 2, num_classes)
+                 )
+                 for field, num_classes in num_classes_dict.items()
+             })
+
+         def forward(self, input_ids, attention_mask, labels=None):
+             outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask)
+             cls_embedding = outputs.last_hidden_state[:, 0, :]
+
+             logits = {
+                 field: classifier(cls_embedding)
+                 for field, classifier in self.classifiers.items()
+             }
+
+             loss = None
+             if labels is not None:
+                 loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
+                 losses = []
+                 for field in logits.keys():
+                     if field in labels:
+                         field_loss = loss_fct(logits[field], labels[field])
+                         if not torch.isnan(field_loss):
+                             losses.append(field_loss)
+                 if losses:
+                     loss = sum(losses) / len(losses)
+
+             return {'loss': loss, 'logits': logits}
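+
+     # A head whose batch labels are all -100 makes CrossEntropyLoss return NaN
+     # (every target equals ignore_index); forward() skips those heads, so the
+     # loss is the mean over heads with at least one labelled example.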
+
+     try:
+         num_classes_dict = {field: len(vocab) for field, vocab in vocabularies.items()}
+         model = LOINC2SDTMClassifier(base_model, num_classes_dict)
+         total_params = sum(p.numel() for p in model.parameters())
+         trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
+         log("\n[4/7] Classifier created:")
+         log(f"  Total parameters: {total_params:,}")
+         log(f"  Trainable parameters: {trainable_params:,}")
+         log("  ✓ Model architecture initialized")
+     except Exception as e:
+         log("  ✗ FAILED to create classifier!")
+         log(f"  Error: {str(e)}")
+         traceback.print_exc()
+         sys.exit(1)
+
+     # Prepare dataset
+     class LOINC2SDTMDataset(torch.utils.data.Dataset):
+         def __init__(self, dataset, tokenizer, label2id, train_fields):
+             self.examples = []
+             log(f"  Creating dataset wrapper for {len(dataset)} examples...")
+
+             for i, example in enumerate(dataset):
+                 if i % 500 == 0:
+                     log(f"  Processed {i}/{len(dataset)} examples...")
+
+                 # Create rich input combining LOINC code and metadata
+                 loinc_code = example['loinc_code']
+                 component = example.get('component', '')
+                 property_val = example.get('property', '')
+                 system = example.get('system', '')
+
+                 # Rich input: LOINC code + key metadata
+                 input_text = f"{loinc_code} {component} {property_val} {system}"
+
+                 # Tokenize input
+                 encoding = tokenizer(
+                     input_text,
+                     padding='max_length',
+                     truncation=True,
+                     max_length=64,
+                     return_tensors='pt'
+                 )
+
+                 # Get labels for trained fields
+                 labels = {}
+                 for field in train_fields:
+                     value = example.get(field, '')
+                     if value and value.strip():
+                         value_upper = value.upper().strip()
+                         if value_upper in label2id[field]:
+                             labels[field] = label2id[field][value_upper]
+                         else:
+                             labels[field] = -100
+                     else:
+                         labels[field] = -100
+
+                 self.examples.append({
+                     'input_ids': encoding['input_ids'].squeeze(0),
+                     'attention_mask': encoding['attention_mask'].squeeze(0),
+                     'labels': labels
+                 })
+
+         def __len__(self):
+             return len(self.examples)
+
+         def __getitem__(self, idx):
+             return self.examples[idx]
+
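+     # Missing or out-of-vocabulary field values are labelled -100, matching the
+     # ignore_index used by CrossEntropyLoss in the model, so they add no gradient.
+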
+     log("\n[5/7] Preparing training data...")
+     try:
+         train_dataset = LOINC2SDTMDataset(dataset, tokenizer, label2id, TRAIN_FIELDS)
+         log(f"  ✓ Prepared {len(train_dataset)} training examples")
+     except Exception as e:
+         log("  ✗ FAILED to prepare training data!")
+         log(f"  Error: {str(e)}")
+         traceback.print_exc()
+         sys.exit(1)
+
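+     # The default data collator cannot batch the per-field label dict, so the
+     # collator below stacks inputs and builds one label tensor per SDTM field.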
+     # Custom collator
+     def collate_fn(batch):
+         input_ids = torch.stack([item['input_ids'] for item in batch])
+         attention_mask = torch.stack([item['attention_mask'] for item in batch])
+         labels = {
+             field: torch.tensor([item['labels'][field] for item in batch])
+             for field in TRAIN_FIELDS
+         }
+         return {
+             'input_ids': input_ids,
+             'attention_mask': attention_mask,
+             'labels': labels
+         }
+
+     # Training args
+     training_args = TrainingArguments(
+         output_dir=OUTPUT_DIR,
+         num_train_epochs=10,
+         per_device_train_batch_size=32,
+         gradient_accumulation_steps=1,
+         learning_rate=2e-5,
+         lr_scheduler_type="cosine",
+         warmup_ratio=0.1,
+         logging_steps=10,  # More frequent logging
+         logging_first_step=True,
+         save_strategy="epoch",
+         save_total_limit=2,
+         fp16=False,
+         bf16=True,
+         report_to="none",
+         push_to_hub=True,
+         hub_model_id=f"{HF_USERNAME}/{OUTPUT_DIR}",
+         hub_strategy="end",
+     )
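+
+     # bf16=True assumes a GPU with bfloat16 support (Ampere or newer, e.g. the
+     # A10G mentioned below); on older hardware set bf16=False and fp16=True.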
+
+     log("\n[6/7] Training configuration:")
+     log(f"  Epochs: {training_args.num_train_epochs}")
+     log(f"  Batch size: {training_args.per_device_train_batch_size}")
+     log(f"  Learning rate: {training_args.learning_rate}")
+     log(f"  Steps per epoch: ~{len(train_dataset) // training_args.per_device_train_batch_size}")
+     log(f"  Total steps: ~{(len(train_dataset) // training_args.per_device_train_batch_size) * training_args.num_train_epochs}")
+     log("  Input: LOINC code + metadata (component, property, system)")
+     log(f"  Output: {len(TRAIN_FIELDS)} SDTM fields")
+     log(f"  Mixed precision: {'BF16' if training_args.bf16 else 'FP16' if training_args.fp16 else 'FP32'}")
+
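+     # MultiLabelTrainer below overrides compute_loss to pass the per-field label
+     # dict through to the model, and get_train_dataloader to apply collate_fn;
+     # Trainer's default collator/loss path expects a single flat `labels` tensor.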
+     # Custom trainer
+     class MultiLabelTrainer(Trainer):
+         def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
+             labels = inputs.pop("labels")
+             outputs = model(**inputs, labels=labels)
+             loss = outputs["loss"]
+
+             # Log loss periodically
+             if self.state.global_step % 10 == 0:
+                 log(f"  Step {self.state.global_step}: loss = {loss.item():.4f}")
+
+             return (loss, outputs) if return_outputs else loss
+
+         def get_train_dataloader(self):
+             from torch.utils.data import DataLoader
+             return DataLoader(
+                 self.train_dataset,
+                 batch_size=self.args.per_device_train_batch_size,
+                 collate_fn=collate_fn,
+                 shuffle=True
+             )
+
+     try:
+         trainer = MultiLabelTrainer(
+             model=model,
+             args=training_args,
+             train_dataset=train_dataset,
+         )
+         log("  ✓ Trainer initialized")
+     except Exception as e:
+         log("  ✗ FAILED to initialize trainer!")
+         log(f"  Error: {str(e)}")
+         traceback.print_exc()
+         sys.exit(1)
+
+     log("\n[7/7] Starting training...")
+     log("=" * 80)
+     log("This will take approximately 15-20 minutes on an A10G GPU")
+     log("=" * 80)
+
+     try:
+         trainer.train()
+         log("\n" + "=" * 80)
+         log("✓ Training completed successfully!")
+         log("=" * 80)
+     except Exception as e:
+         log("\n✗ TRAINING FAILED!")
+         log(f"Error: {str(e)}")
+         traceback.print_exc()
+         sys.exit(1)
+
+     log("\nSaving model and vocabularies...")
+     try:
+         trainer.save_model(OUTPUT_DIR)
+         log("  ✓ Model saved")
+         tokenizer.save_pretrained(OUTPUT_DIR)
+         log("  ✓ Tokenizer saved")
+
+         # Save vocabularies and metadata
+         vocab_file = os.path.join(OUTPUT_DIR, "vocabularies.json")
+         with open(vocab_file, 'w') as f:
+             json.dump({
+                 'vocabularies': vocabularies,
+                 'label2id': label2id,
+                 'id2label': id2label,
+                 'train_fields': TRAIN_FIELDS
+             }, f, indent=2)
+         log("  ✓ Vocabularies saved")
+     except Exception as e:
+         log("  ✗ FAILED to save model!")
+         log(f"  Error: {str(e)}")
+         traceback.print_exc()
+         sys.exit(1)
+
+     log("\nPushing to Hub...")
+     try:
+         trainer.push_to_hub()
+         log("  ✓ Model pushed to Hub")
+     except Exception as e:
+         log("  ✗ FAILED to push to Hub!")
+         log(f"  Error: {str(e)}")
+         traceback.print_exc()
+         sys.exit(1)
+
+     log("\n" + "=" * 80)
+     log("✓ SUCCESS! Model training and upload complete!")
+     log("=" * 80)
+     log(f"Model available at: https://huggingface.co/{HF_USERNAME}/{OUTPUT_DIR}")
+     log(f"Trained on {len(TRAIN_FIELDS)} SDTM fields with rich LOINC metadata")
+     log(f"Total examples: {len(train_dataset)}")
+     log("=" * 80)
+
+ except Exception as e:
+     log("\n" + "=" * 80)
+     log("✗ FATAL ERROR - Training script crashed!")
+     log("=" * 80)
+     log(f"Error: {str(e)}")
+     log("\nFull traceback:")
+     traceback.print_exc()
+     sys.exit(1)