davanstrien (HF Staff) committed
Commit 95f8934 · 1 Parent(s): ea0d2ce

Add LightOnOCR fine-tuning script for OCR datasets

Files changed (1)
  1. lightonocr-finetune.py +829 -0
lightonocr-finetune.py ADDED
@@ -0,0 +1,829 @@
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "torch>=2.0.0",
#     "datasets>=2.14.0",
#     "accelerate>=0.24.0",
#     "huggingface-hub",
#     "pillow>=12.0.0",
#     "jiwer>=3.0.0",
#     "tqdm>=4.65.0",
#     "transformers @ git+https://github.com/baptiste-aubertin/transformers.git@main",
# ]
# ///

"""
Fine-tune LightOnOCR on OCR datasets.

LightOnOCR is an end-to-end trainable vision-language model specifically designed for OCR tasks.
This script enables fine-tuning on custom datasets for improved performance on specific domains,
languages, or document types.

Examples:
    # Basic fine-tuning on IAM handwriting dataset
    uv run lightonocr-finetune.py \
        --dataset-id HuggingFaceM4/FineVision \
        --subset iam \
        --output-dir ./lightonocr-iam \
        --epochs 2

    # Fine-tune with frozen language model to save memory
    uv run lightonocr-finetune.py \
        --dataset-id HuggingFaceM4/FineVision \
        --subset olmOCR-mix-0225-documents \
        --output-dir ./lightonocr-docs \
        --freeze-language \
        --batch-size 8

    # Push to Hub with evaluation metrics
    uv run lightonocr-finetune.py \
        --dataset-id HuggingFaceM4/FineVision \
        --subset iam \
        --hub-model-id username/lightonocr-iam \
        --push-to-hub \
        --eval-samples 100

    # Run on HF Jobs with GPU
    hf jobs run --gpu l4x1 \
        uv run lightonocr-finetune.py \
        --dataset-id custom/ocr-dataset \
        --output-dir ./custom-ocr \
        --epochs 3
"""

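# Expected dataset schema (as assumed by OCRDataCollator below): each example carries an
# "images" list with exactly one PIL image and a "texts" list with exactly one conversation
# dict whose "assistant" field holds the target transcription, matching the FineVision
# subsets used in the examples above. Datasets with a different layout would need to be
# remapped to this structure before training.
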
import argparse
import json
import logging
import os
import sys
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import torch
from datasets import load_dataset, DatasetDict
from huggingface_hub import HfApi, login
from jiwer import cer, wer
from PIL import Image
from tqdm import tqdm
from transformers import (
    AutoProcessor,
    LightOnOCRForConditionalGeneration,
    Trainer,
    TrainingArguments,
)

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Constants for the assistant pattern in chat template
ASSISTANT_START_PATTERN = [151645, 1699, 151644, 77091, 1699]
DEFAULT_MAX_LENGTH = 1024
DEFAULT_LONGEST_EDGE = 700


class OCRDataCollator:
    """Data collator for OCR fine-tuning."""

    def __init__(
        self,
        processor,
        max_length=DEFAULT_MAX_LENGTH,
        longest_edge=DEFAULT_LONGEST_EDGE,
    ):
        self.processor = processor
        self.max_length = max_length
        self.longest_edge = longest_edge

    def __call__(self, examples):
        batch_messages = []
        batch_images = []

        for example in examples:
            example_images = example["images"]
            example_texts = example["texts"]

            # Validate single image/text per example
            if len(example_images) != 1 or len(example_texts) != 1:
                logger.warning(
                    f"Skipping example with {len(example_images)} images and {len(example_texts)} texts"
                )
                continue

            image = example_images[0].convert("RGB")
            batch_images.append(image)

            # Extract assistant text from conversation
            conversation = example_texts[0]
            assistant_text = conversation.get("assistant", "").strip()

            messages = [
                {"role": "user", "content": [{"type": "image"}]},
                {
                    "role": "assistant",
                    "content": [{"type": "text", "text": assistant_text}],
                },
            ]
            batch_messages.append(messages)

        if len(batch_images) == 0:
            return None

        # Apply chat template
        texts = [
            self.processor.apply_chat_template(
                messages, tokenize=False, add_generation_prompt=False
            )
            for messages in batch_messages
        ]

        # Process inputs
        inputs = self.processor(
            text=texts,
            images=batch_images,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=self.max_length,
            size={"longest_edge": self.longest_edge},
        )

        # Create labels (mask prompt, train only on assistant response)
        labels = inputs["input_ids"].clone()
        pad_token_id = self.processor.tokenizer.pad_token_id

        for i in range(len(labels)):
            full_ids = inputs["input_ids"][i].tolist()

            # Find where assistant content starts
            assistant_content_start = None

            # Try the standard pattern: <|im_end|>\n<|im_start|>assistant\n
            for idx in range(len(full_ids) - len(ASSISTANT_START_PATTERN)):
                if (
                    full_ids[idx : idx + len(ASSISTANT_START_PATTERN)]
                    == ASSISTANT_START_PATTERN
                ):
                    assistant_content_start = idx + len(ASSISTANT_START_PATTERN)
                    break

            if assistant_content_start is None:
                # Some samples may not have the exact pattern - this is expected
                # The model will train on samples where the pattern is found
                labels[i, :] = -100
            else:
                # Mask everything first
                labels[i, :] = -100

                # Unmask from assistant content start to end
                for idx in range(assistant_content_start, len(full_ids)):
                    if full_ids[idx] == pad_token_id:
                        break
                    labels[i, idx] = inputs["input_ids"][i, idx]

            # Mask padding tokens
            labels[i, inputs["input_ids"][i] == pad_token_id] = -100

        inputs["labels"] = labels

        # Convert to proper dtype
        inputs["pixel_values"] = inputs["pixel_values"].to(torch.bfloat16)

        return inputs

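# A quick (hypothetical) way to sanity-check the collator before a full training run,
# assuming a `processor` and a FineVision-style `train_ds` like the ones created in main():
#
#     collator = OCRDataCollator(processor)
#     batch = collator([train_ds[0], train_ds[1]])
#     print(batch["input_ids"].shape, int((batch["labels"] != -100).sum()))
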
def evaluate_model(
    model,
    processor,
    dataset,
    num_samples: int = 50,
    batch_size: int = 8,
    device: str = "cuda",
    description: str = "Model",
) -> Dict[str, float]:
    """
    Evaluate model on dataset and compute OCR metrics.

    Returns:
        Dictionary with CER, WER, and perfect match count
    """
    model.eval()
    predictions = []
    ground_truths = []

    logger.info(f"Evaluating {description} on {num_samples} samples...")

    # Process in batches
    for start_idx in tqdm(range(0, min(num_samples, len(dataset)), batch_size)):
        end_idx = min(start_idx + batch_size, num_samples, len(dataset))
        batch_samples = [dataset[i] for i in range(start_idx, end_idx)]

        batch_images = [[s["images"][0]] for s in batch_samples]
        batch_ground_truths = [
            s["texts"][0]["assistant"].strip() for s in batch_samples
        ]

        # Prepare inputs
        messages = [{"role": "user", "content": [{"type": "image"}]}]
        text = processor.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        texts = [text] * len(batch_images)

        inputs = processor(
            text=texts,
            images=batch_images,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=DEFAULT_MAX_LENGTH,
            size={"longest_edge": DEFAULT_LONGEST_EDGE},
        ).to(device)
        inputs["pixel_values"] = inputs["pixel_values"].to(torch.bfloat16)

        # Generate predictions
        with torch.no_grad():
            outputs = model.generate(**inputs, max_new_tokens=512, do_sample=True)

        input_length = inputs["input_ids"].shape[1]
        generated_ids = outputs[:, input_length:]
        batch_predictions = processor.batch_decode(
            generated_ids, skip_special_tokens=True
        )
        batch_predictions = [p.strip() for p in batch_predictions]

        predictions.extend(batch_predictions)
        ground_truths.extend(batch_ground_truths)

    # Compute metrics
    cer_score = cer(ground_truths, predictions) * 100
    wer_score = wer(ground_truths, predictions) * 100
    perfect_matches = sum(
        1 for pred, gt in zip(predictions, ground_truths) if pred == gt
    )

    logger.info(
        f"CER: {cer_score:.2f}% | WER: {wer_score:.2f}% | Perfect: {perfect_matches}/{num_samples}"
    )

    # Show a few examples
    for i in range(min(3, len(predictions))):
        match = "✅" if predictions[i] == ground_truths[i] else "❌"
        logger.info(
            f"{match} Sample {i + 1}: '{predictions[i][:50]}...' vs '{ground_truths[i][:50]}...'"
        )

    return {
        "cer": cer_score,
        "wer": wer_score,
        "perfect_matches": perfect_matches,
        "total_samples": num_samples,
    }

def create_model_card_content(
    model_id: str,
    dataset_id: str,
    subset: Optional[str],
    base_metrics: Dict[str, float],
    finetuned_metrics: Dict[str, float],
    training_args: TrainingArguments,
    freeze_config: Dict[str, bool],
    num_train_samples: int,
) -> str:
    """Generate model card content with training details and metrics."""

    # Calculate improvements
    cer_improvement = base_metrics["cer"] - finetuned_metrics["cer"]
    wer_improvement = base_metrics["wer"] - finetuned_metrics["wer"]
    perfect_improvement = (
        finetuned_metrics["perfect_matches"] - base_metrics["perfect_matches"]
    )

    # Determine which components were frozen
    frozen_components = [comp for comp, is_frozen in freeze_config.items() if is_frozen]
    frozen_str = (
        ", ".join(frozen_components) if frozen_components else "None (full fine-tuning)"
    )

    dataset_str = f"{dataset_id}/{subset}" if subset else dataset_id

    content = f"""---
license: mit
tags:
- vision
- ocr
- document-understanding
- transformers
base_model: lightonai/LightOnOCR-1B-1025
datasets:
- {dataset_id}
metrics:
- cer
- wer
library_name: transformers
---

# {model_id.split("/")[-1]}

This model is a fine-tuned version of [LightOnOCR-1B-1025](https://huggingface.co/lightonai/LightOnOCR-1B-1025) on the {dataset_str} dataset.

## Model Description

LightOnOCR is an end-to-end trainable vision-language model specifically designed for OCR tasks. This fine-tuned version has been optimized for improved performance on the target dataset.

## Training Details

### Dataset
- **Source**: {dataset_str}
- **Training samples**: {num_train_samples}
- **Validation samples**: Used for model selection

### Training Configuration
- **Epochs**: {training_args.num_train_epochs}
- **Batch size**: {training_args.per_device_train_batch_size} (effective: {training_args.per_device_train_batch_size * training_args.gradient_accumulation_steps})
- **Learning rate**: {training_args.learning_rate}
- **Frozen components**: {frozen_str}
- **Hardware**: GPU with mixed precision (bf16)

## Evaluation Results

### Performance Comparison

| Metric | Base Model | Fine-tuned | Improvement |
|--------|------------|------------|-------------|
| **CER (%)** | {base_metrics["cer"]:.2f} | {finetuned_metrics["cer"]:.2f} | {cer_improvement:+.2f} |
| **WER (%)** | {base_metrics["wer"]:.2f} | {finetuned_metrics["wer"]:.2f} | {wer_improvement:+.2f} |
| **Perfect Matches** | {base_metrics["perfect_matches"]}/{base_metrics["total_samples"]} | {finetuned_metrics["perfect_matches"]}/{finetuned_metrics["total_samples"]} | {perfect_improvement:+d} |

*Lower is better for CER and WER. Evaluation performed on {finetuned_metrics["total_samples"]} test samples.*

## Usage

```python
from transformers import AutoProcessor, LightOnOCRForConditionalGeneration
from PIL import Image
import torch

# Load model and processor
model = LightOnOCRForConditionalGeneration.from_pretrained(
    "{model_id}",
    torch_dtype=torch.bfloat16,
    device_map="auto"
)
processor = AutoProcessor.from_pretrained("{model_id}")

# Prepare image
image = Image.open("your_image.jpg").convert("RGB")

# Create prompt
messages = [
    {{"role": "user", "content": [{{"type": "image"}}]}}
]

# Process and generate
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = processor(
    text=[text],
    images=[[image]],
    return_tensors="pt",
    max_length=1024
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=512)
generated_text = processor.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
print(generated_text)
```

## Training Script

This model was trained using the UV Scripts training pipeline. To reproduce or further fine-tune:

```bash
uv run https://huggingface.co/datasets/uv-scripts/transformers-training/raw/main/lightonocr-finetune.py \\
    --dataset-id {dataset_id} \\
    {"--subset " + subset if subset else ""} \\
    --output-dir ./model \\
    --epochs {training_args.num_train_epochs}
```

## Citation

If you use this model, please cite:

```bibtex
@misc{{lightonocr2024,
    title={{LightOnOCR: End-to-End Trainable OCR Model}},
    author={{LightOn AI}},
    year={{2024}},
    url={{https://huggingface.co/blog/lightonai/lightonocr}}
}}
```

## License

This model is released under the MIT license.

---

*Generated on {datetime.now().strftime("%Y-%m-%d")} using [UV Scripts](https://huggingface.co/uv-scripts)*
"""

    return content

def main():
    parser = argparse.ArgumentParser(
        description="Fine-tune LightOnOCR on OCR datasets",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )

    # Dataset arguments
    parser.add_argument(
        "--dataset-id",
        type=str,
        default="HuggingFaceM4/FineVision",
        help="HuggingFace dataset ID",
    )
    parser.add_argument(
        "--subset",
        type=str,
        default="iam",
        choices=["iam", "olmOCR-mix-0225-books", "olmOCR-mix-0225-documents"],
        help="Dataset subset to use (for FineVision)",
    )
    parser.add_argument(
        "--train-split",
        type=str,
        default="train[:85%]",
        help="Training split specification",
    )
    parser.add_argument(
        "--val-split",
        type=str,
        default="train[85%:95%]",
        help="Validation split specification",
    )
    parser.add_argument(
        "--test-split", type=str, default="train[95%:]", help="Test split specification"
    )

    # Model arguments
    parser.add_argument(
        "--model-id",
        type=str,
        default="lightonai/LightOnOCR-1B-1025",
        help="Base model ID",
    )
    parser.add_argument(
        "--freeze-vision", action="store_true", help="Freeze vision encoder"
    )
    parser.add_argument(
        "--freeze-language", action="store_true", help="Freeze language model"
    )
    parser.add_argument(
        "--freeze-projection",
        action="store_true",
        help="Freeze vision projection layer",
    )

    # Training arguments
    parser.add_argument(
        "--output-dir", type=str, required=True, help="Directory to save the model"
    )
    parser.add_argument(
        "--epochs", type=int, default=2, help="Number of training epochs"
    )
    parser.add_argument(
        "--batch-size", type=int, default=4, help="Training batch size per device"
    )
    parser.add_argument(
        "--gradient-accumulation",
        type=int,
        default=4,
        help="Gradient accumulation steps",
    )
    parser.add_argument(
        "--learning-rate", type=float, default=6e-5, help="Learning rate"
    )
    parser.add_argument(
        "--warmup-steps", type=int, default=10, help="Number of warmup steps"
    )
    parser.add_argument(
        "--eval-steps", type=int, default=50, help="Evaluation interval (in steps)"
    )
    parser.add_argument(
        "--save-steps",
        type=int,
        default=500,
        help="Save checkpoint interval (in steps)",
    )
    parser.add_argument(
        "--max-length", type=int, default=1024, help="Maximum sequence length"
    )
    parser.add_argument(
        "--longest-edge", type=int, default=700, help="Longest edge for image resizing"
    )

    # Evaluation arguments
    parser.add_argument(
        "--eval-samples", type=int, default=100, help="Number of samples for evaluation"
    )
    parser.add_argument(
        "--eval-batch-size", type=int, default=8, help="Batch size for evaluation"
    )
    parser.add_argument(
        "--skip-base-eval", action="store_true", help="Skip base model evaluation"
    )

    # Hub arguments
    parser.add_argument(
        "--hub-model-id", type=str, help="HuggingFace Hub model ID for pushing"
    )
    parser.add_argument(
        "--push-to-hub", action="store_true", help="Push model to HuggingFace Hub"
    )
    parser.add_argument("--hf-token", type=str, help="HuggingFace API token")
    parser.add_argument(
        "--private", action="store_true", help="Make the model private on Hub"
    )

    # Other arguments
    parser.add_argument(
        "--max-samples", type=int, help="Limit number of training samples (for testing)"
    )
    parser.add_argument("--seed", type=int, default=42, help="Random seed")

    args = parser.parse_args()

    # Check GPU availability
    if not torch.cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.info("To run on HF Jobs with GPU:")
        logger.info(
            f"hf jobs run --gpu l4x1 uv run {__file__} --dataset-id {args.dataset_id} --output-dir {args.output_dir}"
        )
        sys.exit(1)

    device = "cuda"
    logger.info(f"Using GPU: {torch.cuda.get_device_name(0)}")

    # Set environment variables for better performance
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
    torch.set_float32_matmul_precision("high")

    # Login to HuggingFace if needed
    if args.push_to_hub:
        token = args.hf_token or os.environ.get("HF_TOKEN")
        if token:
            login(token=token)
        else:
            logger.error("HF_TOKEN required for push_to_hub")
            sys.exit(1)

    # Load dataset
    logger.info(f"Loading dataset: {args.dataset_id}/{args.subset}")
    train_ds = load_dataset(args.dataset_id, args.subset, split=args.train_split)
    val_ds = load_dataset(args.dataset_id, args.subset, split=args.val_split)
    test_ds = load_dataset(args.dataset_id, args.subset, split=args.test_split)

    # Limit samples if requested
    if args.max_samples:
        train_ds = train_ds.select(range(min(args.max_samples, len(train_ds))))
        logger.info(f"Limited training to {len(train_ds)} samples")

    logger.info(
        f"Dataset sizes - Train: {len(train_ds)}, Val: {len(val_ds)}, Test: {len(test_ds)}"
    )

    # Load processor
    logger.info(f"Loading processor from {args.model_id}")
    processor = AutoProcessor.from_pretrained(args.model_id)
    processor.tokenizer.padding_side = "left"

    # Load model
    logger.info(f"Loading model from {args.model_id}")
    model = LightOnOCRForConditionalGeneration.from_pretrained(
        args.model_id,
        torch_dtype=torch.bfloat16,
        attn_implementation="sdpa",
        device_map="auto",
    ).to(device)

    # Freeze components as requested
    freeze_config = {
        "vision_encoder": args.freeze_vision,
        "language_model": args.freeze_language,
        "vision_projection": args.freeze_projection,
    }

    if args.freeze_vision:
        for param in model.model.vision_encoder.parameters():
            param.requires_grad = False
        logger.info("Vision encoder frozen")

    if args.freeze_language:
        for param in model.model.language_model.parameters():
            param.requires_grad = False
        logger.info("Language model frozen")

    if args.freeze_projection:
        for param in model.model.vision_projection.parameters():
            param.requires_grad = False
        logger.info("Vision projection frozen")

    # Count trainable parameters
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    logger.info(f"Total parameters: {total_params:,}")
    logger.info(
        f"Trainable parameters: {trainable_params:,} ({100 * trainable_params / total_params:.2f}%)"
    )

    # Evaluate base model
    base_metrics = {
        "cer": 0.0,
        "wer": 0.0,
        "perfect_matches": 0,
        "total_samples": args.eval_samples,
    }
    if not args.skip_base_eval:
        logger.info("\n" + "=" * 80)
        logger.info("EVALUATING BASE MODEL")
        logger.info("=" * 80)
        base_metrics = evaluate_model(
            model,
            processor,
            test_ds,
            num_samples=args.eval_samples,
            batch_size=args.eval_batch_size,
            device=device,
            description="Base model",
        )
        torch.cuda.empty_cache()

    # Prepare data collator
    data_collator = OCRDataCollator(
        processor, max_length=args.max_length, longest_edge=args.longest_edge
    )

    # Setup training arguments
    training_args = TrainingArguments(
        output_dir=args.output_dir,
        num_train_epochs=args.epochs,
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=args.gradient_accumulation,
        learning_rate=args.learning_rate,
        weight_decay=0.0,
        logging_steps=50,
        eval_strategy="steps",
        eval_steps=args.eval_steps,
        save_strategy="steps",
        save_steps=args.save_steps,
        save_total_limit=2,
        load_best_model_at_end=True,
        metric_for_best_model="eval_loss",
        bf16=True,
        fp16=False,
        remove_unused_columns=False,
        dataloader_pin_memory=False,
        gradient_checkpointing=True,
        optim="adamw_torch_fused" if torch.cuda.is_available() else "adamw_torch",
        warmup_steps=args.warmup_steps,
        lr_scheduler_type="linear",
        push_to_hub=args.push_to_hub,
        hub_model_id=args.hub_model_id,
        hub_private_repo=args.private,
    )

    # Use smaller validation set for faster evaluation
    val_ds_small = val_ds.select(range(min(100, len(val_ds))))

    # Create trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_ds,
        eval_dataset=val_ds_small,
        data_collator=data_collator,
    )

    # Train
    logger.info("\n" + "=" * 80)
    logger.info("STARTING TRAINING")
    logger.info("=" * 80)
    logger.info(f"Training samples: {len(train_ds)}")
    logger.info(f"Validation samples: {len(val_ds_small)}")
    logger.info(f"Effective batch size: {args.batch_size * args.gradient_accumulation}")

    trainer.train()

    # Save model
    logger.info("Saving model and processor...")
    trainer.save_model(args.output_dir)
    processor.save_pretrained(args.output_dir)

    # Evaluate fine-tuned model
    logger.info("\n" + "=" * 80)
    logger.info("EVALUATING FINE-TUNED MODEL")
    logger.info("=" * 80)
    finetuned_metrics = evaluate_model(
        model,
        processor,
        test_ds,
        num_samples=args.eval_samples,
        batch_size=args.eval_batch_size,
        device=device,
        description="Fine-tuned model",
    )

    # Show comparison
    if not args.skip_base_eval:
        logger.info("\n" + "=" * 80)
        logger.info("PERFORMANCE COMPARISON")
        logger.info("=" * 80)
        logger.info(
            f"{'Metric':<20} {'Base':<12} {'Fine-tuned':<12} {'Improvement':<12}"
        )
        logger.info("-" * 56)
        logger.info(
            f"{'CER (%)':<20} {base_metrics['cer']:<12.2f} {finetuned_metrics['cer']:<12.2f} {base_metrics['cer'] - finetuned_metrics['cer']:+.2f}"
        )
        logger.info(
            f"{'WER (%)':<20} {base_metrics['wer']:<12.2f} {finetuned_metrics['wer']:<12.2f} {base_metrics['wer'] - finetuned_metrics['wer']:+.2f}"
        )
        logger.info(
            f"{'Perfect Matches':<20} {base_metrics['perfect_matches']:<12} {finetuned_metrics['perfect_matches']:<12} {finetuned_metrics['perfect_matches'] - base_metrics['perfect_matches']:+d}"
        )
        logger.info("=" * 80)

    # Create and save model card
    if args.hub_model_id or args.push_to_hub:
        model_id = args.hub_model_id or f"{args.output_dir.split('/')[-1]}"
        logger.info("Creating model card with metrics...")

        model_card_content = create_model_card_content(
            model_id=model_id,
            dataset_id=args.dataset_id,
            subset=args.subset,
            base_metrics=base_metrics,
            finetuned_metrics=finetuned_metrics,
            training_args=training_args,
            freeze_config=freeze_config,
            num_train_samples=len(train_ds),
        )

        # Save model card
        model_card_path = Path(args.output_dir) / "README.md"
        model_card_path.write_text(model_card_content)
        logger.info(f"Model card saved to {model_card_path}")

        if args.push_to_hub:
            logger.info(f"Pushing model to Hub: {args.hub_model_id}")
            trainer.push_to_hub()
            logger.info(
                f"✅ Model available at: https://huggingface.co/{args.hub_model_id}"
            )

    logger.info("\n✅ Training complete!")
    logger.info(f"Model saved to: {args.output_dir}")

    # Print example command for inference
    logger.info("\n" + "=" * 80)
    logger.info("To use the fine-tuned model:")
    logger.info("=" * 80)
    logger.info(f"""
from transformers import AutoProcessor, LightOnOCRForConditionalGeneration
from PIL import Image

model = LightOnOCRForConditionalGeneration.from_pretrained("{args.output_dir}")
processor = AutoProcessor.from_pretrained("{args.output_dir}")
# ... rest of inference code
""")


if __name__ == "__main__":
    if len(sys.argv) == 1:
        print("LightOnOCR Fine-tuning Script\n")
        print("Examples:")
        print("  # Basic fine-tuning:")
        print(
            "  uv run lightonocr-finetune.py --dataset-id HuggingFaceM4/FineVision --subset iam --output-dir ./model\n"
        )
        print("  # With frozen components:")
        print(
            "  uv run lightonocr-finetune.py --freeze-language --output-dir ./model\n"
        )
        print("  # Push to Hub:")
        print(
            "  uv run lightonocr-finetune.py --hub-model-id username/model --push-to-hub --output-dir ./model\n"
        )
        print("  # Run on HF Jobs:")
        print(
            "  hf jobs run --gpu l4x1 uv run lightonocr-finetune.py --output-dir ./model"
        )
        sys.exit(0)

    main()