Upload vlm-streaming-sft-unsloth-qwen.py with huggingface_hub
vlm-streaming-sft-unsloth-qwen.py
CHANGED
@@ -13,35 +13,34 @@
 Fine-tune Vision Language Models using Unsloth optimizations.
 
 Uses Unsloth for ~60% less VRAM and 2x faster training.
-Supports
+Supports epoch-based or step-based training with optional eval split.
 
-
+Epoch-based training (recommended for full datasets):
 uv run vlm-streaming-sft-unsloth-qwen.py \
-    --
-    --num-samples 500 \
+    --num-epochs 1 \
     --eval-split 0.2 \
-    --output-repo your-username/vlm-
+    --output-repo your-username/vlm-finetuned
 
-Run on HF Jobs:
-hf jobs uv run --flavor a100-large --secrets HF_TOKEN -- \
+Run on HF Jobs (1 epoch with eval):
+hf jobs uv run --flavor a100-large --secrets HF_TOKEN --timeout 4h -- \
     https://huggingface.co/datasets/uv-scripts/training/raw/main/vlm-streaming-sft-unsloth-qwen.py \
-    --
-    --num-samples 500 \
+    --num-epochs 1 \
     --eval-split 0.2 \
+    --trackio-space your-username/trackio \
     --output-repo your-username/vlm-finetuned
 
-
+Step-based training (for streaming or quick tests):
 uv run vlm-streaming-sft-unsloth-qwen.py \
     --streaming \
     --max-steps 500 \
     --output-repo your-username/vlm-finetuned
 
-
+Quick test with limited samples:
 uv run vlm-streaming-sft-unsloth-qwen.py \
-    --
+    --num-samples 500 \
+    --num-epochs 2 \
     --eval-split 0.2 \
-    --output-repo your-username/vlm-
-    --trackio-space your-username/trackio
+    --output-repo your-username/vlm-test
 """
 
 import argparse
@@ -115,11 +114,17 @@ Examples:
     )
 
     # Training config
+    parser.add_argument(
+        "--num-epochs",
+        type=float,
+        default=None,
+        help="Number of epochs (default: None). Use instead of --max-steps for non-streaming mode.",
+    )
     parser.add_argument(
         "--max-steps",
         type=int,
-        default=
-        help="Training steps (default:
+        default=None,
+        help="Training steps (default: None). Required for streaming mode, optional otherwise.",
     )
     parser.add_argument(
         "--batch-size",
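
For reference, both duration flags default to None so the rest of the script can tell "not specified" apart from an explicit value. A minimal argparse sketch of just these flags (a stand-in for the script's full parser; --streaming is assumed here to be a plain boolean switch):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--streaming", action="store_true")
    parser.add_argument("--num-epochs", type=float, default=None)
    parser.add_argument("--max-steps", type=int, default=None)

    # Both flags stay None unless given, so downstream code can distinguish
    # "not specified" from an explicit value.
    args = parser.parse_args(["--num-epochs", "1"])
    print(args.num_epochs, args.max_steps)  # 1.0 None -> epoch-based run
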
@@ -204,6 +209,25 @@ Examples:
 def main():
     args = parse_args()
 
+    # Validate epochs/steps configuration
+    if args.streaming and args.num_epochs:
+        logger.error(
+            "Cannot use --num-epochs with --streaming. Use --max-steps instead."
+        )
+        sys.exit(1)
+    if args.streaming and not args.max_steps:
+        args.max_steps = 500  # Default for streaming
+        logger.info("Using default --max-steps=500 for streaming mode")
+    if not args.streaming and not args.num_epochs and not args.max_steps:
+        args.num_epochs = 1  # Default to 1 epoch for non-streaming
+        logger.info("Using default --num-epochs=1 for non-streaming mode")
+
+    # Determine training duration display
+    if args.num_epochs:
+        duration_str = f"{args.num_epochs} epoch(s)"
+    else:
+        duration_str = f"{args.max_steps} steps"
+
     print("=" * 70)
     print("VLM Fine-tuning with Unsloth")
     print("=" * 70)
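
The resolution rules added above boil down to a small precedence table: streaming runs must be step-bounded (a streaming dataset has no known length, so epochs are rejected), and non-streaming runs fall back to one epoch when nothing is specified. A standalone restatement of that logic, illustrative only:

    def resolve_duration(streaming, num_epochs=None, max_steps=None):
        # Streaming datasets have no known length, so epoch-based training is rejected.
        if streaming and num_epochs:
            raise SystemExit("Cannot use --num-epochs with --streaming. Use --max-steps instead.")
        if streaming and not max_steps:
            max_steps = 500  # default step budget for streaming
        if not streaming and not num_epochs and not max_steps:
            num_epochs = 1  # default to a single epoch
        return num_epochs, max_steps

    assert resolve_duration(streaming=True) == (None, 500)
    assert resolve_duration(streaming=False) == (1, None)
    assert resolve_duration(streaming=False, max_steps=250) == (None, 250)
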
@@ -218,7 +242,7 @@ def main():
         f" Eval split: {args.eval_split if args.eval_split > 0 else '(disabled)'}"
     )
     print(f" Seed: {args.seed}")
-    print(f"
+    print(f" Training: {duration_str}")
     print(
         f" Batch size: {args.batch_size} x {args.gradient_accumulation} = {args.batch_size * args.gradient_accumulation}"
     )
@@ -357,17 +381,30 @@ def main():
     # Enable training mode
     FastVisionModel.for_training(model)
 
+    # Calculate steps per epoch for logging/eval intervals
+    effective_batch = args.batch_size * args.gradient_accumulation
+    steps_per_epoch = len(train_data) // effective_batch
+
+    # Determine run name and logging steps
+    if args.num_epochs:
+        run_name = f"vlm-sft-{args.num_epochs}ep"
+        logging_steps = max(1, steps_per_epoch // 10)  # ~10 logs per epoch
+    else:
+        run_name = f"vlm-sft-{args.max_steps}steps"
+        logging_steps = max(1, args.max_steps // 20)
+
     training_config = SFTConfig(
         output_dir=args.save_local,
         per_device_train_batch_size=args.batch_size,
         gradient_accumulation_steps=args.gradient_accumulation,
         warmup_steps=5,  # Per notebook (not warmup_ratio)
-
+        num_train_epochs=args.num_epochs if args.num_epochs else 1,
+        max_steps=args.max_steps if args.max_steps else -1,  # -1 means use epochs
         learning_rate=args.learning_rate,
-        logging_steps=
+        logging_steps=logging_steps,
         optim="adamw_8bit",  # Per notebook
         weight_decay=0.001,
-        lr_scheduler_type="
+        lr_scheduler_type="cosine" if args.num_epochs else "linear",
         seed=args.seed,
         # VLM-specific settings (required for Unsloth)
         remove_unused_columns=False,
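
SFTConfig extends transformers' TrainingArguments, where max_steps defaults to -1 and any positive value overrides num_train_epochs; passing -1 in the epoch-based branch is what lets the epoch count take effect. The interval arithmetic above works out as in this sketch (the sample count and flag values are hypothetical):

    # Hypothetical run: 2,000 training samples, --batch-size 2, --gradient-accumulation 4
    num_samples = 2000
    batch_size, grad_accum = 2, 4

    effective_batch = batch_size * grad_accum              # 8
    steps_per_epoch = num_samples // effective_batch       # 250
    logging_steps_epochs = max(1, steps_per_epoch // 10)   # 25 -> roughly 10 logs per epoch

    max_steps = 500
    logging_steps_steps = max(1, max_steps // 20)          # 25 -> roughly 20 logs per run
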
@@ -376,14 +413,19 @@ def main():
         max_length=args.max_seq_length,
         # Logging
         report_to="trackio",
-        run_name=
+        run_name=run_name,
     )
 
     # Add evaluation config if eval is enabled
     if eval_data:
-
-
-
+        if args.num_epochs:
+            # For epoch-based training, eval at end of each epoch
+            training_config.eval_strategy = "epoch"
+            print(" Evaluation enabled: every epoch")
+        else:
+            training_config.eval_strategy = "steps"
+            training_config.eval_steps = max(1, args.max_steps // 5)
+            print(f" Evaluation enabled: every {training_config.eval_steps} steps")
 
     # Use older 'tokenizer=' parameter (not processing_class) - required for Unsloth VLM
     trainer = SFTTrainer(
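
With eval enabled in step-based mode, the eval interval is a fifth of the step budget; a quick worked example (flag value hypothetical):

    # --max-steps 500 with --eval-split > 0
    max_steps = 500
    eval_steps = max(1, max_steps // 5)  # 100 -> evaluation runs every 100 steps, 5 times total

In epoch-based mode the strategy is simply "epoch", so evaluation runs once at each epoch boundary.
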
@@ -396,14 +438,21 @@ def main():
     )
 
     # 4. Train
-    print(f"\n[4/5] Training for {
+    print(f"\n[4/5] Training for {duration_str}...")
+    if args.num_epochs:
+        print(
+            f" (~{steps_per_epoch} steps/epoch, {int(steps_per_epoch * args.num_epochs)} total steps)"
+        )
     start = time.time()
 
     train_result = trainer.train()
 
     train_time = time.time() - start
+    total_steps = train_result.metrics.get(
+        "train_steps", args.max_steps or steps_per_epoch * args.num_epochs
+    )
     print(f"\nTraining completed in {train_time / 60:.1f} minutes")
-    print(f" Speed: {
+    print(f" Speed: {total_steps / train_time:.2f} steps/s")
 
     # Print training metrics
     if train_result.metrics:
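
For context, trainer.train() in transformers returns a TrainOutput named tuple (global_step, training_loss, metrics), and the metrics dict typically carries timing keys such as train_runtime and train_steps_per_second. A minimal sketch of reading a step count and speed from that object, under those assumptions:

    def report_speed(train_result, elapsed_seconds):
        # global_step is the number of optimizer steps actually performed,
        # whether the run was bounded by epochs or by max_steps.
        steps = train_result.global_step
        print(f"Speed: {steps / elapsed_seconds:.2f} steps/s")
        print(f"Runtime reported by the trainer: {train_result.metrics.get('train_runtime')}s")
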
@@ -457,25 +506,25 @@ if __name__ == "__main__":
     print("\nFeatures:")
     print(" - ~60% less VRAM with Unsloth optimizations")
     print(" - 2x faster training vs standard methods")
+    print(" - Epoch-based or step-based training")
     print(" - Optional evaluation to detect overfitting")
     print(" - Trackio integration for monitoring")
-    print("
-    print("\nExample usage (with evaluation):")
+    print("\nEpoch-based training (recommended for full datasets):")
     print("\n uv run vlm-streaming-sft-unsloth-qwen.py \\")
-    print(" --
-    print(" --num-samples 500 \\")
+    print(" --num-epochs 1 \\")
     print(" --eval-split 0.2 \\")
     print(" --output-repo your-username/vlm-finetuned")
-    print("\nHF Jobs example:")
-    print(
+    print("\nHF Jobs example (1 epoch with eval):")
+    print(
+        "\n hf jobs uv run --flavor a100-large --secrets HF_TOKEN --timeout 4h -- \\"
+    )
     print(
         " https://huggingface.co/datasets/uv-scripts/training/raw/main/vlm-streaming-sft-unsloth-qwen.py \\"
     )
-    print(" --
-    print(" --num-samples 500 \\")
+    print(" --num-epochs 1 \\")
     print(" --eval-split 0.2 \\")
     print(" --output-repo your-username/vlm-finetuned")
-    print("\
+    print("\nStep-based training (for streaming or quick tests):")
     print("\n uv run vlm-streaming-sft-unsloth-qwen.py \\")
     print(" --streaming \\")
     print(" --max-steps 500 \\")