EuuIia committed on
Commit 46bb424 · verified · 1 Parent(s): 799e694

Update inference_cli.py

Files changed (1):
  1. inference_cli.py +79 -360
inference_cli.py CHANGED
@@ -1,6 +1,7 @@
 #!/usr/bin/env python3
 """
 Standalone SeedVR2 Video Upscaler CLI Script
 """

 import sys
@@ -8,25 +9,26 @@ import os
 import argparse
 import time
 import multiprocessing as mp
-# Ensure safe CUDA usage with multiprocessing
 if mp.get_start_method(allow_none=True) != 'spawn':
     mp.set_start_method('spawn', force=True)
 # -------------------------------------------------------------
-# 1) VRAM management (cudaMallocAsync) already in place
 os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "backend:cudaMallocAsync")

-# 2) Pre-parse the command line to retrieve --cuda_device
 _pre_parser = argparse.ArgumentParser(add_help=False)
 _pre_parser.add_argument("--cuda_device", type=str, default=None)
 _pre_args, _ = _pre_parser.parse_known_args()
 if _pre_args.cuda_device is not None:
     device_list_env = [x.strip() for x in _pre_args.cuda_device.split(',') if x.strip() != '']
     if len(device_list_env) == 1:
-        # Single GPU: restrict visibility now
         os.environ["CUDA_VISIBLE_DEVICES"] = device_list_env[0]

 # -------------------------------------------------------------
-# 3) Heavy imports (torch, etc.) after the env configuration
 import torch
 import cv2
 import numpy as np
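
For context on the setup steps above: the 'spawn' start method is forced because a forked worker would inherit the parent's already-initialized CUDA context and typically crash. A minimal standalone sketch of that pattern, assuming only that PyTorch is installed (not part of the commit):

import multiprocessing as mp

def probe(idx):
    # torch is imported inside the child; with 'spawn' the child starts a
    # fresh interpreter and can initialize CUDA safely on its own.
    import torch
    print(idx, torch.cuda.is_available())

if __name__ == "__main__":
    mp.set_start_method("spawn", force=True)
    p = mp.Process(target=probe, args=(0,))
    p.start()
    p.join()
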
@@ -34,7 +36,7 @@ from datetime import datetime
 from pathlib import Path
 from src.utils.downloads import download_weight

-# Add project root to sys.path for src module imports
 script_dir = os.path.dirname(os.path.abspath(__file__))
 if script_dir not in sys.path:
     sys.path.insert(0, script_dir)
@@ -44,16 +46,7 @@ if root_dir not in sys.path:

 def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load_cap=None):
     """
-    Extract frames from video and convert to tensor format
-
-    Args:
-        video_path (str): Path to input video
-        debug (bool): Enable debug logging
-        skip_first_frames (int): Number of frames to skip at the start of extraction
-        load_cap (int): Maximum number of frames to load (None for all)
-
-    Returns:
-        torch.Tensor: Frames tensor in format [T, H, W, C] (Float16, normalized 0-1)
     """
     if debug:
         print(f"🎬 Extracting frames from video: {video_path}")
@@ -61,12 +54,10 @@ def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load
     if not os.path.exists(video_path):
         raise FileNotFoundError(f"Video file not found: {video_path}")

-    # Open video
     cap = cv2.VideoCapture(video_path)
     if not cap.isOpened():
         raise ValueError(f"Cannot open video file: {video_path}")

-    # Get video properties
     fps = cap.get(cv2.CAP_PROP_FPS)
     frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
@@ -88,23 +79,14 @@ def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load
         if not ret:
             break

-        # Skip first frame if requested
         if frame_idx < skip_first_frames:
             frame_idx += 1
-            if debug:
-                print(f"⏭️ Skipped first frame")
             continue

-        # Check load cap
         if load_cap is not None and load_cap > 0 and frames_loaded >= load_cap:
-            if debug:
-                print(f"🔢 Reached load cap of {load_cap} frames")
             break

-        # Convert BGR to RGB
         frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-
-        # Convert to float32 and normalize to 0-1
         frame = frame.astype(np.float32) / 255.0

         frames.append(frame)
@@ -123,7 +105,6 @@ def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load
     if debug:
         print(f"✅ Extracted {len(frames)} frames")

-    # Convert to tensor [T, H, W, C] and cast to Float16 for ComfyUI compatibility
     frames_tensor = torch.from_numpy(np.stack(frames)).to(torch.float16)

     if debug:
@@ -134,37 +115,24 @@ def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load

 def save_frames_to_video(frames_tensor, output_path, fps=30.0, debug=False):
     """
-    Save frames tensor to video file
-
-    Args:
-        frames_tensor (torch.Tensor): Frames in format [T, H, W, C] (Float16, 0-1)
-        output_path (str): Output video path
-        fps (float): Output video FPS
-        debug (bool): Enable debug logging
     """
     if debug:
         print(f"🎬 Saving {frames_tensor.shape[0]} frames to video: {output_path}")

-    # Ensure output directory exists
     os.makedirs(os.path.dirname(output_path), exist_ok=True)

-    # Convert tensor to numpy and denormalize
-    frames_np = frames_tensor.cpu().numpy()
-    frames_np = (frames_np * 255.0).astype(np.uint8)

-    # Get video properties
     T, H, W, C = frames_np.shape

-    # Initialize video writer
     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
     out = cv2.VideoWriter(output_path, fourcc, fps, (W, H))

     if not out.isOpened():
         raise ValueError(f"Cannot create video writer for: {output_path}")

-    # Write frames
     for i, frame in enumerate(frames_np):
-        # Convert RGB to BGR for OpenCV
         frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
         out.write(frame_bgr)

@@ -176,43 +144,10 @@ def save_frames_to_video(frames_tensor, output_path, fps=30.0, debug=False):
     if debug:
         print(f"✅ Video saved successfully: {output_path}")

-
-def save_frames_to_png(frames_tensor, output_dir, base_name, debug=False):
     """
-    Save frames tensor as sequential PNG images.
-
-    Args:
-        frames_tensor (torch.Tensor): Frames in format [T, H, W, C] (Float16, 0-1)
-        output_dir (str): Directory to save PNGs
-        base_name (str): Base name for output files (without extension)
-        debug (bool): Enable debug logging
     """
-    if debug:
-        print(f"🖼️ Saving {frames_tensor.shape[0]} frames as PNGs to directory: {output_dir}")
-
-    # Ensure output directory exists
-    os.makedirs(output_dir, exist_ok=True)
-
-    # Convert to numpy uint8 RGB
-    frames_np = (frames_tensor.cpu().numpy() * 255.0).astype(np.uint8)
-    total = frames_np.shape[0]
-    digits = max(5, len(str(total)))  # at least 5 digits
-
-    for idx, frame in enumerate(frames_np):
-        filename = f"{base_name}_{idx:0{digits}d}.png"
-        file_path = os.path.join(output_dir, filename)
-        # Convert RGB to BGR for cv2
-        frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
-        cv2.imwrite(file_path, frame_bgr)
-        if debug and (idx + 1) % 100 == 0:
-            print(f"💾 Saved {idx + 1}/{total} PNGs")
-
-    if debug:
-        print(f"✅ PNG saving completed: {total} files in '{output_dir}'")
-
-
-def _worker_process(proc_idx, device_id, frames_np, shared_args, return_queue, progress_queue=None):  # Added progress_queue
-    """Worker that runs the upscaling on a dedicated GPU."""
     os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)
     os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "backend:cudaMallocAsync")

@@ -222,73 +157,37 @@ def _worker_process(proc_idx, device_id, frames_np, shared_args, return_queue, p

     frames_tensor = torch.from_numpy(frames_np).to(torch.float16)

-    # Create a local callback function that sends progress to the queue
     local_progress_callback = None
     if progress_queue:
         def callback_wrapper(batch_idx, total_batches, current_frames, message):
-            progress_queue.put((batch_idx, total_batches, message))
         local_progress_callback = callback_wrapper

     runner = configure_runner(shared_args["model"], shared_args["model_dir"], shared_args["preserve_vram"], shared_args["debug"])

     result_tensor = generation_loop(
         runner=runner, images=frames_tensor, cfg_scale=shared_args["cfg_scale"],
         seed=shared_args["seed"], res_w=shared_args["res_w"], batch_size=shared_args["batch_size"],
         preserve_vram=shared_args["preserve_vram"], temporal_overlap=shared_args["temporal_overlap"],
         debug=shared_args["debug"],
-        progress_callback=local_progress_callback  # Pass the callback on to generation_loop
     )

     return_queue.put((proc_idx, result_tensor.cpu().numpy()))
-
-
-def _worker_process1(proc_idx, device_id, frames_np, shared_args, return_queue):
-    """Worker process that performs upscaling on a slice of frames using a dedicated GPU."""
-    # 1. Limit CUDA visibility to the chosen GPU BEFORE importing torch-heavy deps
-    os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)
-    # Keep same cudaMallocAsync setting
-    os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "backend:cudaMallocAsync")
-
-    import torch  # local import inside subprocess
-    from src.core.model_manager import configure_runner
-    from src.core.generation import generation_loop
-
-    # Reconstruct frames tensor
-    frames_tensor = torch.from_numpy(frames_np).to(torch.float16)
-
-    # Prepare runner
-    model_dir = shared_args["model_dir"]
-    model_name = shared_args["model"]
-    # ensure model weights present (each process checks but very fast if already downloaded)
-    if shared_args["debug"]:
-        print(f"🔄 Configuring runner for device {device_id}")
-    runner = configure_runner(model_name, model_dir, shared_args["preserve_vram"], shared_args["debug"])
-
-    # Run generation
-    result_tensor = generation_loop(
-        runner=runner,
-        images=frames_tensor,
-        cfg_scale=shared_args["cfg_scale"],
-        seed=shared_args["seed"],
-        res_w=shared_args["res_w"],
-        batch_size=shared_args["batch_size"],
-        preserve_vram=shared_args["preserve_vram"],
-        temporal_overlap=shared_args["temporal_overlap"],
-        debug=shared_args["debug"],
-    )
-
-    # Send back result as numpy array to avoid CUDA transfers
-    return_queue.put((proc_idx, result_tensor.cpu().numpy()))
-
-
-def _gpu_processing(frames_tensor, device_list, args, progress_callback=None):  # Added progress_callback
-    """Split the frames and process them in parallel across multiple GPUs."""
     num_devices = len(device_list)
     chunks = torch.chunk(frames_tensor, num_devices, dim=0)

     manager = mp.Manager()
     return_queue = manager.Queue()
-    progress_queue = manager.Queue() if progress_callback else None  # Create the progress queue
     workers = []

     shared_args = {
@@ -303,224 +202,66 @@ def _gpu_processing(frames_tensor, device_list, args, progress_callback=None): #
         workers.append(p)

     results_np = [None] * num_devices
-    collected = 0
-    total_batches_per_worker = -1  # Used to compute the overall progress
-    while collected < num_devices:
-        # Check both queues (result and progress) without blocking
-        if progress_queue and not progress_queue.empty():
-            batch_idx, total_batches, message = progress_queue.get()
-            if total_batches_per_worker == -1: total_batches_per_worker = total_batches
-            total_progress = (collected + (batch_idx / total_batches_per_worker)) / num_devices
-            progress_callback(total_progress, desc=f"GPU {collected+1}/{num_devices}: {message}")
-
         if not return_queue.empty():
             proc_idx, res_np = return_queue.get()
             results_np[proc_idx] = res_np
-            collected += 1
-
-        time.sleep(0.1)  # Avoid busy-waiting

     for p in workers: p.join()

     return torch.from_numpy(np.concatenate(results_np, axis=0)).to(torch.float16)
-
-
-def _gpu_processing1(frames_tensor, device_list, args):
-    """Split frames and process them in parallel on multiple GPUs."""
-    num_devices = len(device_list)
-    # split frames tensor along time dimension
-    chunks = torch.chunk(frames_tensor, num_devices, dim=0)
-
-    manager = mp.Manager()
-    return_queue = manager.Queue()
-    workers = []
-
-    shared_args = {
-        "model": args.model,
-        "model_dir": args.model_dir if args.model_dir is not None else "./models/SEEDVR2",
-        "preserve_vram": args.preserve_vram,
-        "debug": args.debug,
-        "cfg_scale": 1.0,
-        "seed": args.seed,
-        "res_w": args.resolution,
-        "batch_size": args.batch_size,
-        "temporal_overlap": 0,
-    }
-
-    for idx, (device_id, chunk_tensor) in enumerate(zip(device_list, chunks)):
-        p = mp.Process(
-            target=_worker_process,
-            args=(idx, device_id, chunk_tensor.cpu().numpy(), shared_args, return_queue),
-        )
-        p.start()
-        workers.append(p)
-
-    results_np = [None] * num_devices
-    collected = 0
-    while collected < num_devices:
-        proc_idx, res_np = return_queue.get()
-        results_np[proc_idx] = res_np
-        collected += 1
-
-    for p in workers:
-        p.join()
-
-    # Concatenate results in original order
-    result_tensor = torch.from_numpy(np.concatenate(results_np, axis=0)).to(torch.float16)
-    return result_tensor
-
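
The deleted `_gpu_processing1` above splits the frame tensor along the time dimension with `torch.chunk`, which yields chunks of size ceil(T / n), so the last chunk can be smaller. A toy sketch (not from the commit) showing the resulting split sizes:

import torch

frames = torch.zeros(10, 4, 4, 3)       # 10 toy frames, [T, H, W, C]
chunks = torch.chunk(frames, 3, dim=0)  # chunk sizes: 4, 4, 2
print([c.shape[0] for c in chunks])
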
 
 def parse_arguments():
-    """Parse command line arguments"""
     parser = argparse.ArgumentParser(description="SeedVR2 Video Upscaler CLI")

-    parser.add_argument("--video_path", type=str, required=True,
-                        help="Path to input video file")
-    parser.add_argument("--seed", type=int, default=100,
-                        help="Random seed for generation (default: 100)")
-    parser.add_argument("--resolution", type=int, default=1072,
-                        help="Target resolution of the short side (default: 1072)")
-    parser.add_argument("--batch_size", type=int, default=1,
-                        help="Number of frames per batch (default: 5)")
-    parser.add_argument("--model", type=str, default="seedvr2_ema_3b_fp8_e4m3fn.safetensors",
-                        choices=[
-                            "seedvr2_ema_3b_fp16.safetensors",
-                            "seedvr2_ema_3b_fp8_e4m3fn.safetensors",
-                            "seedvr2_ema_7b_fp16.safetensors",
-                            "seedvr2_ema_7b_fp8_e4m3fn.safetensors"
-                        ],
-                        help="Model to use (default: 3B FP8)")
-    parser.add_argument("--model_dir", type=str, default="seedvr2_models",
-                        help="Directory containing the model files (default: use cache directory)")
-    parser.add_argument("--skip_first_frames", type=int, default=0,
-                        help="Skip the first frames during processing")
-    parser.add_argument("--load_cap", type=int, default=0,
-                        help="Maximum number of frames to load from video (default: load all)")
-    parser.add_argument("--output", type=str, default=None,
-                        help="Output path (default: auto-generated; if output_format is png, it will be a directory)")
-    parser.add_argument("--output_format", type=str, default="video", choices=["video", "png"],
-                        help="Output format: 'video' (mp4) or 'png' images (default: video)")
-    parser.add_argument("--preserve_vram", action="store_true",
-                        help="Enable VRAM preservation mode")
-    parser.add_argument("--debug", action="store_true",
-                        help="Enable debug logging")
-    parser.add_argument("--cuda_device", type=str, default=None,
-                        help="CUDA device id(s). Single id (e.g., '0') or comma-separated list '0,1' for multi-GPU")

     return parser.parse_args()

 
413
-
414
- def main():
415
- """Main CLI function"""
416
- print(f"🚀 SeedVR2 Video Upscaler CLI started at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
417
-
418
- # Parse arguments
419
- args = parse_arguments()
420
-
421
- if args.debug:
422
- print(f"📋 Arguments:")
423
- for key, value in vars(args).items():
424
- print(f" {key}: {value}")
425
-
426
- if args.debug:
427
- # Show actual CUDA device visibility
428
- print(f"🖥️ CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES', 'Not set (all)')}")
429
- if torch.cuda.is_available():
430
- print(f"🖥️ torch.cuda.device_count(): {torch.cuda.device_count()}")
431
- print(f"🖥️ Using device index 0 inside script (mapped to selected GPU)")
432
-
433
- try:
434
- # Ensure --output is a directory when using PNG format
435
- if args.output_format == "png":
436
- output_path_obj = Path(args.output)
437
- if output_path_obj.suffix: # an extension is present, strip it
438
- args.output = str(output_path_obj.with_suffix(''))
439
-
440
- if args.debug:
441
- print(f"📁 Output will be saved to: {args.output}")
442
-
443
- # Extract frames from video
444
- print(f"🎬 Extracting frames from video...")
445
- start_time = time.time()
446
- frames_tensor, original_fps = extract_frames_from_video(
447
- args.video_path,
448
- args.debug,
449
- args.skip_first_frames,
450
- args.load_cap
451
- )
452
-
453
- if args.debug:
454
- print(f"🔄 Frame extraction time: {time.time() - start_time:.2f}s")
455
- # print(f"📊 Initial VRAM: {torch.cuda.memory_allocated() / 1024**3:.2f}GB") # may initialize cuda
456
-
457
- # Parse GPU list
458
- device_list = [d.strip() for d in str(args.cuda_device).split(',') if d.strip()] if args.cuda_device else ["0"]
459
- if args.debug:
460
- print(f"🚀 Using devices: {device_list}")
461
- processing_start = time.time()
462
- download_weight(args.model, args.model_dir)
463
- result = _gpu_processing(frames_tensor, device_list, args)
464
- generation_time = time.time() - processing_start
465
-
466
- if args.debug:
467
- print(f"🔄 Generation time: {generation_time:.2f}s")
468
- print(f"📊 Peak VRAM usage: {torch.cuda.max_memory_allocated() / 1024**3:.2f}GB")
469
- print(f"📊 Result shape: {result.shape}, dtype: {result.dtype}")
470
-
471
- # After generation_time calculation, choose saving method
472
- if args.output_format == "png":
473
- # Ensure output treated as directory
474
- output_dir = args.output
475
- base_name = Path(args.video_path).stem + "_upscaled"
476
- if args.debug:
477
- print(f"🖼️ Saving PNG frames to directory: {output_dir}")
478
- save_start = time.time()
479
- save_frames_to_png(result, output_dir, base_name, args.debug)
480
- if args.debug:
481
- print(f"🔄 Save time: {time.time() - save_start:.2f}s")
482
- else:
483
- # Save video
484
- if args.debug:
485
- print(f"💾 Saving upscaled video to: {args.output}")
486
- save_start = time.time()
487
- save_frames_to_video(result, args.output, original_fps, args.debug)
488
- if args.debug:
489
- print(f"🔄 Save time: {time.time() - save_start:.2f}s")
490
-
491
- total_time = time.time() - start_time
492
- print(f"✅ Upscaling completed successfully!")
493
- if args.output_format == "png":
494
- print(f"📁 PNG frames saved in directory: {args.output}")
495
- else:
496
- print(f"📁 Output saved to video: {args.output}")
497
- print(f"🕒 Total processing time: {total_time:.2f}s")
498
- print(f"⚡ Average FPS: {len(frames_tensor) / generation_time:.2f} frames/sec")
499
-
500
- except Exception as e:
501
- print(f"❌ Error during processing: {e}")
502
- import traceback
503
- traceback.print_exc()
504
- sys.exit(1)
505
-
506
- finally:
507
- print(f"🧹 Process {os.getpid()} terminating - VRAM will be automatically freed")
508
-
509
-
510
-
511
-
512
  def run_inference_logic(args, progress_callback=None):
513
  """
514
- Função principal que executa o pipeline de upscaling.
515
- Pode ser chamada tanto pelo CLI quanto por outra parte do código.
516
- 'args' pode ser um objeto argparse ou qualquer objeto com atributos correspondentes.
517
  """
518
  if args.debug:
519
- print(f"📋 Argumentos da Lógica de Inferência:")
520
- for key, value in vars(args).items():
521
- print(f" {key}: {value}")
522
 
523
- # 1. Extrair Frames
524
  print("🎬 Extraindo frames do vídeo...")
525
  start_time = time.time()
526
  frames_tensor, original_fps = extract_frames_from_video(
@@ -528,13 +269,7 @@ def run_inference_logic(args, progress_callback=None):
528
  )
529
  if args.debug:
530
  print(f"🔄 Tempo de extração de frames: {time.time() - start_time:.2f}s")
531
-
532
- # 2. Preparar e Executar a Inferência (Multi-GPU)
533
- # ATENÇÃO: A lógica Multi-GPU com `multiprocessing` é complexa de passar um callback de progresso.
534
- # Para simplificar e garantir o funcionamento, vamos focar em single-process/multi-GPU.
535
- # A função `_gpu_processing` já chama `generation_loop`, que pode aceitar um callback.
536
- # Precisamos garantir que ele seja passado adiante.
537
-
538
  device_list = [d.strip() for d in str(args.cuda_device).split(',') if d.strip()] if args.cuda_device else ["0"]
539
  if args.debug:
540
  print(f"🚀 Usando dispositivos: {device_list}")
@@ -542,54 +277,38 @@ def run_inference_logic(args, progress_callback=None):
     processing_start = time.time()
     download_weight(args.model, args.model_dir)

-    # MODIFICATION: _gpu_processing should be adjusted to accept and forward the callback.
-    # However, since _gpu_processing uses multiprocessing, passing a Gradio callback along
-    # is complex. A simpler approach is to remove the multiprocessing layer for now
-    # and call the main inference logic directly when running in API mode.
-    # For now, we assume `_gpu_processing` handles this internally.
-    # The easiest way to simulate progress here is by elapsed time.
-
-    # This call needs to be investigated so the callback is passed along.
-    # For now, progress is reported before and after this call.
-    result_tensor = _gpu_processing(frames_tensor, device_list, args)  # This call is blocking

     generation_time = time.time() - processing_start
     if args.debug:
         print(f"🔄 Generation time: {generation_time:.2f}s")
         print(f"📊 Result: {result_tensor.shape}, dtype: {result_tensor.dtype}")

-    # 3. Return the result in memory
     return result_tensor, original_fps, generation_time, len(frames_tensor)

-
-
-
-# ORIGINAL MAIN FUNCTION (now a wrapper)
 def main():
-    """Main CLI function"""
     print(f"🚀 SeedVR2 Video Upscaler CLI started at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
-
     args = parse_arguments()
-
     try:
-        # Call the new logic function
         result_tensor, original_fps, _, _ = run_inference_logic(args)
-
-        # The file-saving part remains CLI-only
         print(f"💾 Saving video to: {args.output}")
-        save_start = time.time()
         save_frames_to_video(result_tensor, args.output, original_fps, args.debug)
-        if args.debug:
-            print(f"🔄 Save time: {time.time() - save_start:.2f}s")
-
-        print("✅ CLI upscaling completed successfully!")

     except Exception as e:
-        print(f"❌ Error during processing: {e}")
         import traceback
         traceback.print_exc()
         sys.exit(1)

-# Entry point for command-line execution
 if __name__ == "__main__":
-    main()
 
@@ -1,6 +1,7 @@
 #!/usr/bin/env python3
 """
 Standalone SeedVR2 Video Upscaler CLI Script
+(MODIFIED TO BE IMPORTABLE AND TO SUPPORT CALLBACKS)
 """

 import sys
@@ -8,25 +9,26 @@ import os
 import argparse
 import time
 import multiprocessing as mp
+# Ensure safe CUDA usage with multiprocessing, essential for stability.
 if mp.get_start_method(allow_none=True) != 'spawn':
     mp.set_start_method('spawn', force=True)
+
 # -------------------------------------------------------------
+# 1) VRAM memory-allocation configuration (essential for performance)
 os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "backend:cudaMallocAsync")

+# 2) Pre-parse the arguments to configure CUDA device visibility
 _pre_parser = argparse.ArgumentParser(add_help=False)
 _pre_parser.add_argument("--cuda_device", type=str, default=None)
 _pre_args, _ = _pre_parser.parse_known_args()
 if _pre_args.cuda_device is not None:
     device_list_env = [x.strip() for x in _pre_args.cuda_device.split(',') if x.strip() != '']
     if len(device_list_env) == 1:
+        # If only one GPU is specified, restrict PyTorch's visibility to it.
         os.environ["CUDA_VISIBLE_DEVICES"] = device_list_env[0]

 # -------------------------------------------------------------
+# 3) Heavy imports (torch, etc.) happen after the environment is configured.
 import torch
 import cv2
 import numpy as np
@@ -34,7 +36,7 @@ from datetime import datetime
 from pathlib import Path
 from src.utils.downloads import download_weight

+# Add the project root to the system path to allow imports from `src`
 script_dir = os.path.dirname(os.path.abspath(__file__))
 if script_dir not in sys.path:
     sys.path.insert(0, script_dir)

@@ -44,16 +46,7 @@ if root_dir not in sys.path:

 def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load_cap=None):
     """
+    Extract frames from a video and convert them to tensor format.
     """
     if debug:
         print(f"🎬 Extracting frames from video: {video_path}")
@@ -61,12 +54,10 @@ def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load
     if not os.path.exists(video_path):
         raise FileNotFoundError(f"Video file not found: {video_path}")

     cap = cv2.VideoCapture(video_path)
     if not cap.isOpened():
         raise ValueError(f"Cannot open video file: {video_path}")

     fps = cap.get(cv2.CAP_PROP_FPS)
     frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
@@ -88,23 +79,14 @@ def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load
         if not ret:
             break

         if frame_idx < skip_first_frames:
             frame_idx += 1
             continue

         if load_cap is not None and load_cap > 0 and frames_loaded >= load_cap:
             break

         frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         frame = frame.astype(np.float32) / 255.0

         frames.append(frame)
@@ -123,7 +105,6 @@ def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load
     if debug:
         print(f"✅ Extracted {len(frames)} frames")

     frames_tensor = torch.from_numpy(np.stack(frames)).to(torch.float16)

     if debug:
@@ -134,37 +115,24 @@ def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load

 def save_frames_to_video(frames_tensor, output_path, fps=30.0, debug=False):
     """
+    Save a frames tensor to a video file.
     """
     if debug:
         print(f"🎬 Saving {frames_tensor.shape[0]} frames to video: {output_path}")

     os.makedirs(os.path.dirname(output_path), exist_ok=True)

+    frames_np = (frames_tensor.cpu().numpy() * 255.0).astype(np.uint8)

     T, H, W, C = frames_np.shape

     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
     out = cv2.VideoWriter(output_path, fourcc, fps, (W, H))

     if not out.isOpened():
         raise ValueError(f"Cannot create video writer for: {output_path}")

     for i, frame in enumerate(frames_np):
         frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
         out.write(frame_bgr)

@@ -176,43 +144,10 @@ def save_frames_to_video(frames_tensor, output_path, fps=30.0, debug=False):
     if debug:
         print(f"✅ Video saved successfully: {output_path}")
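
The added one-liner above replaces the old two-step denormalization. A tiny round-trip sketch of the same 0-1 float16 to uint8 conversion (toy tensor; assumes only torch and numpy):

import numpy as np
import torch

t = torch.rand(2, 4, 4, 3).to(torch.float16)     # [T, H, W, C], values in 0-1
u8 = (t.cpu().numpy() * 255.0).astype(np.uint8)  # [T, H, W, C], values in 0-255
print(u8.dtype, u8.min(), u8.max())
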
 
+def _worker_process(proc_idx, device_id, frames_np, shared_args, return_queue, progress_queue=None):
     """
+    Child process (worker) that runs the upscaling on a dedicated GPU.
     """
     os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)
     os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "backend:cudaMallocAsync")
@@ -222,73 +157,37 @@ def _worker_process(proc_idx, device_id, frames_np, shared_args, return_queue, p

     frames_tensor = torch.from_numpy(frames_np).to(torch.float16)

+    # Create a local callback function that sends progress to the communication queue
     local_progress_callback = None
     if progress_queue:
         def callback_wrapper(batch_idx, total_batches, current_frames, message):
+            # Send a tuple with progress information to the queue
+            progress_queue.put((proc_idx, batch_idx, total_batches, message))
         local_progress_callback = callback_wrapper

     runner = configure_runner(shared_args["model"], shared_args["model_dir"], shared_args["preserve_vram"], shared_args["debug"])

+    # Pass the local callback to generation_loop, which knows how to use it
     result_tensor = generation_loop(
         runner=runner, images=frames_tensor, cfg_scale=shared_args["cfg_scale"],
         seed=shared_args["seed"], res_w=shared_args["res_w"], batch_size=shared_args["batch_size"],
         preserve_vram=shared_args["preserve_vram"], temporal_overlap=shared_args["temporal_overlap"],
         debug=shared_args["debug"],
+        progress_callback=local_progress_callback
     )
+    # Send the final result back to the parent process
     return_queue.put((proc_idx, result_tensor.cpu().numpy()))

+def _gpu_processing(frames_tensor, device_list, args, progress_callback=None):
+    """
+    Split the frames across the GPUs and manage the child processes while monitoring progress.
+    """
     num_devices = len(device_list)
     chunks = torch.chunk(frames_tensor, num_devices, dim=0)

     manager = mp.Manager()
     return_queue = manager.Queue()
+    progress_queue = manager.Queue() if progress_callback else None
     workers = []

     shared_args = {
@@ -303,224 +202,66 @@ def _gpu_processing(frames_tensor, device_list, args, progress_callback=None): #
         workers.append(p)

     results_np = [None] * num_devices
+    collected_workers = 0
+    worker_progress = [0.0] * num_devices  # Store each worker's individual progress
+
+    while collected_workers < num_devices:
+        # 1. Drain every progress message in the queue without blocking
+        if progress_queue:
+            while not progress_queue.empty():
+                proc_idx, batch_idx, total_batches, message = progress_queue.get()
+                if total_batches > 0:
+                    worker_progress[proc_idx] = batch_idx / total_batches
+
+                # Compute the overall progress as the average across all workers
+                total_progress = sum(worker_progress) / num_devices
+
+                # Call the main (Gradio) callback with the formatted information
+                progress_callback(total_progress, desc=f"GPU {proc_idx+1}: {message}")
+
+        # 2. Check whether a worker has finished and sent back its result
         if not return_queue.empty():
             proc_idx, res_np = return_queue.get()
             results_np[proc_idx] = res_np
+            worker_progress[proc_idx] = 1.0  # Mark this worker as 100% complete
+            collected_workers += 1
+
+        time.sleep(0.2)  # Short pause to avoid excessive CPU usage in the loop

     for p in workers: p.join()

     return torch.from_numpy(np.concatenate(results_np, axis=0)).to(torch.float16)
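
A self-contained sketch of the parent/worker pattern that `_gpu_processing` and `_worker_process` implement above: workers push (proc_idx, batch_idx, total, message) tuples into a Manager queue, and the parent drains it without blocking while waiting for results. The workload, names, and sleep intervals here are illustrative, not the project's API:

import multiprocessing as mp
import time

def worker(proc_idx, progress_q, result_q, total_batches=3):
    for batch_idx in range(1, total_batches + 1):
        time.sleep(0.1)  # stand-in for one upscaling batch
        progress_q.put((proc_idx, batch_idx, total_batches, "batch done"))
    result_q.put((proc_idx, f"result-{proc_idx}"))

if __name__ == "__main__":
    mp.set_start_method("spawn", force=True)
    manager = mp.Manager()
    progress_q, result_q = manager.Queue(), manager.Queue()
    procs = [mp.Process(target=worker, args=(i, progress_q, result_q)) for i in range(2)]
    for p in procs:
        p.start()
    collected = 0
    while collected < 2:
        while not progress_q.empty():  # drain progress without blocking
            print("progress:", progress_q.get())
        if not result_q.empty():       # collect finished workers
            print("done:", result_q.get())
            collected += 1
        time.sleep(0.05)
    for p in procs:
        p.join()
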

 def parse_arguments():
+    """Parse the command-line arguments."""
     parser = argparse.ArgumentParser(description="SeedVR2 Video Upscaler CLI")

+    parser.add_argument("--video_path", type=str, required=True, help="Path to input video file")
+    parser.add_argument("--seed", type=int, default=100, help="Random seed for generation (default: 100)")
+    parser.add_argument("--resolution", type=int, default=1072, help="Target resolution of the short side (default: 1072)")
+    parser.add_argument("--batch_size", type=int, default=5, help="Number of frames per batch (default: 5)")
+    parser.add_argument("--model", type=str, default="seedvr2_ema_3b_fp16.safetensors",
+                        choices=["seedvr2_ema_3b_fp16.safetensors", "seedvr2_ema_3b_fp8_e4m3fn.safetensors",
+                                 "seedvr2_ema_7b_fp16.safetensors", "seedvr2_ema_7b_fp8_e4m3fn.safetensors"],
+                        help="Model to use")
+    parser.add_argument("--model_dir", type=str, default=None, help="Directory containing the model files")
+    parser.add_argument("--skip_first_frames", type=int, default=0, help="Skip the first frames during processing")
+    parser.add_argument("--load_cap", type=int, default=0, help="Maximum number of frames to load from video (default: load all)")
+    parser.add_argument("--output", type=str, default=None, help="Output path")
+    parser.add_argument("--output_format", type=str, default="video", choices=["video", "png"], help="Output format: 'video' (mp4) or 'png' images")
+    parser.add_argument("--preserve_vram", action="store_true", help="Enable VRAM preservation mode")
+    parser.add_argument("--debug", action="store_true", help="Enable debug logging")
+    parser.add_argument("--cuda_device", type=str, default=None, help="CUDA device id(s). e.g., '0' or '0,1' for multi-GPU")

     return parser.parse_args()

 def run_inference_logic(args, progress_callback=None):
     """
+    Main function that runs the upscaling pipeline. It can be imported and called by other scripts.
     """
     if args.debug:
+        print(f"📋 Inference logic arguments: {vars(args)}")

     print("🎬 Extracting frames from the video...")
     start_time = time.time()
     frames_tensor, original_fps = extract_frames_from_video(
@@ -528,13 +269,7 @@ def run_inference_logic(args, progress_callback=None):
     )
     if args.debug:
         print(f"🔄 Frame extraction time: {time.time() - start_time:.2f}s")
+
     device_list = [d.strip() for d in str(args.cuda_device).split(',') if d.strip()] if args.cuda_device else ["0"]
     if args.debug:
         print(f"🚀 Using devices: {device_list}")
@@ -542,54 +277,38 @@ def run_inference_logic(args, progress_callback=None):
     processing_start = time.time()
     download_weight(args.model, args.model_dir)

+    # Pass the callback to the processing function, which will manage it
+    result_tensor = _gpu_processing(frames_tensor, device_list, args, progress_callback)

     generation_time = time.time() - processing_start
     if args.debug:
         print(f"🔄 Generation time: {generation_time:.2f}s")
         print(f"📊 Result: {result_tensor.shape}, dtype: {result_tensor.dtype}")

+    # Return the tensor and metadata in memory to the caller
     return result_tensor, original_fps, generation_time, len(frames_tensor)

 def main():
+    """
+    Entry point for command-line (CLI) execution.
+    """
     print(f"🚀 SeedVR2 Video Upscaler CLI started at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
     args = parse_arguments()
     try:
+        # Call the main logic function
         result_tensor, original_fps, _, _ = run_inference_logic(args)
+
+        # Save the result to disk, as expected in CLI mode
         print(f"💾 Saving video to: {args.output}")
         save_frames_to_video(result_tensor, args.output, original_fps, args.debug)
+        print("✅ Upscaling via CLI completed successfully!")

     except Exception as e:
+        print(f"❌ Error during CLI processing: {e}")
         import traceback
         traceback.print_exc()
         sys.exit(1)

+# Entry point when the script is run directly
 if __name__ == "__main__":
+    main()
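
The point of the commit is that the pipeline is now importable. A hedged usage sketch: the `argparse.Namespace` mirrors the CLI flags defined above, while the file paths and the `progress` callback are illustrative placeholders, not part of the repository:

import argparse
from inference_cli import run_inference_logic, save_frames_to_video

args = argparse.Namespace(
    video_path="input.mp4", seed=100, resolution=1072, batch_size=5,
    model="seedvr2_ema_3b_fp16.safetensors", model_dir=None,
    skip_first_frames=0, load_cap=0, output="outputs/result.mp4",
    output_format="video", preserve_vram=False, debug=False, cuda_device="0",
)

def progress(fraction, desc=""):
    # Stand-in for e.g. a Gradio progress handler.
    print(f"{fraction:6.1%} {desc}")

result, fps, gen_time, n_frames = run_inference_logic(args, progress_callback=progress)
save_frames_to_video(result, args.output, fps)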