EuuIia committed · verified
Commit aa84ba3 · 1 Parent(s): c96bb7c

Upload inference_cli (2).py

Files changed (1): inference_cli (2).py (+515, −0)
inference_cli (2).py ADDED
@@ -0,0 +1,515 @@
#!/usr/bin/env python3
"""
Standalone SeedVR2 Video Upscaler CLI Script
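
Example invocation (flags as defined in parse_arguments below; paths are illustrative):

    python "inference_cli (2).py" --video_path input.mp4 --output output.mp4 --cuda_device 0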
"""

import sys
import os
import argparse
import time
import multiprocessing as mp

# Ensure safe CUDA usage with multiprocessing
if mp.get_start_method(allow_none=True) != 'spawn':
    mp.set_start_method('spawn', force=True)

# -------------------------------------------------------------
# 1) VRAM management (cudaMallocAsync), already in place
os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "backend:cudaMallocAsync")

# 2) Pre-parse the command line to retrieve --cuda_device
_pre_parser = argparse.ArgumentParser(add_help=False)
_pre_parser.add_argument("--cuda_device", type=str, default=None)
_pre_args, _ = _pre_parser.parse_known_args()
if _pre_args.cuda_device is not None:
    device_list_env = [x.strip() for x in _pre_args.cuda_device.split(',') if x.strip() != '']
    if len(device_list_env) == 1:
        # Single GPU: restrict visibility now, before CUDA is initialized
        os.environ["CUDA_VISIBLE_DEVICES"] = device_list_env[0]

# -------------------------------------------------------------
# 3) Heavy imports (torch, etc.) after the environment is configured
import torch
import cv2
import numpy as np
from datetime import datetime
from pathlib import Path

# Add project root to sys.path for src module imports
# (this must run before importing anything from src)
script_dir = os.path.dirname(os.path.abspath(__file__))
if script_dir not in sys.path:
    sys.path.insert(0, script_dir)
root_dir = os.path.join(script_dir, '..', '..')
if root_dir not in sys.path:
    sys.path.insert(0, root_dir)

from src.utils.downloads import download_weight

def extract_frames_from_video(video_path, debug=False, skip_first_frames=0, load_cap=None):
    """
    Extract frames from a video and convert them to tensor format.

    Args:
        video_path (str): Path to input video
        debug (bool): Enable debug logging
        skip_first_frames (int): Number of initial frames to skip during extraction
        load_cap (int): Maximum number of frames to load (None or 0 for all)

    Returns:
        tuple[torch.Tensor, float]: Frames tensor in format [T, H, W, C]
            (Float16, normalized 0-1) and the source video FPS.
    """
    if debug:
        print(f"🎬 Extracting frames from video: {video_path}")

    if not os.path.exists(video_path):
        raise FileNotFoundError(f"Video file not found: {video_path}")

    # Open video
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f"Cannot open video file: {video_path}")

    # Get video properties
    fps = cap.get(cv2.CAP_PROP_FPS)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    if debug:
        print(f"📊 Video info: {frame_count} frames, {width}x{height}, {fps:.2f} FPS")
        if skip_first_frames:
            print(f"⏭️ Will skip first {skip_first_frames} frames")
        if load_cap:
            print(f"🔢 Will load maximum {load_cap} frames")

    frames = []
    frame_idx = 0
    frames_loaded = 0

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Skip initial frames if requested
        if frame_idx < skip_first_frames:
            frame_idx += 1
            if debug:
                print(f"⏭️ Skipped frame {frame_idx}")
            continue

        # Check load cap
        if load_cap is not None and load_cap > 0 and frames_loaded >= load_cap:
            if debug:
                print(f"🔢 Reached load cap of {load_cap} frames")
            break

        # Convert BGR to RGB
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Convert to float32 and normalize to 0-1
        frame = frame.astype(np.float32) / 255.0

        frames.append(frame)
        frame_idx += 1
        frames_loaded += 1

        if debug and frames_loaded % 100 == 0:
            total_to_load = min(frame_count, load_cap) if load_cap else frame_count
            print(f"📹 Extracted {frames_loaded}/{total_to_load} frames")

    cap.release()

    if len(frames) == 0:
        raise ValueError(f"No frames extracted from video: {video_path}")

    if debug:
        print(f"✅ Extracted {len(frames)} frames")

    # Convert to tensor [T, H, W, C] and cast to Float16 for ComfyUI compatibility
    frames_tensor = torch.from_numpy(np.stack(frames)).to(torch.float16)

    if debug:
        print(f"📊 Frames tensor shape: {frames_tensor.shape}, dtype: {frames_tensor.dtype}")

    return frames_tensor, fps

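
# Example (illustrative): load at most 100 frames, skipping the first 2
#   frames, fps = extract_frames_from_video("input.mp4", debug=True,
#                                           skip_first_frames=2, load_cap=100)
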
def save_frames_to_video(frames_tensor, output_path, fps=30.0, debug=False):
    """
    Save frames tensor to a video file.

    Args:
        frames_tensor (torch.Tensor): Frames in format [T, H, W, C] (Float16, 0-1)
        output_path (str): Output video path
        fps (float): Output video FPS
        debug (bool): Enable debug logging
    """
    if debug:
        print(f"🎬 Saving {frames_tensor.shape[0]} frames to video: {output_path}")

    # Ensure output directory exists (guard against bare filenames with no directory part)
    os.makedirs(os.path.dirname(output_path) or ".", exist_ok=True)

    # Convert tensor to numpy and denormalize
    frames_np = frames_tensor.cpu().numpy()
    frames_np = (frames_np * 255.0).astype(np.uint8)

    # Get video properties
    T, H, W, C = frames_np.shape

    # Initialize video writer
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (W, H))

    if not out.isOpened():
        raise ValueError(f"Cannot create video writer for: {output_path}")

    # Write frames
    for i, frame in enumerate(frames_np):
        # Convert RGB to BGR for OpenCV
        frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        out.write(frame_bgr)

        if debug and (i + 1) % 100 == 0:
            print(f"💾 Saved {i + 1}/{T} frames")

    out.release()

    if debug:
        print(f"✅ Video saved successfully: {output_path}")

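
# Note on the codec: the 'mp4v' FourCC used above is widely available in
# OpenCV builds; 'avc1' (H.264) may yield smaller files where the local
# build supports it, but that is build-dependent and not assumed here.
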
def save_frames_to_png(frames_tensor, output_dir, base_name, debug=False):
    """
    Save frames tensor as sequential PNG images.

    Args:
        frames_tensor (torch.Tensor): Frames in format [T, H, W, C] (Float16, 0-1)
        output_dir (str): Directory to save PNGs
        base_name (str): Base name for output files (without extension)
        debug (bool): Enable debug logging
    """
    if debug:
        print(f"🖼️ Saving {frames_tensor.shape[0]} frames as PNGs to directory: {output_dir}")

    # Ensure output directory exists
    os.makedirs(output_dir, exist_ok=True)

    # Convert to numpy uint8 RGB
    frames_np = (frames_tensor.cpu().numpy() * 255.0).astype(np.uint8)
    total = frames_np.shape[0]
    digits = max(5, len(str(total)))  # at least 5 digits

    for idx, frame in enumerate(frames_np):
        filename = f"{base_name}_{idx:0{digits}d}.png"
        file_path = os.path.join(output_dir, filename)
        # Convert RGB to BGR for cv2
        frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        cv2.imwrite(file_path, frame_bgr)
        if debug and (idx + 1) % 100 == 0:
            print(f"💾 Saved {idx + 1}/{total} PNGs")

    if debug:
        print(f"✅ PNG saving completed: {total} files in '{output_dir}'")

def _worker_process(proc_idx, device_id, frames_np, shared_args, return_queue):
    """Worker process that performs upscaling on a slice of frames using a dedicated GPU."""
    # 1. Limit CUDA visibility to the chosen GPU BEFORE any CUDA context is created
    os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)
    # Keep the same cudaMallocAsync setting
    os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "backend:cudaMallocAsync")

    import torch  # local import inside the subprocess
    from src.core.model_manager import configure_runner
    from src.core.generation import generation_loop

    # Reconstruct frames tensor
    frames_tensor = torch.from_numpy(frames_np).to(torch.float16)

    # Prepare runner
    model_dir = shared_args["model_dir"]
    model_name = shared_args["model"]
    # Ensure model weights are present (each process checks, but this is fast if already downloaded)
    if shared_args["debug"]:
        print(f"🔄 Configuring runner for device {device_id}")
    runner = configure_runner(model_name, model_dir, shared_args["preserve_vram"], shared_args["debug"])

    # Run generation
    result_tensor = generation_loop(
        runner=runner,
        images=frames_tensor,
        cfg_scale=shared_args["cfg_scale"],
        seed=shared_args["seed"],
        res_w=shared_args["res_w"],
        batch_size=shared_args["batch_size"],
        preserve_vram=shared_args["preserve_vram"],
        temporal_overlap=shared_args["temporal_overlap"],
        debug=shared_args["debug"],
    )

    # Send the result back as a numpy array to avoid CUDA transfers across processes
    return_queue.put((proc_idx, result_tensor.cpu().numpy()))

def _gpu_processing(frames_tensor, device_list, args):
    """Split frames and process them in parallel on multiple GPUs."""
    num_devices = len(device_list)
    # Split the frames tensor along the time dimension
    chunks = torch.chunk(frames_tensor, num_devices, dim=0)

    manager = mp.Manager()
    return_queue = manager.Queue()
    workers = []

    shared_args = {
        "model": args.model,
        "model_dir": args.model_dir if args.model_dir is not None else "./models/SEEDVR2",
        "preserve_vram": args.preserve_vram,
        "debug": args.debug,
        "cfg_scale": 1.0,
        "seed": args.seed,
        "res_w": args.resolution,
        "batch_size": args.batch_size,
        "temporal_overlap": 0,
    }

    for idx, (device_id, chunk_tensor) in enumerate(zip(device_list, chunks)):
        p = mp.Process(
            target=_worker_process,
            args=(idx, device_id, chunk_tensor.cpu().numpy(), shared_args, return_queue),
        )
        p.start()
        workers.append(p)

    results_np = [None] * num_devices
    collected = 0
    while collected < num_devices:
        proc_idx, res_np = return_queue.get()
        results_np[proc_idx] = res_np
        collected += 1

    for p in workers:
        p.join()

    # Concatenate results in original order
    result_tensor = torch.from_numpy(np.concatenate(results_np, axis=0)).to(torch.float16)
    return result_tensor

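
# Worked example (illustrative): with 100 frames and device_list ["0", "1"],
# torch.chunk yields two 50-frame slices; worker 0 upscales frames 0-49 on
# GPU 0, worker 1 handles frames 50-99 on GPU 1, and proc_idx restores the
# original order. Since temporal_overlap is 0 here, the two chunks are
# processed independently at their boundary.
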
def parse_arguments():
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(description="SeedVR2 Video Upscaler CLI")

    parser.add_argument("--video_path", type=str, required=True,
                        help="Path to input video file")
    parser.add_argument("--seed", type=int, default=100,
                        help="Random seed for generation (default: 100)")
    parser.add_argument("--resolution", type=int, default=1072,
                        help="Target resolution of the short side (default: 1072)")
    parser.add_argument("--batch_size", type=int, default=1,
                        help="Number of frames per batch (default: 1)")
    parser.add_argument("--model", type=str, default="seedvr2_ema_3b_fp8_e4m3fn.safetensors",
                        choices=[
                            "seedvr2_ema_3b_fp16.safetensors",
                            "seedvr2_ema_3b_fp8_e4m3fn.safetensors",
                            "seedvr2_ema_7b_fp16.safetensors",
                            "seedvr2_ema_7b_fp8_e4m3fn.safetensors"
                        ],
                        help="Model to use (default: 3B FP8)")
    parser.add_argument("--model_dir", type=str, default="seedvr2_models",
                        help="Directory containing the model files (default: seedvr2_models)")
    parser.add_argument("--skip_first_frames", type=int, default=0,
                        help="Number of initial frames to skip during processing")
    parser.add_argument("--load_cap", type=int, default=0,
                        help="Maximum number of frames to load from video (default: load all)")
    parser.add_argument("--output", type=str, default=None,
                        help="Output path (default: auto-generated from the input filename; "
                             "if output_format is png, it will be a directory)")
    parser.add_argument("--output_format", type=str, default="video", choices=["video", "png"],
                        help="Output format: 'video' (mp4) or 'png' images (default: video)")
    parser.add_argument("--preserve_vram", action="store_true",
                        help="Enable VRAM preservation mode")
    parser.add_argument("--debug", action="store_true",
                        help="Enable debug logging")
    parser.add_argument("--cuda_device", type=str, default=None,
                        help="CUDA device id(s). Single id (e.g., '0') or comma-separated list '0,1' for multi-GPU")

    return parser.parse_args()

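
# Example invocations (illustrative paths; flags as defined above):
#   Single GPU:  python "inference_cli (2).py" --video_path clip.mp4 --cuda_device 0 --output clip_upscaled.mp4
#   Two GPUs:    python "inference_cli (2).py" --video_path clip.mp4 --cuda_device 0,1 --batch_size 5 --debug
#   PNG frames:  python "inference_cli (2).py" --video_path clip.mp4 --output_format png --output frames_out
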
def run_inference_logic(args, progress_callback=None):
    """
    Core function that runs the upscaling pipeline.
    Can be called from the CLI or from any other part of the codebase.
    'args' can be an argparse namespace or any object with matching attributes.
    """
    if args.debug:
        print(f"📋 Inference logic arguments:")
        for key, value in vars(args).items():
            print(f" {key}: {value}")

    # 1. Extract frames
    print("🎬 Extracting frames from video...")
    start_time = time.time()
    frames_tensor, original_fps = extract_frames_from_video(
        args.video_path, args.debug, args.skip_first_frames, args.load_cap
    )
    if args.debug:
        print(f"🔄 Frame extraction time: {time.time() - start_time:.2f}s")

    # 2. Prepare and run inference (multi-GPU)
    # NOTE: forwarding a progress callback through the multiprocessing-based
    # multi-GPU path is non-trivial. `_gpu_processing` already calls
    # `generation_loop`, which can accept a callback, but it would have to be
    # relayed through the worker processes. For now, progress is only reported
    # before and after this blocking call.

    device_list = [d.strip() for d in str(args.cuda_device).split(',') if d.strip()] if args.cuda_device else ["0"]
    if args.debug:
        print(f"🚀 Using devices: {device_list}")

    processing_start = time.time()
    download_weight(args.model, args.model_dir)

    result_tensor = _gpu_processing(frames_tensor, device_list, args)  # blocking call

    generation_time = time.time() - processing_start
    if args.debug:
        print(f"🔄 Generation time: {generation_time:.2f}s")
        print(f"📊 Result: {result_tensor.shape}, dtype: {result_tensor.dtype}")

    # 3. Return the result in memory
    return result_tensor, original_fps, generation_time, len(frames_tensor)

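
# A minimal sketch of one way to forward progress across processes
# (hypothetical: it assumes generation_loop exposes a per-batch callback,
# which is not verified here). Each worker would push events into a
# Manager queue and the parent would drain it to drive progress_callback:
#
#   progress_queue = mp.Manager().Queue()
#   def on_batch(batch_idx):                      # runs inside a worker
#       progress_queue.put((device_id, batch_idx))
#   # ...forward on_batch into generation_loop, then in the parent:
#   while any(p.is_alive() for p in workers):
#       device_id, batch_idx = progress_queue.get(timeout=1)
#       progress_callback(device_id, batch_idx)   # hypothetical signature
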
# ORIGINAL MAIN FUNCTION (now a wrapper)
def main():
    """Main CLI function"""
    print(f"🚀 SeedVR2 Video Upscaler CLI started at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")

    args = parse_arguments()

    if args.debug:
        # Show actual CUDA device visibility
        print(f"🖥️ CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES', 'Not set (all)')}")
        if torch.cuda.is_available():
            print(f"🖥️ torch.cuda.device_count(): {torch.cuda.device_count()}")

    try:
        # Auto-generate the output path when none was given
        if args.output is None:
            stem = Path(args.video_path).stem + "_upscaled"
            suffix = "" if args.output_format == "png" else ".mp4"
            args.output = str(Path(args.video_path).with_name(stem + suffix))

        # Ensure --output is a directory (no extension) when using PNG format
        if args.output_format == "png":
            output_path_obj = Path(args.output)
            if output_path_obj.suffix:  # an extension is present, strip it
                args.output = str(output_path_obj.with_suffix(''))

        total_start = time.time()

        # Run the core pipeline
        result_tensor, original_fps, generation_time, num_frames = run_inference_logic(args)

        # Saving the result stays in the CLI wrapper
        save_start = time.time()
        if args.output_format == "png":
            base_name = Path(args.video_path).stem + "_upscaled"
            save_frames_to_png(result_tensor, args.output, base_name, args.debug)
            print(f"📁 PNG frames saved in directory: {args.output}")
        else:
            print(f"💾 Saving video to: {args.output}")
            save_frames_to_video(result_tensor, args.output, original_fps, args.debug)
            print(f"📁 Output saved to video: {args.output}")
        if args.debug:
            print(f"🔄 Save time: {time.time() - save_start:.2f}s")

        print("✅ Upscaling completed successfully!")
        print(f"🕒 Total processing time: {time.time() - total_start:.2f}s")
        print(f"⚡ Average FPS: {num_frames / generation_time:.2f} frames/sec")

    except Exception as e:
        print(f"❌ Error during processing: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)

    finally:
        print(f"🧹 Process {os.getpid()} terminating - VRAM will be automatically freed")


# Entry point for command-line execution
if __name__ == "__main__":
    main()