import torch
import torch.nn.functional as F
import numpy as np
import cv2
from PIL import Image

from config import SAPIENS_LITE_MODELS_PATH
def load_model(task, version):
    try:
        model_path = SAPIENS_LITE_MODELS_PATH[task][version]
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Allow TF32 matmuls on Ampere or newer GPUs (compute capability >= 8).
        if torch.cuda.is_available() and torch.cuda.get_device_properties(0).major >= 8:
            torch.backends.cuda.matmul.allow_tf32 = True
            torch.backends.cudnn.allow_tf32 = True
        model = torch.jit.load(model_path)
        model.eval()
        model.to(device)
        return model, device
    except KeyError as e:
        print(f"Error: invalid task or version. {e}")
        return None, None
def preprocess_image(image, input_shape):
    # Resize to the model's expected (height, width); cv2.resize takes (width, height).
    img = cv2.resize(image, (input_shape[2], input_shape[1]), interpolation=cv2.INTER_LINEAR).transpose(2, 0, 1)
    img = torch.from_numpy(img)
    # Swap BGR -> RGB (frames are expected in cv2's BGR order) before applying
    # the RGB-ordered mean/std normalization below.
    img = img[[2, 1, 0], ...].float()
    mean = torch.tensor([123.5, 116.5, 103.5]).view(-1, 1, 1)
    std = torch.tensor([58.5, 57.0, 57.5]).view(-1, 1, 1)
    img = (img - mean) / std
    return img.unsqueeze(0)  # add batch dimension: (1, 3, H, W)
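
# A minimal shape-check sketch (illustrative only; the dummy frame is
# hypothetical, and input_shape = (3, 1024, 768) matches the default used in
# process_image_or_video below):
#
#     frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # dummy BGR frame
#     x = preprocess_image(frame, (3, 1024, 768))
#     print(x.shape)  # torch.Size([1, 3, 1024, 768])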
def post_process_depth(result, original_shape):
    # F.interpolate expects a 4D (N, C, H, W) tensor.
    if result.dim() == 3:
        result = result.unsqueeze(0)
    elif result.dim() == 4:
        pass
    else:
        raise ValueError(f"Unexpected result dimension: {result.dim()}")
    # Upsample the prediction back to the original frame size.
    seg_logits = F.interpolate(result, size=original_shape, mode="bilinear", align_corners=False).squeeze(0)
    depth_map = seg_logits.data.float().cpu().numpy()
    # Drop a singleton channel axis so the returned map is (H, W).
    if depth_map.ndim == 3 and depth_map.shape[0] == 1:
        depth_map = depth_map.squeeze(0)
    return depth_map
def visualize_depth(depth_map):
    min_val, max_val = np.nanmin(depth_map), np.nanmax(depth_map)
    # Invert the normalized map so nearer surfaces get warmer colors; the small
    # epsilon guards against division by zero on a flat depth map.
    depth_normalized = 1 - ((depth_map - min_val) / max(max_val - min_val, 1e-8))
    depth_normalized = (depth_normalized * 255).astype(np.uint8)
    depth_colored = cv2.applyColorMap(depth_normalized, cv2.COLORMAP_INFERNO)  # BGR output
    return depth_colored
def calculate_surface_normal(depth_map):
    # Estimate normals from depth gradients: n is proportional to (-dz/dx, -dz/dy, -1).
    kernel_size = 7
    grad_x = cv2.Sobel(depth_map.astype(np.float32), cv2.CV_32F, 1, 0, ksize=kernel_size)
    grad_y = cv2.Sobel(depth_map.astype(np.float32), cv2.CV_32F, 0, 1, ksize=kernel_size)
    z = np.full(grad_x.shape, -1, dtype=np.float32)
    normals = np.dstack((-grad_x, -grad_y, z))
    normals_mag = np.linalg.norm(normals, axis=2, keepdims=True)
    with np.errstate(divide="ignore", invalid="ignore"):
        normals_normalized = normals / (normals_mag + 1e-5)
    normals_normalized = np.nan_to_num(normals_normalized, nan=-1, posinf=-1, neginf=-1)
    # Map [-1, 1] normal components into the [0, 255] byte range for display.
    normal_from_depth = ((normals_normalized + 1) / 2 * 255).astype(np.uint8)
    normal_from_depth = normal_from_depth[:, :, ::-1]  # RGB to BGR for cv2
    return normal_from_depth
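
# calculate_surface_normal is not invoked by process_image_or_video below; a
# hedged sketch of wiring it in after post-processing ("normals.png" is a
# hypothetical output path):
#
#     depth_map = post_process_depth(result, (frame.shape[0], frame.shape[1]))
#     normal_vis = calculate_surface_normal(depth_map)
#     cv2.imwrite("normals.png", normal_vis)  # already BGR, so imwrite is direct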
def process_image_or_video(input_data, task='depth', version='sapiens_0.3b'):
    model, device = load_model(task, version)
    if model is None or device is None:
        return None
    input_shape = (3, 1024, 768)

    def process_frame(frame):
        if isinstance(frame, Image.Image):
            frame = np.array(frame)
            if frame.ndim == 3 and frame.shape[2] == 4:  # RGBA
                frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2RGB)
            # PIL arrays are RGB, but preprocess_image assumes cv2-style BGR
            # (it swaps channels before applying RGB means), so convert here.
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        img = preprocess_image(frame, input_shape)
        with torch.no_grad():
            result = model(img.to(device))
        depth_map = post_process_depth(result, (frame.shape[0], frame.shape[1]))
        depth_image = visualize_depth(depth_map)
        return Image.fromarray(cv2.cvtColor(depth_image, cv2.COLOR_BGR2RGB))

    if isinstance(input_data, np.ndarray):  # video frame (BGR, as read by cv2)
        return process_frame(input_data)
    elif isinstance(input_data, Image.Image):  # still image
        return process_frame(input_data)
    else:
        print("Unsupported input type. Please provide a PIL image or a numpy video frame.")
        return None
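
# Minimal usage sketch (assumes SAPIENS_LITE_MODELS_PATH in config.py points at
# valid TorchScript checkpoints; "example.jpg" and "depth_output.png" are
# hypothetical file names used only for illustration):
if __name__ == "__main__":
    input_image = Image.open("example.jpg").convert("RGB")
    depth_vis = process_image_or_video(input_image, task="depth", version="sapiens_0.3b")
    if depth_vis is not None:
        depth_vis.save("depth_output.png")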