"""Wrapper around PerspectiveFields (perspective2d) to estimate camera roll/pitch
from a single image and visualize the predicted perspective field."""
import os
import sys

PPF_PATH = "/mnt/prev_nas/qhy_1/GenSpace/osdsynth/external/PerspectiveFields"
sys.path.append(PPF_PATH)  # Make the vendored PerspectiveFields package importable below

PPF_PATH_ABS = os.path.abspath(PPF_PATH)

import cv2
import numpy as np
import torch
from perspective2d import PerspectiveFields
from perspective2d.utils import draw_from_r_p_f_cx_cy, draw_perspective_fields


def create_rotation_matrix(
    roll: float,
    pitch: float,
    yaw: float,
    degrees: bool = False,
) -> np.ndarray:
    r"""Create rotation matrix from extrinsic parameters
    Args:
        roll (float): camera rotation about camera frame z-axis
        pitch (float): camera rotation about camera frame x-axis
        yaw (float): camera rotation about camera frame y-axis

    Returns:
        np.ndarray: rotation R_z @ R_x @ R_y
    """
    if degrees:
        roll = np.radians(roll)
        pitch = np.radians(pitch)
        yaw = np.radians(yaw)
    # calculate rotation about the x-axis
    R_x = np.array(
        [
            [1.0, 0.0, 0.0],
            [0.0, np.cos(pitch), np.sin(pitch)],
            [0.0, -np.sin(pitch), np.cos(pitch)],
        ]
    )
    # calculate rotation about the y-axis
    R_y = np.array(
        [
            [np.cos(yaw), 0.0, -np.sin(yaw)],
            [0.0, 1.0, 0.0],
            [np.sin(yaw), 0.0, np.cos(yaw)],
        ]
    )
    # calculate rotation about the z-axis
    R_z = np.array(
        [
            [np.cos(roll), np.sin(roll), 0.0],
            [-np.sin(roll), np.cos(roll), 0.0],
            [0.0, 0.0, 1.0],
        ]
    )

    return R_z @ R_x @ R_y
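
# Illustrative sanity check for create_rotation_matrix (hypothetical values, not
# called anywhere in this module): the result should be orthonormal with
# determinant 1, and zero angles should give the identity.
#
#   R = create_rotation_matrix(roll=10.0, pitch=-5.0, yaw=30.0, degrees=True)
#   assert np.allclose(R @ R.T, np.eye(3))
#   assert np.isclose(np.linalg.det(R), 1.0)
#   assert np.allclose(create_rotation_matrix(0.0, 0.0, 0.0), np.eye(3))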


def resize_fix_aspect_ratio(img, field, target_width=None, target_height=None):
    """Resize an image and its per-pixel fields while preserving the aspect ratio."""
    height, width = img.shape[:2]
    if target_width is None and target_height is None:
        raise ValueError("At least one of target_width or target_height must be given")
    if target_height is None:
        # Only the width is constrained; derive the height from it.
        factor = target_width / width
        target_height = int(height * factor)
    elif target_width is None:
        # Only the height is constrained; derive the width from it.
        factor = target_height / height
        target_width = int(width * factor)
    else:
        # Both are given: scale so the image covers the target box, then
        # round the non-limiting dimension.
        factor = max(target_width / width, target_height / height)
        if factor == target_width / width:
            target_height = int(height * factor)
        else:
            target_width = int(width * factor)

    img = cv2.resize(img, (target_width, target_height))
    # Resize the per-pixel fields ("up" gravity vectors, "lati" latitude map) to
    # match; channel-first (C, H, W) tensors are transposed to HWC for cv2.resize.
    for key in field:
        if key not in ["up", "lati"]:
            continue
        tmp = field[key].numpy()
        transpose = len(tmp.shape) == 3
        if transpose:
            tmp = tmp.transpose(1, 2, 0)
        tmp = cv2.resize(tmp, (target_width, target_height))
        if transpose:
            tmp = tmp.transpose(2, 0, 1)
        field[key] = torch.tensor(tmp)
    return img, field


def run_perspective_fields_model(model, image_bgr):
    """Run PerspectiveFields on a BGR image; return a visualization and the camera params."""
    pred = model.inference(img_bgr=image_bgr)
    field = {
        "up": pred["pred_gravity_original"].cpu().detach(),
        "lati": pred["pred_latitude_original"].cpu().detach(),
    }
    # Convert BGR -> RGB for drawing and resize the image/fields to width 640.
    img, field = resize_fix_aspect_ratio(image_bgr[..., ::-1], field, target_width=640)

    # Draw perspective field from ParamNet predictions
    param_vis = draw_from_r_p_f_cx_cy(
        img,
        pred["pred_roll"].item(),
        pred["pred_pitch"].item(),
        pred["pred_general_vfov"].item(),
        pred["pred_rel_cx"].item(),
        pred["pred_rel_cy"].item(),
        "deg",
        up_color=(0, 1, 0),
    ).astype(np.uint8)
    param_vis = cv2.cvtColor(param_vis, cv2.COLOR_RGB2BGR)

    param = {
        "roll": pred["pred_roll"].cpu().item(),
        "pitch": pred["pred_pitch"].cpu().item(),
    }

    return param_vis, param


def get_perspective_fields_model(cfg, device):
    """Load a pretrained PerspectiveFields model and move it to `device`."""
    MODEL_ID = "Paramnet-360Cities-edina-centered"
    # Alternative checkpoints; pick centered/uncentered depending on your data:
    # MODEL_ID = 'Paramnet-360Cities-edina-uncentered'
    # MODEL_ID = 'PersNet_Paramnet-GSV-centered'
    # MODEL_ID = 'PersNet_Paramnet-GSV-uncentered'
    # MODEL_ID = 'PersNet-360Cities'

    PerspectiveFields.versions()  # lists the available model IDs
    pf_model = PerspectiveFields(MODEL_ID).eval().to(device)
    return pf_model
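

# Minimal usage sketch (not part of the original module): load the model, run it
# on one image, print the estimated roll/pitch, and write the visualization to
# disk. The image path and output filename are placeholders.
if __name__ == "__main__":
    image_path = sys.argv[1] if len(sys.argv) > 1 else "example.jpg"  # placeholder
    image_bgr = cv2.imread(image_path)
    if image_bgr is None:
        raise FileNotFoundError(f"Could not read image: {image_path}")

    model = get_perspective_fields_model(cfg=None, device="cuda")
    param_vis, param = run_perspective_fields_model(model, image_bgr)

    print(f"roll:  {param['roll']:.2f} deg, pitch: {param['pitch']:.2f} deg")
    cv2.imwrite("perspective_fields_vis.png", param_vis)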