Qnancy committed on
Commit
bffe042
·
verified ·
1 Parent(s): 885ba71

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. Orient_Anything/render/__init__.py +3 -0
  2. Orient_Anything/render/core.py +370 -0
  3. external/Metric3D/training/mono/configs/RAFTDecoder/vit.raft5.giant2.kitti.py +132 -0
  4. external/Metric3D/training/mono/configs/RAFTDecoder/vit.raft5.giant2.nyu.py +136 -0
  5. external/Metric3D/training/mono/configs/RAFTDecoder/vit.raft5.giant2.py +1048 -0
  6. external/Metric3D/training/mono/configs/RAFTDecoder/vit.raft5.large.kitti.py +132 -0
  7. external/Metric3D/training/mono/configs/RAFTDecoder/vit.raft5.large.py +1047 -0
  8. external/Metric3D/training/mono/configs/RAFTDecoder/vit.raft5.small.py +1047 -0
  9. external/Metric3D/training/mono/configs/RAFTDecoder/vit.raft5.small.sanity_check.py +1014 -0
  10. external/Metric3D/training/mono/configs/_base_/datasets/7scenes.py +83 -0
  11. external/Metric3D/training/mono/configs/_base_/datasets/_data_base_.py +12 -0
  12. external/Metric3D/training/mono/configs/_base_/datasets/argovers2.py +74 -0
  13. external/Metric3D/training/mono/configs/_base_/datasets/blended_mvg.py +78 -0
  14. external/Metric3D/training/mono/configs/_base_/datasets/cityscapes.py +79 -0
  15. external/Metric3D/training/mono/configs/_base_/datasets/ddad.py +80 -0
  16. external/Metric3D/training/mono/configs/_base_/datasets/ddad_any.py +79 -0
  17. external/Metric3D/training/mono/configs/_base_/datasets/diml.py +79 -0
  18. external/Metric3D/training/mono/configs/_base_/datasets/diml_indoor.py +76 -0
  19. external/Metric3D/training/mono/configs/_base_/datasets/diode.py +80 -0
  20. external/Metric3D/training/mono/configs/_base_/datasets/drivingstereo.py +79 -0
  21. external/Metric3D/training/mono/configs/_base_/datasets/dsec.py +79 -0
  22. external/Metric3D/training/mono/configs/_base_/datasets/eth3d.py +80 -0
  23. external/Metric3D/training/mono/configs/_base_/datasets/hm3d.py +78 -0
  24. external/Metric3D/training/mono/configs/_base_/datasets/hypersim.py +71 -0
  25. external/Metric3D/training/mono/configs/_base_/datasets/ibims.py +80 -0
  26. external/Metric3D/training/mono/configs/_base_/datasets/kitti.py +80 -0
  27. external/Metric3D/training/mono/configs/_base_/datasets/leddarpixset.py +80 -0
  28. external/Metric3D/training/mono/configs/_base_/datasets/lyft.py +79 -0
  29. external/Metric3D/training/mono/configs/_base_/datasets/lyft_any.py +79 -0
  30. external/Metric3D/training/mono/configs/_base_/datasets/mapillary_psd.py +79 -0
  31. external/Metric3D/training/mono/configs/_base_/datasets/matterport3d.py +78 -0
  32. external/Metric3D/training/mono/configs/_base_/datasets/nuscenes.py +79 -0
  33. external/Metric3D/training/mono/configs/_base_/datasets/nuscenes_any.py +79 -0
  34. external/Metric3D/training/mono/configs/_base_/datasets/nyu.py +80 -0
  35. external/Metric3D/training/mono/configs/_base_/datasets/pandaset.py +79 -0
  36. external/Metric3D/training/mono/configs/_base_/datasets/replica.py +78 -0
  37. external/Metric3D/training/mono/configs/_base_/datasets/scannet.py +80 -0
  38. external/Metric3D/training/mono/configs/_base_/datasets/scannet_all.py +80 -0
  39. external/Metric3D/training/mono/configs/_base_/datasets/taskonomy.py +78 -0
  40. external/Metric3D/training/mono/configs/_base_/datasets/uasol.py +74 -0
  41. external/Metric3D/training/mono/configs/_base_/datasets/vkitti.py +80 -0
  42. external/Metric3D/training/mono/configs/_base_/datasets/waymo.py +80 -0
  43. external/Metric3D/training/mono/configs/_base_/default_runtime.py +23 -0
  44. external/Metric3D/training/mono/configs/_base_/losses/all_losses.py +26 -0
  45. external/Metric3D/training/mono/configs/_base_/models/backbones/dino_vit_giant2_reg.py +7 -0
  46. external/Metric3D/training/mono/configs/_base_/models/backbones/dino_vit_large_reg.py +7 -0
  47. external/Metric3D/training/mono/configs/_base_/models/backbones/dino_vit_small_reg.py +7 -0
  48. external/Metric3D/training/mono/configs/_base_/models/encoder_decoder/dino_vit_giant2_reg.dpt_raft.py +19 -0
  49. external/Metric3D/training/mono/configs/_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py +19 -0
  50. external/Metric3D/training/mono/configs/_base_/models/encoder_decoder/dino_vit_small_reg.dpt_raft.py +19 -0
Orient_Anything/render/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # flake8: noqa
2
+ from .core import render
3
+ from .model import Model
Orient_Anything/render/core.py ADDED
@@ -0,0 +1,370 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import typing as t
2
+ from functools import partial
3
+
4
+ import numpy as np
5
+ from copy import deepcopy
6
+ from .canvas import Canvas
7
+
8
+ from . import speedup
9
+
10
+
11
+ # 2D part
12
+
13
+
14
class Vec2d:
    """A 2-D screen-space point.

    Stores the raw (possibly float) components in ``arr`` and the
    rounded integer pixel components in ``x`` / ``y``.
    """

    __slots__ = "x", "y", "arr"

    def __init__(self, *args):
        if len(args) == 1 and isinstance(args[0], Vec3d):
            # Cast from a Vec3d: keep x/y, drop z.
            # BUGFIX: the original read the non-existent class attribute
            # ``Vec3d.narr``, which raised AttributeError on this path.
            self.arr = list(args[0].arr[:2])
        else:
            assert len(args) == 2
            self.arr = list(args)

        # Round-half-up to the nearest integer pixel coordinate.
        self.x, self.y = [d if isinstance(d, int) else int(d + 0.5) for d in self.arr]

    def __repr__(self):
        return f"Vec2d({self.x}, {self.y})"

    def __truediv__(self, other):
        # Slope of the segment joining the two points (dy / dx).
        return (self.y - other.y) / (self.x - other.x)

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y
34
+
35
+
36
def draw_line(
    v1: Vec2d, v2: Vec2d, canvas: Canvas, color: t.Union[tuple, str] = "white"
):
    """
    Draw a line with a specified color

    https://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm
    """
    a, b = deepcopy(v1), deepcopy(v2)

    # Degenerate segment: a single pixel.
    if a == b:
        canvas.draw((a.x, a.y), color=color)
        return

    # Transpose into a frame where the line is shallow (|dy| <= |dx|).
    steep = abs(a.y - b.y) > abs(a.x - b.x)
    if steep:
        a.x, a.y = a.y, a.x
        b.x, b.y = b.y, b.x
    # Always march in the +x direction.
    if not a.x < b.x:
        a, b = b, a

    slope = abs((a.y - b.y) / (a.x - b.x))
    step = 1 if a.y < b.y else -1
    y = a.y
    error: float = 0
    pixels = []
    for x in range(int(a.x), int(b.x + 0.5)):
        # Un-transpose when emitting the pixel.
        pixels.append((int(y), x) if steep else (x, int(y)))
        error += slope
        if abs(error) >= 0.5:
            y += step
            error -= 1

    canvas.draw(pixels, color=color)
67
+
68
+
69
def draw_triangle(v1, v2, v3, canvas, color, wireframe=False):
    """
    Draw a triangle with 3 ordered vertices

    http://www.sunshine2k.de/coding/java/TriangleRasterization/TriangleRasterization.html
    """
    # Every edge / scanline shares the same canvas and color.
    _draw_line = partial(draw_line, canvas=canvas, color=color)

    if wireframe:
        # Outline only: draw the three edges and stop.
        _draw_line(v1, v2)
        _draw_line(v2, v3)
        _draw_line(v1, v3)
        return

    def sort_vertices_asc_by_y(vertices):
        # After sorting, v1 is the topmost vertex and v3 the bottommost.
        return sorted(vertices, key=lambda v: v.y)

    def fill_bottom_flat_triangle(v1, v2, v3):
        # Assumes v2.y == v3.y (flat bottom); walks scanlines down from v1.
        invslope1 = (v2.x - v1.x) / (v2.y - v1.y)
        invslope2 = (v3.x - v1.x) / (v3.y - v1.y)

        x1 = x2 = v1.x
        y = v1.y

        while y <= v2.y:
            _draw_line(Vec2d(x1, y), Vec2d(x2, y))
            x1 += invslope1
            x2 += invslope2
            y += 1

    def fill_top_flat_triangle(v1, v2, v3):
        # Assumes v1.y == v2.y (flat top); walks scanlines up from v3.
        invslope1 = (v3.x - v1.x) / (v3.y - v1.y)
        invslope2 = (v3.x - v2.x) / (v3.y - v2.y)

        x1 = x2 = v3.x
        y = v3.y

        while y > v2.y:
            _draw_line(Vec2d(x1, y), Vec2d(x2, y))
            x1 -= invslope1
            x2 -= invslope2
            y -= 1

    v1, v2, v3 = sort_vertices_asc_by_y((v1, v2, v3))

    # Fill: split the general triangle into a flat-bottom and a flat-top
    # part sharing the horizontal scanline through v2.
    if v1.y == v2.y == v3.y:
        pass  # Degenerate: all vertices on one scanline, nothing to fill.
    elif v2.y == v3.y:
        fill_bottom_flat_triangle(v1, v2, v3)
    elif v1.y == v2.y:
        fill_top_flat_triangle(v1, v2, v3)
    else:
        # v4: intersection of edge v1-v3 with the scanline through v2.
        v4 = Vec2d(int(v1.x + (v2.y - v1.y) / (v3.y - v1.y) * (v3.x - v1.x)), v2.y)
        fill_bottom_flat_triangle(v1, v2, v4)
        fill_top_flat_triangle(v2, v4, v3)
125
+
126
+
127
+ # 3D part
128
+
129
+
130
class Vec3d:
    """A 3-D vector backed by a float64 numpy array.

    Constructed either from three scalars or by casting a ``Vec4d``
    (the w component is dropped).
    """

    __slots__ = "x", "y", "z", "arr"

    def __init__(self, *args):
        # for Vec4d cast
        if len(args) == 1 and isinstance(args[0], Vec4d):
            vec4 = args[0]
            arr_value = (vec4.x, vec4.y, vec4.z)
        else:
            assert len(args) == 3
            arr_value = args
        self.arr = np.array(arr_value, dtype=np.float64)
        self.x, self.y, self.z = self.arr

    def __repr__(self):
        # BUGFIX: the original returned repr(f"...") which wrapped the
        # text in an extra pair of quotes; return the string itself.
        return f"Vec3d({','.join([repr(d) for d in self.arr])})"

    def __sub__(self, other):
        # Component-wise subtraction; preserves the concrete class.
        return self.__class__(*[ds - do for ds, do in zip(self.arr, other.arr)])

    def __bool__(self):
        """False for the zero vector (0, 0, 0)."""
        return any(self.arr)
154
+
155
+
156
class Mat4d:
    """Thin wrapper around ``numpy.matrix`` used for 4x4 transforms."""

    def __init__(self, narr=None, value=None):
        # Either adopt an existing numpy matrix (``value``) or build one
        # from a nested list (``narr``).
        if value is None:
            self.value = np.matrix(narr)
        else:
            self.value = value

    def __repr__(self):
        return repr(self.value)

    def __mul__(self, other):
        # True matrix multiplication (np.matrix semantics), preserving
        # the concrete class of the left operand.
        return self.__class__(value=self.value * other.value)
165
+
166
+
167
class Vec4d(Mat4d):
    # A homogeneous 4-component column vector stored as a 4x1 matrix so
    # it can be multiplied directly with Mat4d transforms.

    def __init__(self, *narr, value=None):
        if value is not None:
            # Adopt an existing 4x1 numpy matrix.
            self.value = value
        elif len(narr) == 1 and isinstance(narr[0], Mat4d):
            # Cast from a Mat4d (e.g. the result of a transform chain).
            self.value = narr[0].value
        else:
            assert len(narr) == 4
            # Build a column vector from four scalars.
            self.value = np.matrix([[d] for d in narr])

        # Convenience scalar accessors for the four components.
        self.x, self.y, self.z, self.w = (
            self.value[0, 0],
            self.value[1, 0],
            self.value[2, 0],
            self.value[3, 0],
        )
        # Row (1x4) view of the same data.
        self.arr = self.value.reshape((1, 4))
184
+
185
+
186
# Math util
def normalize(v: Vec3d):
    """Return *v* scaled to unit length (delegates to the ``speedup`` extension)."""
    return Vec3d(*speedup.normalize(*v.arr))
189
+
190
+
191
def dot_product(a: Vec3d, b: Vec3d):
    """Dot product of two 3-D vectors (delegates to the ``speedup`` extension)."""
    return speedup.dot_product(*a.arr, *b.arr)
193
+
194
+
195
def cross_product(a: Vec3d, b: Vec3d):
    """Cross product of two 3-D vectors (delegates to the ``speedup`` extension)."""
    return Vec3d(*speedup.cross_product(*a.arr, *b.arr))
197
+
198
# Ambient base level added to every face before directional contributions.
BASE_LIGHT = 0.9


def get_light_intensity(face) -> float:
    """Return the shading intensity for a triangular ``face``.

    Intensity is BASE_LIGHT plus, for each light, 0.2 times the dot
    product between the face normal and the normalized light vector.
    ``face`` is a triple of Vec3d vertices.
    """
    # lights = [Vec3d(-2, 4, -10), Vec3d(10, 4, -2), Vec3d(8, 8, -8), Vec3d(0, 0, -8)]
    lights = [Vec3d(-2, 4, -10)]
    # lights = []

    v1, v2, v3 = face
    # Face normal from two edge vectors (sign depends on winding order).
    up = normalize(cross_product(v2 - v1, v3 - v1))
    intensity = BASE_LIGHT
    for light in lights:
        intensity += dot_product(up, normalize(light))*0.2
    return intensity
210
+
211
+
212
def look_at(eye: Vec3d, target: Vec3d, up: Vec3d = Vec3d(0, -1, 0)) -> Mat4d:
    """
    Build the world-to-camera (view) matrix.

    http://www.songho.ca/opengl/gl_camera.html#lookat

    Args:
        eye: camera position in world coordinates
        target: the point the camera looks at
        up: which direction is "up" for the camera
            https://stackoverflow.com/questions/10635947/what-exactly-is-the-up-vector-in-opengls-lookat-function
            Defaults to (0, -1, 0): models exported from blender appear to
            have a flipped y axis, so the camera's up is flipped to match.
    """
    # Camera basis vectors: f = forward, l = left, u = up.
    f = normalize(eye - target)
    l = normalize(cross_product(up, f))  # noqa: E741
    u = cross_product(f, l)

    # Rotate into the camera basis...
    rotate_matrix = Mat4d(
        [[l.x, l.y, l.z, 0], [u.x, u.y, u.z, 0], [f.x, f.y, f.z, 0], [0, 0, 0, 1.0]]
    )
    # ...after translating the world so the eye sits at the origin.
    translate_matrix = Mat4d(
        [[1, 0, 0, -eye.x], [0, 1, 0, -eye.y], [0, 0, 1, -eye.z], [0, 0, 0, 1.0]]
    )

    return Mat4d(value=(rotate_matrix * translate_matrix).value)
235
+
236
+
237
def perspective_project(r, t, n, f, b=None, l=None):  # noqa: E741
    """
    Build the perspective projection matrix.

    Purpose:
        Map camera-space coordinates onto the near plane so that visible
        points land in the (-1, 1) range (clip/NDC space).

    How:
        x and y follow from similar triangles; z is derived assuming the
        near plane maps to -1 and the far plane to 1.
        http://www.songho.ca/opengl/gl_projectionmatrix.html
        https://www.scratchapixel.com/lessons/3d-basic-rendering/perspective-and-orthographic-projection-matrix/opengl-perspective-projection-matrix

    General frustum matrix:
        [
        2n/(r-l)  0         (r+l)/(r-l)   0
        0         2n/(t-b)  (t+b)/(t-b)   0
        0         0         -(f+n)/(f-n)  (-2*f*n)/(f-n)
        0         0         -1            0
        ]

    Because our near plane is a rectangle symmetric about the origin
    (l = -r, b = -t), the matrix simplifies to:
        [
        n/r  0    0             0
        0    n/t  0             0
        0    0    -(f+n)/(f-n)  (-2*f*n)/(f-n)
        0    0    -1            0
        ]

    Args:
        r: right, t: top, n: near, f: far, b: bottom, l: left
        (b and l are accepted but unused — symmetry makes them redundant)
    """
    return Mat4d(
        [
            [n / r, 0, 0, 0],
            [0, n / t, 0, 0],
            [0, 0, -(f + n) / (f - n), (-2 * f * n) / (f - n)],
            [0, 0, -1, 0],
        ]
    )
275
+
276
+
277
def draw(screen_vertices, world_vertices, model, canvas, wireframe=True):
    """standard algorithm

    Rasterize every triangle of ``model`` either as a wireframe outline
    or as a flat-shaded face (no depth buffering).
    """
    for triangle_indices in model.indices:
        # Vertex indices are 1-based (note the ``idx - 1``).
        vertex_group = [screen_vertices[idx - 1] for idx in triangle_indices]
        face = [Vec3d(world_vertices[idx - 1]) for idx in triangle_indices]
        if wireframe:
            draw_triangle(*vertex_group, canvas=canvas, color="black", wireframe=True)
        else:
            intensity = get_light_intensity(face)
            # Faces with non-positive intensity are skipped entirely.
            if intensity > 0:
                draw_triangle(
                    *vertex_group, canvas=canvas, color=(int(intensity * 255),) * 3
                )
291
+
292
+
293
def draw_with_z_buffer(screen_vertices, world_vertices, model, canvas):
    """ z-buffer algorithm

    Rasterize the model with depth resolution and texture sampling;
    the per-pixel work happens in the ``speedup`` extension.
    """
    intensities = []
    triangles = []
    for i, triangle_indices in enumerate(model.indices):
        # Vertex indices are 1-based (note the ``idx - 1``).
        screen_triangle = [screen_vertices[idx - 1] for idx in triangle_indices]
        uv_triangle = [model.uv_vertices[idx - 1] for idx in model.uv_indices[i]]
        world_triangle = [Vec3d(world_vertices[idx - 1]) for idx in triangle_indices]
        intensities.append(abs(get_light_intensity(world_triangle)))
        # take off the class to let Cython work
        # (each row concatenates a vertex's screen coords with its uv coords)
        triangles.append(
            [np.append(screen_triangle[i].arr, uv_triangle[i]) for i in range(3)]
        )

    faces = speedup.generate_faces(
        np.array(triangles, dtype=np.float64), model.texture_width, model.texture_height
    )
    for face_dots in faces:
        for dot in face_dots:
            # dot appears to be (face index, x, y, u, v) as produced by
            # speedup.generate_faces — TODO confirm against its source.
            intensity = intensities[dot[0]]
            u, v = dot[3], dot[4]
            color = model.texture_array[u, v]
            canvas.draw((dot[1], dot[2]), tuple(int(c * intensity) for c in color[:3]))
            # TODO: add object rendering mode (no texture)
            # canvas.draw((dot[1], dot[2]), (int(255 * intensity),) * 3)
319
+
320
+
321
def render(model, height, width, filename, cam_loc, wireframe=False):
    """
    Render ``model`` to an image file and return the resulting image.

    Args:
        model: the Model object
        height: canvas height
        width: canvas width
        filename: output picture file name
        cam_loc: camera location as an (x, y, z) triple
        wireframe: if True, draw outlines only instead of z-buffered texturing
    """
    # Identity model matrix: the mesh is used in its own coordinates.
    model_matrix = Mat4d([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
    # TODO: camera configration
    view_matrix = look_at(Vec3d(cam_loc[0], cam_loc[1], cam_loc[2]), Vec3d(0, 0, 0))
    projection_matrix = perspective_project(0.5, 0.5, 3, 1000)

    # World-space vertices are collected as a side effect of mvp() so the
    # shading code can use them later.
    world_vertices = []

    def mvp(v):
        # Model -> world -> camera -> clip space.
        world_vertex = model_matrix * v
        world_vertices.append(Vec4d(world_vertex))
        return projection_matrix * view_matrix * world_vertex

    def ndc(v):
        """
        Perspective divide: divide each component by w to obtain
        normalized device coordinates.
        """
        v = v.value
        w = v[3, 0]
        x, y, z = v[0, 0] / w, v[1, 0] / w, v[2, 0] / w
        return Mat4d([[x], [y], [z], [1 / w]])

    def viewport(v):
        # Map NDC (-1..1) to pixel coordinates; rescale z into (n, f).
        x = y = 0
        w, h = width, height
        n, f = 0.3, 1000
        return Vec3d(
            w * 0.5 * v.value[0, 0] + x + w * 0.5,
            h * 0.5 * v.value[1, 0] + y + h * 0.5,
            0.5 * (f - n) * v.value[2, 0] + 0.5 * (f + n),
        )

    # the render pipeline
    screen_vertices = [viewport(ndc(mvp(v))) for v in model.vertices]

    with Canvas(filename, height, width) as canvas:
        if wireframe:
            draw(screen_vertices, world_vertices, model, canvas)
        else:
            draw_with_z_buffer(screen_vertices, world_vertices, model, canvas)

        render_img = canvas.add_white_border().copy()
    return render_img
external/Metric3D/training/mono/configs/RAFTDecoder/vit.raft5.giant2.kitti.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Metric3D training config: ViT-giant2 backbone + RAFT decoder,
# trained on KITTI only. Inherits loss / model / dataset bases below.
_base_=['../_base_/losses/all_losses.py',
       '../_base_/models/encoder_decoder/dino_vit_giant2_reg.dpt_raft.py',

       '../_base_/datasets/nyu.py',
       '../_base_/datasets/kitti.py'
       ]

import numpy as np

# Decoder-head overrides on top of the inherited model config.
model=dict(
    decode_head=dict(
        type='RAFTDepthNormalDPT5',
        iters=8,
        n_downsample=2,
        detach=False,
    ),
)

# loss method
losses=dict(
    decoder_losses=[
        dict(type='VNLoss', sample_ratio=0.2, loss_weight=0.1),
        dict(type='GRUSequenceLoss', loss_weight=1.0, loss_gamma=0.9, stereo_sup=0),
        dict(type='DeNoConsistencyLoss', loss_weight=0.001, loss_fn='CEL', scale=2)
    ],
)

# Dataset groups used for training (a single KITTI group here).
data_array = [
    [
        dict(KITTI='KITTI_dataset'),
    ],
]


# configs of the canonical space
data_basic=dict(
    canonical_space = dict(
        # img_size=(540, 960),
        focal_length=1000.0,
    ),
    depth_range=(0, 1),
    depth_normalize=(0.1, 200),
    # crop_size=(544, 1216),
    # crop_size = (544, 992),
    crop_size = (616, 1064),  # %28 = 0
)

# online evaluation
# evaluation = dict(online_eval=True, interval=1000, metrics=['abs_rel', 'delta1', 'rmse'], multi_dataset_eval=True)
#log_interval = 100

interval = 4000
log_interval = 100
evaluation = dict(
    online_eval=False,
    interval=interval,
    metrics=['abs_rel', 'delta1', 'rmse', 'normal_mean', 'normal_rmse', 'normal_a1'],
    multi_dataset_eval=True,
    exclude=['DIML_indoor', 'GL3D', 'Tourism', 'MegaDepth'],
)

# save checkpoint during training, with '*_AMP' is employing the automatic mix precision training
checkpoint_config = dict(by_epoch=False, interval=interval)
runner = dict(type='IterBasedRunner_AMP', max_iters=20010)

# optimizer
optimizer = dict(
    type='AdamW',
    encoder=dict(lr=5e-7, betas=(0.9, 0.999), weight_decay=0, eps=1e-10),
    decoder=dict(lr=1e-5, betas=(0.9, 0.999), weight_decay=0, eps=1e-10),
    strict_match = True
)
# schedule
lr_config = dict(policy='poly',
                 warmup='linear',
                 warmup_iters=20,
                 warmup_ratio=1e-6,
                 power=0.9, min_lr=1e-8, by_epoch=False)

acc_batch = 1
batchsize_per_gpu = 2
thread_per_gpu = 2

# Per-dataset augmentation/pipeline overrides for KITTI.
KITTI_dataset=dict(
    data = dict(
        train=dict(
            pipeline=[dict(type='BGR2RGB'),
                      dict(type='LabelScaleCononical'),
                      dict(type='RandomResize',
                           prob=0.5,
                           ratio_range=(0.85, 1.15),
                           is_lidar=True),
                      dict(type='RandomCrop',
                           crop_size=(0,0),  # crop_size will be overwritten by data_basic configs
                           crop_type='rand',
                           ignore_label=-1,
                           padding=[0, 0, 0]),
                      dict(type='RandomEdgeMask',
                           mask_maxsize=50,
                           prob=0.2,
                           rgb_invalid=[0,0,0],
                           label_invalid=-1,),
                      dict(type='RandomHorizontalFlip',
                           prob=0.4),
                      dict(type='PhotoMetricDistortion',
                           to_gray_prob=0.1,
                           distortion_prob=0.1,),
                      dict(type='Weather',
                           prob=0.05),
                      dict(type='RandomBlur',
                           prob=0.05),
                      dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
                      dict(type='ToTensor'),
                      dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
                      ],
            #sample_size = 10,
        ),
        val=dict(
            pipeline=[dict(type='BGR2RGB'),
                      dict(type='LabelScaleCononical'),
                      dict(type='RandomCrop',
                           crop_size=(0,0),  # crop_size will be overwritten by data_basic configs
                           crop_type='center',
                           ignore_label=-1,
                           padding=[0, 0, 0]),
                      dict(type='ToTensor'),
                      dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
                      ],
            sample_size = 1200,
        ),
    ))
external/Metric3D/training/mono/configs/RAFTDecoder/vit.raft5.giant2.nyu.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Metric3D training config: ViT-giant2 backbone + RAFT decoder,
# trained on NYU only. Inherits loss / model / dataset bases below.
_base_=['../_base_/losses/all_losses.py',
       '../_base_/models/encoder_decoder/dino_vit_giant2_reg.dpt_raft.py',

       '../_base_/datasets/nyu.py',
       '../_base_/datasets/kitti.py'
       ]

import numpy as np

# Decoder-head overrides on top of the inherited model config.
model=dict(
    decode_head=dict(
        type='RAFTDepthNormalDPT5',
        iters=8,
        n_downsample=2,
        detach=False,
    ),
)

# loss method
losses=dict(
    decoder_losses=[
        dict(type='VNLoss', sample_ratio=0.2, loss_weight=1.0),
        dict(type='GRUSequenceLoss', loss_weight=1.0, loss_gamma=0.9, stereo_sup=0),
        dict(type='NormalBranchLoss', loss_weight=1.5, loss_fn='NLL_ours_GRU'),
        dict(type='DeNoConsistencyLoss', loss_weight=0.001, loss_fn='CEL', scale=2),
        dict(type='HDNRandomLoss', loss_weight=0.5, random_num=10),
        dict(type='HDSNRandomLoss', loss_weight=0.5, random_num=20, batch_limit=4),
        dict(type='PWNPlanesLoss', loss_weight=1),
    ],
)

# Dataset groups used for training (a single NYU group here).
data_array = [
    [
        dict(NYU='NYU_dataset'),
    ],
]


# configs of the canonical space
data_basic=dict(
    canonical_space = dict(
        # img_size=(540, 960),
        focal_length=1000.0,
    ),
    depth_range=(0, 1),
    depth_normalize=(0.1, 200),
    # crop_size=(544, 1216),
    # crop_size = (544, 992),
    crop_size = (616, 1064),  # %28 = 0
)

# online evaluation
# evaluation = dict(online_eval=True, interval=1000, metrics=['abs_rel', 'delta1', 'rmse'], multi_dataset_eval=True)
#log_interval = 100

interval = 4000
log_interval = 200
evaluation = dict(
    online_eval=False,
    interval=interval,
    metrics=['abs_rel', 'delta1', 'rmse', 'normal_mean', 'normal_rmse', 'normal_a1'],
    multi_dataset_eval=True,
    exclude=['DIML_indoor', 'GL3D', 'Tourism', 'MegaDepth'],
)

# save checkpoint during training, with '*_AMP' is employing the automatic mix precision training
checkpoint_config = dict(by_epoch=False, interval=interval)
runner = dict(type='IterBasedRunner_AMP', max_iters=20010)

# optimizer
optimizer = dict(
    type='AdamW',
    encoder=dict(lr=5e-7, betas=(0.9, 0.999), weight_decay=0, eps=1e-10),
    decoder=dict(lr=1e-5, betas=(0.9, 0.999), weight_decay=0, eps=1e-10),
    strict_match = True
)
# schedule
lr_config = dict(policy='poly',
                 warmup='linear',
                 warmup_iters=20,
                 warmup_ratio=1e-6,
                 power=0.9, min_lr=1e-8, by_epoch=False)

acc_batch = 1
batchsize_per_gpu = 2
thread_per_gpu = 2

# Per-dataset augmentation/pipeline overrides for NYU.
NYU_dataset=dict(
    data = dict(
        train=dict(
            pipeline=[dict(type='BGR2RGB'),
                      dict(type='LabelScaleCononical'),
                      dict(type='RandomResize',
                           prob=0.5,
                           ratio_range=(0.85, 1.15),
                           is_lidar=True),
                      dict(type='RandomCrop',
                           crop_size=(0,0),  # crop_size will be overwritten by data_basic configs
                           crop_type='rand',
                           ignore_label=-1,
                           padding=[0, 0, 0]),
                      dict(type='RandomEdgeMask',
                           mask_maxsize=50,
                           prob=0.2,
                           rgb_invalid=[0,0,0],
                           label_invalid=-1,),
                      dict(type='RandomHorizontalFlip',
                           prob=0.4),
                      dict(type='PhotoMetricDistortion',
                           to_gray_prob=0.1,
                           distortion_prob=0.1,),
                      dict(type='Weather',
                           prob=0.05),
                      dict(type='RandomBlur',
                           prob=0.05),
                      dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
                      dict(type='ToTensor'),
                      dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
                      ],
            #sample_size = 10,
        ),
        val=dict(
            pipeline=[dict(type='BGR2RGB'),
                      dict(type='LabelScaleCononical'),
                      dict(type='RandomCrop',
                           crop_size=(0,0),  # crop_size will be overwritten by data_basic configs
                           crop_type='center',
                           ignore_label=-1,
                           padding=[0, 0, 0]),
                      dict(type='ToTensor'),
                      dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
                      ],
            sample_size = 1200,
        ),
    ))
external/Metric3D/training/mono/configs/RAFTDecoder/vit.raft5.giant2.py ADDED
@@ -0,0 +1,1048 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ _base_=['../_base_/losses/all_losses.py',
2
+ '../_base_/models/encoder_decoder/dino_vit_giant2_reg.dpt_raft.py',
3
+
4
+ '../_base_/datasets/ddad.py',
5
+ '../_base_/datasets/_data_base_.py',
6
+ '../_base_/datasets/argovers2.py',
7
+ '../_base_/datasets/cityscapes.py',
8
+ '../_base_/datasets/drivingstereo.py',
9
+ '../_base_/datasets/dsec.py',
10
+ '../_base_/datasets/lyft.py',
11
+ '../_base_/datasets/mapillary_psd.py',
12
+ '../_base_/datasets/diml.py',
13
+ '../_base_/datasets/taskonomy.py',
14
+ '../_base_/datasets/uasol.py',
15
+ '../_base_/datasets/pandaset.py',
16
+ '../_base_/datasets/waymo.py',
17
+
18
+ '../_base_/default_runtime.py',
19
+ '../_base_/schedules/schedule_1m.py',
20
+
21
+ '../_base_/datasets/hm3d.py',
22
+ '../_base_/datasets/matterport3d.py',
23
+ '../_base_/datasets/replica.py',
24
+ '../_base_/datasets/vkitti.py',
25
+ ]
26
+
27
+ import numpy as np
28
+ model=dict(
29
+ decode_head=dict(
30
+ type='RAFTDepthNormalDPT5',
31
+ iters=8,
32
+ n_downsample=2,
33
+ detach=False,
34
+ ),
35
+ )
36
+
37
+ # loss method
38
+ losses=dict(
39
+ decoder_losses=[
40
+ dict(type='VNLoss', sample_ratio=0.2, loss_weight=1.0),
41
+ dict(type='GRUSequenceLoss', loss_weight=0.5, loss_gamma=0.9, stereo_sup=0.0),
42
+ dict(type='SkyRegularizationLoss', loss_weight=0.001, sample_ratio=0.4, regress_value=200, normal_regress=[0, 0, -1]),
43
+ dict(type='HDNRandomLoss', loss_weight=0.5, random_num=10),
44
+ dict(type='HDSNRandomLoss', loss_weight=0.5, random_num=20, batch_limit=4),
45
+ dict(type='PWNPlanesLoss', loss_weight=1),
46
+ dict(type='NormalBranchLoss', loss_weight=1.5, loss_fn='NLL_ours_GRU'),
47
+ dict(type='DeNoConsistencyLoss', loss_weight=0.01, loss_fn='CEL', scale=2, depth_detach=True)
48
+ ],
49
+ gru_losses=[
50
+ dict(type='SkyRegularizationLoss', loss_weight=0.001, sample_ratio=0.4, regress_value=200, normal_regress=[0, 0, -1]),
51
+ ],
52
+ )
53
+
54
+ data_array = [
55
+ # Outdoor 1
56
+ [
57
+ dict(UASOL='UASOL_dataset'), #13.6w
58
+ dict(Cityscapes_trainextra='Cityscapes_dataset'), #1.8w
59
+ dict(Cityscapes_sequence='Cityscapes_dataset'), #13.5w
60
+ dict(DIML='DIML_dataset'), # 12.2w
61
+ dict(Waymo='Waymo_dataset'), # 99w
62
+ ],
63
+ # Outdoor 2
64
+ [
65
+ dict(DSEC='DSEC_dataset'),
66
+ dict(Mapillary_PSD='MapillaryPSD_dataset'), # 74.2w
67
+ dict(DrivingStereo='DrivingStereo_dataset'), # 17.6w
68
+ dict(Argovers2='Argovers2_dataset'), # 285.6w
69
+ ],
70
+ # Outdoor 3
71
+ [
72
+ dict(Lyft='Lyft_dataset'), #15.8w
73
+ dict(DDAD='DDAD_dataset'), #7.4w
74
+ dict(Pandaset='Pandaset_dataset'), #3.8w
75
+ dict(Virtual_KITTI='VKITTI_dataset'), # 3.7w # syn
76
+ ],
77
+ #Indoor 1
78
+ [
79
+ dict(Replica='Replica_dataset'), # 5.6w # syn
80
+ dict(Replica_gso='Replica_dataset'), # 10.7w # syn
81
+ dict(Hypersim='Hypersim_dataset'), # 2.4w
82
+ dict(ScanNetAll='ScanNetAll_dataset'),
83
+ ],
84
+ # Indoor 2
85
+ [
86
+ dict(Taskonomy='Taskonomy_dataset'), #447.2w
87
+ dict(Matterport3D='Matterport3D_dataset'), #14.4w
88
+ dict(HM3D='HM3D_dataset'), # 200w, very noisy, sampled some data
89
+ ],
90
+ ]
91
+
92
+
93
+
94
+ # configs of the canonical space
95
+ data_basic=dict(
96
+ canonical_space = dict(
97
+ # img_size=(540, 960),
98
+ focal_length=1000.0,
99
+ ),
100
+ depth_range=(0, 1),
101
+ depth_normalize=(0.1, 200),
102
+ # crop_size=(544, 1216),
103
+ # crop_size = (544, 992),
104
+ crop_size = (616, 1064), # %28 = 0
105
+ )
106
+
107
+ log_interval = 100
108
+ acc_batch = 1
109
+ # online evaluation
110
+ # evaluation = dict(online_eval=True, interval=1000, metrics=['abs_rel', 'delta1', 'rmse'], multi_dataset_eval=True)
111
+ interval = 40000
112
+ evaluation = dict(
113
+ online_eval=False,
114
+ interval=interval,
115
+ metrics=['abs_rel', 'delta1', 'rmse', 'normal_mean', 'normal_rmse', 'normal_a1'],
116
+ multi_dataset_eval=True,
117
+ exclude=['DIML_indoor', 'GL3D', 'Tourism', 'MegaDepth'],
118
+ )
119
+
120
+ # save checkpoint during training, with '*_AMP' is employing the automatic mix precision training
121
+ checkpoint_config = dict(by_epoch=False, interval=interval)
122
+ runner = dict(type='IterBasedRunner_AMP', max_iters=800010)
123
+
124
+ # optimizer
125
+ optimizer = dict(
126
+ type='AdamW',
127
+ # encoder=dict(lr=1e-4, betas=(0.9, 0.999), weight_decay=0.01, eps=1e-6),
128
+ encoder=dict(lr=8e-6, betas=(0.9, 0.999), weight_decay=1e-3, eps=1e-6),
129
+ decoder=dict(lr=1e-4, betas=(0.9, 0.999), weight_decay=0.01, eps=1e-6),
130
+ #strict_match=True
131
+ )
132
+ # schedule
133
+ lr_config = dict(policy='poly',
134
+ warmup='linear',
135
+ warmup_iters=1000,
136
+ warmup_ratio=1e-6,
137
+ power=0.9, min_lr=1e-6, by_epoch=False)
138
+
139
+ batchsize_per_gpu = 3
140
+ thread_per_gpu = 1
141
+
142
+ Argovers2_dataset=dict(
143
+ data = dict(
144
+ train=dict(
145
+ pipeline=[dict(type='BGR2RGB'),
146
+ dict(type='LabelScaleCononical'),
147
+ dict(type='RandomResize',
148
+ prob=0.5,
149
+ ratio_range=(0.85, 1.15),
150
+ is_lidar=True),
151
+ dict(type='RandomCrop',
152
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
153
+ crop_type='rand',
154
+ ignore_label=-1,
155
+ padding=[0, 0, 0]),
156
+ dict(type='RandomEdgeMask',
157
+ mask_maxsize=50,
158
+ prob=0.2,
159
+ rgb_invalid=[0,0,0],
160
+ label_invalid=-1,),
161
+ dict(type='RandomHorizontalFlip',
162
+ prob=0.4),
163
+ dict(type='PhotoMetricDistortion',
164
+ to_gray_prob=0.1,
165
+ distortion_prob=0.1,),
166
+ dict(type='Weather',
167
+ prob=0.05),
168
+ dict(type='RandomBlur',
169
+ prob=0.05),
170
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
171
+ dict(type='ToTensor'),
172
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
173
+ ],
174
+ #sample_size = 10000,
175
+ ),
176
+ val=dict(
177
+ pipeline=[dict(type='BGR2RGB'),
178
+ dict(type='LabelScaleCononical'),
179
+ dict(type='RandomCrop',
180
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
181
+ crop_type='center',
182
+ ignore_label=-1,
183
+ padding=[0, 0, 0]),
184
+ dict(type='ToTensor'),
185
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
186
+ ],
187
+ sample_size = 1200,
188
+ ),
189
+ ))
190
+ Cityscapes_dataset=dict(
191
+ data = dict(
192
+ train=dict(
193
+ pipeline=[dict(type='BGR2RGB'),
194
+ dict(type='LabelScaleCononical'),
195
+ dict(type='RandomResize',
196
+ ratio_range=(0.85, 1.15),
197
+ is_lidar=False),
198
+ dict(type='RandomCrop',
199
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
200
+ crop_type='rand',
201
+ ignore_label=-1,
202
+ padding=[0, 0, 0]),
203
+ dict(type='RandomEdgeMask',
204
+ mask_maxsize=50,
205
+ prob=0.2,
206
+ rgb_invalid=[0,0,0],
207
+ label_invalid=-1,),
208
+ dict(type='RandomHorizontalFlip',
209
+ prob=0.4),
210
+ dict(type='PhotoMetricDistortion',
211
+ to_gray_prob=0.1,
212
+ distortion_prob=0.1,),
213
+ dict(type='Weather',
214
+ prob=0.05),
215
+ dict(type='RandomBlur',
216
+ prob=0.05),
217
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
218
+ dict(type='ToTensor'),
219
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
220
+ ],
221
+ #sample_size = 10000,
222
+ ),
223
+ val=dict(
224
+ pipeline=[dict(type='BGR2RGB'),
225
+ dict(type='LabelScaleCononical'),
226
+ dict(type='RandomCrop',
227
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
228
+ crop_type='center',
229
+ ignore_label=-1,
230
+ padding=[0, 0, 0]),
231
+ dict(type='ToTensor'),
232
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
233
+ ],
234
+ sample_size = 1200,
235
+ ),
236
+ ))
237
+ DIML_dataset=dict(
238
+ data = dict(
239
+ train=dict(
240
+ pipeline=[dict(type='BGR2RGB'),
241
+ dict(type='LabelScaleCononical'),
242
+ dict(type='RandomResize',
243
+ ratio_range=(0.85, 1.15),
244
+ is_lidar=False),
245
+ dict(type='RandomCrop',
246
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
247
+ crop_type='rand',
248
+ ignore_label=-1,
249
+ padding=[0, 0, 0]),
250
+ dict(type='RandomEdgeMask',
251
+ mask_maxsize=50,
252
+ prob=0.2,
253
+ rgb_invalid=[0,0,0],
254
+ label_invalid=-1,),
255
+ dict(type='RandomHorizontalFlip',
256
+ prob=0.4),
257
+ dict(type='PhotoMetricDistortion',
258
+ to_gray_prob=0.1,
259
+ distortion_prob=0.1,),
260
+ dict(type='Weather',
261
+ prob=0.05),
262
+ dict(type='RandomBlur',
263
+ prob=0.05),
264
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
265
+ dict(type='ToTensor'),
266
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
267
+ ],
268
+ #sample_size = 10000,
269
+ ),
270
+ val=dict(
271
+ pipeline=[dict(type='BGR2RGB'),
272
+ dict(type='LabelScaleCononical'),
273
+ dict(type='RandomCrop',
274
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
275
+ crop_type='center',
276
+ ignore_label=-1,
277
+ padding=[0, 0, 0]),
278
+ dict(type='ToTensor'),
279
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
280
+ ],
281
+ sample_size = 1200,
282
+ ),
283
+ ))
284
+ Lyft_dataset=dict(
285
+ data = dict(
286
+ train=dict(
287
+ pipeline=[dict(type='BGR2RGB'),
288
+ dict(type='LabelScaleCononical'),
289
+ dict(type='RandomResize',
290
+ prob=0.5,
291
+ ratio_range=(0.85, 1.15),
292
+ is_lidar=True),
293
+ dict(type='RandomCrop',
294
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
295
+ crop_type='rand',
296
+ ignore_label=-1,
297
+ padding=[0, 0, 0]),
298
+ dict(type='RandomEdgeMask',
299
+ mask_maxsize=50,
300
+ prob=0.2,
301
+ rgb_invalid=[0,0,0],
302
+ label_invalid=-1,),
303
+ dict(type='RandomHorizontalFlip',
304
+ prob=0.4),
305
+ dict(type='PhotoMetricDistortion',
306
+ to_gray_prob=0.1,
307
+ distortion_prob=0.1,),
308
+ dict(type='Weather',
309
+ prob=0.05),
310
+ dict(type='RandomBlur',
311
+ prob=0.05),
312
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
313
+ dict(type='ToTensor'),
314
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
315
+ ],
316
+ #sample_size = 10000,
317
+ ),
318
+ val=dict(
319
+ pipeline=[dict(type='BGR2RGB'),
320
+ dict(type='LabelScaleCononical'),
321
+ dict(type='RandomCrop',
322
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
323
+ crop_type='center',
324
+ ignore_label=-1,
325
+ padding=[0, 0, 0]),
326
+ dict(type='ToTensor'),
327
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
328
+ ],
329
+ sample_size = 1200,
330
+ ),
331
+ ))
332
+ DDAD_dataset=dict(
333
+ data = dict(
334
+ train=dict(
335
+ pipeline=[dict(type='BGR2RGB'),
336
+ dict(type='LabelScaleCononical'),
337
+ dict(type='RandomResize',
338
+ prob=0.5,
339
+ ratio_range=(0.85, 1.15),
340
+ is_lidar=True),
341
+ dict(type='RandomCrop',
342
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
343
+ crop_type='rand',
344
+ ignore_label=-1,
345
+ padding=[0, 0, 0]),
346
+ dict(type='RandomEdgeMask',
347
+ mask_maxsize=50,
348
+ prob=0.2,
349
+ rgb_invalid=[0,0,0],
350
+ label_invalid=-1,),
351
+ dict(type='RandomHorizontalFlip',
352
+ prob=0.4),
353
+ dict(type='PhotoMetricDistortion',
354
+ to_gray_prob=0.1,
355
+ distortion_prob=0.1,),
356
+ dict(type='Weather',
357
+ prob=0.05),
358
+ dict(type='RandomBlur',
359
+ prob=0.05),
360
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
361
+ dict(type='ToTensor'),
362
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
363
+ ],
364
+ #sample_size = 10000,
365
+ ),
366
+ val=dict(
367
+ pipeline=[dict(type='BGR2RGB'),
368
+ dict(type='LabelScaleCononical'),
369
+ dict(type='RandomCrop',
370
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
371
+ crop_type='center',
372
+ ignore_label=-1,
373
+ padding=[0, 0, 0]),
374
+ dict(type='ToTensor'),
375
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
376
+ ],
377
+ # sample_size = 1200,
378
+ ),
379
+ ))
380
+ DSEC_dataset=dict(
381
+ data = dict(
382
+ train=dict(
383
+ pipeline=[dict(type='BGR2RGB'),
384
+ dict(type='LabelScaleCononical'),
385
+ dict(type='RandomResize',
386
+ prob=0.5,
387
+ ratio_range=(0.85, 1.15),
388
+ is_lidar=True),
389
+ dict(type='RandomCrop',
390
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
391
+ crop_type='rand',
392
+ ignore_label=-1,
393
+ padding=[0, 0, 0]),
394
+ dict(type='RandomEdgeMask',
395
+ mask_maxsize=50,
396
+ prob=0.2,
397
+ rgb_invalid=[0,0,0],
398
+ label_invalid=-1,),
399
+ dict(type='RandomHorizontalFlip',
400
+ prob=0.4),
401
+ dict(type='PhotoMetricDistortion',
402
+ to_gray_prob=0.1,
403
+ distortion_prob=0.1,),
404
+ dict(type='Weather',
405
+ prob=0.05),
406
+ dict(type='RandomBlur',
407
+ prob=0.05),
408
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
409
+ dict(type='ToTensor'),
410
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
411
+ ],
412
+ #sample_size = 10000,
413
+ ),
414
+ val=dict(
415
+ pipeline=[dict(type='BGR2RGB'),
416
+ dict(type='LabelScaleCononical'),
417
+ dict(type='RandomCrop',
418
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
419
+ crop_type='center',
420
+ ignore_label=-1,
421
+ padding=[0, 0, 0]),
422
+ dict(type='ToTensor'),
423
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
424
+ ],
425
+ sample_size = 1200,
426
+ ),
427
+ ))
428
+ DrivingStereo_dataset=dict(
429
+ data = dict(
430
+ train=dict(
431
+ pipeline=[dict(type='BGR2RGB'),
432
+ dict(type='LabelScaleCononical'),
433
+ dict(type='RandomResize',
434
+ ratio_range=(0.85, 1.15),
435
+ is_lidar=False),
436
+ dict(type='RandomCrop',
437
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
438
+ crop_type='rand',
439
+ ignore_label=-1,
440
+ padding=[0, 0, 0]),
441
+ dict(type='RandomEdgeMask',
442
+ mask_maxsize=50,
443
+ prob=0.2,
444
+ rgb_invalid=[0,0,0],
445
+ label_invalid=-1,),
446
+ dict(type='RandomHorizontalFlip',
447
+ prob=0.4),
448
+ dict(type='PhotoMetricDistortion',
449
+ to_gray_prob=0.1,
450
+ distortion_prob=0.1,),
451
+ dict(type='Weather',
452
+ prob=0.05),
453
+ dict(type='RandomBlur',
454
+ prob=0.05),
455
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
456
+ dict(type='ToTensor'),
457
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
458
+ ],
459
+ #sample_size = 10000,
460
+ ),
461
+ val=dict(
462
+ pipeline=[dict(type='BGR2RGB'),
463
+ dict(type='LabelScaleCononical'),
464
+ dict(type='RandomCrop',
465
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
466
+ crop_type='center',
467
+ ignore_label=-1,
468
+ padding=[0, 0, 0]),
469
+ dict(type='ToTensor'),
470
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
471
+ ],
472
+ sample_size = 1200,
473
+ ),
474
+ ))
475
+ MapillaryPSD_dataset=dict(
476
+ data = dict(
477
+ train=dict(
478
+ pipeline=[dict(type='BGR2RGB'),
479
+ dict(type='LabelScaleCononical'),
480
+ dict(type='RandomResize',
481
+ prob=0.5,
482
+ ratio_range=(0.85, 1.15),
483
+ is_lidar=True),
484
+ dict(type='RandomCrop',
485
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
486
+ crop_type='rand',
487
+ ignore_label=-1,
488
+ padding=[0, 0, 0]),
489
+ dict(type='RandomEdgeMask',
490
+ mask_maxsize=50,
491
+ prob=0.2,
492
+ rgb_invalid=[0,0,0],
493
+ label_invalid=-1,),
494
+ dict(type='RandomHorizontalFlip',
495
+ prob=0.4),
496
+ dict(type='PhotoMetricDistortion',
497
+ to_gray_prob=0.1,
498
+ distortion_prob=0.1,),
499
+ dict(type='Weather',
500
+ prob=0.05),
501
+ dict(type='RandomBlur',
502
+ prob=0.05),
503
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
504
+ dict(type='ToTensor'),
505
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
506
+ ],
507
+ #sample_size = 10000,
508
+ ),
509
+ val=dict(
510
+ pipeline=[dict(type='BGR2RGB'),
511
+ dict(type='LabelScaleCononical'),
512
+ dict(type='RandomCrop',
513
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
514
+ crop_type='center',
515
+ ignore_label=-1,
516
+ padding=[0, 0, 0]),
517
+ dict(type='ToTensor'),
518
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
519
+ ],
520
+ sample_size = 1200,
521
+ ),
522
+ ))
523
+ Pandaset_dataset=dict(
524
+ data = dict(
525
+ train=dict(
526
+ pipeline=[dict(type='BGR2RGB'),
527
+ dict(type='LabelScaleCononical'),
528
+ dict(type='RandomResize',
529
+ prob=0.5,
530
+ ratio_range=(0.85, 1.15),
531
+ is_lidar=True),
532
+ dict(type='RandomCrop',
533
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
534
+ crop_type='rand',
535
+ ignore_label=-1,
536
+ padding=[0, 0, 0]),
537
+ dict(type='RandomEdgeMask',
538
+ mask_maxsize=50,
539
+ prob=0.2,
540
+ rgb_invalid=[0,0,0],
541
+ label_invalid=-1,),
542
+ dict(type='RandomHorizontalFlip',
543
+ prob=0.4),
544
+ dict(type='PhotoMetricDistortion',
545
+ to_gray_prob=0.1,
546
+ distortion_prob=0.1,),
547
+ dict(type='Weather',
548
+ prob=0.05),
549
+ dict(type='RandomBlur',
550
+ prob=0.05),
551
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
552
+ dict(type='ToTensor'),
553
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
554
+ ],
555
+ #sample_size = 10000,
556
+ ),
557
+ val=dict(
558
+ pipeline=[dict(type='BGR2RGB'),
559
+ dict(type='LabelScaleCononical'),
560
+ dict(type='RandomCrop',
561
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
562
+ crop_type='center',
563
+ ignore_label=-1,
564
+ padding=[0, 0, 0]),
565
+ dict(type='ToTensor'),
566
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
567
+ ],
568
+ sample_size = 1200,
569
+ ),
570
+ ))
571
+ Taskonomy_dataset=dict(
572
+ data = dict(
573
+ train=dict(
574
+ pipeline=[dict(type='BGR2RGB'),
575
+ dict(type='LabelScaleCononical'),
576
+ dict(type='RandomResize',
577
+ prob=0.5,
578
+ ratio_range=(0.85, 1.15),
579
+ is_lidar=False),
580
+ dict(type='RandomCrop',
581
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
582
+ crop_type='rand',
583
+ ignore_label=-1,
584
+ padding=[0, 0, 0]),
585
+ dict(type='RandomEdgeMask',
586
+ mask_maxsize=50,
587
+ prob=0.2,
588
+ rgb_invalid=[0,0,0],
589
+ label_invalid=-1,),
590
+ dict(type='RandomHorizontalFlip',
591
+ prob=0.4),
592
+ dict(type='PhotoMetricDistortion',
593
+ to_gray_prob=0.1,
594
+ distortion_prob=0.1,),
595
+ dict(type='Weather',
596
+ prob=0.05),
597
+ dict(type='RandomBlur',
598
+ prob=0.05),
599
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
600
+ dict(type='ToTensor'),
601
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
602
+ ],
603
+ #sample_size = 10000,
604
+ ),
605
+ val=dict(
606
+ pipeline=[dict(type='BGR2RGB'),
607
+ dict(type='LabelScaleCononical'),
608
+ dict(type='RandomCrop',
609
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
610
+ crop_type='center',
611
+ ignore_label=-1,
612
+ padding=[0, 0, 0]),
613
+ dict(type='ToTensor'),
614
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
615
+ ],
616
+ sample_size = 1200,
617
+ ),
618
+ ))
619
+ UASOL_dataset=dict(
620
+ data = dict(
621
+ train=dict(
622
+ pipeline=[dict(type='BGR2RGB'),
623
+ dict(type='LabelScaleCononical'),
624
+ dict(type='RandomResize',
625
+ prob=0.5,
626
+ ratio_range=(0.85, 1.15),
627
+ is_lidar=False),
628
+ dict(type='RandomCrop',
629
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
630
+ crop_type='rand',
631
+ ignore_label=-1,
632
+ padding=[0, 0, 0]),
633
+ dict(type='RandomEdgeMask',
634
+ mask_maxsize=50,
635
+ prob=0.2,
636
+ rgb_invalid=[0,0,0],
637
+ label_invalid=-1,),
638
+ dict(type='RandomHorizontalFlip',
639
+ prob=0.4),
640
+ dict(type='PhotoMetricDistortion',
641
+ to_gray_prob=0.1,
642
+ distortion_prob=0.1,),
643
+ dict(type='Weather',
644
+ prob=0.05),
645
+ dict(type='RandomBlur',
646
+ prob=0.05),
647
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
648
+ dict(type='ToTensor'),
649
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
650
+ ],
651
+ #sample_size = 10000,
652
+ ),
653
+ val=dict(
654
+ pipeline=[dict(type='BGR2RGB'),
655
+ dict(type='LabelScaleCononical'),
656
+ dict(type='RandomCrop',
657
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
658
+ crop_type='center',
659
+ ignore_label=-1,
660
+ padding=[0, 0, 0]),
661
+ dict(type='ToTensor'),
662
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
663
+ ],
664
+ sample_size = 1200,
665
+ ),
666
+ ))
667
+ Waymo_dataset=dict(
668
+ data = dict(
669
+ train=dict(
670
+ pipeline=[dict(type='BGR2RGB'),
671
+ dict(type='LabelScaleCononical'),
672
+ dict(type='RandomResize',
673
+ prob=0.5,
674
+ ratio_range=(0.85, 1.15),
675
+ is_lidar=True),
676
+ dict(type='RandomCrop',
677
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
678
+ crop_type='rand',
679
+ ignore_label=-1,
680
+ padding=[0, 0, 0]),
681
+ dict(type='RandomEdgeMask',
682
+ mask_maxsize=50,
683
+ prob=0.2,
684
+ rgb_invalid=[0,0,0],
685
+ label_invalid=-1,),
686
+ dict(type='RandomHorizontalFlip',
687
+ prob=0.4),
688
+ dict(type='PhotoMetricDistortion',
689
+ to_gray_prob=0.1,
690
+ distortion_prob=0.1,),
691
+ dict(type='Weather',
692
+ prob=0.05),
693
+ dict(type='RandomBlur',
694
+ prob=0.05),
695
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
696
+ dict(type='ToTensor'),
697
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
698
+ ],
699
+ #sample_size = 10000,
700
+ ),
701
+ val=dict(
702
+ pipeline=[dict(type='BGR2RGB'),
703
+ dict(type='LabelScaleCononical'),
704
+ dict(type='RandomCrop',
705
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
706
+ crop_type='center',
707
+ ignore_label=-1,
708
+ padding=[0, 0, 0]),
709
+ dict(type='ToTensor'),
710
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
711
+ ],
712
+ sample_size = 1200,
713
+ ),
714
+ ))
715
+ Matterport3D_dataset=dict(
716
+ data = dict(
717
+ train=dict(
718
+ pipeline=[dict(type='BGR2RGB'),
719
+ dict(type='LabelScaleCononical'),
720
+ dict(type='RandomResize',
721
+ prob=0.5,
722
+ ratio_range=(0.85, 1.15),
723
+ is_lidar=False),
724
+ dict(type='RandomCrop',
725
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
726
+ crop_type='rand',
727
+ ignore_label=-1,
728
+ padding=[0, 0, 0]),
729
+ dict(type='RandomEdgeMask',
730
+ mask_maxsize=50,
731
+ prob=0.2,
732
+ rgb_invalid=[0,0,0],
733
+ label_invalid=-1,),
734
+ dict(type='RandomHorizontalFlip',
735
+ prob=0.4),
736
+ dict(type='PhotoMetricDistortion',
737
+ to_gray_prob=0.1,
738
+ distortion_prob=0.1,),
739
+ dict(type='Weather',
740
+ prob=0.05),
741
+ dict(type='RandomBlur',
742
+ prob=0.05),
743
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
744
+ dict(type='ToTensor'),
745
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
746
+ ],
747
+ #sample_size = 10000,
748
+ ),
749
+ val=dict(
750
+ pipeline=[dict(type='BGR2RGB'),
751
+ dict(type='LabelScaleCononical'),
752
+ dict(type='RandomCrop',
753
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
754
+ crop_type='center',
755
+ ignore_label=-1,
756
+ padding=[0, 0, 0]),
757
+ dict(type='ToTensor'),
758
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
759
+ ],
760
+ sample_size = 1200,
761
+ ),
762
+ ))
763
+ Replica_dataset=dict(
764
+ data = dict(
765
+ train=dict(
766
+ pipeline=[dict(type='BGR2RGB'),
767
+ dict(type='LabelScaleCononical'),
768
+ dict(type='RandomResize',
769
+ prob=0.5,
770
+ ratio_range=(0.85, 1.15),
771
+ is_lidar=False),
772
+ dict(type='RandomCrop',
773
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
774
+ crop_type='rand',
775
+ ignore_label=-1,
776
+ padding=[0, 0, 0]),
777
+ dict(type='RandomEdgeMask',
778
+ mask_maxsize=50,
779
+ prob=0.2,
780
+ rgb_invalid=[0,0,0],
781
+ label_invalid=-1,),
782
+ dict(type='RandomHorizontalFlip',
783
+ prob=0.4),
784
+ dict(type='PhotoMetricDistortion',
785
+ to_gray_prob=0.1,
786
+ distortion_prob=0.1,),
787
+ dict(type='Weather',
788
+ prob=0.05),
789
+ dict(type='RandomBlur',
790
+ prob=0.05),
791
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
792
+ dict(type='ToTensor'),
793
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
794
+ ],
795
+ #sample_size = 10000,
796
+ ),
797
+ val=dict(
798
+ pipeline=[dict(type='BGR2RGB'),
799
+ dict(type='LabelScaleCononical'),
800
+ dict(type='RandomCrop',
801
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
802
+ crop_type='center',
803
+ ignore_label=-1,
804
+ padding=[0, 0, 0]),
805
+ dict(type='ToTensor'),
806
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
807
+ ],
808
+ sample_size = 1200,
809
+ ),
810
+ ))
811
+ VKITTI_dataset=dict(
812
+ data = dict(
813
+ train=dict(
814
+ pipeline=[dict(type='BGR2RGB'),
815
+ dict(type='LabelScaleCononical'),
816
+ dict(type='RandomResize',
817
+ prob=0.5,
818
+ ratio_range=(0.85, 1.15),
819
+ is_lidar=False),
820
+ dict(type='RandomCrop',
821
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
822
+ crop_type='rand',
823
+ ignore_label=-1,
824
+ padding=[0, 0, 0]),
825
+ dict(type='RandomEdgeMask',
826
+ mask_maxsize=50,
827
+ prob=0.2,
828
+ rgb_invalid=[0,0,0],
829
+ label_invalid=-1,),
830
+ dict(type='RandomHorizontalFlip',
831
+ prob=0.4),
832
+ dict(type='PhotoMetricDistortion',
833
+ to_gray_prob=0.1,
834
+ distortion_prob=0.1,),
835
+ dict(type='Weather',
836
+ prob=0.05),
837
+ dict(type='RandomBlur',
838
+ prob=0.05),
839
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
840
+ dict(type='ToTensor'),
841
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
842
+ ],
843
+ #sample_size = 10000,
844
+ ),
845
+ val=dict(
846
+ pipeline=[dict(type='BGR2RGB'),
847
+ dict(type='LabelScaleCononical'),
848
+ dict(type='RandomCrop',
849
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
850
+ crop_type='center',
851
+ ignore_label=-1,
852
+ padding=[0, 0, 0]),
853
+ dict(type='ToTensor'),
854
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
855
+ ],
856
+ sample_size = 1200,
857
+ ),
858
+ ))
859
+ HM3D_dataset=dict(
860
+ data = dict(
861
+ train=dict(
862
+ pipeline=[dict(type='BGR2RGB'),
863
+ dict(type='LabelScaleCononical'),
864
+ dict(type='RandomResize',
865
+ prob=0.5,
866
+ ratio_range=(0.75, 1.3),
867
+ is_lidar=False),
868
+ dict(type='RandomCrop',
869
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
870
+ crop_type='rand',
871
+ ignore_label=-1,
872
+ padding=[0, 0, 0]),
873
+ dict(type='RandomEdgeMask',
874
+ mask_maxsize=50,
875
+ prob=0.2,
876
+ rgb_invalid=[0,0,0],
877
+ label_invalid=-1,),
878
+ dict(type='RandomHorizontalFlip',
879
+ prob=0.4),
880
+ dict(type='PhotoMetricDistortion',
881
+ to_gray_prob=0.1,
882
+ distortion_prob=0.1,),
883
+ dict(type='Weather',
884
+ prob=0.05),
885
+ dict(type='RandomBlur',
886
+ prob=0.05),
887
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
888
+ dict(type='ToTensor'),
889
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
890
+ ],
891
+ #sample_size = 10000,
892
+ ),
893
+ val=dict(
894
+ pipeline=[dict(type='BGR2RGB'),
895
+ dict(type='LabelScaleCononical'),
896
+ dict(type='RandomCrop',
897
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
898
+ crop_type='center',
899
+ ignore_label=-1,
900
+ padding=[0, 0, 0]),
901
+ dict(type='ToTensor'),
902
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
903
+ ],
904
+ sample_size = 1200,
905
+ ),
906
+ ))
907
+ BlendedMVG_omni_dataset=dict(
908
+ data = dict(
909
+ train=dict(
910
+ pipeline=[dict(type='BGR2RGB'),
911
+ dict(type='LabelScaleCononical'),
912
+ dict(type='RandomResize',
913
+ prob=0.5,
914
+ ratio_range=(0.75, 1.3),
915
+ is_lidar=False),
916
+ dict(type='RandomCrop',
917
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
918
+ crop_type='rand',
919
+ ignore_label=-1,
920
+ padding=[0, 0, 0]),
921
+ dict(type='RandomEdgeMask',
922
+ mask_maxsize=50,
923
+ prob=0.2,
924
+ rgb_invalid=[0,0,0],
925
+ label_invalid=-1,),
926
+ dict(type='RandomHorizontalFlip',
927
+ prob=0.4),
928
+ dict(type='PhotoMetricDistortion',
929
+ to_gray_prob=0.1,
930
+ distortion_prob=0.1,),
931
+ dict(type='Weather',
932
+ prob=0.05),
933
+ dict(type='RandomBlur',
934
+ prob=0.05),
935
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
936
+ dict(type='ToTensor'),
937
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
938
+ ],
939
+ ),
940
+ val=dict(
941
+ pipeline=[dict(type='BGR2RGB'),
942
+ dict(type='LabelScaleCononical'),
943
+ dict(type='RandomCrop',
944
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
945
+ crop_type='center',
946
+ ignore_label=-1,
947
+ padding=[0, 0, 0]),
948
+ dict(type='ToTensor'),
949
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
950
+ ],
951
+ ),
952
+ ))
953
+ ScanNetAll_dataset=dict(
954
+ data = dict(
955
+ train=dict(
956
+ pipeline=[dict(type='BGR2RGB'),
957
+ dict(type='LabelScaleCononical'),
958
+ dict(type='RandomResize',
959
+ prob=0.5,
960
+ ratio_range=(0.85, 1.15),
961
+ is_lidar=False),
962
+ dict(type='RandomCrop',
963
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
964
+ crop_type='rand',
965
+ ignore_label=-1,
966
+ padding=[0, 0, 0]),
967
+ dict(type='RandomEdgeMask',
968
+ mask_maxsize=50,
969
+ prob=0.2,
970
+ rgb_invalid=[0,0,0],
971
+ label_invalid=-1,),
972
+ dict(type='RandomHorizontalFlip',
973
+ prob=0.4),
974
+ dict(type='PhotoMetricDistortion',
975
+ to_gray_prob=0.1,
976
+ distortion_prob=0.1,),
977
+ dict(type='Weather',
978
+ prob=0.05),
979
+ dict(type='RandomBlur',
980
+ prob=0.05),
981
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
982
+ dict(type='ToTensor'),
983
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
984
+ ],
985
+ #sample_size = 10000,
986
+ ),
987
+ val=dict(
988
+ pipeline=[dict(type='BGR2RGB'),
989
+ dict(type='LabelScaleCononical'),
990
+ dict(type='RandomCrop',
991
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
992
+ crop_type='center',
993
+ ignore_label=-1,
994
+ padding=[0, 0, 0]),
995
+ dict(type='ToTensor'),
996
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
997
+ ],
998
+ sample_size = 1200,
999
+ ),
1000
+ ))
1001
+ Hypersim_dataset=dict(
1002
+ data = dict(
1003
+ train=dict(
1004
+ pipeline=[dict(type='BGR2RGB'),
1005
+ dict(type='LabelScaleCononical'),
1006
+ dict(type='RandomResize',
1007
+ prob=0.5,
1008
+ ratio_range=(0.85, 1.15),
1009
+ is_lidar=False),
1010
+ dict(type='RandomCrop',
1011
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
1012
+ crop_type='rand',
1013
+ ignore_label=-1,
1014
+ padding=[0, 0, 0]),
1015
+ dict(type='RandomEdgeMask',
1016
+ mask_maxsize=50,
1017
+ prob=0.2,
1018
+ rgb_invalid=[0,0,0],
1019
+ label_invalid=-1,),
1020
+ dict(type='RandomHorizontalFlip',
1021
+ prob=0.4),
1022
+ dict(type='PhotoMetricDistortion',
1023
+ to_gray_prob=0.1,
1024
+ distortion_prob=0.1,),
1025
+ dict(type='Weather',
1026
+ prob=0.05),
1027
+ dict(type='RandomBlur',
1028
+ prob=0.05),
1029
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
1030
+ dict(type='ToTensor'),
1031
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
1032
+ ],
1033
+ #sample_size = 10000,
1034
+ ),
1035
+ val=dict(
1036
+ pipeline=[dict(type='BGR2RGB'),
1037
+ dict(type='LabelScaleCononical'),
1038
+ dict(type='RandomCrop',
1039
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
1040
+ crop_type='center',
1041
+ ignore_label=-1,
1042
+ padding=[0, 0, 0]),
1043
+ dict(type='ToTensor'),
1044
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
1045
+ ],
1046
+ sample_size = 1200,
1047
+ ),
1048
+ ))
external/Metric3D/training/mono/configs/RAFTDecoder/vit.raft5.large.kitti.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ _base_=['../_base_/losses/all_losses.py',
2
+ '../_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py',
3
+
4
+ '../_base_/datasets/nyu.py',
5
+ '../_base_/datasets/kitti.py'
6
+ ]
7
+
8
+ import numpy as np
9
+ model=dict(
10
+ decode_head=dict(
11
+ type='RAFTDepthNormalDPT5',
12
+ iters=8,
13
+ n_downsample=2,
14
+ detach=False,
15
+ ),
16
+ )
17
+
18
+ # loss method
19
+ losses=dict(
20
+ decoder_losses=[
21
+ dict(type='VNLoss', sample_ratio=0.2, loss_weight=0.1),
22
+ dict(type='GRUSequenceLoss', loss_weight=1.0, loss_gamma=0.9, stereo_sup=0),
23
+ dict(type='DeNoConsistencyLoss', loss_weight=0.001, loss_fn='CEL', scale=2)
24
+ ],
25
+ )
26
+
27
+ data_array = [
28
+
29
+ [
30
+ dict(KITTI='KITTI_dataset'),
31
+ ],
32
+ ]
33
+
34
+
35
+
36
+ # configs of the canonical space
37
+ data_basic=dict(
38
+ canonical_space = dict(
39
+ # img_size=(540, 960),
40
+ focal_length=1000.0,
41
+ ),
42
+ depth_range=(0, 1),
43
+ depth_normalize=(0.1, 200),
44
+ # crop_size=(544, 1216),
45
+ # crop_size = (544, 992),
46
+ crop_size = (616, 1064), # %28 = 0
47
+ )
48
+
49
+ # online evaluation
50
+ # evaluation = dict(online_eval=True, interval=1000, metrics=['abs_rel', 'delta1', 'rmse'], multi_dataset_eval=True)
51
+ #log_interval = 100
52
+
53
+ interval = 4000
54
+ log_interval = 100
55
+ evaluation = dict(
56
+ online_eval=False,
57
+ interval=interval,
58
+ metrics=['abs_rel', 'delta1', 'rmse', 'normal_mean', 'normal_rmse', 'normal_a1'],
59
+ multi_dataset_eval=True,
60
+ exclude=['DIML_indoor', 'GL3D', 'Tourism', 'MegaDepth'],
61
+ )
62
+
63
+ # save checkpoint during training, with '*_AMP' is employing the automatic mix precision training
64
+ checkpoint_config = dict(by_epoch=False, interval=interval)
65
+ runner = dict(type='IterBasedRunner_AMP', max_iters=20010)
66
+
67
+ # optimizer
68
+ optimizer = dict(
69
+ type='AdamW',
70
+ encoder=dict(lr=5e-7, betas=(0.9, 0.999), weight_decay=0, eps=1e-10),
71
+ decoder=dict(lr=1e-5, betas=(0.9, 0.999), weight_decay=0, eps=1e-10),
72
+ strict_match = True
73
+ )
74
+ # schedule
75
+ lr_config = dict(policy='poly',
76
+ warmup='linear',
77
+ warmup_iters=20,
78
+ warmup_ratio=1e-6,
79
+ power=0.9, min_lr=1e-8, by_epoch=False)
80
+
81
+ acc_batch = 1
82
+ batchsize_per_gpu = 2
83
+ thread_per_gpu = 2
84
+
85
+ KITTI_dataset=dict(
86
+ data = dict(
87
+ train=dict(
88
+ pipeline=[dict(type='BGR2RGB'),
89
+ dict(type='LabelScaleCononical'),
90
+ dict(type='RandomResize',
91
+ prob=0.5,
92
+ ratio_range=(0.85, 1.15),
93
+ is_lidar=True),
94
+ dict(type='RandomCrop',
95
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
96
+ crop_type='rand',
97
+ ignore_label=-1,
98
+ padding=[0, 0, 0]),
99
+ dict(type='RandomEdgeMask',
100
+ mask_maxsize=50,
101
+ prob=0.2,
102
+ rgb_invalid=[0,0,0],
103
+ label_invalid=-1,),
104
+ dict(type='RandomHorizontalFlip',
105
+ prob=0.4),
106
+ dict(type='PhotoMetricDistortion',
107
+ to_gray_prob=0.1,
108
+ distortion_prob=0.1,),
109
+ dict(type='Weather',
110
+ prob=0.05),
111
+ dict(type='RandomBlur',
112
+ prob=0.05),
113
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
114
+ dict(type='ToTensor'),
115
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
116
+ ],
117
+ #sample_size = 10,
118
+ ),
119
+ val=dict(
120
+ pipeline=[dict(type='BGR2RGB'),
121
+ dict(type='LabelScaleCononical'),
122
+ dict(type='RandomCrop',
123
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
124
+ crop_type='center',
125
+ ignore_label=-1,
126
+ padding=[0, 0, 0]),
127
+ dict(type='ToTensor'),
128
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
129
+ ],
130
+ sample_size = 1200,
131
+ ),
132
+ ))
external/Metric3D/training/mono/configs/RAFTDecoder/vit.raft5.large.py ADDED
@@ -0,0 +1,1047 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ _base_=['../_base_/losses/all_losses.py',
2
+ '../_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py',
3
+
4
+ '../_base_/datasets/ddad.py',
5
+ '../_base_/datasets/_data_base_.py',
6
+ '../_base_/datasets/argovers2.py',
7
+ '../_base_/datasets/cityscapes.py',
8
+ '../_base_/datasets/drivingstereo.py',
9
+ '../_base_/datasets/dsec.py',
10
+ '../_base_/datasets/lyft.py',
11
+ '../_base_/datasets/mapillary_psd.py',
12
+ '../_base_/datasets/diml.py',
13
+ '../_base_/datasets/taskonomy.py',
14
+ '../_base_/datasets/uasol.py',
15
+ '../_base_/datasets/pandaset.py',
16
+ '../_base_/datasets/waymo.py',
17
+
18
+ '../_base_/default_runtime.py',
19
+ '../_base_/schedules/schedule_1m.py',
20
+
21
+ '../_base_/datasets/hm3d.py',
22
+ '../_base_/datasets/matterport3d.py',
23
+ '../_base_/datasets/replica.py',
24
+ '../_base_/datasets/vkitti.py',
25
+ ]
26
+
27
+ import numpy as np
28
+ model=dict(
29
+ decode_head=dict(
30
+ type='RAFTDepthNormalDPT5',
31
+ iters=8,
32
+ n_downsample=2,
33
+ detach=False,
34
+ ),
35
+ )
36
+
37
+ # loss method
38
+ losses=dict(
39
+ decoder_losses=[
40
+ dict(type='VNLoss', sample_ratio=0.2, loss_weight=1.0),
41
+ dict(type='GRUSequenceLoss', loss_weight=0.5, loss_gamma=0.9, stereo_sup=0.0),
42
+ dict(type='SkyRegularizationLoss', loss_weight=0.001, sample_ratio=0.4, regress_value=200, normal_regress=[0, 0, -1]),
43
+ dict(type='HDNRandomLoss', loss_weight=0.5, random_num=10),
44
+ dict(type='HDSNRandomLoss', loss_weight=0.5, random_num=20, batch_limit=4),
45
+ dict(type='PWNPlanesLoss', loss_weight=1),
46
+ dict(type='NormalBranchLoss', loss_weight=1.0, loss_fn='NLL_ours_GRU'),
47
+ dict(type='DeNoConsistencyLoss', loss_weight=0.01, loss_fn='CEL', scale=2, depth_detach=True)
48
+ ],
49
+ gru_losses=[
50
+ dict(type='SkyRegularizationLoss', loss_weight=0.001, sample_ratio=0.4, regress_value=200, normal_regress=[0, 0, -1]),
51
+ ],
52
+ )
53
+
54
+ data_array = [
55
+ # Outdoor 1
56
+ [
57
+ dict(UASOL='UASOL_dataset'), #13.6w
58
+ dict(Cityscapes_trainextra='Cityscapes_dataset'), #1.8w
59
+ dict(Cityscapes_sequence='Cityscapes_dataset'), #13.5w
60
+ dict(DIML='DIML_dataset'), # 12.2w
61
+ dict(Waymo='Waymo_dataset'), # 99w
62
+ ],
63
+ # Outdoor 2
64
+ [
65
+ dict(DSEC='DSEC_dataset'),
66
+ dict(Mapillary_PSD='MapillaryPSD_dataset'), # 74.2w
67
+ dict(DrivingStereo='DrivingStereo_dataset'), # 17.6w
68
+ dict(Argovers2='Argovers2_dataset'), # 285.6w
69
+ ],
70
+ # Outdoor 3
71
+ [
72
+ dict(Lyft='Lyft_dataset'), #15.8w
73
+ dict(DDAD='DDAD_dataset'), #7.4w
74
+ dict(Pandaset='Pandaset_dataset'), #3.8w
75
+ dict(Virtual_KITTI='VKITTI_dataset'), # 3.7w # syn
76
+ ],
77
+ #Indoor 1
78
+ [
79
+ dict(Replica='Replica_dataset'), # 5.6w # syn
80
+ dict(Replica_gso='Replica_dataset'), # 10.7w # syn
81
+ dict(Hypersim='Hypersim_dataset'), # 2.4w
82
+ dict(ScanNetAll='ScanNetAll_dataset'),
83
+ ],
84
+ # Indoor 2
85
+ [
86
+ dict(Taskonomy='Taskonomy_dataset'), #447.2w
87
+ dict(Matterport3D='Matterport3D_dataset'), #14.4w
88
+ dict(HM3D='HM3D_dataset'), # 200w, very noisy, sampled some data
89
+ ],
90
+ ]
91
+
92
+
93
+
94
+ # configs of the canonical space
95
+ data_basic=dict(
96
+ canonical_space = dict(
97
+ # img_size=(540, 960),
98
+ focal_length=1000.0,
99
+ ),
100
+ depth_range=(0, 1),
101
+ depth_normalize=(0.1, 200),
102
+ # crop_size=(544, 1216),
103
+ # crop_size = (544, 992),
104
+ crop_size = (616, 1064), # %28 = 0
105
+ )
106
+
107
+ log_interval = 100
108
+ # online evaluation
109
+ # evaluation = dict(online_eval=True, interval=1000, metrics=['abs_rel', 'delta1', 'rmse'], multi_dataset_eval=True)
110
+ interval = 20000
111
+ evaluation = dict(
112
+ #online_eval=True,
113
+ online_eval=False,
114
+ interval=interval,
115
+ metrics=['abs_rel', 'delta1', 'rmse', 'normal_mean', 'normal_rmse', 'normal_a1'],
116
+ multi_dataset_eval=True,
117
+ exclude=['DIML_indoor', 'GL3D', 'Tourism', 'MegaDepth'],
118
+ )
119
+
120
+ # save checkpoint during training, with '*_AMP' is employing the automatic mix precision training
121
+ checkpoint_config = dict(by_epoch=False, interval=interval)
122
+ runner = dict(type='IterBasedRunner_AMP', max_iters=800010)
123
+
124
+ # optimizer
125
+ optimizer = dict(
126
+ type='AdamW',
127
+ # encoder=dict(lr=1e-4, betas=(0.9, 0.999), weight_decay=0.01, eps=1e-6),
128
+ encoder=dict(lr=1e-5, betas=(0.9, 0.999), weight_decay=1e-3, eps=1e-6),
129
+ decoder=dict(lr=1e-4, betas=(0.9, 0.999), weight_decay=0.01, eps=1e-6),
130
+ )
131
+ # schedule
132
+ lr_config = dict(policy='poly',
133
+ warmup='linear',
134
+ warmup_iters=500,
135
+ warmup_ratio=1e-6,
136
+ power=0.9, min_lr=1e-6, by_epoch=False)
137
+
138
+ batchsize_per_gpu = 4
139
+ thread_per_gpu = 4
140
+
141
+ Argovers2_dataset=dict(
142
+ data = dict(
143
+ train=dict(
144
+ pipeline=[dict(type='BGR2RGB'),
145
+ dict(type='LabelScaleCononical'),
146
+ dict(type='RandomResize',
147
+ prob=0.5,
148
+ ratio_range=(0.85, 1.15),
149
+ is_lidar=True),
150
+ dict(type='RandomCrop',
151
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
152
+ crop_type='rand',
153
+ ignore_label=-1,
154
+ padding=[0, 0, 0]),
155
+ dict(type='RandomEdgeMask',
156
+ mask_maxsize=50,
157
+ prob=0.2,
158
+ rgb_invalid=[0,0,0],
159
+ label_invalid=-1,),
160
+ dict(type='RandomHorizontalFlip',
161
+ prob=0.4),
162
+ dict(type='PhotoMetricDistortion',
163
+ to_gray_prob=0.1,
164
+ distortion_prob=0.1,),
165
+ dict(type='Weather',
166
+ prob=0.05),
167
+ dict(type='RandomBlur',
168
+ prob=0.05),
169
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
170
+ dict(type='ToTensor'),
171
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
172
+ ],
173
+ #sample_size = 10000,
174
+ ),
175
+ val=dict(
176
+ pipeline=[dict(type='BGR2RGB'),
177
+ dict(type='LabelScaleCononical'),
178
+ dict(type='RandomCrop',
179
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
180
+ crop_type='center',
181
+ ignore_label=-1,
182
+ padding=[0, 0, 0]),
183
+ dict(type='ToTensor'),
184
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
185
+ ],
186
+ sample_size = 1200,
187
+ ),
188
+ ))
189
+ Cityscapes_dataset=dict(
190
+ data = dict(
191
+ train=dict(
192
+ pipeline=[dict(type='BGR2RGB'),
193
+ dict(type='LabelScaleCononical'),
194
+ dict(type='RandomResize',
195
+ ratio_range=(0.85, 1.15),
196
+ is_lidar=False),
197
+ dict(type='RandomCrop',
198
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
199
+ crop_type='rand',
200
+ ignore_label=-1,
201
+ padding=[0, 0, 0]),
202
+ dict(type='RandomEdgeMask',
203
+ mask_maxsize=50,
204
+ prob=0.2,
205
+ rgb_invalid=[0,0,0],
206
+ label_invalid=-1,),
207
+ dict(type='RandomHorizontalFlip',
208
+ prob=0.4),
209
+ dict(type='PhotoMetricDistortion',
210
+ to_gray_prob=0.1,
211
+ distortion_prob=0.1,),
212
+ dict(type='Weather',
213
+ prob=0.05),
214
+ dict(type='RandomBlur',
215
+ prob=0.05),
216
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
217
+ dict(type='ToTensor'),
218
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
219
+ ],
220
+ #sample_size = 10000,
221
+ ),
222
+ val=dict(
223
+ pipeline=[dict(type='BGR2RGB'),
224
+ dict(type='LabelScaleCononical'),
225
+ dict(type='RandomCrop',
226
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
227
+ crop_type='center',
228
+ ignore_label=-1,
229
+ padding=[0, 0, 0]),
230
+ dict(type='ToTensor'),
231
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
232
+ ],
233
+ sample_size = 1200,
234
+ ),
235
+ ))
236
+ DIML_dataset=dict(
237
+ data = dict(
238
+ train=dict(
239
+ pipeline=[dict(type='BGR2RGB'),
240
+ dict(type='LabelScaleCononical'),
241
+ dict(type='RandomResize',
242
+ ratio_range=(0.85, 1.15),
243
+ is_lidar=False),
244
+ dict(type='RandomCrop',
245
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
246
+ crop_type='rand',
247
+ ignore_label=-1,
248
+ padding=[0, 0, 0]),
249
+ dict(type='RandomEdgeMask',
250
+ mask_maxsize=50,
251
+ prob=0.2,
252
+ rgb_invalid=[0,0,0],
253
+ label_invalid=-1,),
254
+ dict(type='RandomHorizontalFlip',
255
+ prob=0.4),
256
+ dict(type='PhotoMetricDistortion',
257
+ to_gray_prob=0.1,
258
+ distortion_prob=0.1,),
259
+ dict(type='Weather',
260
+ prob=0.05),
261
+ dict(type='RandomBlur',
262
+ prob=0.05),
263
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
264
+ dict(type='ToTensor'),
265
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
266
+ ],
267
+ #sample_size = 10000,
268
+ ),
269
+ val=dict(
270
+ pipeline=[dict(type='BGR2RGB'),
271
+ dict(type='LabelScaleCononical'),
272
+ dict(type='RandomCrop',
273
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
274
+ crop_type='center',
275
+ ignore_label=-1,
276
+ padding=[0, 0, 0]),
277
+ dict(type='ToTensor'),
278
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
279
+ ],
280
+ sample_size = 1200,
281
+ ),
282
+ ))
283
+ Lyft_dataset=dict(
284
+ data = dict(
285
+ train=dict(
286
+ pipeline=[dict(type='BGR2RGB'),
287
+ dict(type='LabelScaleCononical'),
288
+ dict(type='RandomResize',
289
+ prob=0.5,
290
+ ratio_range=(0.85, 1.15),
291
+ is_lidar=True),
292
+ dict(type='RandomCrop',
293
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
294
+ crop_type='rand',
295
+ ignore_label=-1,
296
+ padding=[0, 0, 0]),
297
+ dict(type='RandomEdgeMask',
298
+ mask_maxsize=50,
299
+ prob=0.2,
300
+ rgb_invalid=[0,0,0],
301
+ label_invalid=-1,),
302
+ dict(type='RandomHorizontalFlip',
303
+ prob=0.4),
304
+ dict(type='PhotoMetricDistortion',
305
+ to_gray_prob=0.1,
306
+ distortion_prob=0.1,),
307
+ dict(type='Weather',
308
+ prob=0.05),
309
+ dict(type='RandomBlur',
310
+ prob=0.05),
311
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
312
+ dict(type='ToTensor'),
313
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
314
+ ],
315
+ #sample_size = 10000,
316
+ ),
317
+ val=dict(
318
+ pipeline=[dict(type='BGR2RGB'),
319
+ dict(type='LabelScaleCononical'),
320
+ dict(type='RandomCrop',
321
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
322
+ crop_type='center',
323
+ ignore_label=-1,
324
+ padding=[0, 0, 0]),
325
+ dict(type='ToTensor'),
326
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
327
+ ],
328
+ sample_size = 1200,
329
+ ),
330
+ ))
331
+ DDAD_dataset=dict(
332
+ data = dict(
333
+ train=dict(
334
+ pipeline=[dict(type='BGR2RGB'),
335
+ dict(type='LabelScaleCononical'),
336
+ dict(type='RandomResize',
337
+ prob=0.5,
338
+ ratio_range=(0.85, 1.15),
339
+ is_lidar=True),
340
+ dict(type='RandomCrop',
341
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
342
+ crop_type='rand',
343
+ ignore_label=-1,
344
+ padding=[0, 0, 0]),
345
+ dict(type='RandomEdgeMask',
346
+ mask_maxsize=50,
347
+ prob=0.2,
348
+ rgb_invalid=[0,0,0],
349
+ label_invalid=-1,),
350
+ dict(type='RandomHorizontalFlip',
351
+ prob=0.4),
352
+ dict(type='PhotoMetricDistortion',
353
+ to_gray_prob=0.1,
354
+ distortion_prob=0.1,),
355
+ dict(type='Weather',
356
+ prob=0.05),
357
+ dict(type='RandomBlur',
358
+ prob=0.05),
359
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
360
+ dict(type='ToTensor'),
361
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
362
+ ],
363
+ #sample_size = 10000,
364
+ ),
365
+ val=dict(
366
+ pipeline=[dict(type='BGR2RGB'),
367
+ dict(type='LabelScaleCononical'),
368
+ dict(type='RandomCrop',
369
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
370
+ crop_type='center',
371
+ ignore_label=-1,
372
+ padding=[0, 0, 0]),
373
+ dict(type='ToTensor'),
374
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
375
+ ],
376
+ # sample_size = 1200,
377
+ ),
378
+ ))
379
+ DSEC_dataset=dict(
380
+ data = dict(
381
+ train=dict(
382
+ pipeline=[dict(type='BGR2RGB'),
383
+ dict(type='LabelScaleCononical'),
384
+ dict(type='RandomResize',
385
+ prob=0.5,
386
+ ratio_range=(0.85, 1.15),
387
+ is_lidar=True),
388
+ dict(type='RandomCrop',
389
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
390
+ crop_type='rand',
391
+ ignore_label=-1,
392
+ padding=[0, 0, 0]),
393
+ dict(type='RandomEdgeMask',
394
+ mask_maxsize=50,
395
+ prob=0.2,
396
+ rgb_invalid=[0,0,0],
397
+ label_invalid=-1,),
398
+ dict(type='RandomHorizontalFlip',
399
+ prob=0.4),
400
+ dict(type='PhotoMetricDistortion',
401
+ to_gray_prob=0.1,
402
+ distortion_prob=0.1,),
403
+ dict(type='Weather',
404
+ prob=0.05),
405
+ dict(type='RandomBlur',
406
+ prob=0.05),
407
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
408
+ dict(type='ToTensor'),
409
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
410
+ ],
411
+ #sample_size = 10000,
412
+ ),
413
+ val=dict(
414
+ pipeline=[dict(type='BGR2RGB'),
415
+ dict(type='LabelScaleCononical'),
416
+ dict(type='RandomCrop',
417
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
418
+ crop_type='center',
419
+ ignore_label=-1,
420
+ padding=[0, 0, 0]),
421
+ dict(type='ToTensor'),
422
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
423
+ ],
424
+ sample_size = 1200,
425
+ ),
426
+ ))
427
+ DrivingStereo_dataset=dict(
428
+ data = dict(
429
+ train=dict(
430
+ pipeline=[dict(type='BGR2RGB'),
431
+ dict(type='LabelScaleCononical'),
432
+ dict(type='RandomResize',
433
+ ratio_range=(0.85, 1.15),
434
+ is_lidar=False),
435
+ dict(type='RandomCrop',
436
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
437
+ crop_type='rand',
438
+ ignore_label=-1,
439
+ padding=[0, 0, 0]),
440
+ dict(type='RandomEdgeMask',
441
+ mask_maxsize=50,
442
+ prob=0.2,
443
+ rgb_invalid=[0,0,0],
444
+ label_invalid=-1,),
445
+ dict(type='RandomHorizontalFlip',
446
+ prob=0.4),
447
+ dict(type='PhotoMetricDistortion',
448
+ to_gray_prob=0.1,
449
+ distortion_prob=0.1,),
450
+ dict(type='Weather',
451
+ prob=0.05),
452
+ dict(type='RandomBlur',
453
+ prob=0.05),
454
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
455
+ dict(type='ToTensor'),
456
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
457
+ ],
458
+ #sample_size = 10000,
459
+ ),
460
+ val=dict(
461
+ pipeline=[dict(type='BGR2RGB'),
462
+ dict(type='LabelScaleCononical'),
463
+ dict(type='RandomCrop',
464
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
465
+ crop_type='center',
466
+ ignore_label=-1,
467
+ padding=[0, 0, 0]),
468
+ dict(type='ToTensor'),
469
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
470
+ ],
471
+ sample_size = 1200,
472
+ ),
473
+ ))
474
+ MapillaryPSD_dataset=dict(
475
+ data = dict(
476
+ train=dict(
477
+ pipeline=[dict(type='BGR2RGB'),
478
+ dict(type='LabelScaleCononical'),
479
+ dict(type='RandomResize',
480
+ prob=0.5,
481
+ ratio_range=(0.85, 1.15),
482
+ is_lidar=True),
483
+ dict(type='RandomCrop',
484
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
485
+ crop_type='rand',
486
+ ignore_label=-1,
487
+ padding=[0, 0, 0]),
488
+ dict(type='RandomEdgeMask',
489
+ mask_maxsize=50,
490
+ prob=0.2,
491
+ rgb_invalid=[0,0,0],
492
+ label_invalid=-1,),
493
+ dict(type='RandomHorizontalFlip',
494
+ prob=0.4),
495
+ dict(type='PhotoMetricDistortion',
496
+ to_gray_prob=0.1,
497
+ distortion_prob=0.1,),
498
+ dict(type='Weather',
499
+ prob=0.05),
500
+ dict(type='RandomBlur',
501
+ prob=0.05),
502
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
503
+ dict(type='ToTensor'),
504
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
505
+ ],
506
+ #sample_size = 10000,
507
+ ),
508
+ val=dict(
509
+ pipeline=[dict(type='BGR2RGB'),
510
+ dict(type='LabelScaleCononical'),
511
+ dict(type='RandomCrop',
512
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
513
+ crop_type='center',
514
+ ignore_label=-1,
515
+ padding=[0, 0, 0]),
516
+ dict(type='ToTensor'),
517
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
518
+ ],
519
+ sample_size = 1200,
520
+ ),
521
+ ))
522
+ Pandaset_dataset=dict(
523
+ data = dict(
524
+ train=dict(
525
+ pipeline=[dict(type='BGR2RGB'),
526
+ dict(type='LabelScaleCononical'),
527
+ dict(type='RandomResize',
528
+ prob=0.5,
529
+ ratio_range=(0.85, 1.15),
530
+ is_lidar=True),
531
+ dict(type='RandomCrop',
532
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
533
+ crop_type='rand',
534
+ ignore_label=-1,
535
+ padding=[0, 0, 0]),
536
+ dict(type='RandomEdgeMask',
537
+ mask_maxsize=50,
538
+ prob=0.2,
539
+ rgb_invalid=[0,0,0],
540
+ label_invalid=-1,),
541
+ dict(type='RandomHorizontalFlip',
542
+ prob=0.4),
543
+ dict(type='PhotoMetricDistortion',
544
+ to_gray_prob=0.1,
545
+ distortion_prob=0.1,),
546
+ dict(type='Weather',
547
+ prob=0.05),
548
+ dict(type='RandomBlur',
549
+ prob=0.05),
550
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
551
+ dict(type='ToTensor'),
552
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
553
+ ],
554
+ #sample_size = 10000,
555
+ ),
556
+ val=dict(
557
+ pipeline=[dict(type='BGR2RGB'),
558
+ dict(type='LabelScaleCononical'),
559
+ dict(type='RandomCrop',
560
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
561
+ crop_type='center',
562
+ ignore_label=-1,
563
+ padding=[0, 0, 0]),
564
+ dict(type='ToTensor'),
565
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
566
+ ],
567
+ sample_size = 1200,
568
+ ),
569
+ ))
570
+ Taskonomy_dataset=dict(
571
+ data = dict(
572
+ train=dict(
573
+ pipeline=[dict(type='BGR2RGB'),
574
+ dict(type='LabelScaleCononical'),
575
+ dict(type='RandomResize',
576
+ prob=0.5,
577
+ ratio_range=(0.85, 1.15),
578
+ is_lidar=False),
579
+ dict(type='RandomCrop',
580
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
581
+ crop_type='rand',
582
+ ignore_label=-1,
583
+ padding=[0, 0, 0]),
584
+ dict(type='RandomEdgeMask',
585
+ mask_maxsize=50,
586
+ prob=0.2,
587
+ rgb_invalid=[0,0,0],
588
+ label_invalid=-1,),
589
+ dict(type='RandomHorizontalFlip',
590
+ prob=0.4),
591
+ dict(type='PhotoMetricDistortion',
592
+ to_gray_prob=0.1,
593
+ distortion_prob=0.1,),
594
+ dict(type='Weather',
595
+ prob=0.05),
596
+ dict(type='RandomBlur',
597
+ prob=0.05),
598
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
599
+ dict(type='ToTensor'),
600
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
601
+ ],
602
+ #sample_size = 10000,
603
+ ),
604
+ val=dict(
605
+ pipeline=[dict(type='BGR2RGB'),
606
+ dict(type='LabelScaleCononical'),
607
+ dict(type='RandomCrop',
608
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
609
+ crop_type='center',
610
+ ignore_label=-1,
611
+ padding=[0, 0, 0]),
612
+ dict(type='ToTensor'),
613
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
614
+ ],
615
+ sample_size = 1200,
616
+ ),
617
+ ))
618
+ UASOL_dataset=dict(
619
+ data = dict(
620
+ train=dict(
621
+ pipeline=[dict(type='BGR2RGB'),
622
+ dict(type='LabelScaleCononical'),
623
+ dict(type='RandomResize',
624
+ prob=0.5,
625
+ ratio_range=(0.85, 1.15),
626
+ is_lidar=False),
627
+ dict(type='RandomCrop',
628
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
629
+ crop_type='rand',
630
+ ignore_label=-1,
631
+ padding=[0, 0, 0]),
632
+ dict(type='RandomEdgeMask',
633
+ mask_maxsize=50,
634
+ prob=0.2,
635
+ rgb_invalid=[0,0,0],
636
+ label_invalid=-1,),
637
+ dict(type='RandomHorizontalFlip',
638
+ prob=0.4),
639
+ dict(type='PhotoMetricDistortion',
640
+ to_gray_prob=0.1,
641
+ distortion_prob=0.1,),
642
+ dict(type='Weather',
643
+ prob=0.05),
644
+ dict(type='RandomBlur',
645
+ prob=0.05),
646
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
647
+ dict(type='ToTensor'),
648
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
649
+ ],
650
+ #sample_size = 10000,
651
+ ),
652
+ val=dict(
653
+ pipeline=[dict(type='BGR2RGB'),
654
+ dict(type='LabelScaleCononical'),
655
+ dict(type='RandomCrop',
656
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
657
+ crop_type='center',
658
+ ignore_label=-1,
659
+ padding=[0, 0, 0]),
660
+ dict(type='ToTensor'),
661
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
662
+ ],
663
+ sample_size = 1200,
664
+ ),
665
+ ))
666
+ Waymo_dataset=dict(
667
+ data = dict(
668
+ train=dict(
669
+ pipeline=[dict(type='BGR2RGB'),
670
+ dict(type='LabelScaleCononical'),
671
+ dict(type='RandomResize',
672
+ prob=0.5,
673
+ ratio_range=(0.85, 1.15),
674
+ is_lidar=True),
675
+ dict(type='RandomCrop',
676
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
677
+ crop_type='rand',
678
+ ignore_label=-1,
679
+ padding=[0, 0, 0]),
680
+ dict(type='RandomEdgeMask',
681
+ mask_maxsize=50,
682
+ prob=0.2,
683
+ rgb_invalid=[0,0,0],
684
+ label_invalid=-1,),
685
+ dict(type='RandomHorizontalFlip',
686
+ prob=0.4),
687
+ dict(type='PhotoMetricDistortion',
688
+ to_gray_prob=0.1,
689
+ distortion_prob=0.1,),
690
+ dict(type='Weather',
691
+ prob=0.05),
692
+ dict(type='RandomBlur',
693
+ prob=0.05),
694
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
695
+ dict(type='ToTensor'),
696
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
697
+ ],
698
+ #sample_size = 10000,
699
+ ),
700
+ val=dict(
701
+ pipeline=[dict(type='BGR2RGB'),
702
+ dict(type='LabelScaleCononical'),
703
+ dict(type='RandomCrop',
704
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
705
+ crop_type='center',
706
+ ignore_label=-1,
707
+ padding=[0, 0, 0]),
708
+ dict(type='ToTensor'),
709
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
710
+ ],
711
+ sample_size = 1200,
712
+ ),
713
+ ))
714
+ Matterport3D_dataset=dict(
715
+ data = dict(
716
+ train=dict(
717
+ pipeline=[dict(type='BGR2RGB'),
718
+ dict(type='LabelScaleCononical'),
719
+ dict(type='RandomResize',
720
+ prob=0.5,
721
+ ratio_range=(0.85, 1.15),
722
+ is_lidar=False),
723
+ dict(type='RandomCrop',
724
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
725
+ crop_type='rand',
726
+ ignore_label=-1,
727
+ padding=[0, 0, 0]),
728
+ dict(type='RandomEdgeMask',
729
+ mask_maxsize=50,
730
+ prob=0.2,
731
+ rgb_invalid=[0,0,0],
732
+ label_invalid=-1,),
733
+ dict(type='RandomHorizontalFlip',
734
+ prob=0.4),
735
+ dict(type='PhotoMetricDistortion',
736
+ to_gray_prob=0.1,
737
+ distortion_prob=0.1,),
738
+ dict(type='Weather',
739
+ prob=0.05),
740
+ dict(type='RandomBlur',
741
+ prob=0.05),
742
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
743
+ dict(type='ToTensor'),
744
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
745
+ ],
746
+ #sample_size = 10000,
747
+ ),
748
+ val=dict(
749
+ pipeline=[dict(type='BGR2RGB'),
750
+ dict(type='LabelScaleCononical'),
751
+ dict(type='RandomCrop',
752
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
753
+ crop_type='center',
754
+ ignore_label=-1,
755
+ padding=[0, 0, 0]),
756
+ dict(type='ToTensor'),
757
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
758
+ ],
759
+ sample_size = 1200,
760
+ ),
761
+ ))
762
+ Replica_dataset=dict(
763
+ data = dict(
764
+ train=dict(
765
+ pipeline=[dict(type='BGR2RGB'),
766
+ dict(type='LabelScaleCononical'),
767
+ dict(type='RandomResize',
768
+ prob=0.5,
769
+ ratio_range=(0.85, 1.15),
770
+ is_lidar=False),
771
+ dict(type='RandomCrop',
772
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
773
+ crop_type='rand',
774
+ ignore_label=-1,
775
+ padding=[0, 0, 0]),
776
+ dict(type='RandomEdgeMask',
777
+ mask_maxsize=50,
778
+ prob=0.2,
779
+ rgb_invalid=[0,0,0],
780
+ label_invalid=-1,),
781
+ dict(type='RandomHorizontalFlip',
782
+ prob=0.4),
783
+ dict(type='PhotoMetricDistortion',
784
+ to_gray_prob=0.1,
785
+ distortion_prob=0.1,),
786
+ dict(type='Weather',
787
+ prob=0.05),
788
+ dict(type='RandomBlur',
789
+ prob=0.05),
790
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
791
+ dict(type='ToTensor'),
792
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
793
+ ],
794
+ #sample_size = 10000,
795
+ ),
796
+ val=dict(
797
+ pipeline=[dict(type='BGR2RGB'),
798
+ dict(type='LabelScaleCononical'),
799
+ dict(type='RandomCrop',
800
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
801
+ crop_type='center',
802
+ ignore_label=-1,
803
+ padding=[0, 0, 0]),
804
+ dict(type='ToTensor'),
805
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
806
+ ],
807
+ sample_size = 1200,
808
+ ),
809
+ ))
810
+ VKITTI_dataset=dict(
811
+ data = dict(
812
+ train=dict(
813
+ pipeline=[dict(type='BGR2RGB'),
814
+ dict(type='LabelScaleCononical'),
815
+ dict(type='RandomResize',
816
+ prob=0.5,
817
+ ratio_range=(0.85, 1.15),
818
+ is_lidar=False),
819
+ dict(type='RandomCrop',
820
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
821
+ crop_type='rand',
822
+ ignore_label=-1,
823
+ padding=[0, 0, 0]),
824
+ dict(type='RandomEdgeMask',
825
+ mask_maxsize=50,
826
+ prob=0.2,
827
+ rgb_invalid=[0,0,0],
828
+ label_invalid=-1,),
829
+ dict(type='RandomHorizontalFlip',
830
+ prob=0.4),
831
+ dict(type='PhotoMetricDistortion',
832
+ to_gray_prob=0.1,
833
+ distortion_prob=0.1,),
834
+ dict(type='Weather',
835
+ prob=0.05),
836
+ dict(type='RandomBlur',
837
+ prob=0.05),
838
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
839
+ dict(type='ToTensor'),
840
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
841
+ ],
842
+ #sample_size = 10000,
843
+ ),
844
+ val=dict(
845
+ pipeline=[dict(type='BGR2RGB'),
846
+ dict(type='LabelScaleCononical'),
847
+ dict(type='RandomCrop',
848
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
849
+ crop_type='center',
850
+ ignore_label=-1,
851
+ padding=[0, 0, 0]),
852
+ dict(type='ToTensor'),
853
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
854
+ ],
855
+ sample_size = 1200,
856
+ ),
857
+ ))
858
+ HM3D_dataset=dict(
859
+ data = dict(
860
+ train=dict(
861
+ pipeline=[dict(type='BGR2RGB'),
862
+ dict(type='LabelScaleCononical'),
863
+ dict(type='RandomResize',
864
+ prob=0.5,
865
+ ratio_range=(0.75, 1.3),
866
+ is_lidar=False),
867
+ dict(type='RandomCrop',
868
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
869
+ crop_type='rand',
870
+ ignore_label=-1,
871
+ padding=[0, 0, 0]),
872
+ dict(type='RandomEdgeMask',
873
+ mask_maxsize=50,
874
+ prob=0.2,
875
+ rgb_invalid=[0,0,0],
876
+ label_invalid=-1,),
877
+ dict(type='RandomHorizontalFlip',
878
+ prob=0.4),
879
+ dict(type='PhotoMetricDistortion',
880
+ to_gray_prob=0.1,
881
+ distortion_prob=0.1,),
882
+ dict(type='Weather',
883
+ prob=0.05),
884
+ dict(type='RandomBlur',
885
+ prob=0.05),
886
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
887
+ dict(type='ToTensor'),
888
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
889
+ ],
890
+ #sample_size = 10000,
891
+ ),
892
+ val=dict(
893
+ pipeline=[dict(type='BGR2RGB'),
894
+ dict(type='LabelScaleCononical'),
895
+ dict(type='RandomCrop',
896
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
897
+ crop_type='center',
898
+ ignore_label=-1,
899
+ padding=[0, 0, 0]),
900
+ dict(type='ToTensor'),
901
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
902
+ ],
903
+ sample_size = 1200,
904
+ ),
905
+ ))
906
+ BlendedMVG_omni_dataset=dict(
907
+ data = dict(
908
+ train=dict(
909
+ pipeline=[dict(type='BGR2RGB'),
910
+ dict(type='LabelScaleCononical'),
911
+ dict(type='RandomResize',
912
+ prob=0.5,
913
+ ratio_range=(0.75, 1.3),
914
+ is_lidar=False),
915
+ dict(type='RandomCrop',
916
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
917
+ crop_type='rand',
918
+ ignore_label=-1,
919
+ padding=[0, 0, 0]),
920
+ dict(type='RandomEdgeMask',
921
+ mask_maxsize=50,
922
+ prob=0.2,
923
+ rgb_invalid=[0,0,0],
924
+ label_invalid=-1,),
925
+ dict(type='RandomHorizontalFlip',
926
+ prob=0.4),
927
+ dict(type='PhotoMetricDistortion',
928
+ to_gray_prob=0.1,
929
+ distortion_prob=0.1,),
930
+ dict(type='Weather',
931
+ prob=0.05),
932
+ dict(type='RandomBlur',
933
+ prob=0.05),
934
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
935
+ dict(type='ToTensor'),
936
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
937
+ ],
938
+ ),
939
+ val=dict(
940
+ pipeline=[dict(type='BGR2RGB'),
941
+ dict(type='LabelScaleCononical'),
942
+ dict(type='RandomCrop',
943
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
944
+ crop_type='center',
945
+ ignore_label=-1,
946
+ padding=[0, 0, 0]),
947
+ dict(type='ToTensor'),
948
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
949
+ ],
950
+ ),
951
+ ))
952
+ ScanNetAll_dataset=dict(
953
+ data = dict(
954
+ train=dict(
955
+ pipeline=[dict(type='BGR2RGB'),
956
+ dict(type='LabelScaleCononical'),
957
+ dict(type='RandomResize',
958
+ prob=0.5,
959
+ ratio_range=(0.85, 1.15),
960
+ is_lidar=False),
961
+ dict(type='RandomCrop',
962
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
963
+ crop_type='rand',
964
+ ignore_label=-1,
965
+ padding=[0, 0, 0]),
966
+ dict(type='RandomEdgeMask',
967
+ mask_maxsize=50,
968
+ prob=0.2,
969
+ rgb_invalid=[0,0,0],
970
+ label_invalid=-1,),
971
+ dict(type='RandomHorizontalFlip',
972
+ prob=0.4),
973
+ dict(type='PhotoMetricDistortion',
974
+ to_gray_prob=0.1,
975
+ distortion_prob=0.1,),
976
+ dict(type='Weather',
977
+ prob=0.05),
978
+ dict(type='RandomBlur',
979
+ prob=0.05),
980
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
981
+ dict(type='ToTensor'),
982
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
983
+ ],
984
+ #sample_size = 10000,
985
+ ),
986
+ val=dict(
987
+ pipeline=[dict(type='BGR2RGB'),
988
+ dict(type='LabelScaleCononical'),
989
+ dict(type='RandomCrop',
990
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
991
+ crop_type='center',
992
+ ignore_label=-1,
993
+ padding=[0, 0, 0]),
994
+ dict(type='ToTensor'),
995
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
996
+ ],
997
+ sample_size = 1200,
998
+ ),
999
+ ))
1000
+ Hypersim_dataset=dict(
1001
+ data = dict(
1002
+ train=dict(
1003
+ pipeline=[dict(type='BGR2RGB'),
1004
+ dict(type='LabelScaleCononical'),
1005
+ dict(type='RandomResize',
1006
+ prob=0.5,
1007
+ ratio_range=(0.85, 1.15),
1008
+ is_lidar=False),
1009
+ dict(type='RandomCrop',
1010
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
1011
+ crop_type='rand',
1012
+ ignore_label=-1,
1013
+ padding=[0, 0, 0]),
1014
+ dict(type='RandomEdgeMask',
1015
+ mask_maxsize=50,
1016
+ prob=0.2,
1017
+ rgb_invalid=[0,0,0],
1018
+ label_invalid=-1,),
1019
+ dict(type='RandomHorizontalFlip',
1020
+ prob=0.4),
1021
+ dict(type='PhotoMetricDistortion',
1022
+ to_gray_prob=0.1,
1023
+ distortion_prob=0.1,),
1024
+ dict(type='Weather',
1025
+ prob=0.05),
1026
+ dict(type='RandomBlur',
1027
+ prob=0.05),
1028
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
1029
+ dict(type='ToTensor'),
1030
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
1031
+ ],
1032
+ #sample_size = 10000,
1033
+ ),
1034
+ val=dict(
1035
+ pipeline=[dict(type='BGR2RGB'),
1036
+ dict(type='LabelScaleCononical'),
1037
+ dict(type='RandomCrop',
1038
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
1039
+ crop_type='center',
1040
+ ignore_label=-1,
1041
+ padding=[0, 0, 0]),
1042
+ dict(type='ToTensor'),
1043
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
1044
+ ],
1045
+ sample_size = 1200,
1046
+ ),
1047
+ ))
external/Metric3D/training/mono/configs/RAFTDecoder/vit.raft5.small.py ADDED
@@ -0,0 +1,1047 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ _base_=['../_base_/losses/all_losses.py',
2
+ '../_base_/models/encoder_decoder/dino_vit_small_reg.dpt_raft.py',
3
+
4
+ '../_base_/datasets/ddad.py',
5
+ '../_base_/datasets/_data_base_.py',
6
+ '../_base_/datasets/argovers2.py',
7
+ '../_base_/datasets/cityscapes.py',
8
+ '../_base_/datasets/drivingstereo.py',
9
+ '../_base_/datasets/dsec.py',
10
+ '../_base_/datasets/lyft.py',
11
+ '../_base_/datasets/mapillary_psd.py',
12
+ '../_base_/datasets/diml.py',
13
+ '../_base_/datasets/taskonomy.py',
14
+ '../_base_/datasets/uasol.py',
15
+ '../_base_/datasets/pandaset.py',
16
+ '../_base_/datasets/waymo.py',
17
+
18
+ '../_base_/default_runtime.py',
19
+ '../_base_/schedules/schedule_1m.py',
20
+
21
+ '../_base_/datasets/hm3d.py',
22
+ '../_base_/datasets/matterport3d.py',
23
+ '../_base_/datasets/replica.py',
24
+ '../_base_/datasets/vkitti.py',
25
+ ]
26
+
27
+ import numpy as np
28
+ model=dict(
29
+ decode_head=dict(
30
+ type='RAFTDepthNormalDPT5',
31
+ iters=4,
32
+ n_downsample=2,
33
+ detach=False,
34
+ ),
35
+ )
36
+
37
+ # loss method
38
+ losses=dict(
39
+ decoder_losses=[
40
+ dict(type='VNLoss', sample_ratio=0.2, loss_weight=1.0),
41
+ dict(type='GRUSequenceLoss', loss_weight=0.5, loss_gamma=0.9, stereo_sup=0.0),
42
+ dict(type='SkyRegularizationLoss', loss_weight=0.001, sample_ratio=0.4, regress_value=200, normal_regress=[0, 0, -1]),
43
+ dict(type='HDNRandomLoss', loss_weight=0.5, random_num=10),
44
+ dict(type='HDSNRandomLoss', loss_weight=0.5, random_num=20, batch_limit=4),
45
+ dict(type='PWNPlanesLoss', loss_weight=1),
46
+ dict(type='NormalBranchLoss', loss_weight=1.0, loss_fn='NLL_ours_GRU'),
47
+ dict(type='DeNoConsistencyLoss', loss_weight=0.01, loss_fn='CEL', scale=2, depth_detach=True)
48
+ ],
49
+ gru_losses=[
50
+ dict(type='SkyRegularizationLoss', loss_weight=0.001, sample_ratio=0.4, regress_value=200, normal_regress=[0, 0, -1]),
51
+ ],
52
+ )
53
+
54
+ data_array = [
55
+ # Outdoor 1
56
+ [
57
+ dict(UASOL='UASOL_dataset'), #13.6w
58
+ dict(Cityscapes_trainextra='Cityscapes_dataset'), #1.8w
59
+ dict(Cityscapes_sequence='Cityscapes_dataset'), #13.5w
60
+ dict(DIML='DIML_dataset'), # 12.2w
61
+ dict(Waymo='Waymo_dataset'), # 99w
62
+ ],
63
+ # Outdoor 2
64
+ [
65
+ dict(DSEC='DSEC_dataset'),
66
+ dict(Mapillary_PSD='MapillaryPSD_dataset'), # 74.2w
67
+ dict(DrivingStereo='DrivingStereo_dataset'), # 17.6w
68
+ dict(Argovers2='Argovers2_dataset'), # 285.6w
69
+ ],
70
+ # Outdoor 3
71
+ [
72
+ dict(Lyft='Lyft_dataset'), #15.8w
73
+ dict(DDAD='DDAD_dataset'), #7.4w
74
+ dict(Pandaset='Pandaset_dataset'), #3.8w
75
+ dict(Virtual_KITTI='VKITTI_dataset'), # 3.7w # syn
76
+ ],
77
+ #Indoor 1
78
+ [
79
+ dict(Replica='Replica_dataset'), # 5.6w # syn
80
+ dict(Replica_gso='Replica_dataset'), # 10.7w # syn
81
+ dict(Hypersim='Hypersim_dataset'), # 2.4w
82
+ dict(ScanNetAll='ScanNetAll_dataset'),
83
+ ],
84
+ # Indoor 2
85
+ [
86
+ dict(Taskonomy='Taskonomy_dataset'), #447.2w
87
+ dict(Matterport3D='Matterport3D_dataset'), #14.4w
88
+ dict(HM3D='HM3D_dataset'), # 200w, very noisy, sampled some data
89
+ ],
90
+ ]
91
+
92
+
93
+
94
+ # configs of the canonical space
95
+ data_basic=dict(
96
+ canonical_space = dict(
97
+ # img_size=(540, 960),
98
+ focal_length=1000.0,
99
+ ),
100
+ depth_range=(0, 1),
101
+ depth_normalize=(0.1, 200),
102
+ # crop_size=(544, 1216),
103
+ # crop_size = (544, 992),
104
+ crop_size = (616, 1064), # %28 = 0
105
+ )
106
+
107
+ log_interval = 100
108
+ # online evaluation
109
+ # evaluation = dict(online_eval=True, interval=1000, metrics=['abs_rel', 'delta1', 'rmse'], multi_dataset_eval=True)
110
+ interval = 20000
111
+ evaluation = dict(
112
+ #online_eval=True,
113
+ online_eval=False,
114
+ interval=interval,
115
+ metrics=['abs_rel', 'delta1', 'rmse', 'normal_mean', 'normal_rmse', 'normal_a1'],
116
+ multi_dataset_eval=True,
117
+ exclude=['DIML_indoor', 'GL3D', 'Tourism', 'MegaDepth'],
118
+ )
119
+
120
+ # save checkpoint during training, with '*_AMP' is employing the automatic mix precision training
121
+ checkpoint_config = dict(by_epoch=False, interval=interval)
122
+ runner = dict(type='IterBasedRunner_AMP', max_iters=800010)
123
+
124
+ # optimizer
125
+ optimizer = dict(
126
+ type='AdamW',
127
+ # encoder=dict(lr=1e-4, betas=(0.9, 0.999), weight_decay=0.01, eps=1e-6),
128
+ encoder=dict(lr=1e-5, betas=(0.9, 0.999), weight_decay=1e-3, eps=1e-6),
129
+ decoder=dict(lr=1e-4, betas=(0.9, 0.999), weight_decay=0.01, eps=1e-6),
130
+ )
131
+ # schedule
132
+ lr_config = dict(policy='poly',
133
+ warmup='linear',
134
+ warmup_iters=500,
135
+ warmup_ratio=1e-6,
136
+ power=0.9, min_lr=1e-6, by_epoch=False)
137
+
138
+ batchsize_per_gpu = 6
139
+ thread_per_gpu = 4
140
+
141
+ Argovers2_dataset=dict(
142
+ data = dict(
143
+ train=dict(
144
+ pipeline=[dict(type='BGR2RGB'),
145
+ dict(type='LabelScaleCononical'),
146
+ dict(type='RandomResize',
147
+ prob=0.5,
148
+ ratio_range=(0.85, 1.15),
149
+ is_lidar=True),
150
+ dict(type='RandomCrop',
151
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
152
+ crop_type='rand',
153
+ ignore_label=-1,
154
+ padding=[0, 0, 0]),
155
+ dict(type='RandomEdgeMask',
156
+ mask_maxsize=50,
157
+ prob=0.2,
158
+ rgb_invalid=[0,0,0],
159
+ label_invalid=-1,),
160
+ dict(type='RandomHorizontalFlip',
161
+ prob=0.4),
162
+ dict(type='PhotoMetricDistortion',
163
+ to_gray_prob=0.1,
164
+ distortion_prob=0.1,),
165
+ dict(type='Weather',
166
+ prob=0.05),
167
+ dict(type='RandomBlur',
168
+ prob=0.05),
169
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
170
+ dict(type='ToTensor'),
171
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
172
+ ],
173
+ #sample_size = 10000,
174
+ ),
175
+ val=dict(
176
+ pipeline=[dict(type='BGR2RGB'),
177
+ dict(type='LabelScaleCononical'),
178
+ dict(type='RandomCrop',
179
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
180
+ crop_type='center',
181
+ ignore_label=-1,
182
+ padding=[0, 0, 0]),
183
+ dict(type='ToTensor'),
184
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
185
+ ],
186
+ sample_size = 1200,
187
+ ),
188
+ ))
189
+ Cityscapes_dataset=dict(
190
+ data = dict(
191
+ train=dict(
192
+ pipeline=[dict(type='BGR2RGB'),
193
+ dict(type='LabelScaleCononical'),
194
+ dict(type='RandomResize',
195
+ ratio_range=(0.85, 1.15),
196
+ is_lidar=False),
197
+ dict(type='RandomCrop',
198
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
199
+ crop_type='rand',
200
+ ignore_label=-1,
201
+ padding=[0, 0, 0]),
202
+ dict(type='RandomEdgeMask',
203
+ mask_maxsize=50,
204
+ prob=0.2,
205
+ rgb_invalid=[0,0,0],
206
+ label_invalid=-1,),
207
+ dict(type='RandomHorizontalFlip',
208
+ prob=0.4),
209
+ dict(type='PhotoMetricDistortion',
210
+ to_gray_prob=0.1,
211
+ distortion_prob=0.1,),
212
+ dict(type='Weather',
213
+ prob=0.05),
214
+ dict(type='RandomBlur',
215
+ prob=0.05),
216
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
217
+ dict(type='ToTensor'),
218
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
219
+ ],
220
+ #sample_size = 10000,
221
+ ),
222
+ val=dict(
223
+ pipeline=[dict(type='BGR2RGB'),
224
+ dict(type='LabelScaleCononical'),
225
+ dict(type='RandomCrop',
226
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
227
+ crop_type='center',
228
+ ignore_label=-1,
229
+ padding=[0, 0, 0]),
230
+ dict(type='ToTensor'),
231
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
232
+ ],
233
+ sample_size = 1200,
234
+ ),
235
+ ))
236
+ DIML_dataset=dict(
237
+ data = dict(
238
+ train=dict(
239
+ pipeline=[dict(type='BGR2RGB'),
240
+ dict(type='LabelScaleCononical'),
241
+ dict(type='RandomResize',
242
+ ratio_range=(0.85, 1.15),
243
+ is_lidar=False),
244
+ dict(type='RandomCrop',
245
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
246
+ crop_type='rand',
247
+ ignore_label=-1,
248
+ padding=[0, 0, 0]),
249
+ dict(type='RandomEdgeMask',
250
+ mask_maxsize=50,
251
+ prob=0.2,
252
+ rgb_invalid=[0,0,0],
253
+ label_invalid=-1,),
254
+ dict(type='RandomHorizontalFlip',
255
+ prob=0.4),
256
+ dict(type='PhotoMetricDistortion',
257
+ to_gray_prob=0.1,
258
+ distortion_prob=0.1,),
259
+ dict(type='Weather',
260
+ prob=0.05),
261
+ dict(type='RandomBlur',
262
+ prob=0.05),
263
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
264
+ dict(type='ToTensor'),
265
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
266
+ ],
267
+ #sample_size = 10000,
268
+ ),
269
+ val=dict(
270
+ pipeline=[dict(type='BGR2RGB'),
271
+ dict(type='LabelScaleCononical'),
272
+ dict(type='RandomCrop',
273
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
274
+ crop_type='center',
275
+ ignore_label=-1,
276
+ padding=[0, 0, 0]),
277
+ dict(type='ToTensor'),
278
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
279
+ ],
280
+ sample_size = 1200,
281
+ ),
282
+ ))
283
+ Lyft_dataset=dict(
284
+ data = dict(
285
+ train=dict(
286
+ pipeline=[dict(type='BGR2RGB'),
287
+ dict(type='LabelScaleCononical'),
288
+ dict(type='RandomResize',
289
+ prob=0.5,
290
+ ratio_range=(0.85, 1.15),
291
+ is_lidar=True),
292
+ dict(type='RandomCrop',
293
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
294
+ crop_type='rand',
295
+ ignore_label=-1,
296
+ padding=[0, 0, 0]),
297
+ dict(type='RandomEdgeMask',
298
+ mask_maxsize=50,
299
+ prob=0.2,
300
+ rgb_invalid=[0,0,0],
301
+ label_invalid=-1,),
302
+ dict(type='RandomHorizontalFlip',
303
+ prob=0.4),
304
+ dict(type='PhotoMetricDistortion',
305
+ to_gray_prob=0.1,
306
+ distortion_prob=0.1,),
307
+ dict(type='Weather',
308
+ prob=0.05),
309
+ dict(type='RandomBlur',
310
+ prob=0.05),
311
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
312
+ dict(type='ToTensor'),
313
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
314
+ ],
315
+ #sample_size = 10000,
316
+ ),
317
+ val=dict(
318
+ pipeline=[dict(type='BGR2RGB'),
319
+ dict(type='LabelScaleCononical'),
320
+ dict(type='RandomCrop',
321
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
322
+ crop_type='center',
323
+ ignore_label=-1,
324
+ padding=[0, 0, 0]),
325
+ dict(type='ToTensor'),
326
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
327
+ ],
328
+ sample_size = 1200,
329
+ ),
330
+ ))
331
+ DDAD_dataset=dict(
332
+ data = dict(
333
+ train=dict(
334
+ pipeline=[dict(type='BGR2RGB'),
335
+ dict(type='LabelScaleCononical'),
336
+ dict(type='RandomResize',
337
+ prob=0.5,
338
+ ratio_range=(0.85, 1.15),
339
+ is_lidar=True),
340
+ dict(type='RandomCrop',
341
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
342
+ crop_type='rand',
343
+ ignore_label=-1,
344
+ padding=[0, 0, 0]),
345
+ dict(type='RandomEdgeMask',
346
+ mask_maxsize=50,
347
+ prob=0.2,
348
+ rgb_invalid=[0,0,0],
349
+ label_invalid=-1,),
350
+ dict(type='RandomHorizontalFlip',
351
+ prob=0.4),
352
+ dict(type='PhotoMetricDistortion',
353
+ to_gray_prob=0.1,
354
+ distortion_prob=0.1,),
355
+ dict(type='Weather',
356
+ prob=0.05),
357
+ dict(type='RandomBlur',
358
+ prob=0.05),
359
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
360
+ dict(type='ToTensor'),
361
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
362
+ ],
363
+ #sample_size = 10000,
364
+ ),
365
+ val=dict(
366
+ pipeline=[dict(type='BGR2RGB'),
367
+ dict(type='LabelScaleCononical'),
368
+ dict(type='RandomCrop',
369
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
370
+ crop_type='center',
371
+ ignore_label=-1,
372
+ padding=[0, 0, 0]),
373
+ dict(type='ToTensor'),
374
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
375
+ ],
376
+ # sample_size = 1200,
377
+ ),
378
+ ))
379
+ DSEC_dataset=dict(
380
+ data = dict(
381
+ train=dict(
382
+ pipeline=[dict(type='BGR2RGB'),
383
+ dict(type='LabelScaleCononical'),
384
+ dict(type='RandomResize',
385
+ prob=0.5,
386
+ ratio_range=(0.85, 1.15),
387
+ is_lidar=True),
388
+ dict(type='RandomCrop',
389
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
390
+ crop_type='rand',
391
+ ignore_label=-1,
392
+ padding=[0, 0, 0]),
393
+ dict(type='RandomEdgeMask',
394
+ mask_maxsize=50,
395
+ prob=0.2,
396
+ rgb_invalid=[0,0,0],
397
+ label_invalid=-1,),
398
+ dict(type='RandomHorizontalFlip',
399
+ prob=0.4),
400
+ dict(type='PhotoMetricDistortion',
401
+ to_gray_prob=0.1,
402
+ distortion_prob=0.1,),
403
+ dict(type='Weather',
404
+ prob=0.05),
405
+ dict(type='RandomBlur',
406
+ prob=0.05),
407
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
408
+ dict(type='ToTensor'),
409
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
410
+ ],
411
+ #sample_size = 10000,
412
+ ),
413
+ val=dict(
414
+ pipeline=[dict(type='BGR2RGB'),
415
+ dict(type='LabelScaleCononical'),
416
+ dict(type='RandomCrop',
417
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
418
+ crop_type='center',
419
+ ignore_label=-1,
420
+ padding=[0, 0, 0]),
421
+ dict(type='ToTensor'),
422
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
423
+ ],
424
+ sample_size = 1200,
425
+ ),
426
+ ))
427
+ DrivingStereo_dataset=dict(
428
+ data = dict(
429
+ train=dict(
430
+ pipeline=[dict(type='BGR2RGB'),
431
+ dict(type='LabelScaleCononical'),
432
+ dict(type='RandomResize',
433
+ ratio_range=(0.85, 1.15),
434
+ is_lidar=False),
435
+ dict(type='RandomCrop',
436
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
437
+ crop_type='rand',
438
+ ignore_label=-1,
439
+ padding=[0, 0, 0]),
440
+ dict(type='RandomEdgeMask',
441
+ mask_maxsize=50,
442
+ prob=0.2,
443
+ rgb_invalid=[0,0,0],
444
+ label_invalid=-1,),
445
+ dict(type='RandomHorizontalFlip',
446
+ prob=0.4),
447
+ dict(type='PhotoMetricDistortion',
448
+ to_gray_prob=0.1,
449
+ distortion_prob=0.1,),
450
+ dict(type='Weather',
451
+ prob=0.05),
452
+ dict(type='RandomBlur',
453
+ prob=0.05),
454
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
455
+ dict(type='ToTensor'),
456
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
457
+ ],
458
+ #sample_size = 10000,
459
+ ),
460
+ val=dict(
461
+ pipeline=[dict(type='BGR2RGB'),
462
+ dict(type='LabelScaleCononical'),
463
+ dict(type='RandomCrop',
464
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
465
+ crop_type='center',
466
+ ignore_label=-1,
467
+ padding=[0, 0, 0]),
468
+ dict(type='ToTensor'),
469
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
470
+ ],
471
+ sample_size = 1200,
472
+ ),
473
+ ))
474
+ MapillaryPSD_dataset=dict(
475
+ data = dict(
476
+ train=dict(
477
+ pipeline=[dict(type='BGR2RGB'),
478
+ dict(type='LabelScaleCononical'),
479
+ dict(type='RandomResize',
480
+ prob=0.5,
481
+ ratio_range=(0.85, 1.15),
482
+ is_lidar=True),
483
+ dict(type='RandomCrop',
484
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
485
+ crop_type='rand',
486
+ ignore_label=-1,
487
+ padding=[0, 0, 0]),
488
+ dict(type='RandomEdgeMask',
489
+ mask_maxsize=50,
490
+ prob=0.2,
491
+ rgb_invalid=[0,0,0],
492
+ label_invalid=-1,),
493
+ dict(type='RandomHorizontalFlip',
494
+ prob=0.4),
495
+ dict(type='PhotoMetricDistortion',
496
+ to_gray_prob=0.1,
497
+ distortion_prob=0.1,),
498
+ dict(type='Weather',
499
+ prob=0.05),
500
+ dict(type='RandomBlur',
501
+ prob=0.05),
502
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
503
+ dict(type='ToTensor'),
504
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
505
+ ],
506
+ #sample_size = 10000,
507
+ ),
508
+ val=dict(
509
+ pipeline=[dict(type='BGR2RGB'),
510
+ dict(type='LabelScaleCononical'),
511
+ dict(type='RandomCrop',
512
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
513
+ crop_type='center',
514
+ ignore_label=-1,
515
+ padding=[0, 0, 0]),
516
+ dict(type='ToTensor'),
517
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
518
+ ],
519
+ sample_size = 1200,
520
+ ),
521
+ ))
522
+ Pandaset_dataset=dict(
523
+ data = dict(
524
+ train=dict(
525
+ pipeline=[dict(type='BGR2RGB'),
526
+ dict(type='LabelScaleCononical'),
527
+ dict(type='RandomResize',
528
+ prob=0.5,
529
+ ratio_range=(0.85, 1.15),
530
+ is_lidar=True),
531
+ dict(type='RandomCrop',
532
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
533
+ crop_type='rand',
534
+ ignore_label=-1,
535
+ padding=[0, 0, 0]),
536
+ dict(type='RandomEdgeMask',
537
+ mask_maxsize=50,
538
+ prob=0.2,
539
+ rgb_invalid=[0,0,0],
540
+ label_invalid=-1,),
541
+ dict(type='RandomHorizontalFlip',
542
+ prob=0.4),
543
+ dict(type='PhotoMetricDistortion',
544
+ to_gray_prob=0.1,
545
+ distortion_prob=0.1,),
546
+ dict(type='Weather',
547
+ prob=0.05),
548
+ dict(type='RandomBlur',
549
+ prob=0.05),
550
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
551
+ dict(type='ToTensor'),
552
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
553
+ ],
554
+ #sample_size = 10000,
555
+ ),
556
+ val=dict(
557
+ pipeline=[dict(type='BGR2RGB'),
558
+ dict(type='LabelScaleCononical'),
559
+ dict(type='RandomCrop',
560
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
561
+ crop_type='center',
562
+ ignore_label=-1,
563
+ padding=[0, 0, 0]),
564
+ dict(type='ToTensor'),
565
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
566
+ ],
567
+ sample_size = 1200,
568
+ ),
569
+ ))
570
+ Taskonomy_dataset=dict(
571
+ data = dict(
572
+ train=dict(
573
+ pipeline=[dict(type='BGR2RGB'),
574
+ dict(type='LabelScaleCononical'),
575
+ dict(type='RandomResize',
576
+ prob=0.5,
577
+ ratio_range=(0.85, 1.15),
578
+ is_lidar=False),
579
+ dict(type='RandomCrop',
580
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
581
+ crop_type='rand',
582
+ ignore_label=-1,
583
+ padding=[0, 0, 0]),
584
+ dict(type='RandomEdgeMask',
585
+ mask_maxsize=50,
586
+ prob=0.2,
587
+ rgb_invalid=[0,0,0],
588
+ label_invalid=-1,),
589
+ dict(type='RandomHorizontalFlip',
590
+ prob=0.4),
591
+ dict(type='PhotoMetricDistortion',
592
+ to_gray_prob=0.1,
593
+ distortion_prob=0.1,),
594
+ dict(type='Weather',
595
+ prob=0.05),
596
+ dict(type='RandomBlur',
597
+ prob=0.05),
598
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
599
+ dict(type='ToTensor'),
600
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
601
+ ],
602
+ #sample_size = 10000,
603
+ ),
604
+ val=dict(
605
+ pipeline=[dict(type='BGR2RGB'),
606
+ dict(type='LabelScaleCononical'),
607
+ dict(type='RandomCrop',
608
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
609
+ crop_type='center',
610
+ ignore_label=-1,
611
+ padding=[0, 0, 0]),
612
+ dict(type='ToTensor'),
613
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
614
+ ],
615
+ sample_size = 1200,
616
+ ),
617
+ ))
618
+ UASOL_dataset=dict(
619
+ data = dict(
620
+ train=dict(
621
+ pipeline=[dict(type='BGR2RGB'),
622
+ dict(type='LabelScaleCononical'),
623
+ dict(type='RandomResize',
624
+ prob=0.5,
625
+ ratio_range=(0.85, 1.15),
626
+ is_lidar=False),
627
+ dict(type='RandomCrop',
628
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
629
+ crop_type='rand',
630
+ ignore_label=-1,
631
+ padding=[0, 0, 0]),
632
+ dict(type='RandomEdgeMask',
633
+ mask_maxsize=50,
634
+ prob=0.2,
635
+ rgb_invalid=[0,0,0],
636
+ label_invalid=-1,),
637
+ dict(type='RandomHorizontalFlip',
638
+ prob=0.4),
639
+ dict(type='PhotoMetricDistortion',
640
+ to_gray_prob=0.1,
641
+ distortion_prob=0.1,),
642
+ dict(type='Weather',
643
+ prob=0.05),
644
+ dict(type='RandomBlur',
645
+ prob=0.05),
646
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
647
+ dict(type='ToTensor'),
648
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
649
+ ],
650
+ #sample_size = 10000,
651
+ ),
652
+ val=dict(
653
+ pipeline=[dict(type='BGR2RGB'),
654
+ dict(type='LabelScaleCononical'),
655
+ dict(type='RandomCrop',
656
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
657
+ crop_type='center',
658
+ ignore_label=-1,
659
+ padding=[0, 0, 0]),
660
+ dict(type='ToTensor'),
661
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
662
+ ],
663
+ sample_size = 1200,
664
+ ),
665
+ ))
666
+ Waymo_dataset=dict(
667
+ data = dict(
668
+ train=dict(
669
+ pipeline=[dict(type='BGR2RGB'),
670
+ dict(type='LabelScaleCononical'),
671
+ dict(type='RandomResize',
672
+ prob=0.5,
673
+ ratio_range=(0.85, 1.15),
674
+ is_lidar=True),
675
+ dict(type='RandomCrop',
676
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
677
+ crop_type='rand',
678
+ ignore_label=-1,
679
+ padding=[0, 0, 0]),
680
+ dict(type='RandomEdgeMask',
681
+ mask_maxsize=50,
682
+ prob=0.2,
683
+ rgb_invalid=[0,0,0],
684
+ label_invalid=-1,),
685
+ dict(type='RandomHorizontalFlip',
686
+ prob=0.4),
687
+ dict(type='PhotoMetricDistortion',
688
+ to_gray_prob=0.1,
689
+ distortion_prob=0.1,),
690
+ dict(type='Weather',
691
+ prob=0.05),
692
+ dict(type='RandomBlur',
693
+ prob=0.05),
694
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
695
+ dict(type='ToTensor'),
696
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
697
+ ],
698
+ #sample_size = 10000,
699
+ ),
700
+ val=dict(
701
+ pipeline=[dict(type='BGR2RGB'),
702
+ dict(type='LabelScaleCononical'),
703
+ dict(type='RandomCrop',
704
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
705
+ crop_type='center',
706
+ ignore_label=-1,
707
+ padding=[0, 0, 0]),
708
+ dict(type='ToTensor'),
709
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
710
+ ],
711
+ sample_size = 1200,
712
+ ),
713
+ ))
714
+ Matterport3D_dataset=dict(
715
+ data = dict(
716
+ train=dict(
717
+ pipeline=[dict(type='BGR2RGB'),
718
+ dict(type='LabelScaleCononical'),
719
+ dict(type='RandomResize',
720
+ prob=0.5,
721
+ ratio_range=(0.85, 1.15),
722
+ is_lidar=False),
723
+ dict(type='RandomCrop',
724
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
725
+ crop_type='rand',
726
+ ignore_label=-1,
727
+ padding=[0, 0, 0]),
728
+ dict(type='RandomEdgeMask',
729
+ mask_maxsize=50,
730
+ prob=0.2,
731
+ rgb_invalid=[0,0,0],
732
+ label_invalid=-1,),
733
+ dict(type='RandomHorizontalFlip',
734
+ prob=0.4),
735
+ dict(type='PhotoMetricDistortion',
736
+ to_gray_prob=0.1,
737
+ distortion_prob=0.1,),
738
+ dict(type='Weather',
739
+ prob=0.05),
740
+ dict(type='RandomBlur',
741
+ prob=0.05),
742
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
743
+ dict(type='ToTensor'),
744
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
745
+ ],
746
+ #sample_size = 10000,
747
+ ),
748
+ val=dict(
749
+ pipeline=[dict(type='BGR2RGB'),
750
+ dict(type='LabelScaleCononical'),
751
+ dict(type='RandomCrop',
752
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
753
+ crop_type='center',
754
+ ignore_label=-1,
755
+ padding=[0, 0, 0]),
756
+ dict(type='ToTensor'),
757
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
758
+ ],
759
+ sample_size = 1200,
760
+ ),
761
+ ))
762
+ Replica_dataset=dict(
763
+ data = dict(
764
+ train=dict(
765
+ pipeline=[dict(type='BGR2RGB'),
766
+ dict(type='LabelScaleCononical'),
767
+ dict(type='RandomResize',
768
+ prob=0.5,
769
+ ratio_range=(0.85, 1.15),
770
+ is_lidar=False),
771
+ dict(type='RandomCrop',
772
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
773
+ crop_type='rand',
774
+ ignore_label=-1,
775
+ padding=[0, 0, 0]),
776
+ dict(type='RandomEdgeMask',
777
+ mask_maxsize=50,
778
+ prob=0.2,
779
+ rgb_invalid=[0,0,0],
780
+ label_invalid=-1,),
781
+ dict(type='RandomHorizontalFlip',
782
+ prob=0.4),
783
+ dict(type='PhotoMetricDistortion',
784
+ to_gray_prob=0.1,
785
+ distortion_prob=0.1,),
786
+ dict(type='Weather',
787
+ prob=0.05),
788
+ dict(type='RandomBlur',
789
+ prob=0.05),
790
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
791
+ dict(type='ToTensor'),
792
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
793
+ ],
794
+ #sample_size = 10000,
795
+ ),
796
+ val=dict(
797
+ pipeline=[dict(type='BGR2RGB'),
798
+ dict(type='LabelScaleCononical'),
799
+ dict(type='RandomCrop',
800
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
801
+ crop_type='center',
802
+ ignore_label=-1,
803
+ padding=[0, 0, 0]),
804
+ dict(type='ToTensor'),
805
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
806
+ ],
807
+ sample_size = 1200,
808
+ ),
809
+ ))
810
+ VKITTI_dataset=dict(
811
+ data = dict(
812
+ train=dict(
813
+ pipeline=[dict(type='BGR2RGB'),
814
+ dict(type='LabelScaleCononical'),
815
+ dict(type='RandomResize',
816
+ prob=0.5,
817
+ ratio_range=(0.85, 1.15),
818
+ is_lidar=False),
819
+ dict(type='RandomCrop',
820
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
821
+ crop_type='rand',
822
+ ignore_label=-1,
823
+ padding=[0, 0, 0]),
824
+ dict(type='RandomEdgeMask',
825
+ mask_maxsize=50,
826
+ prob=0.2,
827
+ rgb_invalid=[0,0,0],
828
+ label_invalid=-1,),
829
+ dict(type='RandomHorizontalFlip',
830
+ prob=0.4),
831
+ dict(type='PhotoMetricDistortion',
832
+ to_gray_prob=0.1,
833
+ distortion_prob=0.1,),
834
+ dict(type='Weather',
835
+ prob=0.05),
836
+ dict(type='RandomBlur',
837
+ prob=0.05),
838
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
839
+ dict(type='ToTensor'),
840
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
841
+ ],
842
+ #sample_size = 10000,
843
+ ),
844
+ val=dict(
845
+ pipeline=[dict(type='BGR2RGB'),
846
+ dict(type='LabelScaleCononical'),
847
+ dict(type='RandomCrop',
848
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
849
+ crop_type='center',
850
+ ignore_label=-1,
851
+ padding=[0, 0, 0]),
852
+ dict(type='ToTensor'),
853
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
854
+ ],
855
+ sample_size = 1200,
856
+ ),
857
+ ))
858
+ HM3D_dataset=dict(
859
+ data = dict(
860
+ train=dict(
861
+ pipeline=[dict(type='BGR2RGB'),
862
+ dict(type='LabelScaleCononical'),
863
+ dict(type='RandomResize',
864
+ prob=0.5,
865
+ ratio_range=(0.75, 1.3),
866
+ is_lidar=False),
867
+ dict(type='RandomCrop',
868
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
869
+ crop_type='rand',
870
+ ignore_label=-1,
871
+ padding=[0, 0, 0]),
872
+ dict(type='RandomEdgeMask',
873
+ mask_maxsize=50,
874
+ prob=0.2,
875
+ rgb_invalid=[0,0,0],
876
+ label_invalid=-1,),
877
+ dict(type='RandomHorizontalFlip',
878
+ prob=0.4),
879
+ dict(type='PhotoMetricDistortion',
880
+ to_gray_prob=0.1,
881
+ distortion_prob=0.1,),
882
+ dict(type='Weather',
883
+ prob=0.05),
884
+ dict(type='RandomBlur',
885
+ prob=0.05),
886
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
887
+ dict(type='ToTensor'),
888
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
889
+ ],
890
+ #sample_size = 10000,
891
+ ),
892
+ val=dict(
893
+ pipeline=[dict(type='BGR2RGB'),
894
+ dict(type='LabelScaleCononical'),
895
+ dict(type='RandomCrop',
896
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
897
+ crop_type='center',
898
+ ignore_label=-1,
899
+ padding=[0, 0, 0]),
900
+ dict(type='ToTensor'),
901
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
902
+ ],
903
+ sample_size = 1200,
904
+ ),
905
+ ))
906
+ BlendedMVG_omni_dataset=dict(
907
+ data = dict(
908
+ train=dict(
909
+ pipeline=[dict(type='BGR2RGB'),
910
+ dict(type='LabelScaleCononical'),
911
+ dict(type='RandomResize',
912
+ prob=0.5,
913
+ ratio_range=(0.75, 1.3),
914
+ is_lidar=False),
915
+ dict(type='RandomCrop',
916
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
917
+ crop_type='rand',
918
+ ignore_label=-1,
919
+ padding=[0, 0, 0]),
920
+ dict(type='RandomEdgeMask',
921
+ mask_maxsize=50,
922
+ prob=0.2,
923
+ rgb_invalid=[0,0,0],
924
+ label_invalid=-1,),
925
+ dict(type='RandomHorizontalFlip',
926
+ prob=0.4),
927
+ dict(type='PhotoMetricDistortion',
928
+ to_gray_prob=0.1,
929
+ distortion_prob=0.1,),
930
+ dict(type='Weather',
931
+ prob=0.05),
932
+ dict(type='RandomBlur',
933
+ prob=0.05),
934
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
935
+ dict(type='ToTensor'),
936
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
937
+ ],
938
+ ),
939
+ val=dict(
940
+ pipeline=[dict(type='BGR2RGB'),
941
+ dict(type='LabelScaleCononical'),
942
+ dict(type='RandomCrop',
943
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
944
+ crop_type='center',
945
+ ignore_label=-1,
946
+ padding=[0, 0, 0]),
947
+ dict(type='ToTensor'),
948
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
949
+ ],
950
+ ),
951
+ ))
952
+ ScanNetAll_dataset=dict(
953
+ data = dict(
954
+ train=dict(
955
+ pipeline=[dict(type='BGR2RGB'),
956
+ dict(type='LabelScaleCononical'),
957
+ dict(type='RandomResize',
958
+ prob=0.5,
959
+ ratio_range=(0.85, 1.15),
960
+ is_lidar=False),
961
+ dict(type='RandomCrop',
962
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
963
+ crop_type='rand',
964
+ ignore_label=-1,
965
+ padding=[0, 0, 0]),
966
+ dict(type='RandomEdgeMask',
967
+ mask_maxsize=50,
968
+ prob=0.2,
969
+ rgb_invalid=[0,0,0],
970
+ label_invalid=-1,),
971
+ dict(type='RandomHorizontalFlip',
972
+ prob=0.4),
973
+ dict(type='PhotoMetricDistortion',
974
+ to_gray_prob=0.1,
975
+ distortion_prob=0.1,),
976
+ dict(type='Weather',
977
+ prob=0.05),
978
+ dict(type='RandomBlur',
979
+ prob=0.05),
980
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
981
+ dict(type='ToTensor'),
982
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
983
+ ],
984
+ #sample_size = 10000,
985
+ ),
986
+ val=dict(
987
+ pipeline=[dict(type='BGR2RGB'),
988
+ dict(type='LabelScaleCononical'),
989
+ dict(type='RandomCrop',
990
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
991
+ crop_type='center',
992
+ ignore_label=-1,
993
+ padding=[0, 0, 0]),
994
+ dict(type='ToTensor'),
995
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
996
+ ],
997
+ sample_size = 1200,
998
+ ),
999
+ ))
1000
+ Hypersim_dataset=dict(
1001
+ data = dict(
1002
+ train=dict(
1003
+ pipeline=[dict(type='BGR2RGB'),
1004
+ dict(type='LabelScaleCononical'),
1005
+ dict(type='RandomResize',
1006
+ prob=0.5,
1007
+ ratio_range=(0.85, 1.15),
1008
+ is_lidar=False),
1009
+ dict(type='RandomCrop',
1010
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
1011
+ crop_type='rand',
1012
+ ignore_label=-1,
1013
+ padding=[0, 0, 0]),
1014
+ dict(type='RandomEdgeMask',
1015
+ mask_maxsize=50,
1016
+ prob=0.2,
1017
+ rgb_invalid=[0,0,0],
1018
+ label_invalid=-1,),
1019
+ dict(type='RandomHorizontalFlip',
1020
+ prob=0.4),
1021
+ dict(type='PhotoMetricDistortion',
1022
+ to_gray_prob=0.1,
1023
+ distortion_prob=0.1,),
1024
+ dict(type='Weather',
1025
+ prob=0.05),
1026
+ dict(type='RandomBlur',
1027
+ prob=0.05),
1028
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
1029
+ dict(type='ToTensor'),
1030
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
1031
+ ],
1032
+ #sample_size = 10000,
1033
+ ),
1034
+ val=dict(
1035
+ pipeline=[dict(type='BGR2RGB'),
1036
+ dict(type='LabelScaleCononical'),
1037
+ dict(type='RandomCrop',
1038
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
1039
+ crop_type='center',
1040
+ ignore_label=-1,
1041
+ padding=[0, 0, 0]),
1042
+ dict(type='ToTensor'),
1043
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
1044
+ ],
1045
+ sample_size = 1200,
1046
+ ),
1047
+ ))
external/Metric3D/training/mono/configs/RAFTDecoder/vit.raft5.small.sanity_check.py ADDED
@@ -0,0 +1,1014 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ _base_=['../_base_/losses/all_losses.py',
2
+ '../_base_/models/encoder_decoder/dino_vit_small_reg.dpt_raft.py',
3
+
4
+ '../_base_/datasets/ddad.py',
5
+ '../_base_/datasets/_data_base_.py',
6
+ '../_base_/datasets/argovers2.py',
7
+ '../_base_/datasets/cityscapes.py',
8
+ '../_base_/datasets/drivingstereo.py',
9
+ '../_base_/datasets/dsec.py',
10
+ '../_base_/datasets/lyft.py',
11
+ '../_base_/datasets/mapillary_psd.py',
12
+ '../_base_/datasets/diml.py',
13
+ '../_base_/datasets/taskonomy.py',
14
+ '../_base_/datasets/uasol.py',
15
+ '../_base_/datasets/pandaset.py',
16
+ '../_base_/datasets/waymo.py',
17
+
18
+ '../_base_/default_runtime.py',
19
+ '../_base_/schedules/schedule_1m.py',
20
+
21
+ '../_base_/datasets/hm3d.py',
22
+ '../_base_/datasets/matterport3d.py',
23
+ '../_base_/datasets/replica.py',
24
+ '../_base_/datasets/vkitti.py',
25
+ ]
26
+
27
+ import numpy as np
28
+ model=dict(
29
+ decode_head=dict(
30
+ type='RAFTDepthNormalDPT5',
31
+ iters=4,
32
+ n_downsample=2,
33
+ detach=False,
34
+ ),
35
+ )
36
+
37
+ # loss method
38
+ losses=dict(
39
+ decoder_losses=[
40
+ dict(type='VNLoss', sample_ratio=0.2, loss_weight=1.0),
41
+ dict(type='GRUSequenceLoss', loss_weight=0.5, loss_gamma=0.9, stereo_sup=0.0),
42
+ dict(type='SkyRegularizationLoss', loss_weight=0.001, sample_ratio=0.4, regress_value=200, normal_regress=[0, 0, -1]),
43
+ dict(type='HDNRandomLoss', loss_weight=0.5, random_num=10),
44
+ dict(type='HDSNRandomLoss', loss_weight=0.5, random_num=20, batch_limit=4),
45
+ dict(type='PWNPlanesLoss', loss_weight=1),
46
+ dict(type='NormalBranchLoss', loss_weight=1.0, loss_fn='NLL_ours_GRU'),
47
+ dict(type='DeNoConsistencyLoss', loss_weight=0.01, loss_fn='CEL', scale=2, depth_detach=True)
48
+ ],
49
+ gru_losses=[
50
+ dict(type='SkyRegularizationLoss', loss_weight=0.001, sample_ratio=0.4, regress_value=200, normal_regress=[0, 0, -1]),
51
+ ],
52
+ )
53
+
54
+ data_array = [
55
+ [
56
+ dict(Matterport3D='Matterport3D_dataset'), #14.4w
57
+ ],
58
+ ]
59
+
60
+
61
+
62
+ # configs of the canonical space
63
+ data_basic=dict(
64
+ canonical_space = dict(
65
+ # img_size=(540, 960),
66
+ focal_length=1000.0,
67
+ ),
68
+ depth_range=(0, 1),
69
+ depth_normalize=(0.1, 200),
70
+ # crop_size=(544, 1216),
71
+ # crop_size = (544, 992),
72
+ crop_size = (616, 1064), # %28 = 0
73
+ )
74
+
75
+ log_interval = 100
76
+ # online evaluation
77
+ # evaluation = dict(online_eval=True, interval=1000, metrics=['abs_rel', 'delta1', 'rmse'], multi_dataset_eval=True)
78
+ interval = 20000
79
+ evaluation = dict(
80
+ #online_eval=True,
81
+ online_eval=False,
82
+ interval=interval,
83
+ metrics=['abs_rel', 'delta1', 'rmse', 'normal_mean', 'normal_rmse', 'normal_a1'],
84
+ multi_dataset_eval=True,
85
+ )
86
+
87
+ # save checkpoint during training, with '*_AMP' is employing the automatic mix precision training
88
+ checkpoint_config = dict(by_epoch=False, interval=interval)
89
+ runner = dict(type='IterBasedRunner_AMP', max_iters=800010)
90
+
91
+ # optimizer
92
+ optimizer = dict(
93
+ type='AdamW',
94
+ # encoder=dict(lr=1e-4, betas=(0.9, 0.999), weight_decay=0.01, eps=1e-6),
95
+ encoder=dict(lr=1e-5, betas=(0.9, 0.999), weight_decay=1e-3, eps=1e-6),
96
+ decoder=dict(lr=1e-4, betas=(0.9, 0.999), weight_decay=0.01, eps=1e-6),
97
+ )
98
+ # schedule
99
+ lr_config = dict(policy='poly',
100
+ warmup='linear',
101
+ warmup_iters=500,
102
+ warmup_ratio=1e-6,
103
+ power=0.9, min_lr=1e-6, by_epoch=False)
104
+
105
+ batchsize_per_gpu = 3
106
+ thread_per_gpu = 4
107
+
108
+ Argovers2_dataset=dict(
109
+ data = dict(
110
+ train=dict(
111
+ pipeline=[dict(type='BGR2RGB'),
112
+ dict(type='LabelScaleCononical'),
113
+ dict(type='RandomResize',
114
+ prob=0.5,
115
+ ratio_range=(0.85, 1.15),
116
+ is_lidar=True),
117
+ dict(type='RandomCrop',
118
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
119
+ crop_type='rand',
120
+ ignore_label=-1,
121
+ padding=[0, 0, 0]),
122
+ dict(type='RandomEdgeMask',
123
+ mask_maxsize=50,
124
+ prob=0.2,
125
+ rgb_invalid=[0,0,0],
126
+ label_invalid=-1,),
127
+ dict(type='RandomHorizontalFlip',
128
+ prob=0.4),
129
+ dict(type='PhotoMetricDistortion',
130
+ to_gray_prob=0.1,
131
+ distortion_prob=0.1,),
132
+ dict(type='Weather',
133
+ prob=0.05),
134
+ dict(type='RandomBlur',
135
+ prob=0.05),
136
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
137
+ dict(type='ToTensor'),
138
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
139
+ ],
140
+ #sample_size = 10000,
141
+ ),
142
+ val=dict(
143
+ pipeline=[dict(type='BGR2RGB'),
144
+ dict(type='LabelScaleCononical'),
145
+ dict(type='RandomCrop',
146
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
147
+ crop_type='center',
148
+ ignore_label=-1,
149
+ padding=[0, 0, 0]),
150
+ dict(type='ToTensor'),
151
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
152
+ ],
153
+ sample_size = 1200,
154
+ ),
155
+ ))
156
+ Cityscapes_dataset=dict(
157
+ data = dict(
158
+ train=dict(
159
+ pipeline=[dict(type='BGR2RGB'),
160
+ dict(type='LabelScaleCononical'),
161
+ dict(type='RandomResize',
162
+ ratio_range=(0.85, 1.15),
163
+ is_lidar=False),
164
+ dict(type='RandomCrop',
165
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
166
+ crop_type='rand',
167
+ ignore_label=-1,
168
+ padding=[0, 0, 0]),
169
+ dict(type='RandomEdgeMask',
170
+ mask_maxsize=50,
171
+ prob=0.2,
172
+ rgb_invalid=[0,0,0],
173
+ label_invalid=-1,),
174
+ dict(type='RandomHorizontalFlip',
175
+ prob=0.4),
176
+ dict(type='PhotoMetricDistortion',
177
+ to_gray_prob=0.1,
178
+ distortion_prob=0.1,),
179
+ dict(type='Weather',
180
+ prob=0.05),
181
+ dict(type='RandomBlur',
182
+ prob=0.05),
183
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
184
+ dict(type='ToTensor'),
185
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
186
+ ],
187
+ #sample_size = 10000,
188
+ ),
189
+ val=dict(
190
+ pipeline=[dict(type='BGR2RGB'),
191
+ dict(type='LabelScaleCononical'),
192
+ dict(type='RandomCrop',
193
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
194
+ crop_type='center',
195
+ ignore_label=-1,
196
+ padding=[0, 0, 0]),
197
+ dict(type='ToTensor'),
198
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
199
+ ],
200
+ sample_size = 1200,
201
+ ),
202
+ ))
203
+ DIML_dataset=dict(
204
+ data = dict(
205
+ train=dict(
206
+ pipeline=[dict(type='BGR2RGB'),
207
+ dict(type='LabelScaleCononical'),
208
+ dict(type='RandomResize',
209
+ ratio_range=(0.85, 1.15),
210
+ is_lidar=False),
211
+ dict(type='RandomCrop',
212
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
213
+ crop_type='rand',
214
+ ignore_label=-1,
215
+ padding=[0, 0, 0]),
216
+ dict(type='RandomEdgeMask',
217
+ mask_maxsize=50,
218
+ prob=0.2,
219
+ rgb_invalid=[0,0,0],
220
+ label_invalid=-1,),
221
+ dict(type='RandomHorizontalFlip',
222
+ prob=0.4),
223
+ dict(type='PhotoMetricDistortion',
224
+ to_gray_prob=0.1,
225
+ distortion_prob=0.1,),
226
+ dict(type='Weather',
227
+ prob=0.05),
228
+ dict(type='RandomBlur',
229
+ prob=0.05),
230
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
231
+ dict(type='ToTensor'),
232
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
233
+ ],
234
+ #sample_size = 10000,
235
+ ),
236
+ val=dict(
237
+ pipeline=[dict(type='BGR2RGB'),
238
+ dict(type='LabelScaleCononical'),
239
+ dict(type='RandomCrop',
240
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
241
+ crop_type='center',
242
+ ignore_label=-1,
243
+ padding=[0, 0, 0]),
244
+ dict(type='ToTensor'),
245
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
246
+ ],
247
+ sample_size = 1200,
248
+ ),
249
+ ))
250
+ Lyft_dataset=dict(
251
+ data = dict(
252
+ train=dict(
253
+ pipeline=[dict(type='BGR2RGB'),
254
+ dict(type='LabelScaleCononical'),
255
+ dict(type='RandomResize',
256
+ prob=0.5,
257
+ ratio_range=(0.85, 1.15),
258
+ is_lidar=True),
259
+ dict(type='RandomCrop',
260
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
261
+ crop_type='rand',
262
+ ignore_label=-1,
263
+ padding=[0, 0, 0]),
264
+ dict(type='RandomEdgeMask',
265
+ mask_maxsize=50,
266
+ prob=0.2,
267
+ rgb_invalid=[0,0,0],
268
+ label_invalid=-1,),
269
+ dict(type='RandomHorizontalFlip',
270
+ prob=0.4),
271
+ dict(type='PhotoMetricDistortion',
272
+ to_gray_prob=0.1,
273
+ distortion_prob=0.1,),
274
+ dict(type='Weather',
275
+ prob=0.05),
276
+ dict(type='RandomBlur',
277
+ prob=0.05),
278
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
279
+ dict(type='ToTensor'),
280
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
281
+ ],
282
+ #sample_size = 10000,
283
+ ),
284
+ val=dict(
285
+ pipeline=[dict(type='BGR2RGB'),
286
+ dict(type='LabelScaleCononical'),
287
+ dict(type='RandomCrop',
288
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
289
+ crop_type='center',
290
+ ignore_label=-1,
291
+ padding=[0, 0, 0]),
292
+ dict(type='ToTensor'),
293
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
294
+ ],
295
+ sample_size = 1200,
296
+ ),
297
+ ))
298
+ DDAD_dataset=dict(
299
+ data = dict(
300
+ train=dict(
301
+ pipeline=[dict(type='BGR2RGB'),
302
+ dict(type='LabelScaleCononical'),
303
+ dict(type='RandomResize',
304
+ prob=0.5,
305
+ ratio_range=(0.85, 1.15),
306
+ is_lidar=True),
307
+ dict(type='RandomCrop',
308
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
309
+ crop_type='rand',
310
+ ignore_label=-1,
311
+ padding=[0, 0, 0]),
312
+ dict(type='RandomEdgeMask',
313
+ mask_maxsize=50,
314
+ prob=0.2,
315
+ rgb_invalid=[0,0,0],
316
+ label_invalid=-1,),
317
+ dict(type='RandomHorizontalFlip',
318
+ prob=0.4),
319
+ dict(type='PhotoMetricDistortion',
320
+ to_gray_prob=0.1,
321
+ distortion_prob=0.1,),
322
+ dict(type='Weather',
323
+ prob=0.05),
324
+ dict(type='RandomBlur',
325
+ prob=0.05),
326
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
327
+ dict(type='ToTensor'),
328
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
329
+ ],
330
+ #sample_size = 10000,
331
+ ),
332
+ val=dict(
333
+ pipeline=[dict(type='BGR2RGB'),
334
+ dict(type='LabelScaleCononical'),
335
+ dict(type='RandomCrop',
336
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
337
+ crop_type='center',
338
+ ignore_label=-1,
339
+ padding=[0, 0, 0]),
340
+ dict(type='ToTensor'),
341
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
342
+ ],
343
+ # sample_size = 1200,
344
+ ),
345
+ ))
346
+ DSEC_dataset=dict(
347
+ data = dict(
348
+ train=dict(
349
+ pipeline=[dict(type='BGR2RGB'),
350
+ dict(type='LabelScaleCononical'),
351
+ dict(type='RandomResize',
352
+ prob=0.5,
353
+ ratio_range=(0.85, 1.15),
354
+ is_lidar=True),
355
+ dict(type='RandomCrop',
356
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
357
+ crop_type='rand',
358
+ ignore_label=-1,
359
+ padding=[0, 0, 0]),
360
+ dict(type='RandomEdgeMask',
361
+ mask_maxsize=50,
362
+ prob=0.2,
363
+ rgb_invalid=[0,0,0],
364
+ label_invalid=-1,),
365
+ dict(type='RandomHorizontalFlip',
366
+ prob=0.4),
367
+ dict(type='PhotoMetricDistortion',
368
+ to_gray_prob=0.1,
369
+ distortion_prob=0.1,),
370
+ dict(type='Weather',
371
+ prob=0.05),
372
+ dict(type='RandomBlur',
373
+ prob=0.05),
374
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
375
+ dict(type='ToTensor'),
376
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
377
+ ],
378
+ #sample_size = 10000,
379
+ ),
380
+ val=dict(
381
+ pipeline=[dict(type='BGR2RGB'),
382
+ dict(type='LabelScaleCononical'),
383
+ dict(type='RandomCrop',
384
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
385
+ crop_type='center',
386
+ ignore_label=-1,
387
+ padding=[0, 0, 0]),
388
+ dict(type='ToTensor'),
389
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
390
+ ],
391
+ sample_size = 1200,
392
+ ),
393
+ ))
394
+ DrivingStereo_dataset=dict(
395
+ data = dict(
396
+ train=dict(
397
+ pipeline=[dict(type='BGR2RGB'),
398
+ dict(type='LabelScaleCononical'),
399
+ dict(type='RandomResize',
400
+ ratio_range=(0.85, 1.15),
401
+ is_lidar=False),
402
+ dict(type='RandomCrop',
403
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
404
+ crop_type='rand',
405
+ ignore_label=-1,
406
+ padding=[0, 0, 0]),
407
+ dict(type='RandomEdgeMask',
408
+ mask_maxsize=50,
409
+ prob=0.2,
410
+ rgb_invalid=[0,0,0],
411
+ label_invalid=-1,),
412
+ dict(type='RandomHorizontalFlip',
413
+ prob=0.4),
414
+ dict(type='PhotoMetricDistortion',
415
+ to_gray_prob=0.1,
416
+ distortion_prob=0.1,),
417
+ dict(type='Weather',
418
+ prob=0.05),
419
+ dict(type='RandomBlur',
420
+ prob=0.05),
421
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
422
+ dict(type='ToTensor'),
423
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
424
+ ],
425
+ #sample_size = 10000,
426
+ ),
427
+ val=dict(
428
+ pipeline=[dict(type='BGR2RGB'),
429
+ dict(type='LabelScaleCononical'),
430
+ dict(type='RandomCrop',
431
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
432
+ crop_type='center',
433
+ ignore_label=-1,
434
+ padding=[0, 0, 0]),
435
+ dict(type='ToTensor'),
436
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
437
+ ],
438
+ sample_size = 1200,
439
+ ),
440
+ ))
441
+ MapillaryPSD_dataset=dict(
442
+ data = dict(
443
+ train=dict(
444
+ pipeline=[dict(type='BGR2RGB'),
445
+ dict(type='LabelScaleCononical'),
446
+ dict(type='RandomResize',
447
+ prob=0.5,
448
+ ratio_range=(0.85, 1.15),
449
+ is_lidar=True),
450
+ dict(type='RandomCrop',
451
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
452
+ crop_type='rand',
453
+ ignore_label=-1,
454
+ padding=[0, 0, 0]),
455
+ dict(type='RandomEdgeMask',
456
+ mask_maxsize=50,
457
+ prob=0.2,
458
+ rgb_invalid=[0,0,0],
459
+ label_invalid=-1,),
460
+ dict(type='RandomHorizontalFlip',
461
+ prob=0.4),
462
+ dict(type='PhotoMetricDistortion',
463
+ to_gray_prob=0.1,
464
+ distortion_prob=0.1,),
465
+ dict(type='Weather',
466
+ prob=0.05),
467
+ dict(type='RandomBlur',
468
+ prob=0.05),
469
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
470
+ dict(type='ToTensor'),
471
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
472
+ ],
473
+ #sample_size = 10000,
474
+ ),
475
+ val=dict(
476
+ pipeline=[dict(type='BGR2RGB'),
477
+ dict(type='LabelScaleCononical'),
478
+ dict(type='RandomCrop',
479
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
480
+ crop_type='center',
481
+ ignore_label=-1,
482
+ padding=[0, 0, 0]),
483
+ dict(type='ToTensor'),
484
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
485
+ ],
486
+ sample_size = 1200,
487
+ ),
488
+ ))
489
+ Pandaset_dataset=dict(
490
+ data = dict(
491
+ train=dict(
492
+ pipeline=[dict(type='BGR2RGB'),
493
+ dict(type='LabelScaleCononical'),
494
+ dict(type='RandomResize',
495
+ prob=0.5,
496
+ ratio_range=(0.85, 1.15),
497
+ is_lidar=True),
498
+ dict(type='RandomCrop',
499
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
500
+ crop_type='rand',
501
+ ignore_label=-1,
502
+ padding=[0, 0, 0]),
503
+ dict(type='RandomEdgeMask',
504
+ mask_maxsize=50,
505
+ prob=0.2,
506
+ rgb_invalid=[0,0,0],
507
+ label_invalid=-1,),
508
+ dict(type='RandomHorizontalFlip',
509
+ prob=0.4),
510
+ dict(type='PhotoMetricDistortion',
511
+ to_gray_prob=0.1,
512
+ distortion_prob=0.1,),
513
+ dict(type='Weather',
514
+ prob=0.05),
515
+ dict(type='RandomBlur',
516
+ prob=0.05),
517
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
518
+ dict(type='ToTensor'),
519
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
520
+ ],
521
+ #sample_size = 10000,
522
+ ),
523
+ val=dict(
524
+ pipeline=[dict(type='BGR2RGB'),
525
+ dict(type='LabelScaleCononical'),
526
+ dict(type='RandomCrop',
527
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
528
+ crop_type='center',
529
+ ignore_label=-1,
530
+ padding=[0, 0, 0]),
531
+ dict(type='ToTensor'),
532
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
533
+ ],
534
+ sample_size = 1200,
535
+ ),
536
+ ))
537
+ Taskonomy_dataset=dict(
538
+ data = dict(
539
+ train=dict(
540
+ pipeline=[dict(type='BGR2RGB'),
541
+ dict(type='LabelScaleCononical'),
542
+ dict(type='RandomResize',
543
+ prob=0.5,
544
+ ratio_range=(0.85, 1.15),
545
+ is_lidar=False),
546
+ dict(type='RandomCrop',
547
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
548
+ crop_type='rand',
549
+ ignore_label=-1,
550
+ padding=[0, 0, 0]),
551
+ dict(type='RandomEdgeMask',
552
+ mask_maxsize=50,
553
+ prob=0.2,
554
+ rgb_invalid=[0,0,0],
555
+ label_invalid=-1,),
556
+ dict(type='RandomHorizontalFlip',
557
+ prob=0.4),
558
+ dict(type='PhotoMetricDistortion',
559
+ to_gray_prob=0.1,
560
+ distortion_prob=0.1,),
561
+ dict(type='Weather',
562
+ prob=0.05),
563
+ dict(type='RandomBlur',
564
+ prob=0.05),
565
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
566
+ dict(type='ToTensor'),
567
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
568
+ ],
569
+ #sample_size = 10000,
570
+ ),
571
+ val=dict(
572
+ pipeline=[dict(type='BGR2RGB'),
573
+ dict(type='LabelScaleCononical'),
574
+ dict(type='RandomCrop',
575
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
576
+ crop_type='center',
577
+ ignore_label=-1,
578
+ padding=[0, 0, 0]),
579
+ dict(type='ToTensor'),
580
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
581
+ ],
582
+ sample_size = 1200,
583
+ ),
584
+ ))
585
+ UASOL_dataset=dict(
586
+ data = dict(
587
+ train=dict(
588
+ pipeline=[dict(type='BGR2RGB'),
589
+ dict(type='LabelScaleCononical'),
590
+ dict(type='RandomResize',
591
+ prob=0.5,
592
+ ratio_range=(0.85, 1.15),
593
+ is_lidar=False),
594
+ dict(type='RandomCrop',
595
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
596
+ crop_type='rand',
597
+ ignore_label=-1,
598
+ padding=[0, 0, 0]),
599
+ dict(type='RandomEdgeMask',
600
+ mask_maxsize=50,
601
+ prob=0.2,
602
+ rgb_invalid=[0,0,0],
603
+ label_invalid=-1,),
604
+ dict(type='RandomHorizontalFlip',
605
+ prob=0.4),
606
+ dict(type='PhotoMetricDistortion',
607
+ to_gray_prob=0.1,
608
+ distortion_prob=0.1,),
609
+ dict(type='Weather',
610
+ prob=0.05),
611
+ dict(type='RandomBlur',
612
+ prob=0.05),
613
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
614
+ dict(type='ToTensor'),
615
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
616
+ ],
617
+ #sample_size = 10000,
618
+ ),
619
+ val=dict(
620
+ pipeline=[dict(type='BGR2RGB'),
621
+ dict(type='LabelScaleCononical'),
622
+ dict(type='RandomCrop',
623
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
624
+ crop_type='center',
625
+ ignore_label=-1,
626
+ padding=[0, 0, 0]),
627
+ dict(type='ToTensor'),
628
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
629
+ ],
630
+ sample_size = 1200,
631
+ ),
632
+ ))
633
+ Waymo_dataset=dict(
634
+ data = dict(
635
+ train=dict(
636
+ pipeline=[dict(type='BGR2RGB'),
637
+ dict(type='LabelScaleCononical'),
638
+ dict(type='RandomResize',
639
+ prob=0.5,
640
+ ratio_range=(0.85, 1.15),
641
+ is_lidar=True),
642
+ dict(type='RandomCrop',
643
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
644
+ crop_type='rand',
645
+ ignore_label=-1,
646
+ padding=[0, 0, 0]),
647
+ dict(type='RandomEdgeMask',
648
+ mask_maxsize=50,
649
+ prob=0.2,
650
+ rgb_invalid=[0,0,0],
651
+ label_invalid=-1,),
652
+ dict(type='RandomHorizontalFlip',
653
+ prob=0.4),
654
+ dict(type='PhotoMetricDistortion',
655
+ to_gray_prob=0.1,
656
+ distortion_prob=0.1,),
657
+ dict(type='Weather',
658
+ prob=0.05),
659
+ dict(type='RandomBlur',
660
+ prob=0.05),
661
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
662
+ dict(type='ToTensor'),
663
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
664
+ ],
665
+ #sample_size = 10000,
666
+ ),
667
+ val=dict(
668
+ pipeline=[dict(type='BGR2RGB'),
669
+ dict(type='LabelScaleCononical'),
670
+ dict(type='RandomCrop',
671
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
672
+ crop_type='center',
673
+ ignore_label=-1,
674
+ padding=[0, 0, 0]),
675
+ dict(type='ToTensor'),
676
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
677
+ ],
678
+ sample_size = 1200,
679
+ ),
680
+ ))
681
+ Matterport3D_dataset=dict(
682
+ data = dict(
683
+ train=dict(
684
+ pipeline=[dict(type='BGR2RGB'),
685
+ dict(type='LabelScaleCononical'),
686
+ dict(type='RandomResize',
687
+ prob=0.5,
688
+ ratio_range=(0.85, 1.15),
689
+ is_lidar=False),
690
+ dict(type='RandomCrop',
691
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
692
+ crop_type='rand',
693
+ ignore_label=-1,
694
+ padding=[0, 0, 0]),
695
+ dict(type='RandomEdgeMask',
696
+ mask_maxsize=50,
697
+ prob=0.2,
698
+ rgb_invalid=[0,0,0],
699
+ label_invalid=-1,),
700
+ dict(type='RandomHorizontalFlip',
701
+ prob=0.4),
702
+ dict(type='PhotoMetricDistortion',
703
+ to_gray_prob=0.1,
704
+ distortion_prob=0.1,),
705
+ dict(type='Weather',
706
+ prob=0.05),
707
+ dict(type='RandomBlur',
708
+ prob=0.05),
709
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
710
+ dict(type='ToTensor'),
711
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
712
+ ],
713
+ #sample_size = 10000,
714
+ ),
715
+ val=dict(
716
+ pipeline=[dict(type='BGR2RGB'),
717
+ dict(type='LabelScaleCononical'),
718
+ dict(type='RandomCrop',
719
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
720
+ crop_type='center',
721
+ ignore_label=-1,
722
+ padding=[0, 0, 0]),
723
+ dict(type='ToTensor'),
724
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
725
+ ],
726
+ sample_size = 1200,
727
+ ),
728
+ ))
729
+ Replica_dataset=dict(
730
+ data = dict(
731
+ train=dict(
732
+ pipeline=[dict(type='BGR2RGB'),
733
+ dict(type='LabelScaleCononical'),
734
+ dict(type='RandomResize',
735
+ prob=0.5,
736
+ ratio_range=(0.85, 1.15),
737
+ is_lidar=False),
738
+ dict(type='RandomCrop',
739
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
740
+ crop_type='rand',
741
+ ignore_label=-1,
742
+ padding=[0, 0, 0]),
743
+ dict(type='RandomEdgeMask',
744
+ mask_maxsize=50,
745
+ prob=0.2,
746
+ rgb_invalid=[0,0,0],
747
+ label_invalid=-1,),
748
+ dict(type='RandomHorizontalFlip',
749
+ prob=0.4),
750
+ dict(type='PhotoMetricDistortion',
751
+ to_gray_prob=0.1,
752
+ distortion_prob=0.1,),
753
+ dict(type='Weather',
754
+ prob=0.05),
755
+ dict(type='RandomBlur',
756
+ prob=0.05),
757
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
758
+ dict(type='ToTensor'),
759
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
760
+ ],
761
+ #sample_size = 10000,
762
+ ),
763
+ val=dict(
764
+ pipeline=[dict(type='BGR2RGB'),
765
+ dict(type='LabelScaleCononical'),
766
+ dict(type='RandomCrop',
767
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
768
+ crop_type='center',
769
+ ignore_label=-1,
770
+ padding=[0, 0, 0]),
771
+ dict(type='ToTensor'),
772
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
773
+ ],
774
+ sample_size = 1200,
775
+ ),
776
+ ))
777
+ VKITTI_dataset=dict(
778
+ data = dict(
779
+ train=dict(
780
+ pipeline=[dict(type='BGR2RGB'),
781
+ dict(type='LabelScaleCononical'),
782
+ dict(type='RandomResize',
783
+ prob=0.5,
784
+ ratio_range=(0.85, 1.15),
785
+ is_lidar=False),
786
+ dict(type='RandomCrop',
787
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
788
+ crop_type='rand',
789
+ ignore_label=-1,
790
+ padding=[0, 0, 0]),
791
+ dict(type='RandomEdgeMask',
792
+ mask_maxsize=50,
793
+ prob=0.2,
794
+ rgb_invalid=[0,0,0],
795
+ label_invalid=-1,),
796
+ dict(type='RandomHorizontalFlip',
797
+ prob=0.4),
798
+ dict(type='PhotoMetricDistortion',
799
+ to_gray_prob=0.1,
800
+ distortion_prob=0.1,),
801
+ dict(type='Weather',
802
+ prob=0.05),
803
+ dict(type='RandomBlur',
804
+ prob=0.05),
805
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
806
+ dict(type='ToTensor'),
807
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
808
+ ],
809
+ #sample_size = 10000,
810
+ ),
811
+ val=dict(
812
+ pipeline=[dict(type='BGR2RGB'),
813
+ dict(type='LabelScaleCononical'),
814
+ dict(type='RandomCrop',
815
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
816
+ crop_type='center',
817
+ ignore_label=-1,
818
+ padding=[0, 0, 0]),
819
+ dict(type='ToTensor'),
820
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
821
+ ],
822
+ sample_size = 1200,
823
+ ),
824
+ ))
825
+ HM3D_dataset=dict(
826
+ data = dict(
827
+ train=dict(
828
+ pipeline=[dict(type='BGR2RGB'),
829
+ dict(type='LabelScaleCononical'),
830
+ dict(type='RandomResize',
831
+ prob=0.5,
832
+ ratio_range=(0.75, 1.3),
833
+ is_lidar=False),
834
+ dict(type='RandomCrop',
835
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
836
+ crop_type='rand',
837
+ ignore_label=-1,
838
+ padding=[0, 0, 0]),
839
+ dict(type='RandomEdgeMask',
840
+ mask_maxsize=50,
841
+ prob=0.2,
842
+ rgb_invalid=[0,0,0],
843
+ label_invalid=-1,),
844
+ dict(type='RandomHorizontalFlip',
845
+ prob=0.4),
846
+ dict(type='PhotoMetricDistortion',
847
+ to_gray_prob=0.1,
848
+ distortion_prob=0.1,),
849
+ dict(type='Weather',
850
+ prob=0.05),
851
+ dict(type='RandomBlur',
852
+ prob=0.05),
853
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
854
+ dict(type='ToTensor'),
855
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
856
+ ],
857
+ #sample_size = 10000,
858
+ ),
859
+ val=dict(
860
+ pipeline=[dict(type='BGR2RGB'),
861
+ dict(type='LabelScaleCononical'),
862
+ dict(type='RandomCrop',
863
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
864
+ crop_type='center',
865
+ ignore_label=-1,
866
+ padding=[0, 0, 0]),
867
+ dict(type='ToTensor'),
868
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
869
+ ],
870
+ sample_size = 1200,
871
+ ),
872
+ ))
873
+ BlendedMVG_omni_dataset=dict(
874
+ data = dict(
875
+ train=dict(
876
+ pipeline=[dict(type='BGR2RGB'),
877
+ dict(type='LabelScaleCononical'),
878
+ dict(type='RandomResize',
879
+ prob=0.5,
880
+ ratio_range=(0.75, 1.3),
881
+ is_lidar=False),
882
+ dict(type='RandomCrop',
883
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
884
+ crop_type='rand',
885
+ ignore_label=-1,
886
+ padding=[0, 0, 0]),
887
+ dict(type='RandomEdgeMask',
888
+ mask_maxsize=50,
889
+ prob=0.2,
890
+ rgb_invalid=[0,0,0],
891
+ label_invalid=-1,),
892
+ dict(type='RandomHorizontalFlip',
893
+ prob=0.4),
894
+ dict(type='PhotoMetricDistortion',
895
+ to_gray_prob=0.1,
896
+ distortion_prob=0.1,),
897
+ dict(type='Weather',
898
+ prob=0.05),
899
+ dict(type='RandomBlur',
900
+ prob=0.05),
901
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
902
+ dict(type='ToTensor'),
903
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
904
+ ],
905
+ ),
906
+ val=dict(
907
+ pipeline=[dict(type='BGR2RGB'),
908
+ dict(type='LabelScaleCononical'),
909
+ dict(type='RandomCrop',
910
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
911
+ crop_type='center',
912
+ ignore_label=-1,
913
+ padding=[0, 0, 0]),
914
+ dict(type='ToTensor'),
915
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
916
+ ],
917
+ ),
918
+ ))
919
+ ScanNetAll_dataset=dict(
920
+ data = dict(
921
+ train=dict(
922
+ pipeline=[dict(type='BGR2RGB'),
923
+ dict(type='LabelScaleCononical'),
924
+ dict(type='RandomResize',
925
+ prob=0.5,
926
+ ratio_range=(0.85, 1.15),
927
+ is_lidar=False),
928
+ dict(type='RandomCrop',
929
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
930
+ crop_type='rand',
931
+ ignore_label=-1,
932
+ padding=[0, 0, 0]),
933
+ dict(type='RandomEdgeMask',
934
+ mask_maxsize=50,
935
+ prob=0.2,
936
+ rgb_invalid=[0,0,0],
937
+ label_invalid=-1,),
938
+ dict(type='RandomHorizontalFlip',
939
+ prob=0.4),
940
+ dict(type='PhotoMetricDistortion',
941
+ to_gray_prob=0.1,
942
+ distortion_prob=0.1,),
943
+ dict(type='Weather',
944
+ prob=0.05),
945
+ dict(type='RandomBlur',
946
+ prob=0.05),
947
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
948
+ dict(type='ToTensor'),
949
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
950
+ ],
951
+ #sample_size = 10000,
952
+ ),
953
+ val=dict(
954
+ pipeline=[dict(type='BGR2RGB'),
955
+ dict(type='LabelScaleCononical'),
956
+ dict(type='RandomCrop',
957
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
958
+ crop_type='center',
959
+ ignore_label=-1,
960
+ padding=[0, 0, 0]),
961
+ dict(type='ToTensor'),
962
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
963
+ ],
964
+ sample_size = 1200,
965
+ ),
966
+ ))
967
+ Hypersim_dataset=dict(
968
+ data = dict(
969
+ train=dict(
970
+ pipeline=[dict(type='BGR2RGB'),
971
+ dict(type='LabelScaleCononical'),
972
+ dict(type='RandomResize',
973
+ prob=0.5,
974
+ ratio_range=(0.85, 1.15),
975
+ is_lidar=False),
976
+ dict(type='RandomCrop',
977
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
978
+ crop_type='rand',
979
+ ignore_label=-1,
980
+ padding=[0, 0, 0]),
981
+ dict(type='RandomEdgeMask',
982
+ mask_maxsize=50,
983
+ prob=0.2,
984
+ rgb_invalid=[0,0,0],
985
+ label_invalid=-1,),
986
+ dict(type='RandomHorizontalFlip',
987
+ prob=0.4),
988
+ dict(type='PhotoMetricDistortion',
989
+ to_gray_prob=0.1,
990
+ distortion_prob=0.1,),
991
+ dict(type='Weather',
992
+ prob=0.05),
993
+ dict(type='RandomBlur',
994
+ prob=0.05),
995
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
996
+ dict(type='ToTensor'),
997
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
998
+ ],
999
+ #sample_size = 10000,
1000
+ ),
1001
+ val=dict(
1002
+ pipeline=[dict(type='BGR2RGB'),
1003
+ dict(type='LabelScaleCononical'),
1004
+ dict(type='RandomCrop',
1005
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
1006
+ crop_type='center',
1007
+ ignore_label=-1,
1008
+ padding=[0, 0, 0]),
1009
+ dict(type='ToTensor'),
1010
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
1011
+ ],
1012
+ sample_size = 1200,
1013
+ ),
1014
+ ))
external/Metric3D/training/mono/configs/_base_/datasets/7scenes.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# 7Scenes dataset settings.
# Data is resized/cropped to the canonical camera space; see ._data_base_.py.

SevenScenes_dataset = dict(
    lib='SevenScenesDataset',        # dataset loader class registered under this name
    data_root='data/public_datasets',
    data_name='7Scenes',
    transfer_to_canonical=True,      # rescale samples to the canonical camera model
    metric_scale=1000.0,             # stored depth is divided by this to get metres
    original_focal_length=500,
    original_size=(480, 640),        # (H, W) of the raw images
    data_type='denselidar',
    data=dict(
        # training pipeline: random resize/crop plus photometric augmentation
        train=dict(
            # NOTE(review): anno_path references ETH3D annotations in a 7Scenes
            # config (train/val/test alike) -- looks like a copy-paste
            # placeholder; confirm the intended annotation files.
            anno_path='ETH3D/annotations/test_annotations_new.json',
            sample_ratio=1.0,
            sample_size=-1,          # -1: use all samples
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='rand',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='RandomEdgeMask', mask_maxsize=50, prob=0.2,
                     rgb_invalid=[0, 0, 0], label_invalid=-1),
                dict(type='RandomHorizontalFlip', prob=0.4),
                dict(type='PhotoMetricDistortion',
                     to_gray_prob=0.2, distortion_prob=0.1),
                dict(type='Weather', prob=0.1),
                dict(type='RandomBlur', prob=0.05),
                dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
        ),
        # validation pipeline: deterministic resize + center crop
        val=dict(
            anno_path='ETH3D/annotations/test_annotations_new.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='center',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=20,
        ),
        # test pipeline: aspect-preserving resize, no cropping
        test=dict(
            anno_path='ETH3D/annotations/test_annotations_new.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeKeepRatio', resize_size=(512, 960),
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=-1,
        ),
    ),
)
external/Metric3D/training/mono/configs/_base_/datasets/_data_base_.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Canonical camera setting and basic data configuration shared by all datasets.

data_basic = dict(
    # virtual camera every dataset is transformed into
    canonical_space=dict(
        img_size=(540, 960),     # (H, W) of the canonical image
        focal_length=1196.0,     # canonical focal length in pixels
    ),
    depth_range=(0.9, 150),          # valid metric depth interval (metres)
    depth_normalize=(0.006, 1.001),  # normalized depth interval used by the model
    crop_size=(512, 960),            # training crop; overrides per-pipeline (0, 0)
    clip_depth_range=(0.1, 200),     # hard clipping bounds for depth labels
)
external/Metric3D/training/mono/configs/_base_/datasets/argovers2.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Argoverse 2 dataset settings (sparse LiDAR depth labels).

Argovers2_dataset = dict(
    lib='Argovers2Dataset',          # dataset loader class registered under this name
    data_root='data/public_datasets',
    data_name='Argovers2',
    transfer_to_canonical=True,      # rescale samples to the canonical camera model
    metric_scale=200.0,              # stored depth is divided by this to get metres
    original_focal_length=(1688.844624443858, 1776.8498213965734),
    original_size=(1550, 2048),      # (H, W) of the raw images
    data_type='lidar',
    data=dict(
        # training pipeline: LiDAR-aware resize, random crop, heavy augmentation
        train=dict(
            anno_path='Argovers2/annotations/train_annotations_wneigh.json',
            sample_ratio=1.0,
            sample_size=-1,          # -1: use all samples
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='LiDarResizeCanonical', ratio_range=(0.9, 1.4)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='rand',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='RandomEdgeMask', mask_maxsize=50, prob=0.2,
                     rgb_invalid=[0, 0, 0], label_invalid=-1),
                dict(type='RandomHorizontalFlip', prob=0.4),
                dict(type='PhotoMetricDistortion',
                     to_gray_prob=0.2, distortion_prob=0.1),
                dict(type='Weather', prob=0.1),
                dict(type='RandomBlur', prob=0.05),
                dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
        ),
        # validation pipeline: deterministic resize + center crop
        val=dict(
            anno_path='Argovers2/annotations/val_annotations_wneigh.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='center',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=20,
        ),
        # test pipeline: aspect-preserving resize, no cropping
        test=dict(
            anno_path='Argovers2/annotations/test_annotations_wneigh.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeKeepRatio', resize_size=(512, 960),
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=6000,
        ),
    ),
)
external/Metric3D/training/mono/configs/_base_/datasets/blended_mvg.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# BlendedMVG (omni) dataset settings.
# Data is resized/cropped to the canonical camera space; see ._data_base_.py.
# Depth here carries no metric scale (data_type 'denselidar_nometric').

BlendedMVG_omni_dataset = dict(
    lib='BlendedMVGOmniDataset',     # dataset loader class registered under this name
    data_root='data/public_datasets',
    data_name='BlendedMVG_omni',
    transfer_to_canonical=True,      # rescale samples to the canonical camera model
    metric_scale=512.0,              # stored depth is divided by this value
    original_focal_length=575.6656,
    original_size=(576, 768),        # (H, W) of the raw images
    data_type='denselidar_nometric',
    data=dict(
        # training pipeline: note there is no Weather augmentation for this set
        train=dict(
            anno_path='BlendedMVG/annotations/train.json',
            sample_ratio=1.0,
            sample_size=-1,          # -1: use all samples
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='rand',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='RandomEdgeMask', mask_maxsize=50, prob=0.2,
                     rgb_invalid=[0, 0, 0], label_invalid=-1),
                dict(type='RandomHorizontalFlip', prob=0.4),
                dict(type='PhotoMetricDistortion',
                     to_gray_prob=0.1, distortion_prob=0.05),
                dict(type='RandomBlur', prob=0.05),
                dict(type='RGBCompresion', prob=0.1, compression=(0, 50)),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
        ),
        # validation pipeline: deterministic resize + center crop
        # (crop padding uses the ImageNet mean color, unlike most other sets)
        val=dict(
            anno_path='BlendedMVG/annotations/test.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='center',
                     ignore_label=-1, padding=[123.675, 116.28, 103.53]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=5,
        ),
        # test pipeline: canonical resize then aspect-preserving resize
        test=dict(
            anno_path='BlendedMVG/annotations/test.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
                dict(type='ResizeKeepRatio', resize_size=(512, 960),
                     ignore_label=-1, padding=[123.675, 116.28, 103.53]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=-1,
        ),
    ),
)
external/Metric3D/training/mono/configs/_base_/datasets/cityscapes.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Cityscapes (sequence) dataset settings (stereo-derived depth labels).

Cityscapes_dataset = dict(
    lib='CityscapesDataset',         # dataset loader class registered under this name
    data_root='data/public_datasets',
    data_name='Cityscapes',
    transfer_to_canonical=True,      # rescale samples to the canonical camera model
    metric_scale=200.0,              # stored depth is divided by this to get metres
    original_focal_length=(2263.9108952994275, 2263.9108952994275),
    original_size=(1024, 2048),      # (H, W) of the raw images
    data_type='stereo',
    data=dict(
        # training pipeline: random resize/crop plus photometric augmentation
        train=dict(
            anno_path='Cityscapes_sequence/annotations/train.json',
            sample_ratio=1.0,
            sample_size=-1,          # -1: use all samples
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='rand',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='RandomEdgeMask', mask_maxsize=50, prob=0.2,
                     rgb_invalid=[0, 0, 0], label_invalid=-1),
                dict(type='RandomHorizontalFlip', prob=0.4),
                dict(type='PhotoMetricDistortion',
                     to_gray_prob=0.2, distortion_prob=0.1),
                dict(type='Weather', prob=0.1),
                dict(type='RandomBlur', prob=0.05),
                dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
        ),
        # validation pipeline: deterministic resize + center crop
        val=dict(
            anno_path='Cityscapes_sequence/annotations/val.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='center',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=20,
        ),
        # test pipeline: aspect-preserving resize, no cropping
        test=dict(
            anno_path='Cityscapes_sequence/annotations/test.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeKeepRatio', resize_size=(512, 960),
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=-1,
        ),
    ),
)
external/Metric3D/training/mono/configs/_base_/datasets/ddad.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# DDAD dataset settings (sparse LiDAR depth labels).

DDAD_dataset = dict(
    lib='DDADDataset',               # dataset loader class registered under this name
    data_root='data/public_datasets',
    data_name='DDAD',
    transfer_to_canonical=True,      # rescale samples to the canonical camera model
    metric_scale=200.0,              # stored depth is divided by this to get metres
    original_focal_length=(2181, 1060),
    original_size=(1216, 1936),      # (H, W) of the raw images
    data_type='lidar',
    data=dict(
        # training pipeline: LiDAR-aware resize, random crop, heavy augmentation
        train=dict(
            anno_path='DDAD/annotations/train_annotations.json',
            sample_ratio=1.0,
            sample_size=-1,          # -1: use all samples
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='LiDarResizeCanonical', ratio_range=(0.9, 1.4)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='rand',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='RandomEdgeMask', mask_maxsize=50, prob=0.2,
                     rgb_invalid=[0, 0, 0], label_invalid=-1),
                dict(type='RandomHorizontalFlip', prob=0.4),
                dict(type='PhotoMetricDistortion',
                     to_gray_prob=0.2, distortion_prob=0.1),
                dict(type='Weather', prob=0.1),
                dict(type='RandomBlur', prob=0.05),
                dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
        ),
        # validation pipeline: deterministic resize + center crop
        val=dict(
            anno_path='DDAD/annotations/val_annotations.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='center',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=20,
        ),
        # test pipeline: aspect-preserving resize, no cropping
        test=dict(
            anno_path='DDAD/annotations/test_annotations.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeKeepRatio',
                     resize_size=(512, 960),  # alternative: (1216, 1952) full-res
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=800,
        ),
    ),
)
external/Metric3D/training/mono/configs/_base_/datasets/ddad_any.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# DDAD dataset settings loaded through the generic 'AnyDataset' reader.

DDADAny_dataset = dict(
    lib='AnyDataset',                # generic loader; data_name selects DDAD
    data_root='data/public_datasets',
    data_name='DDAD',
    transfer_to_canonical=True,      # rescale samples to the canonical camera model
    metric_scale=200.0,              # stored depth is divided by this to get metres
    original_focal_length=(2181, 1060),
    original_size=(1216, 1936),      # (H, W) of the raw images
    data_type='lidar',
    data=dict(
        # training pipeline: LiDAR-aware resize, random crop, heavy augmentation
        train=dict(
            anno_path='DDAD/annotations/train_annotations.json',
            sample_ratio=1.0,
            sample_size=-1,          # -1: use all samples
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='LiDarResizeCanonical', ratio_range=(0.9, 1.4)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='rand',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='RandomEdgeMask', mask_maxsize=50, prob=0.2,
                     rgb_invalid=[0, 0, 0], label_invalid=-1),
                dict(type='RandomHorizontalFlip', prob=0.4),
                dict(type='PhotoMetricDistortion',
                     to_gray_prob=0.2, distortion_prob=0.1),
                dict(type='Weather', prob=0.1),
                dict(type='RandomBlur', prob=0.05),
                dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
        ),
        # validation pipeline: deterministic resize + center crop.
        # NOTE(review): val uses 'ResizeCanonical' while train uses
        # 'LiDarResizeCanonical' -- confirm this asymmetry is intentional.
        val=dict(
            anno_path='DDAD/annotations/val_annotations.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='center',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=20,
        ),
        # test pipeline: aspect-preserving resize, no cropping
        test=dict(
            anno_path='DDAD/annotations/test_annotations.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeKeepRatio', resize_size=(512, 960),
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=6000,
        ),
    ),
)
external/Metric3D/training/mono/configs/_base_/datasets/diml.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# DIML (outdoor) dataset settings (stereo-derived depth labels).

DIML_dataset = dict(
    lib='DIMLDataset',               # dataset loader class registered under this name
    data_root='data/public_datasets',
    data_name='DIML',
    transfer_to_canonical=True,      # rescale samples to the canonical camera model
    metric_scale=200.0,              # stored depth is divided by this to get metres
    original_focal_length=(1398.402, ),
    original_size=(1080, 1920),      # (H, W) of the raw images
    data_type='stereo',
    data=dict(
        # training pipeline: random resize/crop plus photometric augmentation
        train=dict(
            anno_path='DIML/annotations/train.json',
            sample_ratio=1.0,
            sample_size=-1,          # -1: use all samples
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='rand',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='RandomEdgeMask', mask_maxsize=50, prob=0.2,
                     rgb_invalid=[0, 0, 0], label_invalid=-1),
                dict(type='RandomHorizontalFlip', prob=0.4),
                dict(type='PhotoMetricDistortion',
                     to_gray_prob=0.2, distortion_prob=0.1),
                dict(type='Weather', prob=0.1),
                dict(type='RandomBlur', prob=0.05),
                dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
        ),
        # validation pipeline: deterministic resize + center crop
        val=dict(
            anno_path='DIML/annotations/val.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='center',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=20,
        ),
        # test pipeline: aspect-preserving resize, no cropping
        test=dict(
            anno_path='DIML/annotations/test.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeKeepRatio', resize_size=(512, 960),
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=-1,
        ),
    ),
)
external/Metric3D/training/mono/configs/_base_/datasets/diml_indoor.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# DIML indoor dataset settings (stereo depth, no camera intrinsics available).

DIML_indoor_dataset = dict(
    lib='DIMLDataset',               # shares the loader class with outdoor DIML
    data_root='data/public_datasets',
    data_name='DIML_indoor',
    metric_scale=1000.0,             # stored depth is in millimetres
    data_type='stereo_nocamera',     # no intrinsics: no canonical-space fields here
    data=dict(
        # training pipeline: random resize/crop plus photometric augmentation.
        # NOTE(review): annotation paths are identical to the outdoor DIML
        # config ('DIML/annotations/*') -- confirm the indoor split uses them.
        train=dict(
            anno_path='DIML/annotations/train.json',
            sample_ratio=1.0,
            sample_size=-1,          # -1: use all samples
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='rand',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='RandomEdgeMask', mask_maxsize=50, prob=0.2,
                     rgb_invalid=[0, 0, 0], label_invalid=-1),
                dict(type='RandomHorizontalFlip', prob=0.4),
                dict(type='PhotoMetricDistortion',
                     to_gray_prob=0.2, distortion_prob=0.1),
                dict(type='Weather', prob=0.1),
                dict(type='RandomBlur', prob=0.05),
                dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
        ),
        # validation pipeline: deterministic resize + center crop
        val=dict(
            anno_path='DIML/annotations/val.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='center',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=20,
        ),
        # test pipeline: aspect-preserving resize, no cropping
        test=dict(
            anno_path='DIML/annotations/test.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeKeepRatio', resize_size=(512, 960),
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=-1,
        ),
    ),
)
external/Metric3D/training/mono/configs/_base_/datasets/diode.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# DIODE dataset settings.
# Data is resized/cropped to the canonical camera space; see ._data_base_.py.

DIODE_dataset = dict(
    lib='DIODEDataset',              # dataset loader class registered under this name
    data_root='data/public_datasets',
    data_name='DIODE',
    transfer_to_canonical=True,      # rescale samples to the canonical camera model
    metric_scale=1.0,                # depth already stored in metres
    original_focal_length=886.81,
    original_size=(764, 1024),       # (H, W) of the raw images
    data_type='denselidar',
    data=dict(
        # training pipeline: random resize/crop plus photometric augmentation
        train=dict(
            anno_path='DIODE/annotations/train.json',
            sample_ratio=1.0,
            sample_size=-1,          # -1: use all samples
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='rand',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='RandomEdgeMask', mask_maxsize=50, prob=0.2,
                     rgb_invalid=[0, 0, 0], label_invalid=-1),
                dict(type='RandomHorizontalFlip', prob=0.4),
                dict(type='PhotoMetricDistortion',
                     to_gray_prob=0.2, distortion_prob=0.1),
                dict(type='Weather', prob=0.1),
                dict(type='RandomBlur', prob=0.05),
                dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
        ),
        # validation pipeline: deterministic resize + center crop
        val=dict(
            anno_path='DIODE/annotations/val.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='center',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=50,
        ),
        # test pipeline: aspect-preserving resize, no cropping
        test=dict(
            anno_path='DIODE/annotations/test_annotations_new.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeKeepRatio', resize_size=(512, 960),
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=-1,
        ),
    ),
)
external/Metric3D/training/mono/configs/_base_/datasets/drivingstereo.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# DrivingStereo dataset settings (LiDAR depth labels).

DrivingStereo_dataset = dict(
    lib='DrivingStereoDataset',      # dataset loader class registered under this name
    data_root='data/public_datasets',
    data_name='DrivingStereo',
    transfer_to_canonical=True,      # rescale samples to the canonical camera model
    metric_scale=256.0,              # stored depth is divided by this to get metres
    original_focal_length=(1006.938, 1003.556),
    original_size=(400, 881),        # (H, W) of the raw images
    data_type='lidar',
    data=dict(
        # training pipeline: LiDAR-aware resize, random crop, heavy augmentation
        train=dict(
            anno_path='DrivingStereo/annotations/train_annotations.json',
            sample_ratio=1.0,
            sample_size=-1,          # -1: use all samples
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='LiDarResizeCanonical', ratio_range=(0.9, 1.4)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='rand',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='RandomEdgeMask', mask_maxsize=50, prob=0.2,
                     rgb_invalid=[0, 0, 0], label_invalid=-1),
                dict(type='RandomHorizontalFlip', prob=0.4),
                dict(type='PhotoMetricDistortion',
                     to_gray_prob=0.2, distortion_prob=0.1),
                dict(type='Weather', prob=0.1),
                dict(type='RandomBlur', prob=0.05),
                dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
        ),
        # validation pipeline: deterministic resize + center crop
        val=dict(
            anno_path='DrivingStereo/annotations/val_annotations.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='center',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=20,
        ),
        # test pipeline: aspect-preserving resize, no cropping
        test=dict(
            anno_path='DrivingStereo/annotations/test_annotations.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeKeepRatio', resize_size=(512, 960),
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=-1,
        ),
    ),
)
external/Metric3D/training/mono/configs/_base_/datasets/dsec.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# DSEC dataset settings (LiDAR depth labels).

DSEC_dataset = dict(
    lib='DSECDataset',               # dataset loader class registered under this name
    data_root='data/public_datasets',
    data_name='DSEC',
    transfer_to_canonical=True,      # rescale samples to the canonical camera model
    metric_scale=200.0,              # stored depth is divided by this to get metres
    original_focal_length=(1150.8943600390282, ),
    original_size=(1080, 1440),      # (H, W) of the raw images
    data_type='lidar',
    data=dict(
        # training pipeline: LiDAR-aware resize, random crop, heavy augmentation
        train=dict(
            anno_path='DSEC/annotations/train_annotations_wtmpl.json',
            sample_ratio=1.0,
            sample_size=-1,          # -1: use all samples
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='LiDarResizeCanonical', ratio_range=(0.9, 1.4)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='rand',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='RandomEdgeMask', mask_maxsize=50, prob=0.2,
                     rgb_invalid=[0, 0, 0], label_invalid=-1),
                dict(type='RandomHorizontalFlip', prob=0.4),
                dict(type='PhotoMetricDistortion',
                     to_gray_prob=0.2, distortion_prob=0.1),
                dict(type='Weather', prob=0.1),
                dict(type='RandomBlur', prob=0.05),
                dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
        ),
        # validation pipeline: deterministic resize + center crop
        val=dict(
            anno_path='DSEC/annotations/val_annotations_wtmpl.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
                # crop_size (0, 0) is overwritten by the data_basic config
                dict(type='RandomCrop', crop_size=(0, 0), crop_type='center',
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=20,
        ),
        # test pipeline: aspect-preserving resize, no cropping
        test=dict(
            anno_path='DSEC/annotations/test_annotations_wtmpl.json',
            pipeline=[
                dict(type='BGR2RGB'),
                dict(type='ResizeKeepRatio', resize_size=(512, 960),
                     ignore_label=-1, padding=[0, 0, 0]),
                dict(type='ToTensor'),
                dict(type='Normalize', mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375]),
            ],
            sample_ratio=1.0,
            sample_size=-1,
        ),
    ),
)
external/Metric3D/training/mono/configs/_base_/datasets/eth3d.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ # data will resized/cropped to the canonical size, refer to ._data_base_.py
3
+
4
+ ETH3D_dataset=dict(
5
+ lib = 'ETH3DDataset',
6
+ data_root = 'data/public_datasets',
7
+ data_name = 'ETH3D',
8
+ transfer_to_canonical = True,
9
+ metric_scale = 1.0,
10
+ original_focal_length = 886.81,
11
+ original_size = (764, 1024),
12
+ data_type='lidar',
13
+ data = dict(
14
+ # configs for the training pipeline
15
+ train=dict(
16
+ anno_path='ETH3D/annotations/test_annotations_new.json',
17
+ sample_ratio = 1.0,
18
+ sample_size = -1,
19
+ pipeline=[dict(type='BGR2RGB'),
20
+ dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
21
+ dict(type='RandomCrop',
22
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
23
+ crop_type='rand',
24
+ ignore_label=-1,
25
+ padding=[0, 0, 0]),
26
+ dict(type='RandomEdgeMask',
27
+ mask_maxsize=50,
28
+ prob=0.2,
29
+ rgb_invalid=[0,0,0],
30
+ label_invalid=-1,),
31
+ dict(type='RandomHorizontalFlip',
32
+ prob=0.4),
33
+ dict(type='PhotoMetricDistortion',
34
+ to_gray_prob=0.2,
35
+ distortion_prob=0.1,),
36
+ dict(type='Weather',
37
+ prob=0.1),
38
+ dict(type='RandomBlur',
39
+ prob=0.05),
40
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
41
+ dict(type='ToTensor'),
42
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
43
+ ],),
44
+
45
+ # configs for the training pipeline
46
+ val=dict(
47
+ anno_path='ETH3D/annotations/test_annotations_new.json',
48
+ pipeline=[dict(type='BGR2RGB'),
49
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
50
+ dict(type='RandomCrop',
51
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
52
+ crop_type='center',
53
+ ignore_label=-1,
54
+ padding=[0, 0, 0]),
55
+ dict(type='ToTensor'),
56
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
57
+ ],
58
+ sample_ratio = 1.0,
59
+ sample_size = 20,),
60
+ # configs for the training pipeline
61
+ test=dict(
62
+ anno_path='ETH3D/annotations/test_annotations_new.json',
63
+ pipeline=[dict(type='BGR2RGB'),
64
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
65
+ dict(type='ResizeKeepRatio',
66
+ resize_size=(512, 960),
67
+ ignore_label=-1,
68
+ padding=[0, 0, 0]),
69
+ # dict(type='RandomCrop',
70
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
71
+ # crop_type='center',
72
+ # ignore_label=-1,
73
+ # padding=[123.675, 116.28, 103.53]),
74
+ dict(type='ToTensor'),
75
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
76
+ ],
77
+ sample_ratio = 1.0,
78
+ sample_size = -1,),
79
+ ),
80
+ )
external/Metric3D/training/mono/configs/_base_/datasets/hm3d.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ # data will resized/cropped to the canonical size, refer to ._data_base_.py
3
+
4
+ HM3D_dataset=dict(
5
+ lib = 'HM3DDataset',
6
+ data_root = 'data/public_datasets',
7
+ data_name = 'HM3D',
8
+ transfer_to_canonical = True,
9
+ metric_scale = 512.0,
10
+ original_focal_length = 575.6656,
11
+ original_size = (512, 512),
12
+ data_type='denselidar',
13
+ data = dict(
14
+ # configs for the training pipeline
15
+ train=dict(
16
+ anno_path='HM3D/annotations/train.json',
17
+ sample_ratio = 1.0,
18
+ sample_size = -1,
19
+ pipeline=[dict(type='BGR2RGB'),
20
+ dict(type='ResizeCanonical', ratio_range=(0.9, 1.2)),
21
+ dict(type='RandomCrop',
22
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
23
+ crop_type='rand',
24
+ ignore_label=-1,
25
+ padding=[0, 0, 0]),
26
+ dict(type='RandomEdgeMask',
27
+ mask_maxsize=50,
28
+ prob=0.0,
29
+ rgb_invalid=[0,0,0],
30
+ label_invalid=-1,),
31
+ dict(type='RandomHorizontalFlip',
32
+ prob=0.4),
33
+ dict(type='PhotoMetricDistortion',
34
+ to_gray_prob=0.1,
35
+ distortion_prob=0.05,),
36
+ dict(type='RandomBlur',
37
+ prob=0.05),
38
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 50)),
39
+ dict(type='ToTensor'),
40
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
41
+ ],),
42
+
43
+ # configs for the training pipeline
44
+ val=dict(
45
+ anno_path='HM3D/annotations/test.json',
46
+ pipeline=[dict(type='BGR2RGB'),
47
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
48
+ dict(type='RandomCrop',
49
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
50
+ crop_type='center',
51
+ ignore_label=-1,
52
+ padding=[0, 0, 0]),
53
+ dict(type='ToTensor'),
54
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
55
+ ],
56
+ sample_ratio = 1.0,
57
+ sample_size = 20,),
58
+ # configs for the training pipeline
59
+ test=dict(
60
+ anno_path='HM3D/annotations/test.json',
61
+ pipeline=[dict(type='BGR2RGB'),
62
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
63
+ dict(type='ResizeKeepRatio',
64
+ resize_size=(512, 960),
65
+ ignore_label=-1,
66
+ padding=[0, 0, 0]),
67
+ # dict(type='RandomCrop',
68
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
69
+ # crop_type='center',
70
+ # ignore_label=-1,
71
+ # padding=[123.675, 116.28, 103.53]),
72
+ dict(type='ToTensor'),
73
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
74
+ ],
75
+ sample_ratio = 1.0,
76
+ sample_size = -1,),
77
+ ),
78
+ )
external/Metric3D/training/mono/configs/_base_/datasets/hypersim.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ # data will resized/cropped to the canonical size, refer to ._data_base_.py
3
+
4
+ Hypersim_dataset=dict(
5
+ lib = 'HypersimDataset',
6
+ data_name = 'Hypersim',
7
+ metric_scale = 1.0,
8
+ data_type='denselidar_syn',
9
+ data = dict(
10
+ # configs for the training pipeline
11
+ train=dict(
12
+ sample_ratio = 1.0,
13
+ sample_size = -1,
14
+ pipeline=[dict(type='BGR2RGB'),
15
+ dict(type='ResizeCanonical', ratio_range=(0.9, 1.3)),
16
+ dict(type='RandomCrop',
17
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
18
+ crop_type='rand',
19
+ ignore_label=-1,
20
+ padding=[0, 0, 0]),
21
+ dict(type='RandomEdgeMask',
22
+ mask_maxsize=50,
23
+ prob=0.0,
24
+ rgb_invalid=[0,0,0],
25
+ label_invalid=-1,),
26
+ dict(type='RandomHorizontalFlip',
27
+ prob=0.4),
28
+ dict(type='PhotoMetricDistortion',
29
+ to_gray_prob=0.1,
30
+ distortion_prob=0.05,),
31
+ dict(type='RandomBlur',
32
+ prob=0.05),
33
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 50)),
34
+ dict(type='ToTensor'),
35
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
36
+ ],),
37
+
38
+ # configs for the training pipeline
39
+ val=dict(
40
+ pipeline=[dict(type='BGR2RGB'),
41
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
42
+ dict(type='RandomCrop',
43
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
44
+ crop_type='center',
45
+ ignore_label=-1,
46
+ padding=[0, 0, 0]),
47
+ dict(type='ToTensor'),
48
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
49
+ ],
50
+ sample_ratio = 1.0,
51
+ sample_size = 200,),
52
+ # configs for the training pipeline
53
+ test=dict(
54
+ pipeline=[dict(type='BGR2RGB'),
55
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
56
+ dict(type='ResizeKeepRatio',
57
+ resize_size=(512, 960),
58
+ ignore_label=-1,
59
+ padding=[0, 0, 0]),
60
+ # dict(type='RandomCrop',
61
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
62
+ # crop_type='center',
63
+ # ignore_label=-1,
64
+ # padding=[0, 0, 0]),
65
+ dict(type='ToTensor'),
66
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
67
+ ],
68
+ sample_ratio = 1.0,
69
+ sample_size = 2000,),
70
+ ),
71
+ )
external/Metric3D/training/mono/configs/_base_/datasets/ibims.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ # data will resized/cropped to the canonical size, refer to ._data_base_.py
3
+
4
+ IBIMS_dataset=dict(
5
+ lib = 'IBIMSDataset',
6
+ data_root = 'data/public_datasets',
7
+ data_name = 'IBIMS',
8
+ transfer_to_canonical = True,
9
+ metric_scale = 1000.0,
10
+ original_focal_length = 518.857,
11
+ original_size = (480, 640),
12
+ data_type='lidar',
13
+ data = dict(
14
+ # configs for the training pipeline
15
+ train=dict(
16
+ anno_path='IBIMS/annotations/train.json',
17
+ sample_ratio = 1.0,
18
+ sample_size = -1,
19
+ pipeline=[dict(type='BGR2RGB'),
20
+ dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
21
+ dict(type='RandomCrop',
22
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
23
+ crop_type='rand',
24
+ ignore_label=-1,
25
+ padding=[0, 0, 0]),
26
+ dict(type='RandomEdgeMask',
27
+ mask_maxsize=50,
28
+ prob=0.2,
29
+ rgb_invalid=[0,0,0],
30
+ label_invalid=-1,),
31
+ dict(type='RandomHorizontalFlip',
32
+ prob=0.4),
33
+ dict(type='PhotoMetricDistortion',
34
+ to_gray_prob=0.2,
35
+ distortion_prob=0.1,),
36
+ dict(type='Weather',
37
+ prob=0.1),
38
+ dict(type='RandomBlur',
39
+ prob=0.05),
40
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
41
+ dict(type='ToTensor'),
42
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
43
+ ],),
44
+
45
+ # configs for the training pipeline
46
+ val=dict(
47
+ anno_path='IBIMS/annotations/test.json',
48
+ pipeline=[dict(type='BGR2RGB'),
49
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
50
+ dict(type='RandomCrop',
51
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
52
+ crop_type='center',
53
+ ignore_label=-1,
54
+ padding=[0, 0, 0]),
55
+ dict(type='ToTensor'),
56
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
57
+ ],
58
+ sample_ratio = 1.0,
59
+ sample_size = 20,),
60
+ # configs for the training pipeline
61
+ test=dict(
62
+ anno_path='IBIMS/annotations/test.json',
63
+ pipeline=[dict(type='BGR2RGB'),
64
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
65
+ dict(type='ResizeKeepRatio',
66
+ resize_size=(512, 960),
67
+ ignore_label=-1,
68
+ padding=[0, 0, 0]),
69
+ # dict(type='RandomCrop',
70
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
71
+ # crop_type='center',
72
+ # ignore_label=-1,
73
+ # padding=[123.675, 116.28, 103.53]),
74
+ dict(type='ToTensor'),
75
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
76
+ ],
77
+ sample_ratio = 1.0,
78
+ sample_size = -1,),
79
+ ),
80
+ )
external/Metric3D/training/mono/configs/_base_/datasets/kitti.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ # data will resized/cropped to the canonical size, refer to ._data_base_.py
3
+
4
+ KITTI_dataset=dict(
5
+ lib = 'KITTIDataset',
6
+ data_root = 'data/public_datasets',
7
+ data_name = 'KITTI',
8
+ transfer_to_canonical = True,
9
+ metric_scale = 256.0,
10
+ original_focal_length = 518.857,
11
+ original_size = (480, 640),
12
+ data_type='lidar',
13
+ data = dict(
14
+ # configs for the training pipeline
15
+ train=dict(
16
+ anno_path='KITTI/annotations/train.json',
17
+ sample_ratio = 1.0,
18
+ sample_size = -1,
19
+ pipeline=[dict(type='BGR2RGB'),
20
+ dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
21
+ dict(type='RandomCrop',
22
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
23
+ crop_type='rand',
24
+ ignore_label=-1,
25
+ padding=[0, 0, 0]),
26
+ dict(type='RandomEdgeMask',
27
+ mask_maxsize=50,
28
+ prob=0.2,
29
+ rgb_invalid=[0,0,0],
30
+ label_invalid=-1,),
31
+ dict(type='RandomHorizontalFlip',
32
+ prob=0.4),
33
+ dict(type='PhotoMetricDistortion',
34
+ to_gray_prob=0.2,
35
+ distortion_prob=0.1,),
36
+ dict(type='Weather',
37
+ prob=0.1),
38
+ dict(type='RandomBlur',
39
+ prob=0.05),
40
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
41
+ dict(type='ToTensor'),
42
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
43
+ ],),
44
+
45
+ # configs for the training pipeline
46
+ val=dict(
47
+ anno_path='KITTI/annotations/test.json',
48
+ pipeline=[dict(type='BGR2RGB'),
49
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
50
+ dict(type='RandomCrop',
51
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
52
+ crop_type='center',
53
+ ignore_label=-1,
54
+ padding=[0, 0, 0]),
55
+ dict(type='ToTensor'),
56
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
57
+ ],
58
+ sample_ratio = 1.0,
59
+ sample_size = 20,),
60
+ # configs for the training pipeline
61
+ test=dict(
62
+ anno_path='KITTI/annotations/test.json',
63
+ pipeline=[dict(type='BGR2RGB'),
64
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
65
+ dict(type='ResizeKeepRatio',
66
+ resize_size=(512, 960),
67
+ ignore_label=-1,
68
+ padding=[0, 0, 0]),
69
+ # dict(type='RandomCrop',
70
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
71
+ # crop_type='center',
72
+ # ignore_label=-1,
73
+ # padding=[123.675, 116.28, 103.53]),
74
+ dict(type='ToTensor'),
75
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
76
+ ],
77
+ sample_ratio = 1.0,
78
+ sample_size = -1,),
79
+ ),
80
+ )
external/Metric3D/training/mono/configs/_base_/datasets/leddarpixset.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+
3
+ LeddarPixSet_dataset=dict(
4
+ lib = 'LeddarPixSetDataset',
5
+ data_root = 'data/public_datasets',
6
+ data_name = 'LeddarPixSet',
7
+ transfer_to_canonical = True,
8
+ metric_scale = 200.0,
9
+ original_focal_length = (2181, 1060),
10
+ original_size = (1080, 1440),
11
+ data_type='lidar',
12
+ data = dict(
13
+ # configs for the training pipeline
14
+ train=dict(
15
+ anno_path='LeddarPixSet/annotations/train_annotations.json',
16
+ sample_ratio = 1.0,
17
+ sample_size = -1,
18
+ pipeline=[dict(type='BGR2RGB'),
19
+ dict(type='LiDarResizeCanonical', ratio_range=(0.9, 1.4)),
20
+ dict(type='RandomCrop',
21
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
22
+ crop_type='rand',
23
+ ignore_label=-1,
24
+ padding=[0, 0, 0]),
25
+ dict(type='RandomEdgeMask',
26
+ mask_maxsize=50,
27
+ prob=0.2,
28
+ rgb_invalid=[0,0,0],
29
+ label_invalid=-1,),
30
+ dict(type='RandomHorizontalFlip',
31
+ prob=0.4),
32
+ dict(type='PhotoMetricDistortion',
33
+ to_gray_prob=0.2,
34
+ distortion_prob=0.1,),
35
+ dict(type='Weather',
36
+ prob=0.1),
37
+ dict(type='RandomBlur',
38
+ prob=0.05),
39
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
40
+ dict(type='ToTensor'),
41
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
42
+ ],),
43
+
44
+ # configs for the training pipeline
45
+ val=dict(
46
+ anno_path='LeddarPixSet/annotations/val_annotations.json',
47
+ pipeline=[dict(type='BGR2RGB'),
48
+ dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
49
+ dict(type='RandomCrop',
50
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
51
+ crop_type='center',
52
+ ignore_label=-1,
53
+ padding=[0, 0, 0]),
54
+ dict(type='ToTensor'),
55
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
56
+ ],
57
+ sample_ratio = 1.0,
58
+ sample_size = 50,),
59
+ # configs for the training pipeline
60
+ test=dict(
61
+ anno_path='LeddarPixSet/annotations/test_annotations.json',
62
+ pipeline=[dict(type='BGR2RGB'),
63
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
64
+ # dict(type='LabelScaleCononical', ratio_range=(1.0, 1.0)),
65
+ dict(type='ResizeKeepRatio',
66
+ resize_size=(512, 960), #(1216, 1952), #
67
+ ignore_label=-1,
68
+ padding=[0, 0, 0]),
69
+ # dict(type='RandomCrop',
70
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
71
+ # crop_type='center',
72
+ # ignore_label=-1,
73
+ # padding=[123.675, 116.28, 103.53]),
74
+ dict(type='ToTensor'),
75
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
76
+ ],
77
+ sample_ratio = 1.0,
78
+ sample_size = -1,),
79
+ ),
80
+ )
external/Metric3D/training/mono/configs/_base_/datasets/lyft.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+
3
+ Lyft_dataset=dict(
4
+ lib = 'LyftDataset',
5
+ data_root = 'data/public_datasets',
6
+ data_name = 'Lyft',
7
+ transfer_to_canonical = True,
8
+ metric_scale = 200.0,
9
+ original_focal_length = (877.406430795, 3416.79, 1108.782, 3986.358, 3427.04, ),
10
+ original_size = (1024, 1224),
11
+ data_type='lidar',
12
+ data = dict(
13
+ # configs for the training pipeline
14
+ train=dict(
15
+ anno_path='Lyft/annotations/train_annotations_wtmpl.json',
16
+ sample_ratio = 1.0,
17
+ sample_size = -1,
18
+ pipeline=[dict(type='BGR2RGB'),
19
+ dict(type='LiDarResizeCanonical', ratio_range=(0.9, 1.4)),
20
+ dict(type='RandomCrop',
21
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
22
+ crop_type='rand',
23
+ ignore_label=-1,
24
+ padding=[0, 0, 0]),
25
+ dict(type='RandomEdgeMask',
26
+ mask_maxsize=50,
27
+ prob=0.2,
28
+ rgb_invalid=[0,0,0],
29
+ label_invalid=-1,),
30
+ dict(type='RandomHorizontalFlip',
31
+ prob=0.4),
32
+ dict(type='PhotoMetricDistortion',
33
+ to_gray_prob=0.2,
34
+ distortion_prob=0.1,),
35
+ dict(type='Weather',
36
+ prob=0.1),
37
+ dict(type='RandomBlur',
38
+ prob=0.05),
39
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
40
+ dict(type='ToTensor'),
41
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
42
+ ],),
43
+
44
+ # configs for the training pipeline
45
+ val=dict(
46
+ anno_path='Lyft/annotations/val_annotations_wtmpl.json',
47
+ pipeline=[dict(type='BGR2RGB'),
48
+ dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
49
+ dict(type='RandomCrop',
50
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
51
+ crop_type='center',
52
+ ignore_label=-1,
53
+ padding=[0, 0, 0]),
54
+ dict(type='ToTensor'),
55
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
56
+ ],
57
+ sample_ratio = 1.0,
58
+ sample_size = 20,),
59
+ # configs for the training pipeline
60
+ test=dict(
61
+ anno_path='Lyft/annotations/test_annotations_wtmpl.json',
62
+ pipeline=[dict(type='BGR2RGB'),
63
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
64
+ dict(type='ResizeKeepRatio',
65
+ resize_size=(512, 960),
66
+ ignore_label=-1,
67
+ padding=[0, 0, 0]),
68
+ # dict(type='RandomCrop',
69
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
70
+ # crop_type='center',
71
+ # ignore_label=-1,
72
+ # padding=[0, 0, 0]),
73
+ dict(type='ToTensor'),
74
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
75
+ ],
76
+ sample_ratio = 1.0,
77
+ sample_size = 6000,),
78
+ ),
79
+ )
external/Metric3D/training/mono/configs/_base_/datasets/lyft_any.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+
3
+ LyftAny_dataset=dict(
4
+ lib = 'AnyDataset',
5
+ data_root = 'data/public_datasets',
6
+ data_name = 'Lyft',
7
+ transfer_to_canonical = True,
8
+ metric_scale = 200.0,
9
+ original_focal_length = (877.406430795, 880.82631362),
10
+ original_size = (1024, 1224),
11
+ data_type='lidar',
12
+ data = dict(
13
+ # configs for the training pipeline
14
+ train=dict(
15
+ anno_path='Lyft/annotations/train_annotations_wtmpl.json',
16
+ sample_ratio = 1.0,
17
+ sample_size = -1,
18
+ pipeline=[dict(type='BGR2RGB'),
19
+ dict(type='LiDarResizeCanonical', ratio_range=(0.9, 1.4)),
20
+ dict(type='RandomCrop',
21
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
22
+ crop_type='rand',
23
+ ignore_label=-1,
24
+ padding=[0, 0, 0]),
25
+ dict(type='RandomEdgeMask',
26
+ mask_maxsize=50,
27
+ prob=0.2,
28
+ rgb_invalid=[0,0,0],
29
+ label_invalid=-1,),
30
+ dict(type='RandomHorizontalFlip',
31
+ prob=0.4),
32
+ dict(type='PhotoMetricDistortion',
33
+ to_gray_prob=0.2,
34
+ distortion_prob=0.1,),
35
+ dict(type='Weather',
36
+ prob=0.1),
37
+ dict(type='RandomBlur',
38
+ prob=0.05),
39
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
40
+ dict(type='ToTensor'),
41
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
42
+ ],),
43
+
44
+ # configs for the training pipeline
45
+ val=dict(
46
+ anno_path='Lyft/annotations/val_annotations_wtmpl.json',
47
+ pipeline=[dict(type='BGR2RGB'),
48
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
49
+ dict(type='RandomCrop',
50
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
51
+ crop_type='center',
52
+ ignore_label=-1,
53
+ padding=[0, 0, 0]),
54
+ dict(type='ToTensor'),
55
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
56
+ ],
57
+ sample_ratio = 1.0,
58
+ sample_size = 20,),
59
+ # configs for the training pipeline
60
+ test=dict(
61
+ anno_path='Lyft/annotations/test_annotations_wtmpl.json',
62
+ pipeline=[dict(type='BGR2RGB'),
63
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
64
+ dict(type='ResizeKeepRatio',
65
+ resize_size=(512, 960),
66
+ ignore_label=-1,
67
+ padding=[0, 0, 0]),
68
+ # dict(type='RandomCrop',
69
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
70
+ # crop_type='center',
71
+ # ignore_label=-1,
72
+ # padding=[123.675, 116.28, 103.53]),
73
+ dict(type='ToTensor'),
74
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
75
+ ],
76
+ sample_ratio = 1.0,
77
+ sample_size = 6000,),
78
+ ),
79
+ )
external/Metric3D/training/mono/configs/_base_/datasets/mapillary_psd.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+
3
+ MapillaryPSD_dataset=dict(
4
+ lib = 'MapillaryPSDDataset',
5
+ data_root = 'data/public_datasets',
6
+ data_name = 'MapillaryPSD',
7
+ transfer_to_canonical = True,
8
+ metric_scale = 256.0,
9
+ original_focal_length = (1664.38, 1725.494, 1231.4812, 2576.447),
10
+ original_size = (1536, 2048),
11
+ data_type='sfm',
12
+ data = dict(
13
+ # configs for the training pipeline
14
+ train=dict(
15
+ anno_path='Mapillary_PSD/annotations/train_annotations.json',
16
+ sample_ratio = 1.0,
17
+ sample_size = -1,
18
+ pipeline=[dict(type='BGR2RGB'),
19
+ dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
20
+ dict(type='RandomCrop',
21
+ crop_size=(0,0), # crop_size will be overwriten by data_basic configs
22
+ crop_type='rand', # center, rand, rand_in_field
23
+ ignore_label=-1,
24
+ padding=[0, 0, 0]),
25
+ dict(type='RandomEdgeMask',
26
+ mask_maxsize=50,
27
+ prob=0.2,
28
+ rgb_invalid=[0,0,0],
29
+ label_invalid=-1,),
30
+ dict(type='RandomHorizontalFlip',
31
+ prob=0.4),
32
+ dict(type='PhotoMetricDistortion',
33
+ to_gray_prob=0.2,
34
+ distortion_prob=0.1,),
35
+ dict(type='Weather',
36
+ prob=0.1),
37
+ dict(type='RandomBlur',
38
+ prob=0.05),
39
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
40
+ dict(type='ToTensor'),
41
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
42
+ ],),
43
+
44
+ # configs for the training pipeline
45
+ val=dict(
46
+ anno_path='Mapillary_PSD/annotations/val_annotations.json',
47
+ pipeline=[dict(type='BGR2RGB'),
48
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
49
+ dict(type='RandomCrop',
50
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
51
+ crop_type='center',
52
+ ignore_label=-1,
53
+ padding=[0, 0, 0]),
54
+ dict(type='ToTensor'),
55
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
56
+ ],
57
+ sample_ratio = 1.0,
58
+ sample_size = 20,),
59
+ # configs for the training pipeline
60
+ test=dict(
61
+ anno_path='Mapillary_PSD/annotations/test_annotations.json',
62
+ pipeline=[dict(type='BGR2RGB'),
63
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
64
+ dict(type='ResizeKeepRatio',
65
+ resize_size=(512, 960),
66
+ ignore_label=-1,
67
+ padding=[0, 0, 0]),
68
+ # dict(type='RandomCrop',
69
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
70
+ # crop_type='center',
71
+ # ignore_label=-1,
72
+ # padding=[0, 0, 0]),
73
+ dict(type='ToTensor'),
74
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
75
+ ],
76
+ sample_ratio = 1.0,
77
+ sample_size = -1,),
78
+ ),
79
+ )
external/Metric3D/training/mono/configs/_base_/datasets/matterport3d.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ # data will resized/cropped to the canonical size, refer to ._data_base_.py
3
+
4
+ Matterport3D_dataset=dict(
5
+ lib = 'Matterport3DDataset',
6
+ data_root = 'data/public_datasets',
7
+ data_name = 'Matterport3D',
8
+ transfer_to_canonical = True,
9
+ metric_scale = 4000.0,
10
+ original_focal_length = 575.6656,
11
+ original_size = (1024, 1280),
12
+ data_type='denselidar',
13
+ data = dict(
14
+ # configs for the training pipeline
15
+ train=dict(
16
+ anno_path='Matterport3D/annotations/test.json',
17
+ sample_ratio = 1.0,
18
+ sample_size = -1,
19
+ pipeline=[dict(type='BGR2RGB'),
20
+ dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
21
+ dict(type='RandomCrop',
22
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
23
+ crop_type='rand',
24
+ ignore_label=-1,
25
+ padding=[0, 0, 0]),
26
+ dict(type='RandomEdgeMask',
27
+ mask_maxsize=50,
28
+ prob=0.2,
29
+ rgb_invalid=[0,0,0],
30
+ label_invalid=-1,),
31
+ dict(type='RandomHorizontalFlip',
32
+ prob=0.4),
33
+ dict(type='PhotoMetricDistortion',
34
+ to_gray_prob=0.1,
35
+ distortion_prob=0.05,),
36
+ dict(type='RandomBlur',
37
+ prob=0.05),
38
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 50)),
39
+ dict(type='ToTensor'),
40
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
41
+ ],),
42
+
43
+ # configs for the training pipeline
44
+ val=dict(
45
+ anno_path='Matterport3D/annotations/test.json',
46
+ pipeline=[dict(type='BGR2RGB'),
47
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
48
+ dict(type='RandomCrop',
49
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
50
+ crop_type='center',
51
+ ignore_label=-1,
52
+ padding=[0, 0, 0]),
53
+ dict(type='ToTensor'),
54
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
55
+ ],
56
+ sample_ratio = 1.0,
57
+ sample_size = 20,),
58
+ # configs for the training pipeline
59
+ test=dict(
60
+ anno_path='Matterport3D/annotations/test.json',
61
+ pipeline=[dict(type='BGR2RGB'),
62
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
63
+ dict(type='ResizeKeepRatio',
64
+ resize_size=(512, 960),
65
+ ignore_label=-1,
66
+ padding=[0, 0, 0]),
67
+ # dict(type='RandomCrop',
68
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
69
+ # crop_type='center',
70
+ # ignore_label=-1,
71
+ # padding=[0, 0, 0]),
72
+ dict(type='ToTensor'),
73
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
74
+ ],
75
+ sample_ratio = 1.0,
76
+ sample_size = -1,),
77
+ ),
78
+ )
external/Metric3D/training/mono/configs/_base_/datasets/nuscenes.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+
3
+ NuScenes_dataset=dict(
4
+ lib = 'NuScenesDataset',
5
+ data_root = 'data/public_datasets',
6
+ data_name = 'NuScenes',
7
+ transfer_to_canonical = True,
8
+ metric_scale = 200.0,
9
+ original_focal_length = (877.406430795, 1200.82631362),
10
+ original_size = (1024, 1224),
11
+ data_type='lidar',
12
+ data = dict(
13
+ # configs for the training pipeline
14
+ train=dict(
15
+ anno_path='NuScenes/annotations/train_annotations_wtmpl.json',
16
+ sample_ratio = 1.0,
17
+ sample_size = -1,
18
+ pipeline=[dict(type='BGR2RGB'),
19
+ dict(type='LiDarResizeCanonical', ratio_range=(0.9, 1.4)),
20
+ dict(type='RandomCrop',
21
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
22
+ crop_type='rand',
23
+ ignore_label=-1,
24
+ padding=[0, 0, 0]),
25
+ dict(type='RandomEdgeMask',
26
+ mask_maxsize=50,
27
+ prob=0.2,
28
+ rgb_invalid=[0,0,0],
29
+ label_invalid=-1,),
30
+ dict(type='RandomHorizontalFlip',
31
+ prob=0.4),
32
+ dict(type='PhotoMetricDistortion',
33
+ to_gray_prob=0.2,
34
+ distortion_prob=0.1,),
35
+ dict(type='Weather',
36
+ prob=0.1),
37
+ dict(type='RandomBlur',
38
+ prob=0.05),
39
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
40
+ dict(type='ToTensor'),
41
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
42
+ ],),
43
+
44
+ # configs for the training pipeline
45
+ val=dict(
46
+ anno_path='NuScenes/annotations/val_annotations_wtmpl.json',
47
+ pipeline=[dict(type='BGR2RGB'),
48
+ dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
49
+ dict(type='RandomCrop',
50
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
51
+ crop_type='center',
52
+ ignore_label=-1,
53
+ padding=[0, 0, 0]),
54
+ dict(type='ToTensor'),
55
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
56
+ ],
57
+ sample_ratio = 1.0,
58
+ sample_size = 20,),
59
+ # configs for the training pipeline
60
+ test=dict(
61
+ anno_path='NuScenes/annotations/test_annotations_wtmpl.json',
62
+ pipeline=[dict(type='BGR2RGB'),
63
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
64
+ dict(type='ResizeKeepRatio',
65
+ resize_size=(512, 960),
66
+ ignore_label=-1,
67
+ padding=[0, 0, 0]),
68
+ # dict(type='RandomCrop',
69
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
70
+ # crop_type='center',
71
+ # ignore_label=-1,
72
+ # padding=[0, 0, 0]),
73
+ dict(type='ToTensor'),
74
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
75
+ ],
76
+ sample_ratio = 1.0,
77
+ sample_size = -1,),
78
+ ),
79
+ )
external/Metric3D/training/mono/configs/_base_/datasets/nuscenes_any.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+
3
+ NuScenesAny_dataset=dict(
4
+ lib = 'AnyDataset',
5
+ data_root = 'data/public_datasets',
6
+ data_name = 'NuScenes',
7
+ transfer_to_canonical = True,
8
+ metric_scale = 200.0,
9
+ original_focal_length = (877.406430795, 1200.82631362),
10
+ original_size = (1024, 1224),
11
+ data_type='lidar',
12
+ data = dict(
13
+ # configs for the training pipeline
14
+ train=dict(
15
+ anno_path='NuScenes/annotations/train_annotations_wtmpl.json',
16
+ sample_ratio = 1.0,
17
+ sample_size = -1,
18
+ pipeline=[dict(type='BGR2RGB'),
19
+ dict(type='LiDarResizeCanonical', ratio_range=(0.9, 1.4)),
20
+ dict(type='RandomCrop',
21
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
22
+ crop_type='rand',
23
+ ignore_label=-1,
24
+ padding=[0, 0, 0]),
25
+ dict(type='RandomEdgeMask',
26
+ mask_maxsize=50,
27
+ prob=0.2,
28
+ rgb_invalid=[0,0,0],
29
+ label_invalid=-1,),
30
+ dict(type='RandomHorizontalFlip',
31
+ prob=0.4),
32
+ dict(type='PhotoMetricDistortion',
33
+ to_gray_prob=0.2,
34
+ distortion_prob=0.1,),
35
+ dict(type='Weather',
36
+ prob=0.1),
37
+ dict(type='RandomBlur',
38
+ prob=0.05),
39
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
40
+ dict(type='ToTensor'),
41
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
42
+ ],),
43
+
44
+ # configs for the training pipeline
45
+ val=dict(
46
+ anno_path='NuScenes/annotations/val_annotations_wtmpl.json',
47
+ pipeline=[dict(type='BGR2RGB'),
48
+ dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
49
+ dict(type='RandomCrop',
50
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
51
+ crop_type='center',
52
+ ignore_label=-1,
53
+ padding=[0, 0, 0]),
54
+ dict(type='ToTensor'),
55
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
56
+ ],
57
+ sample_ratio = 1.0,
58
+ sample_size = -1,),
59
+ # configs for the training pipeline
60
+ test=dict(
61
+ anno_path='NuScenes/annotations/test_annotations_wtmpl.json',
62
+ pipeline=[dict(type='BGR2RGB'),
63
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
64
+ dict(type='ResizeKeepRatio',
65
+ resize_size=(512, 960),
66
+ ignore_label=-1,
67
+ padding=[0, 0, 0]),
68
+ # dict(type='RandomCrop',
69
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
70
+ # crop_type='center',
71
+ # ignore_label=-1,
72
+ # padding=[0, 0, 0]),
73
+ dict(type='ToTensor'),
74
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
75
+ ],
76
+ sample_ratio = 1.0,
77
+ sample_size = -1,),
78
+ ),
79
+ )
external/Metric3D/training/mono/configs/_base_/datasets/nyu.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ # data will resized/cropped to the canonical size, refer to ._data_base_.py
3
+
4
+ NYU_dataset=dict(
5
+ lib = 'NYUDataset',
6
+ data_root = 'data/public_datasets',
7
+ data_name = 'NYU',
8
+ transfer_to_canonical = True,
9
+ metric_scale = 6000.0,
10
+ original_focal_length = 518.857,
11
+ original_size = (480, 640),
12
+ data_type='lidar',
13
+ data = dict(
14
+ # configs for the training pipeline
15
+ train=dict(
16
+ anno_path='NYU/annotations/train.json',
17
+ sample_ratio = 1.0,
18
+ sample_size = -1,
19
+ pipeline=[dict(type='BGR2RGB'),
20
+ dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
21
+ dict(type='RandomCrop',
22
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
23
+ crop_type='rand',
24
+ ignore_label=-1,
25
+ padding=[0, 0, 0]),
26
+ dict(type='RandomEdgeMask',
27
+ mask_maxsize=50,
28
+ prob=0.2,
29
+ rgb_invalid=[0,0,0],
30
+ label_invalid=-1,),
31
+ dict(type='RandomHorizontalFlip',
32
+ prob=0.4),
33
+ dict(type='PhotoMetricDistortion',
34
+ to_gray_prob=0.2,
35
+ distortion_prob=0.1,),
36
+ dict(type='Weather',
37
+ prob=0.1),
38
+ dict(type='RandomBlur',
39
+ prob=0.05),
40
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
41
+ dict(type='ToTensor'),
42
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
43
+ ],),
44
+
45
+ # configs for the training pipeline
46
+ val=dict(
47
+ anno_path='NYU/annotations/test.json',
48
+ pipeline=[dict(type='BGR2RGB'),
49
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
50
+ dict(type='RandomCrop',
51
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
52
+ crop_type='center',
53
+ ignore_label=-1,
54
+ padding=[0, 0, 0]),
55
+ dict(type='ToTensor'),
56
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
57
+ ],
58
+ sample_ratio = 1.0,
59
+ sample_size = 20,),
60
+ # configs for the training pipeline
61
+ test=dict(
62
+ anno_path='NYU/annotations/test.json',
63
+ pipeline=[dict(type='BGR2RGB'),
64
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
65
+ dict(type='ResizeKeepRatio',
66
+ resize_size=(512, 960),
67
+ ignore_label=-1,
68
+ padding=[0, 0, 0]),
69
+ # dict(type='RandomCrop',
70
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
71
+ # crop_type='center',
72
+ # ignore_label=-1,
73
+ # padding=[0, 0, 0]),
74
+ dict(type='ToTensor'),
75
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
76
+ ],
77
+ sample_ratio = 1.0,
78
+ sample_size = -1,),
79
+ ),
80
+ )
external/Metric3D/training/mono/configs/_base_/datasets/pandaset.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+
3
+ Pandaset_dataset=dict(
4
+ lib = 'PandasetDataset',
5
+ data_root = 'data/public_datasets',
6
+ data_name = 'Pandaset',
7
+ transfer_to_canonical = True,
8
+ metric_scale = 200.0,
9
+ original_focal_length = (1970.01, 930.45, 929.84),
10
+ original_size = (1080, 1920),
11
+ data_type='lidar',
12
+ data = dict(
13
+ # configs for the training pipeline
14
+ train=dict(
15
+ anno_path='Pandaset/annotations/annotations_train.json',
16
+ sample_ratio = 1.0,
17
+ sample_size = -1,
18
+ pipeline=[dict(type='BGR2RGB'),
19
+ dict(type='LiDarResizeCanonical', ratio_range=(0.9, 1.4)),
20
+ dict(type='RandomCrop',
21
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
22
+ crop_type='rand',
23
+ ignore_label=-1,
24
+ padding=[0, 0, 0]),
25
+ dict(type='RandomEdgeMask',
26
+ mask_maxsize=50,
27
+ prob=0.2,
28
+ rgb_invalid=[0,0,0],
29
+ label_invalid=-1,),
30
+ dict(type='RandomHorizontalFlip',
31
+ prob=0.4),
32
+ dict(type='PhotoMetricDistortion',
33
+ to_gray_prob=0.2,
34
+ distortion_prob=0.1,),
35
+ dict(type='Weather',
36
+ prob=0.1),
37
+ dict(type='RandomBlur',
38
+ prob=0.05),
39
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
40
+ dict(type='ToTensor'),
41
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
42
+ ],),
43
+
44
+ # configs for the training pipeline
45
+ val=dict(
46
+ anno_path='Pandaset/annotations/annotations_val.json',
47
+ pipeline=[dict(type='BGR2RGB'),
48
+ dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
49
+ dict(type='RandomCrop',
50
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
51
+ crop_type='center',
52
+ ignore_label=-1,
53
+ padding=[0, 0, 0]),
54
+ dict(type='ToTensor'),
55
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
56
+ ],
57
+ sample_ratio = 1.0,
58
+ sample_size = 20,),
59
+ # configs for the training pipeline
60
+ test=dict(
61
+ anno_path='Pandaset/annotations/annotations_test.json',
62
+ pipeline=[dict(type='BGR2RGB'),
63
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
64
+ dict(type='ResizeKeepRatio',
65
+ resize_size=(512, 960),
66
+ ignore_label=-1,
67
+ padding=[0, 0, 0]),
68
+ # dict(type='RandomCrop',
69
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
70
+ # crop_type='center',
71
+ # ignore_label=-1,
72
+ # padding=[0, 0, 0]),
73
+ dict(type='ToTensor'),
74
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
75
+ ],
76
+ sample_ratio = 1.0,
77
+ sample_size = 800,),
78
+ ),
79
+ )
external/Metric3D/training/mono/configs/_base_/datasets/replica.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ # data will resized/cropped to the canonical size, refer to ._data_base_.py
3
+
4
+ Replica_dataset=dict(
5
+ lib = 'ReplicaDataset',
6
+ data_root = 'data/public_datasets',
7
+ data_name = 'Replica',
8
+ transfer_to_canonical = True,
9
+ metric_scale = 512.0,
10
+ original_focal_length = 575.6656,
11
+ original_size = (512, 512),
12
+ data_type='denselidar_syn',
13
+ data = dict(
14
+ # configs for the training pipeline
15
+ train=dict(
16
+ anno_path='Replica/annotations/test.json',
17
+ sample_ratio = 1.0,
18
+ sample_size = -1,
19
+ pipeline=[dict(type='BGR2RGB'),
20
+ dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
21
+ dict(type='RandomCrop',
22
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
23
+ crop_type='rand',
24
+ ignore_label=-1,
25
+ padding=[0, 0, 0]),
26
+ dict(type='RandomEdgeMask',
27
+ mask_maxsize=50,
28
+ prob=0.2,
29
+ rgb_invalid=[0,0,0],
30
+ label_invalid=-1,),
31
+ dict(type='RandomHorizontalFlip',
32
+ prob=0.4),
33
+ dict(type='PhotoMetricDistortion',
34
+ to_gray_prob=0.1,
35
+ distortion_prob=0.05,),
36
+ dict(type='RandomBlur',
37
+ prob=0.05),
38
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 50)),
39
+ dict(type='ToTensor'),
40
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
41
+ ],),
42
+
43
+ # configs for the training pipeline
44
+ val=dict(
45
+ anno_path='Replica/annotations/test.json',
46
+ pipeline=[dict(type='BGR2RGB'),
47
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
48
+ dict(type='RandomCrop',
49
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
50
+ crop_type='center',
51
+ ignore_label=-1,
52
+ padding=[0, 0, 0]),
53
+ dict(type='ToTensor'),
54
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
55
+ ],
56
+ sample_ratio = 1.0,
57
+ sample_size = 50,),
58
+ # configs for the training pipeline
59
+ test=dict(
60
+ anno_path='Replica/annotations/test.json',
61
+ pipeline=[dict(type='BGR2RGB'),
62
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
63
+ dict(type='ResizeKeepRatio',
64
+ resize_size=(512, 960),
65
+ ignore_label=-1,
66
+ padding=[0, 0, 0]),
67
+ # dict(type='RandomCrop',
68
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
69
+ # crop_type='center',
70
+ # ignore_label=-1,
71
+ # padding=[0, 0, 0]),
72
+ dict(type='ToTensor'),
73
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
74
+ ],
75
+ sample_ratio = 1.0,
76
+ sample_size = 2000,),
77
+ ),
78
+ )
external/Metric3D/training/mono/configs/_base_/datasets/scannet.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ # data will resized/cropped to the canonical size, refer to ._data_base_.py
3
+
4
+ ScanNet_dataset=dict(
5
+ lib = 'ScanNetDataset',
6
+ data_root = 'data/public_datasets',
7
+ data_name = 'ScanNet',
8
+ transfer_to_canonical = True,
9
+ metric_scale = 1000.0,
10
+ original_focal_length = 1165.371094,
11
+ original_size = (968, 1296),
12
+ data_type='lidar',
13
+ data = dict(
14
+ # configs for the training pipeline
15
+ train=dict(
16
+ anno_path='ScanNet/annotations/test.json',
17
+ sample_ratio = 1.0,
18
+ sample_size = -1,
19
+ pipeline=[dict(type='BGR2RGB'),
20
+ dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
21
+ dict(type='RandomCrop',
22
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
23
+ crop_type='rand',
24
+ ignore_label=-1,
25
+ padding=[0, 0, 0]),
26
+ dict(type='RandomEdgeMask',
27
+ mask_maxsize=50,
28
+ prob=0.2,
29
+ rgb_invalid=[0,0,0],
30
+ label_invalid=-1,),
31
+ dict(type='RandomHorizontalFlip',
32
+ prob=0.4),
33
+ dict(type='PhotoMetricDistortion',
34
+ to_gray_prob=0.2,
35
+ distortion_prob=0.1,),
36
+ dict(type='Weather',
37
+ prob=0.1),
38
+ dict(type='RandomBlur',
39
+ prob=0.05),
40
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
41
+ dict(type='ToTensor'),
42
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
43
+ ],),
44
+
45
+ # configs for the training pipeline
46
+ val=dict(
47
+ anno_path='ScanNet/annotations/test.json',
48
+ pipeline=[dict(type='BGR2RGB'),
49
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
50
+ dict(type='RandomCrop',
51
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
52
+ crop_type='center',
53
+ ignore_label=-1,
54
+ padding=[0, 0, 0]),
55
+ dict(type='ToTensor'),
56
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
57
+ ],
58
+ sample_ratio = 1.0,
59
+ sample_size = 20,),
60
+ # configs for the training pipeline
61
+ test=dict(
62
+ anno_path='ScanNet/annotations/test.json',
63
+ pipeline=[dict(type='BGR2RGB'),
64
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
65
+ dict(type='ResizeKeepRatio',
66
+ resize_size=(512, 960),
67
+ ignore_label=-1,
68
+ padding=[0, 0, 0]),
69
+ # dict(type='RandomCrop',
70
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
71
+ # crop_type='center',
72
+ # ignore_label=-1,
73
+ # padding=[0, 0, 0]),
74
+ dict(type='ToTensor'),
75
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
76
+ ],
77
+ sample_ratio = 1.0,
78
+ sample_size = -1,),
79
+ ),
80
+ )
external/Metric3D/training/mono/configs/_base_/datasets/scannet_all.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ # data will resized/cropped to the canonical size, refer to ._data_base_.py
3
+
4
+ ScanNetAll_dataset=dict(
5
+ lib = 'ScanNetDataset',
6
+ data_root = 'data/public_datasets',
7
+ data_name = 'ScanNetAll',
8
+ transfer_to_canonical = True,
9
+ metric_scale = 1000.0,
10
+ original_focal_length = 1165.371094,
11
+ original_size = (968, 1296),
12
+ data_type='denselidar',
13
+ data = dict(
14
+ # configs for the training pipeline
15
+ train=dict(
16
+ anno_path='ScanNet/annotations/test.json',
17
+ sample_ratio = 1.0,
18
+ sample_size = -1,
19
+ pipeline=[dict(type='BGR2RGB'),
20
+ dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
21
+ dict(type='RandomCrop',
22
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
23
+ crop_type='rand',
24
+ ignore_label=-1,
25
+ padding=[0, 0, 0]),
26
+ dict(type='RandomEdgeMask',
27
+ mask_maxsize=50,
28
+ prob=0.2,
29
+ rgb_invalid=[0,0,0],
30
+ label_invalid=-1,),
31
+ dict(type='RandomHorizontalFlip',
32
+ prob=0.4),
33
+ dict(type='PhotoMetricDistortion',
34
+ to_gray_prob=0.2,
35
+ distortion_prob=0.1,),
36
+ dict(type='Weather',
37
+ prob=0.1),
38
+ dict(type='RandomBlur',
39
+ prob=0.05),
40
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
41
+ dict(type='ToTensor'),
42
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
43
+ ],),
44
+
45
+ # configs for the training pipeline
46
+ val=dict(
47
+ anno_path='ScanNet/annotations/test.json',
48
+ pipeline=[dict(type='BGR2RGB'),
49
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
50
+ dict(type='RandomCrop',
51
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
52
+ crop_type='center',
53
+ ignore_label=-1,
54
+ padding=[0, 0, 0]),
55
+ dict(type='ToTensor'),
56
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
57
+ ],
58
+ sample_ratio = 1.0,
59
+ sample_size = 20,),
60
+ # configs for the training pipeline
61
+ test=dict(
62
+ anno_path='ScanNet/annotations/test.json',
63
+ pipeline=[dict(type='BGR2RGB'),
64
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
65
+ dict(type='ResizeKeepRatio',
66
+ resize_size=(512, 960),
67
+ ignore_label=-1,
68
+ padding=[0, 0, 0]),
69
+ # dict(type='RandomCrop',
70
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
71
+ # crop_type='center',
72
+ # ignore_label=-1,
73
+ # padding=[0, 0, 0]),
74
+ dict(type='ToTensor'),
75
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
76
+ ],
77
+ sample_ratio = 1.0,
78
+ sample_size = -1,),
79
+ ),
80
+ )
external/Metric3D/training/mono/configs/_base_/datasets/taskonomy.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ # data will resized/cropped to the canonical size, refer to ._data_base_.py
3
+
4
+ Taskonomy_dataset=dict(
5
+ lib = 'TaskonomyDataset',
6
+ data_root = 'data/public_datasets',
7
+ data_name = 'Taskonomy',
8
+ transfer_to_canonical = True,
9
+ metric_scale = 512.0,
10
+ original_focal_length = 575.6656,
11
+ original_size = (512, 512),
12
+ data_type='denselidar',
13
+ data = dict(
14
+ # configs for the training pipeline
15
+ train=dict(
16
+ anno_path='Taskonomy/annotations/test.json',
17
+ sample_ratio = 1.0,
18
+ sample_size = -1,
19
+ pipeline=[dict(type='BGR2RGB'),
20
+ dict(type='ResizeCanonical', ratio_range=(0.9, 1.3)),
21
+ dict(type='RandomCrop',
22
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
23
+ crop_type='rand',
24
+ ignore_label=-1,
25
+ padding=[0, 0, 0]),
26
+ dict(type='RandomEdgeMask',
27
+ mask_maxsize=50,
28
+ prob=0.0,
29
+ rgb_invalid=[0,0,0],
30
+ label_invalid=-1,),
31
+ dict(type='RandomHorizontalFlip',
32
+ prob=0.4),
33
+ dict(type='PhotoMetricDistortion',
34
+ to_gray_prob=0.1,
35
+ distortion_prob=0.05,),
36
+ dict(type='RandomBlur',
37
+ prob=0.05),
38
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 50)),
39
+ dict(type='ToTensor'),
40
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
41
+ ],),
42
+
43
+ # configs for the training pipeline
44
+ val=dict(
45
+ anno_path='Taskonomy/annotations/test.json',
46
+ pipeline=[dict(type='BGR2RGB'),
47
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
48
+ dict(type='RandomCrop',
49
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
50
+ crop_type='center',
51
+ ignore_label=-1,
52
+ padding=[0, 0, 0]),
53
+ dict(type='ToTensor'),
54
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
55
+ ],
56
+ sample_ratio = 1.0,
57
+ sample_size = 20,),
58
+ # configs for the training pipeline
59
+ test=dict(
60
+ anno_path='Taskonomy/annotations/test.json',
61
+ pipeline=[dict(type='BGR2RGB'),
62
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
63
+ dict(type='ResizeKeepRatio',
64
+ resize_size=(512, 960),
65
+ ignore_label=-1,
66
+ padding=[0, 0, 0]),
67
+ # dict(type='RandomCrop',
68
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
69
+ # crop_type='center',
70
+ # ignore_label=-1,
71
+ # padding=[0, 0, 0]),
72
+ dict(type='ToTensor'),
73
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
74
+ ],
75
+ sample_ratio = 1.0,
76
+ sample_size = 2000,),
77
+ ),
78
+ )
external/Metric3D/training/mono/configs/_base_/datasets/uasol.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+
3
+ UASOL_dataset=dict(
4
+ lib = 'UASOLDataset',
5
+ data_root = 'data/public_datasets',
6
+ data_name = 'UASOL',
7
+ transfer_to_canonical = True,
8
+ metric_scale = 200.0,
9
+ original_focal_length = (2263.9108952994275, 2263.9108952994275),
10
+ original_size = (1024, 2048),
11
+ data_type='stereo',
12
+ data = dict(
13
+ # configs for the training pipeline
14
+ train=dict(
15
+ anno_path='UASOL/annotations/train.json',
16
+ sample_ratio = 1.0,
17
+ sample_size = -1,
18
+ pipeline=[dict(type='BGR2RGB'),
19
+ dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
20
+ dict(type='RandomCrop',
21
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
22
+ crop_type='rand',
23
+ ignore_label=-1,
24
+ padding=[0, 0, 0]),
25
+ dict(type='RandomHorizontalFlip',
26
+ prob=0.4),
27
+ dict(type='PhotoMetricDistortion',
28
+ to_gray_prob=0.2,
29
+ distortion_prob=0.1,),
30
+ dict(type='Weather',
31
+ prob=0.1),
32
+ dict(type='RandomBlur',
33
+ prob=0.05),
34
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
35
+ dict(type='ToTensor'),
36
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
37
+ ],),
38
+
39
+ # configs for the training pipeline
40
+ val=dict(
41
+ anno_path='UASOL/annotations/test_all.json',
42
+ pipeline=[dict(type='BGR2RGB'),
43
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
44
+ dict(type='RandomCrop',
45
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
46
+ crop_type='center',
47
+ ignore_label=-1,
48
+ padding=[0, 0, 0]),
49
+ dict(type='ToTensor'),
50
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
51
+ ],
52
+ sample_ratio = 1.0,
53
+ sample_size = 100,),
54
+ # configs for the training pipeline
55
+ test=dict(
56
+ anno_path='UASOL/annotations/test_all.json',
57
+ pipeline=[dict(type='BGR2RGB'),
58
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
59
+ dict(type='ResizeKeepRatio',
60
+ resize_size=(512, 960),
61
+ ignore_label=-1,
62
+ padding=[0, 0, 0]),
63
+ # dict(type='RandomCrop',
64
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
65
+ # crop_type='center',
66
+ # ignore_label=-1,
67
+ # padding=[0, 0, 0]),
68
+ dict(type='ToTensor'),
69
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
70
+ ],
71
+ sample_ratio = 1.0,
72
+ sample_size = -1,),
73
+ ),
74
+ )
external/Metric3D/training/mono/configs/_base_/datasets/vkitti.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ # data will resized/cropped to the canonical size, refer to ._data_base_.py
3
+
4
+ VKITTI_dataset=dict(
5
+ lib = 'VKITTIDataset',
6
+ data_root = 'data/public_datasets',
7
+ data_name = 'VKITTI',
8
+ transfer_to_canonical = True,
9
+ metric_scale = 100.0,
10
+ original_focal_length = 725.0087,
11
+ original_size = (375, 1242),
12
+ data_type='denselidar_syn',
13
+ data = dict(
14
+ # configs for the training pipeline
15
+ train=dict(
16
+ anno_path='VKITTI/annotations/train.json',
17
+ sample_ratio = 1.0,
18
+ sample_size = -1,
19
+ pipeline=[dict(type='BGR2RGB'),
20
+ dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
21
+ dict(type='RandomCrop',
22
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
23
+ crop_type='rand',
24
+ ignore_label=-1,
25
+ padding=[0, 0, 0]),
26
+ dict(type='RandomEdgeMask',
27
+ mask_maxsize=50,
28
+ prob=0.2,
29
+ rgb_invalid=[0,0,0],
30
+ label_invalid=-1,),
31
+ dict(type='RandomHorizontalFlip',
32
+ prob=0.4),
33
+ dict(type='PhotoMetricDistortion',
34
+ to_gray_prob=0.2,
35
+ distortion_prob=0.1,),
36
+ dict(type='Weather',
37
+ prob=0.1),
38
+ dict(type='RandomBlur',
39
+ prob=0.05),
40
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
41
+ dict(type='ToTensor'),
42
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
43
+ ],),
44
+
45
+ # configs for the training pipeline
46
+ val=dict(
47
+ anno_path='VKITTI/annotations/test.json',
48
+ pipeline=[dict(type='BGR2RGB'),
49
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
50
+ dict(type='RandomCrop',
51
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
52
+ crop_type='center',
53
+ ignore_label=-1,
54
+ padding=[0, 0, 0]),
55
+ dict(type='ToTensor'),
56
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
57
+ ],
58
+ sample_ratio = 1.0,
59
+ sample_size = 50,),
60
+ # configs for the training pipeline
61
+ test=dict(
62
+ anno_path='VKITTI/annotations/test.json',
63
+ pipeline=[dict(type='BGR2RGB'),
64
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
65
+ dict(type='ResizeKeepRatio',
66
+ resize_size=(512, 960),
67
+ ignore_label=-1,
68
+ padding=[0, 0, 0]),
69
+ # dict(type='RandomCrop',
70
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
71
+ # crop_type='center',
72
+ # ignore_label=-1,
73
+ # padding=[0, 0, 0]),
74
+ dict(type='ToTensor'),
75
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
76
+ ],
77
+ sample_ratio = 1.0,
78
+ sample_size = -1,),
79
+ ),
80
+ )
external/Metric3D/training/mono/configs/_base_/datasets/waymo.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # dataset settings
2
+ # data will resized/cropped to the canonical size, refer to ._data_base_.py
3
+
4
+ Waymo_dataset=dict(
5
+ lib = 'WaymoDataset',
6
+ data_root = 'data/public_datasets',
7
+ data_name = 'Waymo',
8
+ transfer_to_canonical = True,
9
+ metric_scale = 200.0,
10
+ original_focal_length = 2000.8,
11
+ original_size = (2000, 2000),
12
+ data_type='lidar',
13
+ data = dict(
14
+ # configs for the training pipeline
15
+ train=dict(
16
+ anno_path='Waymo/annotations/train.json',
17
+ sample_ratio = 1.0,
18
+ sample_size = -1,
19
+ pipeline=[dict(type='BGR2RGB'),
20
+ dict(type='ResizeCanonical', ratio_range=(0.9, 1.4)),
21
+ dict(type='RandomCrop',
22
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
23
+ crop_type='rand',
24
+ ignore_label=-1,
25
+ padding=[0, 0, 0]),
26
+ dict(type='RandomEdgeMask',
27
+ mask_maxsize=50,
28
+ prob=0.2,
29
+ rgb_invalid=[0,0,0],
30
+ label_invalid=-1,),
31
+ dict(type='RandomHorizontalFlip',
32
+ prob=0.4),
33
+ dict(type='PhotoMetricDistortion',
34
+ to_gray_prob=0.2,
35
+ distortion_prob=0.1,),
36
+ dict(type='Weather',
37
+ prob=0.1),
38
+ dict(type='RandomBlur',
39
+ prob=0.05),
40
+ dict(type='RGBCompresion', prob=0.1, compression=(0, 40)),
41
+ dict(type='ToTensor'),
42
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
43
+ ],),
44
+
45
+ # configs for the training pipeline
46
+ val=dict(
47
+ anno_path='Waymo/annotations/test.json',
48
+ pipeline=[dict(type='BGR2RGB'),
49
+ dict(type='ResizeCanonical', ratio_range=(1.0, 1.0)),
50
+ dict(type='RandomCrop',
51
+ crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
52
+ crop_type='center',
53
+ ignore_label=-1,
54
+ padding=[0, 0, 0]),
55
+ dict(type='ToTensor'),
56
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
57
+ ],
58
+ sample_ratio = 1.0,
59
+ sample_size = 50,),
60
+ # configs for the training pipeline
61
+ test=dict(
62
+ anno_path='Waymo/annotations/test.json',
63
+ pipeline=[dict(type='BGR2RGB'),
64
+ # dict(type='LiDarResizeCanonical', ratio_range=(1.0, 1.0)),
65
+ dict(type='ResizeKeepRatio',
66
+ resize_size=(512, 960),
67
+ ignore_label=-1,
68
+ padding=[0, 0, 0]),
69
+ # dict(type='RandomCrop',
70
+ # crop_size=(0,0), # crop_size will be overwriteen by data_basic configs
71
+ # crop_type='center',
72
+ # ignore_label=-1,
73
+ # padding=[0, 0, 0]),
74
+ dict(type='ToTensor'),
75
+ dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
76
+ ],
77
+ sample_ratio = 1.0,
78
+ sample_size = -1,),
79
+ ),
80
+ )
external/Metric3D/training/mono/configs/_base_/default_runtime.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # distributed training configs, if dist_url == 'env://'('tcp://127.0.0.1:6795'), nodes related configs should be set in the shell
2
+ dist_params = dict(port=None, backend='nccl', dist_url='env://')
3
+
4
+ log_name = 'tbd'
5
+ log_file = 'out.log'
6
+
7
+ load_from = None
8
+ resume_from = None
9
+
10
+ #workflow = [('train', 1)]
11
+ cudnn_benchmark = True
12
+ log_interval = 20
13
+
14
+ use_tensorboard = True
15
+
16
+ evaluation = dict(online_eval=True, interval=1000, metrics=['abs_rel', 'delta1'])
17
+ checkpoint_config = dict(by_epoch=False, interval=16000)
18
+
19
+
20
+ # runtime settings, IterBasedRunner or EpochBasedRunner, e.g. runner = dict(type='EpochBasedRunner', max_epoches=100)
21
+ runner = dict(type='IterBasedRunner', max_iters=160000)
22
+
23
+ test_metrics = ['abs_rel', 'rmse', 'silog', 'delta1', 'delta2', 'delta3', 'rmse_log', 'log10', 'sq_rel']
external/Metric3D/training/mono/configs/_base_/losses/all_losses.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ There are multiple losses can be applied.
3
+
4
+ dict(type='GradientLoss_Li', scale_num=4, loss_weight=1.0),
5
+ dict(type='VNLoss', sample_ratio=0.2, loss_weight=1.0),
6
+ dict(type='SilogLoss', variance_focus=0.5, loss_weight=1.0),
7
+ dict(type='WCELoss', loss_weight=1.0, depth_normalize=(0.1, 1), bins_num=200)
8
+ dict(type='RegularizationLoss', loss_weight=0.1)
9
+ dict(type='EdgeguidedRankingLoss', loss_weight=1.0)
10
+ Note that out_channel and depth_normalize will be overwriten by configs in data_basic.
11
+ """
12
+
13
+ # loss_decode=[dict(type='VNLoss', sample_ratio=0.2, loss_weight=1.0),
14
+ # #dict(type='SilogLoss', variance_focus=0.5, loss_weight=1.0),
15
+ # dict(type='WCELoss', loss_weight=1.0, depth_normalize=(0, 0), out_channel=0)]
16
+
17
+ # loss_auxi = [#dict(type='WCELoss', loss_weight=1.0, depth_normalize=(0.1, 1), out_channel=200),
18
+ # ]
19
+ losses=dict(
20
+ decoder_losses=[
21
+ dict(type='VNLoss', sample_ratio=0.2, loss_weight=1.0),
22
+ dict(type='WCELoss', loss_weight=1.0, depth_normalize=(0, 0), out_channel=0),
23
+ ],
24
+ auxi_losses=[],
25
+ pose_losses=[],
26
+ )
external/Metric3D/training/mono/configs/_base_/models/backbones/dino_vit_giant2_reg.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ model = dict(
2
+ backbone=dict(
3
+ type='vit_giant2_reg',
4
+ prefix='backbones.',
5
+ out_channels=[1536, 1536, 1536, 1536],
6
+ drop_path_rate = 0.0),
7
+ )
external/Metric3D/training/mono/configs/_base_/models/backbones/dino_vit_large_reg.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ model = dict(
2
+ backbone=dict(
3
+ type='vit_large_reg',
4
+ prefix='backbones.',
5
+ out_channels=[1024, 1024, 1024, 1024],
6
+ drop_path_rate = 0.0),
7
+ )
external/Metric3D/training/mono/configs/_base_/models/backbones/dino_vit_small_reg.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ model = dict(
2
+ backbone=dict(
3
+ type='vit_small_reg',
4
+ prefix='backbones.',
5
+ out_channels=[384, 384, 384, 384],
6
+ drop_path_rate = 0.0),
7
+ )
external/Metric3D/training/mono/configs/_base_/models/encoder_decoder/dino_vit_giant2_reg.dpt_raft.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # model settings
2
+ _base_ = ['../backbones/dino_vit_giant2_reg.py']
3
+ model = dict(
4
+ type='DensePredModel',
5
+ decode_head=dict(
6
+ type='RAFTDepthDPT',
7
+ in_channels=[1536, 1536, 1536, 1536],
8
+ use_cls_token=True,
9
+ feature_channels = [384, 768, 1536, 1536], # [2/7, 1/7, 1/14, 1/14]
10
+ decoder_channels = [192, 384, 768, 1536, 1536], # [4/7, 2/7, 1/7, 1/14, 1/14]
11
+ up_scale = 7,
12
+ hidden_channels=[192, 192, 192, 192], # [x_4, x_8, x_16, x_32] [192, 384, 768, 1536]
13
+ n_gru_layers=3,
14
+ n_downsample=2,
15
+ iters=3,
16
+ slow_fast_gru=True,
17
+ num_register_tokens=4,
18
+ prefix='decode_heads.'),
19
+ )
external/Metric3D/training/mono/configs/_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # model settings
2
+ _base_ = ['../backbones/dino_vit_large_reg.py']
3
+ model = dict(
4
+ type='DensePredModel',
5
+ decode_head=dict(
6
+ type='RAFTDepthDPT',
7
+ in_channels=[1024, 1024, 1024, 1024],
8
+ use_cls_token=True,
9
+ feature_channels = [256, 512, 1024, 1024], # [2/7, 1/7, 1/14, 1/14]
10
+ decoder_channels = [128, 256, 512, 1024, 1024], # [4/7, 2/7, 1/7, 1/14, 1/14]
11
+ up_scale = 7,
12
+ hidden_channels=[128, 128, 128, 128], # [x_4, x_8, x_16, x_32] [192, 384, 768, 1536]
13
+ n_gru_layers=3,
14
+ n_downsample=2,
15
+ iters=3,
16
+ slow_fast_gru=True,
17
+ num_register_tokens=4,
18
+ prefix='decode_heads.'),
19
+ )
external/Metric3D/training/mono/configs/_base_/models/encoder_decoder/dino_vit_small_reg.dpt_raft.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # model settings
2
+ _base_ = ['../backbones/dino_vit_small_reg.py']
3
+ model = dict(
4
+ type='DensePredModel',
5
+ decode_head=dict(
6
+ type='RAFTDepthDPT',
7
+ in_channels=[384, 384, 384, 384],
8
+ use_cls_token=True,
9
+ feature_channels = [96, 192, 384, 768], # [2/7, 1/7, 1/14, 1/14]
10
+ decoder_channels = [48, 96, 192, 384, 384], # [-, 1/4, 1/7, 1/14, 1/14]
11
+ up_scale = 7,
12
+ hidden_channels=[48, 48, 48, 48], # [x_4, x_8, x_16, x_32] [1/4, 1/7, 1/14, -]
13
+ n_gru_layers=3,
14
+ n_downsample=2,
15
+ iters=3,
16
+ slow_fast_gru=True,
17
+ num_register_tokens=4,
18
+ prefix='decode_heads.'),
19
+ )