Upload 243 files
This view is limited to 50 files because it contains too many changes.
- .gitattributes +7 -0
- .gitignore +276 -0
- .gradio/certificate.pem +31 -0
- README.md +6 -5
- app.py +1985 -0
- configs/calibration_benchmark.yaml +23 -0
- configs/dataset/ase_wai/default.yaml +3 -0
- configs/dataset/ase_wai/train/default.yaml +26 -0
- configs/dataset/ase_wai/val/default.yaml +26 -0
- configs/dataset/bedlam_wai/default.yaml +3 -0
- configs/dataset/bedlam_wai/train/default.yaml +26 -0
- configs/dataset/bedlam_wai/val/default.yaml +26 -0
- configs/dataset/benchmark_512_eth3d_snpp_tav2.yaml +20 -0
- configs/dataset/benchmark_512_snpp_tav2.yaml +17 -0
- configs/dataset/benchmark_518_eth3d_snpp_tav2.yaml +20 -0
- configs/dataset/benchmark_518_snpp_tav2.yaml +17 -0
- configs/dataset/benchmark_sv_calib_518_many_ar_eth3d_snpp_tav2.yaml +20 -0
- configs/dataset/blendedmvs_wai/default.yaml +3 -0
- configs/dataset/blendedmvs_wai/train/default.yaml +26 -0
- configs/dataset/blendedmvs_wai/val/default.yaml +26 -0
- configs/dataset/default.yaml +45 -0
- configs/dataset/dl3dv_wai/default.yaml +3 -0
- configs/dataset/dl3dv_wai/train/default.yaml +28 -0
- configs/dataset/dl3dv_wai/val/default.yaml +28 -0
- configs/dataset/dtu_wai/default.yaml +2 -0
- configs/dataset/dtu_wai/test/default.yaml +22 -0
- configs/dataset/dynamicreplica_wai/default.yaml +3 -0
- configs/dataset/dynamicreplica_wai/train/default.yaml +26 -0
- configs/dataset/dynamicreplica_wai/val/default.yaml +26 -0
- configs/dataset/eth3d_wai/default.yaml +2 -0
- configs/dataset/eth3d_wai/test/default.yaml +22 -0
- configs/dataset/gta_sfm_wai/default.yaml +3 -0
- configs/dataset/gta_sfm_wai/train/default.yaml +26 -0
- configs/dataset/gta_sfm_wai/val/default.yaml +26 -0
- configs/dataset/matrixcity_wai/default.yaml +3 -0
- configs/dataset/matrixcity_wai/train/default.yaml +26 -0
- configs/dataset/matrixcity_wai/val/default.yaml +26 -0
- configs/dataset/megadepth_wai/default.yaml +3 -0
- configs/dataset/megadepth_wai/train/default.yaml +26 -0
- configs/dataset/megadepth_wai/val/default.yaml +26 -0
- configs/dataset/megatrain_11d_se_518_many_ar_48ipg_64g.yaml +53 -0
- configs/dataset/megatrain_12d_518_many_ar_24ipg_16g.yaml +56 -0
- configs/dataset/megatrain_13d_512_many_ar_24ipg_16g.yaml +59 -0
- configs/dataset/megatrain_13d_518_many_ar_24ipg_16g.yaml +59 -0
- configs/dataset/megatrain_13d_518_many_ar_48ipg_64g.yaml +59 -0
- configs/dataset/megatrain_6d_518_many_ar_48ipg_64g.yaml +38 -0
- configs/dataset/megatrain_6d_518_many_ar_48ipg_8g.yaml +38 -0
- configs/dataset/mpsd_wai/default.yaml +3 -0
- configs/dataset/mpsd_wai/train/default.yaml +26 -0
- configs/dataset/mpsd_wai/val/default.yaml +26 -0
.gitattributes
CHANGED
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+examples/**/*.jpg filter=lfs diff=lfs merge=lfs -text
+examples/**/*.jpeg filter=lfs diff=lfs merge=lfs -text
+examples/**/*.png filter=lfs diff=lfs merge=lfs -text
+examples/**/*.bmp filter=lfs diff=lfs merge=lfs -text
+examples/**/*.tiff filter=lfs diff=lfs merge=lfs -text
+examples/**/*.tif filter=lfs diff=lfs merge=lfs -text
+examples/* filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,276 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be added to the global gitignore or merged into this project gitignore. For a PyCharm
# project, it is recommended to ignore the entire .idea directory, or at least the following:
# .idea/workspace.xml
# .idea/tasks.xml
# .idea/usage.statistics.xml
# .idea/dictionaries
# .idea/shelf

# VS Code
.vscode/
*.code-workspace

# Local History for Visual Studio Code
.history/

# Built Visual Studio Code Extensions
*.vsix

# Hugging Face specific
# Model files (usually large binary files)
*.bin
*.safetensors
*.h5
*.onnx
*.pkl
*.pth
*.pt
*.ckpt
*.pb
*.tflite
*.mlmodel

# Hugging Face cache and tokens
.cache/
cache/
**/cache/
hf_token*
.huggingface/
transformers_cache/
datasets_cache/
input_images_*

# Gradio temporary files
gradio_cached_examples/
flagged/

# Data directories
data/
checkpoints/
outputs/
results/
logs/
tmp/
temp/
# examples/*/
# /examples*.jpg
# *.png
# *.jpeg
# examples/

# OS generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
desktop.ini

# Backup files
*.bak
*.swp
*.swo
*~

# Compressed files
*.7z
*.dmg
*.gz
*.iso
*.jar
*.rar
*.tar
*.zip

# IDE and editor files
.idea/
*.sublime-project
*.sublime-workspace
.vscode/settings.json
.vscode/tasks.json
.vscode/launch.json
.vscode/extensions.json

# Node modules (if any frontend components)
node_modules/
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# Docker
Dockerfile*
docker-compose*
.dockerignore

# MLOps and experiment tracking
wandb/
.neptune/
mlruns/
.mlflow/
tensorboard_logs/

# Secrets and configuration
*.secret
*.key
config.ini
.env.local
.env.*.local
secrets.json
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
-----BEGIN CERTIFICATE-----
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
-----END CERTIFICATE-----
README.md
CHANGED
@@ -1,12 +1,13 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Mapanything Gradio
+emoji: 🐠
+colorFrom: purple
+colorTo: green
 sdk: gradio
-sdk_version: 5.
+sdk_version: 5.44.1
 app_file: app.py
 pinned: false
+license: apache-2.0
 ---

 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,1985 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""
MapAnything V2: 3D Reconstruction with Object Segmentation
- Multi-view 3D reconstruction
- GroundingDINO object detection
- SAM precise segmentation
- DBSCAN clustering for cross-view object matching
"""

import gc
import os
import shutil
import sys
import time
from datetime import datetime
from pathlib import Path
from collections import defaultdict

os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

import cv2
import gradio as gr
import numpy as np
import spaces
import torch
import trimesh
from PIL import Image
from pillow_heif import register_heif_opener
from sklearn.cluster import DBSCAN

register_heif_opener()

sys.path.append("mapanything/")

from mapanything.utils.geometry import depthmap_to_world_frame, points_to_normals
from mapanything.utils.hf_utils.css_and_html import (
    GRADIO_CSS,
    MEASURE_INSTRUCTIONS_HTML,
    get_acknowledgements_html,
    get_description_html,
    get_gradio_theme,
    get_header_html,
)
from mapanything.utils.hf_utils.hf_helpers import initialize_mapanything_model
from mapanything.utils.hf_utils.visual_util import predictions_to_glb
from mapanything.utils.image import load_images, rgb


def get_logo_base64():
    """Convert WAI logo to base64 for embedding in HTML"""
    import base64

    logo_path = "examples/WAI-Logo/wai_logo.png"
    try:
        with open(logo_path, "rb") as img_file:
            img_data = img_file.read()
            base64_str = base64.b64encode(img_data).decode()
            return f"data:image/png;base64,{base64_str}"
    except FileNotFoundError:
        return None


# ============================================================================
# Configuration
# ============================================================================

# MapAnything Configuration
high_level_config = {
    "path": "configs/train.yaml",
    "hf_model_name": "facebook/map-anything",
    "model_str": "mapanything",
    "config_overrides": [
        "machine=aws",
        "model=mapanything",
        "model/task=images_only",
        "model.encoder.uses_torch_hub=false",
    ],
    "checkpoint_name": "model.safetensors",
    "config_name": "config.json",
    "trained_with_amp": True,
    "trained_with_amp_dtype": "bf16",
    "data_norm_type": "dinov2",
    "patch_size": 14,
    "resolution": 518,
}

# GroundingDINO and SAM Configuration
GROUNDING_DINO_MODEL_ID = "IDEA-Research/grounding-dino-tiny"
GROUNDING_DINO_BOX_THRESHOLD = 0.25
GROUNDING_DINO_TEXT_THRESHOLD = 0.2

SAM_MODEL_ID = "facebook/sam-vit-huge"

DEFAULT_TEXT_PROMPT = "chair . table . sofa . bed . desk . cabinet"

# Common objects prompt for detection
COMMON_OBJECTS_PROMPT = (
    "person . face . hand . "
    "chair . sofa . couch . bed . table . desk . cabinet . shelf . drawer . "
    "door . window . wall . floor . ceiling . curtain . "
    "tv . monitor . screen . computer . laptop . keyboard . mouse . "
    "phone . tablet . remote . "
    "lamp . light . chandelier . "
    "book . magazine . paper . pen . pencil . "
    "bottle . cup . glass . mug . plate . bowl . fork . knife . spoon . "
    "vase . plant . flower . pot . "
    "clock . picture . frame . mirror . "
    "pillow . cushion . blanket . towel . "
    "bag . backpack . suitcase . "
    "box . basket . container . "
    "shoe . hat . coat . "
    "toy . ball . "
    "car . bicycle . motorcycle . bus . truck . "
    "tree . grass . sky . cloud . sun . "
    "dog . cat . bird . "
    "building . house . bridge . road . street . "
    "sign . pole . bench"
)

# DBSCAN clustering configuration (eps in meters)
DBSCAN_EPS_CONFIG = {
    'sofa': 1.5,
    'bed': 1.5,
    'couch': 1.5,
    'desk': 0.8,
    'table': 0.8,
    'chair': 0.6,
    'cabinet': 0.8,
    'window': 0.5,
    'door': 0.6,
    'tv': 0.6,
    'default': 1.0
}

DBSCAN_MIN_SAMPLES = 1

# Quality control
MIN_DETECTION_CONFIDENCE = 0.35
MIN_MASK_AREA = 100

# Global model variables
model = None
grounding_dino_model = None
grounding_dino_processor = None
sam_predictor = None
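GroundingDINO consumes a single free-text query in which candidate phrases are separated by " . ", which is the convention DEFAULT_TEXT_PROMPT and COMMON_OBJECTS_PROMPT above follow. A minimal sketch of building such a prompt from a label list (the labels here are illustrative, not from the app):

# Hypothetical prompt construction; GroundingDINO expects lowercase
# phrases joined by " . ", as in the constants above.
labels = ["chair", "dining table", "floor lamp"]
text_prompt = " . ".join(label.lower() for label in labels)
print(text_prompt)  # chair . dining table . floor lamp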
# ============================================================================
# Model Loading Functions
# ============================================================================

def load_grounding_dino_model(device):
    """Load GroundingDINO model from HuggingFace"""
    global grounding_dino_model, grounding_dino_processor

    if grounding_dino_model is not None:
        print("✅ GroundingDINO already loaded")
        return

    try:
        from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection

        print(f"📥 Loading GroundingDINO from HuggingFace: {GROUNDING_DINO_MODEL_ID}")
        grounding_dino_processor = AutoProcessor.from_pretrained(GROUNDING_DINO_MODEL_ID)
        grounding_dino_model = AutoModelForZeroShotObjectDetection.from_pretrained(
            GROUNDING_DINO_MODEL_ID
        ).to(device).eval()

        print("✅ GroundingDINO loaded successfully")

    except Exception as e:
        print(f"❌ GroundingDINO loading failed: {e}")
        import traceback
        traceback.print_exc()


def load_sam_model(device):
    """Load SAM model from HuggingFace"""
    global sam_predictor

    if sam_predictor is not None:
        print("✅ SAM already loaded")
        return

    try:
        from transformers import SamModel, SamProcessor

        print(f"📥 Loading SAM from HuggingFace: {SAM_MODEL_ID}")
        sam_model = SamModel.from_pretrained(SAM_MODEL_ID).to(device).eval()
        sam_processor = SamProcessor.from_pretrained(SAM_MODEL_ID)

        # Wrap in a predictor-like interface
        class SAMPredictor:
            def __init__(self, model, processor, device):
                self.model = model
                self.processor = processor
                self.device = device
                self.image = None

            def set_image(self, image):
                """Set image for prediction"""
                if image.dtype == np.uint8:
                    self.image = Image.fromarray(image)
                else:
                    self.image = Image.fromarray((image * 255).astype(np.uint8))

            def predict(self, box, multimask_output=False):
                """Predict mask from box"""
                inputs = self.processor(
                    self.image,
                    input_boxes=[[[box]]],
                    return_tensors="pt"
                ).to(self.device)

                with torch.no_grad():
                    outputs = self.model(**inputs)

                masks = self.processor.image_processor.post_process_masks(
                    outputs.pred_masks.cpu(),
                    inputs["original_sizes"].cpu(),
                    inputs["reshaped_input_sizes"].cpu()
                )[0].squeeze().numpy()

                if len(masks.shape) == 2:
                    masks = masks[np.newaxis, ...]

                return masks, None, None

        sam_predictor = SAMPredictor(sam_model, sam_processor, device)
        print("✅ SAM loaded successfully")

    except Exception as e:
        print(f"❌ SAM loading failed: {e}")
        print("   Falling back to bbox-based masks")
        import traceback
        traceback.print_exc()
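The SAMPredictor defined above wraps the Hugging Face SamModel/SamProcessor pair behind a set_image/predict interface resembling the original segment-anything predictor, so downstream code can prompt with a single bounding box. A minimal usage sketch, assuming load_sam_model has already run and "room.jpg" is a hypothetical input image:

# Hypothetical usage of the SAMPredictor wrapper above; the box is an
# illustrative [x1, y1, x2, y2] in pixel coordinates.
import numpy as np
from PIL import Image

image = np.array(Image.open("room.jpg").convert("RGB"))
sam_predictor.set_image(image)
masks, _, _ = sam_predictor.predict(box=np.array([100, 150, 400, 480]))
print(masks.shape)  # (num_masks, H, W) boolean masks for the prompted box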
# ============================================================================
# Segmentation Functions
# ============================================================================

def generate_distinct_colors(n):
    """Generate N visually distinct colors (RGB, 0-255)"""
    import colorsys
    if n == 0:
        return []

    colors = []
    for i in range(n):
        hue = i / max(n, 1)
        rgb = colorsys.hsv_to_rgb(hue, 0.9, 0.95)
        rgb_color = tuple(int(c * 255) for c in rgb)
        colors.append(rgb_color)

    return colors


def run_grounding_dino_detection(image_np, text_prompt, device):
    """Run GroundingDINO detection"""
    if grounding_dino_model is None or grounding_dino_processor is None:
        print("⚠️ GroundingDINO not loaded")
        return []

    try:
        print(f"🔍 GroundingDINO detection: {text_prompt}")

        # Convert to PIL Image
        if image_np.dtype == np.uint8:
            pil_image = Image.fromarray(image_np)
        else:
            pil_image = Image.fromarray((image_np * 255).astype(np.uint8))

        # Preprocess
        inputs = grounding_dino_processor(images=pil_image, text=text_prompt, return_tensors="pt")
        inputs = {k: v.to(device) for k, v in inputs.items()}

        # Inference
        with torch.no_grad():
            outputs = grounding_dino_model(**inputs)

        # Post-process
        results = grounding_dino_processor.post_process_grounded_object_detection(
            outputs,
            inputs["input_ids"],
            threshold=GROUNDING_DINO_BOX_THRESHOLD,
            text_threshold=GROUNDING_DINO_TEXT_THRESHOLD,
            target_sizes=[pil_image.size[::-1]]
        )[0]

        # Convert to unified format
        detections = []
        boxes = results["boxes"].cpu().numpy()
        scores = results["scores"].cpu().numpy()
        labels = results["labels"]

        print(f"✅ Detected {len(boxes)} objects")

        for box, score, label in zip(boxes, scores, labels):
            detection = {
                'bbox': box.tolist(),  # [x1, y1, x2, y2]
                'label': label,
                'confidence': float(score)
            }
            detections.append(detection)
            print(f"  - {label}: {score:.2f}")

        return detections

    except Exception as e:
        print(f"❌ GroundingDINO detection failed: {e}")
        import traceback
        traceback.print_exc()
        return []


def run_sam_refinement(image_np, boxes):
    """Run SAM precise segmentation"""
    if sam_predictor is None:
        print("⚠️ SAM not loaded, using bbox as mask")
        # Use bbox to create simple rectangular mask
        masks = []
        h, w = image_np.shape[:2]
        for box in boxes:
            x1, y1, x2, y2 = map(int, box)
            mask = np.zeros((h, w), dtype=bool)
            mask[y1:y2, x1:x2] = True
            masks.append(mask)
        return masks

    try:
        print(f"🎯 SAM precise segmentation for {len(boxes)} regions...")
        sam_predictor.set_image(image_np)

        masks = []
        for box in boxes:
            x1, y1, x2, y2 = map(int, box)
            box_array = np.array([x1, y1, x2, y2])

            mask_output, _, _ = sam_predictor.predict(
                box=box_array,
                multimask_output=False
            )
            masks.append(mask_output[0])

        print(f"✅ SAM segmentation complete")
        return masks

    except Exception as e:
        print(f"❌ SAM segmentation failed: {e}")
        # Fallback to bbox masks
        masks = []
        h, w = image_np.shape[:2]
        for box in boxes:
            x1, y1, x2, y2 = map(int, box)
            mask = np.zeros((h, w), dtype=bool)
            mask[y1:y2, x1:x2] = True
            masks.append(mask)
        return masks
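Together, run_grounding_dino_detection and run_sam_refinement form the per-view 2D pipeline: open-vocabulary boxes first, pixel-accurate masks second, with a rectangular-mask fallback when SAM is unavailable. A minimal end-to-end sketch under the same assumptions as above (hypothetical image file, models already loaded):

# Hypothetical per-view usage of the two helpers above.
import numpy as np
import torch
from PIL import Image

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
image = np.array(Image.open("room.jpg").convert("RGB"))
detections = run_grounding_dino_detection(image, "chair . table . sofa", device)
masks = run_sam_refinement(image, [d["bbox"] for d in detections])
for det, mask in zip(detections, masks):
    print(f"{det['label']}: conf={det['confidence']:.2f}, area={int(mask.sum())} px")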
def normalize_label(label):
    """Normalize label to main category"""
    label = label.strip().lower()

    priority_labels = ['sofa', 'bed', 'table', 'desk', 'chair', 'cabinet', 'window', 'door']

    for priority in priority_labels:
        if priority in label:
            return priority

    first_word = label.split()[0] if label else label

    # Handle plural forms
    if first_word.endswith('s') and len(first_word) > 1:
        singular = first_word[:-1]
        if first_word.endswith('sses'):
            singular = first_word[:-2]
        elif first_word.endswith('ies'):
            singular = first_word[:-3] + 'y'
        elif first_word.endswith('ves'):
            singular = first_word[:-3] + 'f'
        return singular

    return first_word


def compute_object_3d_center(points, mask):
    """Compute 3D center of object"""
    masked_points = points[mask]
    if len(masked_points) == 0:
        return None
    return np.median(masked_points, axis=0)


def compute_adaptive_eps(centers, base_eps):
    """Adaptively compute eps value based on object distribution"""
    if len(centers) <= 1:
        return base_eps

    from scipy.spatial.distance import pdist
    distances = pdist(centers)

    if len(distances) == 0:
        return base_eps

    median_dist = np.median(distances)

    if median_dist > base_eps * 2:
        adaptive_eps = min(median_dist * 0.6, base_eps * 2.5)
    elif median_dist > base_eps:
        adaptive_eps = median_dist * 0.5
    else:
        adaptive_eps = base_eps

    return adaptive_eps
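compute_adaptive_eps widens the clustering radius when same-label detections are spread out: if the median pairwise distance between 3D centers exceeds twice the per-class base eps, the radius grows to 0.6 times that median, capped at 2.5 times the base. A worked toy example with invented coordinates:

# Toy check of the widening rule above: median pairwise distance 3.0 m
# against base_eps 0.6 m gives eps = min(3.0 * 0.6, 0.6 * 2.5) = 1.5 m.
import numpy as np
from scipy.spatial.distance import pdist

centers = np.array([[0.0, 0.0, 0.0], [3.0, 0.0, 0.0], [6.0, 0.0, 0.0]])
median_dist = np.median(pdist(centers))  # pairwise distances [3, 6, 3] -> 3.0
eps = min(median_dist * 0.6, 0.6 * 2.5)
print(median_dist, eps)  # 3.0 1.5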
def match_objects_across_views(all_view_detections):
    """Match objects across views using DBSCAN clustering"""
    print("\n🔗 Matching objects across views using DBSCAN clustering...")

    objects_by_label = defaultdict(list)

    for view_idx, detections in enumerate(all_view_detections):
        for det_idx, det in enumerate(detections):
            if det.get('center_3d') is None:
                continue

            norm_label = normalize_label(det['label'])
            objects_by_label[norm_label].append({
                'view_idx': view_idx,
                'det_idx': det_idx,
                'label': det['label'],
                'norm_label': norm_label,
                'center_3d': det['center_3d'],
                'confidence': det['confidence'],
            })

    if len(objects_by_label) == 0:
        return {}, []

    object_id_map = defaultdict(dict)
    unique_objects = []
    next_global_id = 0

    for norm_label, objects in objects_by_label.items():
        print(f"\n  📦 Processing {norm_label}: {len(objects)} detections")

        if len(objects) == 1:
            obj = objects[0]
            unique_objects.append({
                'global_id': next_global_id,
                'label': obj['label'],
                'views': [(obj['view_idx'], obj['det_idx'])],
                'center_3d': obj['center_3d'],
            })
            object_id_map[obj['view_idx']][obj['det_idx']] = next_global_id
            next_global_id += 1
            print(f"    → 1 cluster (single detection)")
            continue

        centers = np.array([obj['center_3d'] for obj in objects])

        base_eps = DBSCAN_EPS_CONFIG.get(norm_label, DBSCAN_EPS_CONFIG.get('default', 1.0))
        eps = compute_adaptive_eps(centers, base_eps)

        clustering = DBSCAN(eps=eps, min_samples=DBSCAN_MIN_SAMPLES, metric='euclidean')
        cluster_labels = clustering.fit_predict(centers)

        n_clusters = len(set(cluster_labels)) - (1 if -1 in cluster_labels else 0)
        n_noise = list(cluster_labels).count(-1)

        if eps != base_eps:
            print(f"    → {n_clusters} clusters (base_eps={base_eps}m → adaptive_eps={eps:.2f}m)")
        else:
            print(f"    → {n_clusters} clusters (eps={eps}m)")
        if n_noise > 0:
            print(f"    ⚠️ {n_noise} noise points (isolated detections)")

        for cluster_id in set(cluster_labels):
            if cluster_id == -1:
                for i, label in enumerate(cluster_labels):
                    if label == -1:
                        obj = objects[i]
                        unique_objects.append({
                            'global_id': next_global_id,
                            'label': obj['label'],
                            'views': [(obj['view_idx'], obj['det_idx'])],
                            'center_3d': obj['center_3d'],
                        })
                        object_id_map[obj['view_idx']][obj['det_idx']] = next_global_id
                        next_global_id += 1
            else:
                cluster_objects = [objects[i] for i, label in enumerate(cluster_labels) if label == cluster_id]

                total_conf = sum(o['confidence'] for o in cluster_objects)
                weighted_center = sum(o['center_3d'] * o['confidence'] for o in cluster_objects) / total_conf

                unique_objects.append({
                    'global_id': next_global_id,
                    'label': cluster_objects[0]['label'],
                    'views': [(o['view_idx'], o['det_idx']) for o in cluster_objects],
                    'center_3d': weighted_center,
                })

                for obj in cluster_objects:
                    object_id_map[obj['view_idx']][obj['det_idx']] = next_global_id

                next_global_id += 1

    print(f"\n  📊 Summary:")
    print(f"    Total detections: {sum(len(objs) for objs in objects_by_label.values())}")
    print(f"    Unique objects: {len(unique_objects)}")

    return object_id_map, unique_objects
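The matcher above is plain DBSCAN in world space, run once per normalized label with min_samples=1, so every detection either joins a cluster (one physical object seen from several views) or becomes its own object. A self-contained toy illustration of that core step (coordinates invented):

# Toy illustration: two "chair" centers ~5 cm apart merge into one object;
# the third, meters away, stays its own cluster. min_samples=1 means no noise.
import numpy as np
from sklearn.cluster import DBSCAN

centers = np.array([
    [1.00, 0.50, 2.00],  # chair seen in view 0
    [1.05, 0.48, 2.02],  # same chair seen in view 1
    [4.00, 0.50, 1.00],  # a different chair
])
print(DBSCAN(eps=0.6, min_samples=1).fit_predict(centers))  # [0 0 1]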
def create_multi_view_segmented_mesh(processed_data, all_view_detections, all_view_masks,
                                     object_id_map, unique_objects, target_dir):
    """Create multi-view fused segmented mesh"""
    try:
        print("\n🎨 Generating multi-view segmented mesh...")

        unique_normalized_labels = sorted(set(normalize_label(obj['label']) for obj in unique_objects))
        label_colors = {}
        colors = generate_distinct_colors(len(unique_normalized_labels))

        for i, norm_label in enumerate(unique_normalized_labels):
            label_colors[norm_label] = colors[i]

        for obj in unique_objects:
            norm_label = normalize_label(obj['label'])
            obj['color'] = label_colors[norm_label]
            obj['normalized_label'] = norm_label

        print(f"  Object category color mapping:")
        for norm_label, color in sorted(label_colors.items()):
            count = sum(1 for obj in unique_objects if normalize_label(obj['label']) == norm_label)
            print(f"    {norm_label} × {count} → RGB{color}")

        import utils3d

        all_meshes = []

        for view_idx in range(len(processed_data)):
            view_data = processed_data[view_idx]
            image = view_data["image"]
            points3d = view_data["points3d"]
            mask = view_data.get("mask")
            normal = view_data.get("normal")

            detections = all_view_detections[view_idx]
            masks = all_view_masks[view_idx]

            if len(detections) == 0:
                continue

            if image.dtype != np.uint8:
                if image.max() <= 1.0:
                    image = (image * 255).astype(np.uint8)
                else:
                    image = image.astype(np.uint8)

            colored_image = image.copy()
            confidence_map = np.zeros((image.shape[0], image.shape[1]), dtype=np.float32)

            detections_info = []
            filtered_count = 0
            for det_idx, (det, seg_mask) in enumerate(zip(detections, masks)):
                if det['confidence'] < MIN_DETECTION_CONFIDENCE:
                    filtered_count += 1
                    continue

                mask_area = seg_mask.sum()
                if mask_area < MIN_MASK_AREA:
                    filtered_count += 1
                    continue

                global_id = object_id_map[view_idx].get(det_idx)
                if global_id is None:
                    continue

                unique_obj = next((obj for obj in unique_objects if obj['global_id'] == global_id), None)
                if unique_obj is None:
                    continue

                detections_info.append({
                    'mask': seg_mask,
                    'color': unique_obj['color'],
                    'confidence': det['confidence'],
                })

            if filtered_count > 0:
                print(f"  View {view_idx + 1}: filtered {filtered_count} low-quality detections")

            detections_info.sort(key=lambda x: x['confidence'])

            for info in detections_info:
                seg_mask = info['mask']
                color = info['color']
                conf = info['confidence']

                update_mask = seg_mask & (conf > confidence_map)
                colored_image[update_mask] = color
                confidence_map[update_mask] = conf

            height, width = image.shape[:2]

            if normal is None:
                faces, vertices, vertex_colors, vertex_uvs = utils3d.numpy.image_mesh(
                    points3d,
                    colored_image.astype(np.float32) / 255,
                    utils3d.numpy.image_uv(width=width, height=height),
                    mask=mask if mask is not None else np.ones((height, width), dtype=bool),
                    tri=True
                )
                vertex_normals = None
            else:
                faces, vertices, vertex_colors, vertex_uvs, vertex_normals = utils3d.numpy.image_mesh(
                    points3d,
                    colored_image.astype(np.float32) / 255,
                    utils3d.numpy.image_uv(width=width, height=height),
                    normal,
                    mask=mask if mask is not None else np.ones((height, width), dtype=bool),
                    tri=True
                )

            vertices = vertices * np.array([1, -1, -1], dtype=np.float32)
            if vertex_normals is not None:
                vertex_normals = vertex_normals * np.array([1, -1, -1], dtype=np.float32)

            view_mesh = trimesh.Trimesh(
                vertices=vertices,
                faces=faces,
                vertex_normals=vertex_normals,
                vertex_colors=(vertex_colors * 255).astype(np.uint8),
                process=False
            )

            all_meshes.append(view_mesh)
            print(f"  View {view_idx + 1}: {len(vertices):,} vertices, {len(faces):,} faces")

        if len(all_meshes) == 0:
            print("⚠️ No mesh generated")
            return None

        print("  Fusing all views...")
        combined_mesh = trimesh.util.concatenate(all_meshes)

        glb_path = os.path.join(target_dir, 'segmented_mesh.glb')
        combined_mesh.export(glb_path)

        print(f"✅ Multi-view segmented mesh saved: {glb_path}")
        print(f"  Total: {len(combined_mesh.vertices):,} vertices, {len(combined_mesh.faces):,} faces")
        print(f"  {len(unique_objects)} unique objects")

        return glb_path

    except Exception as e:
        print(f"❌ Failed to generate multi-view mesh: {e}")
        import traceback
        traceback.print_exc()
        return None
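Within each view, overlapping masks are painted in ascending confidence while a per-pixel confidence map guards every write, so the highest-confidence object always owns a contested pixel. A tiny 2D sketch of that rule with toy 4x4 masks:

# Toy demo of the confidence-guarded painting used above: color 2 (conf 0.9)
# wins the overlap against color 1 (conf 0.5).
import numpy as np

canvas = np.zeros((4, 4), dtype=int)
conf_map = np.zeros((4, 4), dtype=np.float32)
objects = [(1, 0.5, (slice(0, 3), slice(0, 3))),
           (2, 0.9, (slice(1, 4), slice(1, 4)))]
for color, conf, region in sorted(objects, key=lambda o: o[1]):
    m = np.zeros((4, 4), dtype=bool)
    m[region] = True
    update = m & (conf > conf_map)
    canvas[update] = color
    conf_map[update] = conf
print(canvas)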
# ============================================================================
# Core Model Inference
# ============================================================================

@spaces.GPU(duration=120)
def run_model(
    target_dir,
    apply_mask=True,
    mask_edges=True,
    filter_black_bg=False,
    filter_white_bg=False,
    enable_segmentation=False,
    text_prompt=DEFAULT_TEXT_PROMPT,
):
    """
    Run the MapAnything model + optional segmentation
    """
    global model
    import torch

    print(f"Processing images from {target_dir}")

    device = "cuda" if torch.cuda.is_available() else "cpu"
    device = torch.device(device)

    # Initialize MapAnything model
    if model is None:
        model = initialize_mapanything_model(high_level_config, device)
    else:
        model = model.to(device)

    model.eval()

    # Load segmentation models if enabled
    if enable_segmentation:
        load_grounding_dino_model(device)
        load_sam_model(device)

    # Load images
    print("Loading images...")
    image_folder_path = os.path.join(target_dir, "images")
    views = load_images(image_folder_path)

    print(f"Loaded {len(views)} images")
    if len(views) == 0:
        raise ValueError("No images found. Check your upload.")

    # Run model inference
    print("Running inference...")
    outputs = model.infer(
        views, apply_mask=apply_mask, mask_edges=True, memory_efficient_inference=False
    )

    # Convert predictions
    predictions = {}
    extrinsic_list = []
    intrinsic_list = []
    world_points_list = []
    depth_maps_list = []
    images_list = []
    final_mask_list = []

    for pred in outputs:
        depthmap_torch = pred["depth_z"][0].squeeze(-1)
        intrinsics_torch = pred["intrinsics"][0]
        camera_pose_torch = pred["camera_poses"][0]

        pts3d_computed, valid_mask = depthmap_to_world_frame(
            depthmap_torch, intrinsics_torch, camera_pose_torch
        )

        if "mask" in pred:
            mask = pred["mask"][0].squeeze(-1).cpu().numpy().astype(bool)
        else:
            mask = np.ones_like(depthmap_torch.cpu().numpy(), dtype=bool)

        mask = mask & valid_mask.cpu().numpy()
        image = pred["img_no_norm"][0].cpu().numpy()

        extrinsic_list.append(camera_pose_torch.cpu().numpy())
        intrinsic_list.append(intrinsics_torch.cpu().numpy())
        world_points_list.append(pts3d_computed.cpu().numpy())
        depth_maps_list.append(depthmap_torch.cpu().numpy())
        images_list.append(image)
        final_mask_list.append(mask)

    predictions["extrinsic"] = np.stack(extrinsic_list, axis=0)
    predictions["intrinsic"] = np.stack(intrinsic_list, axis=0)
    predictions["world_points"] = np.stack(world_points_list, axis=0)

    depth_maps = np.stack(depth_maps_list, axis=0)
    if len(depth_maps.shape) == 3:
        depth_maps = depth_maps[..., np.newaxis]
    predictions["depth"] = depth_maps

    predictions["images"] = np.stack(images_list, axis=0)
    predictions["final_mask"] = np.stack(final_mask_list, axis=0)

    # Process visualization data
    processed_data = process_predictions_for_visualization(
        predictions, views, high_level_config, filter_black_bg, filter_white_bg
    )

    # Segmentation processing
    segmented_glb = None
    if enable_segmentation and grounding_dino_model is not None:
        print("\n🎯 Starting segmentation...")
        print(f"🔍 Detection prompt: {text_prompt[:100]}...")

        all_view_detections = []
        all_view_masks = []

        for view_idx, ref_image in enumerate(images_list):
            print(f"\n📸 Processing view {view_idx + 1}/{len(images_list)}...")

            if ref_image.dtype != np.uint8:
                ref_image_np = (ref_image * 255).astype(np.uint8)
            else:
                ref_image_np = ref_image

            detections = run_grounding_dino_detection(ref_image_np, text_prompt, device)

            if len(detections) > 0:
                boxes = [d['bbox'] for d in detections]
                masks = run_sam_refinement(ref_image_np, boxes)

                points3d = world_points_list[view_idx]

                for det_idx, (det, mask) in enumerate(zip(detections, masks)):
                    center_3d = compute_object_3d_center(points3d, mask)
                    det['center_3d'] = center_3d
                    det['mask_2d'] = mask

                all_view_detections.append(detections)
                all_view_masks.append(masks)
            else:
                all_view_detections.append([])
                all_view_masks.append([])

        # Match objects across views
        if any(len(dets) > 0 for dets in all_view_detections):
            object_id_map, unique_objects = match_objects_across_views(all_view_detections)

            # Generate segmented mesh
            segmented_glb = create_multi_view_segmented_mesh(
                processed_data, all_view_detections, all_view_masks,
                object_id_map, unique_objects, target_dir
            )

    # Cleanup
    torch.cuda.empty_cache()

    return predictions, processed_data, segmented_glb
| 827 |
+
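
# NOTE (reference sketch, not used by the app): the back-projection performed
# above via depthmap_to_world_frame is assumed to be the standard pinhole
# unprojection followed by a rigid transform into the world frame. A minimal
# NumPy version under that assumption (the real helper works on torch tensors
# and also returns a validity mask; this name and signature are illustrative):
def _depth_to_world_numpy_sketch(depth, K, cam_to_world):
    """Back-project an (H, W) z-depth map to (H, W, 3) world-frame points."""
    h, w = depth.shape
    u, v = np.meshgrid(np.arange(w), np.arange(h))      # pixel grid
    pix = np.stack([u, v, np.ones_like(u)], axis=-1)    # homogeneous pixel coords
    rays = pix @ np.linalg.inv(K).T                     # camera-frame rays (z = 1)
    pts_cam = rays * depth[..., None]                   # scale rays by depth
    R, t = cam_to_world[:3, :3], cam_to_world[:3, 3]
    return pts_cam @ R.T + t                            # rotate + translate to world
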

# ============================================================================
# Helper Functions (from app.py)
# ============================================================================

def update_view_selectors(processed_data):
    """Update view selector dropdowns based on the number of available views."""
    if processed_data is None or len(processed_data) == 0:
        choices = ["View 1"]
    else:
        num_views = len(processed_data)
        choices = [f"View {i + 1}" for i in range(num_views)]

    return (
        gr.Dropdown(choices=choices, value=choices[0]),
        gr.Dropdown(choices=choices, value=choices[0]),
        gr.Dropdown(choices=choices, value=choices[0]),
    )


def get_view_data_by_index(processed_data, view_index):
    """Get view data by index, clamping out-of-range indices to 0."""
    if processed_data is None or len(processed_data) == 0:
        return None

    view_keys = list(processed_data.keys())
    if view_index < 0 or view_index >= len(view_keys):
        view_index = 0

    return processed_data[view_keys[view_index]]


def update_depth_view(processed_data, view_index):
    """Update the depth view for a specific view index."""
    view_data = get_view_data_by_index(processed_data, view_index)
    if view_data is None or view_data["depth"] is None:
        return None

    return colorize_depth(view_data["depth"], mask=view_data.get("mask"))


def update_normal_view(processed_data, view_index):
    """Update the normal view for a specific view index."""
    view_data = get_view_data_by_index(processed_data, view_index)
    if view_data is None or view_data["normal"] is None:
        return None

    return colorize_normal(view_data["normal"], mask=view_data.get("mask"))


def update_measure_view(processed_data, view_index):
    """Update the measure view for a specific view index, with a mask overlay."""
    view_data = get_view_data_by_index(processed_data, view_index)
    if view_data is None:
        return None, []

    image = view_data["image"].copy()

    if image.dtype != np.uint8:
        if image.max() <= 1.0:
            image = (image * 255).astype(np.uint8)
        else:
            image = image.astype(np.uint8)

    if view_data["mask"] is not None:
        mask = view_data["mask"]
        invalid_mask = ~mask

        if invalid_mask.any():
            # Blend a light red overlay onto invalid (masked-out) pixels
            overlay_color = np.array([255, 220, 220], dtype=np.uint8)
            alpha = 0.5
            for c in range(3):
                image[:, :, c] = np.where(
                    invalid_mask,
                    (1 - alpha) * image[:, :, c] + alpha * overlay_color[c],
                    image[:, :, c],
                ).astype(np.uint8)

    return image, []


def navigate_depth_view(processed_data, current_selector_value, direction):
    """Navigate the depth view (direction: -1 for previous, +1 for next)."""
    if processed_data is None or len(processed_data) == 0:
        return "View 1", None

    try:
        current_view = int(current_selector_value.split()[1]) - 1
    except (AttributeError, IndexError, ValueError):
        current_view = 0

    num_views = len(processed_data)
    new_view = (current_view + direction) % num_views

    new_selector_value = f"View {new_view + 1}"
    depth_vis = update_depth_view(processed_data, new_view)

    return new_selector_value, depth_vis


def navigate_normal_view(processed_data, current_selector_value, direction):
    """Navigate the normal view (direction: -1 for previous, +1 for next)."""
    if processed_data is None or len(processed_data) == 0:
        return "View 1", None

    try:
        current_view = int(current_selector_value.split()[1]) - 1
    except (AttributeError, IndexError, ValueError):
        current_view = 0

    num_views = len(processed_data)
    new_view = (current_view + direction) % num_views

    new_selector_value = f"View {new_view + 1}"
    normal_vis = update_normal_view(processed_data, new_view)

    return new_selector_value, normal_vis


def navigate_measure_view(processed_data, current_selector_value, direction):
    """Navigate the measure view (direction: -1 for previous, +1 for next)."""
    if processed_data is None or len(processed_data) == 0:
        return "View 1", None, []

    try:
        current_view = int(current_selector_value.split()[1]) - 1
    except (AttributeError, IndexError, ValueError):
        current_view = 0

    num_views = len(processed_data)
    new_view = (current_view + direction) % num_views

    new_selector_value = f"View {new_view + 1}"
    measure_image, measure_points = update_measure_view(processed_data, new_view)

    return new_selector_value, measure_image, measure_points

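
# NOTE (possible refactor, sketch only): the three navigate_* helpers above
# differ only in which update function they call, so the shared selector
# parsing and modulo wrap-around could be factored out like this (hypothetical
# helper, not wired into the UI):
def _navigate_view_sketch(processed_data, selector_value, direction, update_fn):
    """Return (new selector label, payload from update_fn for the new view)."""
    if processed_data is None or len(processed_data) == 0:
        return "View 1", None
    try:
        current = int(selector_value.split()[1]) - 1
    except (AttributeError, IndexError, ValueError):
        current = 0
    new_view = (current + direction) % len(processed_data)  # wrap around
    return f"View {new_view + 1}", update_fn(processed_data, new_view)
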
def populate_visualization_tabs(processed_data):
    """Populate the depth, normal, and measure tabs with processed data."""
    if processed_data is None or len(processed_data) == 0:
        return None, None, None, []

    depth_vis = update_depth_view(processed_data, 0)
    normal_vis = update_normal_view(processed_data, 0)
    measure_img, _ = update_measure_view(processed_data, 0)

    return depth_vis, normal_vis, measure_img, []


def handle_uploads(unified_upload, s_time_interval=1.0):
    """
    Create a new 'target_dir' + 'images' subfolder and place user-uploaded
    images or extracted video frames into it. Return (target_dir, image_paths).
    """
    start_time = time.time()
    gc.collect()
    torch.cuda.empty_cache()

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
    target_dir = f"input_images_{timestamp}"
    target_dir_images = os.path.join(target_dir, "images")

    if os.path.exists(target_dir):
        shutil.rmtree(target_dir)
    os.makedirs(target_dir)
    os.makedirs(target_dir_images)

    image_paths = []

    if unified_upload is not None:
        for file_data in unified_upload:
            if isinstance(file_data, dict) and "name" in file_data:
                file_path = file_data["name"]
            else:
                file_path = str(file_data)

            file_ext = os.path.splitext(file_path)[1].lower()

            video_extensions = [
                ".mp4", ".avi", ".mov", ".mkv", ".wmv", ".flv", ".webm", ".m4v", ".3gp",
            ]
            if file_ext in video_extensions:
                vs = cv2.VideoCapture(file_path)
                fps = vs.get(cv2.CAP_PROP_FPS)
                # Guard against fps * interval < 1, which would otherwise
                # make the modulo below divide by zero
                frame_interval = max(1, int(fps * s_time_interval))

                count = 0
                video_frame_num = 0
                while True:
                    gotit, frame = vs.read()
                    if not gotit:
                        break
                    count += 1
                    if count % frame_interval == 0:
                        base_name = os.path.splitext(os.path.basename(file_path))[0]
                        image_path = os.path.join(
                            target_dir_images, f"{base_name}_{video_frame_num:06}.png"
                        )
                        cv2.imwrite(image_path, frame)
                        image_paths.append(image_path)
                        video_frame_num += 1
                vs.release()
                print(f"Extracted {video_frame_num} frames from video: {os.path.basename(file_path)}")

            else:
                if file_ext in [".heic", ".heif"]:
                    try:
                        with Image.open(file_path) as img:
                            if img.mode not in ("RGB", "L"):
                                img = img.convert("RGB")

                            base_name = os.path.splitext(os.path.basename(file_path))[0]
                            dst_path = os.path.join(target_dir_images, f"{base_name}.jpg")

                            img.save(dst_path, "JPEG", quality=95)
                            image_paths.append(dst_path)
                            print(f"Converted HEIC to JPEG: {os.path.basename(file_path)} -> {os.path.basename(dst_path)}")
                    except Exception as e:
                        # Fall back to copying the original file untouched
                        print(f"Error converting HEIC file {file_path}: {e}")
                        dst_path = os.path.join(target_dir_images, os.path.basename(file_path))
                        shutil.copy(file_path, dst_path)
                        image_paths.append(dst_path)
                else:
                    dst_path = os.path.join(target_dir_images, os.path.basename(file_path))
                    shutil.copy(file_path, dst_path)
                    image_paths.append(dst_path)

    image_paths = sorted(image_paths)

    end_time = time.time()
    print(f"Files processed to {target_dir_images}; took {end_time - start_time:.3f} seconds")
    return target_dir, image_paths

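
# NOTE (worked example): with the frame sampling above, a 30 fps video and the
# default s_time_interval of 1.0 s keeps every 30th frame, i.e. roughly one
# image per second of footage:
#     frame_interval = max(1, int(30 * 1.0))   # -> 30
#     # a 10-second clip (~300 frames) therefore yields ~10 extracted images
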

def update_gallery_on_upload(input_video, input_images, s_time_interval=1.0):
    """Update the gallery on upload (video and/or images)."""
    if not input_video and not input_images:
        return None, None, None, None, None
    # handle_uploads expects a single list of files, so merge both inputs
    # (the original call passed video and images as separate arguments)
    all_files = []
    if input_video:
        all_files.append(input_video)
    if input_images:
        all_files.extend(input_images)
    target_dir, image_paths = handle_uploads(all_files, s_time_interval)
    return (
        None,
        None,
        target_dir,
        image_paths,
        "Upload complete. Click 'Reconstruct' to begin 3D processing.",
    )


@spaces.GPU(duration=120)
def gradio_demo(
    target_dir,
    frame_filter="All",
    show_cam=True,
    filter_black_bg=False,
    filter_white_bg=False,
    conf_thres=3.0,
    apply_mask=True,
    show_mesh=True,
    enable_segmentation=False,
    text_prompt=DEFAULT_TEXT_PROMPT,
    use_sam=True,
):
    """Run the reconstruction."""
    if not os.path.isdir(target_dir) or target_dir == "None":
        return None, None, "❌ No valid target directory found; please upload files first", None, None, None, None, None, None, None, None, None

    start_time = time.time()
    gc.collect()
    torch.cuda.empty_cache()

    target_dir_images = os.path.join(target_dir, "images")
    all_files = (
        sorted(os.listdir(target_dir_images))
        if os.path.isdir(target_dir_images)
        else []
    )
    all_files = [f"{i}: {filename}" for i, filename in enumerate(all_files)]
    frame_filter_choices = ["All"] + all_files

    print("Running the MapAnything model...")
    with torch.no_grad():
        predictions, processed_data, segmented_glb = run_model(
            target_dir, apply_mask, True, filter_black_bg, filter_white_bg,
            enable_segmentation, text_prompt
        )

    # Save predictions for later re-visualization
    prediction_save_path = os.path.join(target_dir, "predictions.npz")
    np.savez(prediction_save_path, **predictions)

    if frame_filter is None:
        frame_filter = "All"

    # Build the GLB filename
    glbfile = os.path.join(
        target_dir,
        f"glbscene_{frame_filter.replace('.', '_').replace(':', '').replace(' ', '_')}_cam{show_cam}_mesh{show_mesh}.glb",
    )

    # Convert predictions to a GLB scene
    glbscene = predictions_to_glb(
        predictions,
        filter_by_frames=frame_filter,
        show_cam=show_cam,
        mask_black_bg=filter_black_bg,
        mask_white_bg=filter_white_bg,
        as_mesh=show_mesh,
        conf_percentile=conf_thres,
    )
    glbscene.export(file_obj=glbfile)

    # Free memory
    del predictions
    gc.collect()
    torch.cuda.empty_cache()

    end_time = time.time()
    print(f"Total time: {end_time - start_time:.2f}s")
    log_msg = f"✅ Reconstruction succeeded ({len(all_files)} frames)"

    # Populate visualization tabs
    depth_vis, normal_vis, measure_img, measure_pts = populate_visualization_tabs(
        processed_data
    )

    # Update view selectors
    depth_selector, normal_selector, measure_selector = update_view_selectors(
        processed_data
    )

    return (
        glbfile,
        segmented_glb,
        log_msg,
        gr.Dropdown(choices=frame_filter_choices, value=frame_filter, interactive=True),
        processed_data,
        depth_vis,
        normal_vis,
        measure_img,
        "",
        depth_selector,
        normal_selector,
        measure_selector,
    )

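
# NOTE (offline inspection, sketch only): because gradio_demo saves each run to
# <target_dir>/predictions.npz, results can be examined without the UI; the
# directory name below is a placeholder for a real run:
#     import numpy as np
#     data = np.load("input_images_<timestamp>/predictions.npz", allow_pickle=True)
#     print(list(data.keys()))    # extrinsic, intrinsic, world_points, depth, ...
#     print(data["depth"].shape)  # (num_views, H, W, 1)
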

def colorize_depth(depth_map, mask=None):
    """Convert a depth map to a colorized visualization with an optional mask."""
    if depth_map is None:
        return None

    depth_normalized = depth_map.copy()
    valid_mask = depth_normalized > 0

    if mask is not None:
        valid_mask = valid_mask & mask

    if valid_mask.sum() > 0:
        # Robust 5th-95th percentile normalization, so a few outliers don't
        # wash out the color range
        valid_depths = depth_normalized[valid_mask]
        p5 = np.percentile(valid_depths, 5)
        p95 = np.percentile(valid_depths, 95)

        denom = max(p95 - p5, 1e-8)  # avoid division by zero on flat depth
        depth_normalized[valid_mask] = (depth_normalized[valid_mask] - p5) / denom

    import matplotlib.pyplot as plt

    colormap = plt.cm.turbo_r
    colored = colormap(depth_normalized)
    colored = (colored[:, :, :3] * 255).astype(np.uint8)

    colored[~valid_mask] = [255, 255, 255]

    return colored

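
# NOTE (worked example): percentile scaling keeps one far outlier from
# flattening the rest of the depth range, unlike min/max scaling:
#     d = np.array([1.0, 2.0, 3.0, 50.0])      # one 50 m outlier
#     p5, p95 = np.percentile(d, [5, 95])      # ~1.15, ~42.95
#     (d - p5) / (p95 - p5)                    # first three values stay spread out
#     (d - d.min()) / (d.max() - d.min())      # min/max squashes them near 0
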

def colorize_normal(normal_map, mask=None):
    """Convert a normal map to a colorized visualization with an optional mask."""
    if normal_map is None:
        return None

    normal_vis = normal_map.copy()

    if mask is not None:
        invalid_mask = ~mask
        normal_vis[invalid_mask] = [0, 0, 0]

    # Map unit normals from [-1, 1] to [0, 255] RGB
    normal_vis = (normal_vis + 1.0) / 2.0
    normal_vis = (normal_vis * 255).astype(np.uint8)

    return normal_vis


def process_predictions_for_visualization(
    predictions, views, high_level_config, filter_black_bg=False, filter_white_bg=False
):
    """Extract depth, normals, and 3D points from predictions for visualization."""
    processed_data = {}

    for view_idx, view in enumerate(views):
        image = rgb(view["img"], norm_type=high_level_config["data_norm_type"])

        pred_pts3d = predictions["world_points"][view_idx]

        view_data = {
            "image": image[0],
            "points3d": pred_pts3d,
            "depth": None,
            "normal": None,
            "mask": None,
        }

        mask = predictions["final_mask"][view_idx].copy()

        if filter_black_bg:
            # Drop near-black pixels (summed RGB below 16)
            view_colors = image[0] * 255 if image[0].max() <= 1.0 else image[0]
            black_bg_mask = view_colors.sum(axis=2) >= 16
            mask = mask & black_bg_mask

        if filter_white_bg:
            # Drop near-white pixels (all channels above 240)
            view_colors = image[0] * 255 if image[0].max() <= 1.0 else image[0]
            white_bg_mask = ~(
                (view_colors[:, :, 0] > 240)
                & (view_colors[:, :, 1] > 240)
                & (view_colors[:, :, 2] > 240)
            )
            mask = mask & white_bg_mask

        view_data["mask"] = mask
        view_data["depth"] = predictions["depth"][view_idx].squeeze()

        normals, _ = points_to_normals(pred_pts3d, mask=view_data["mask"])
        view_data["normal"] = normals

        processed_data[view_idx] = view_data

    return processed_data


def reset_measure(processed_data):
    """Reset measurement points."""
    if processed_data is None or len(processed_data) == 0:
        return None, [], ""

    first_view = list(processed_data.values())[0]
    return first_view["image"], [], ""


def measure(
    processed_data, measure_points, current_view_selector, event: gr.SelectData
):
    """Handle distance-measurement clicks on images."""
    try:
        print(f"Measure called; selector: {current_view_selector}")

        if processed_data is None or len(processed_data) == 0:
            return None, [], "❌ No data available"

        try:
            current_view_index = int(current_view_selector.split()[1]) - 1
        except (AttributeError, IndexError, ValueError):
            current_view_index = 0

        print(f"Using view index: {current_view_index}")

        if current_view_index < 0 or current_view_index >= len(processed_data):
            current_view_index = 0

        view_keys = list(processed_data.keys())
        current_view = processed_data[view_keys[current_view_index]]

        if current_view is None:
            return None, [], "❌ No view data"

        point2d = event.index[0], event.index[1]
        print(f"Clicked point: {point2d}")

        if (
            current_view["mask"] is not None
            and 0 <= point2d[1] < current_view["mask"].shape[0]
            and 0 <= point2d[0] < current_view["mask"].shape[1]
        ):
            if not current_view["mask"][point2d[1], point2d[0]]:
                print(f"Clicked point {point2d} is in a masked region; ignoring")
                masked_image, _ = update_measure_view(
                    processed_data, current_view_index
                )
                return (
                    masked_image,
                    measure_points,
                    '<span style="color: red; font-weight: bold;">⚠️ Cannot measure in masked regions (shown in gray)</span>',
                )

        measure_points.append(point2d)

        image, _ = update_measure_view(processed_data, current_view_index)
        if image is None:
            return None, [], "❌ No image available"

        image = image.copy()
        points3d = current_view["points3d"]

        try:
            if image.dtype != np.uint8:
                if image.max() <= 1.0:
                    image = (image * 255).astype(np.uint8)
                else:
                    image = image.astype(np.uint8)
        except Exception as e:
            print(f"Image conversion error: {e}")
            return None, [], f"❌ Image conversion error: {e}"

        try:
            for p in measure_points:
                if 0 <= p[0] < image.shape[1] and 0 <= p[1] < image.shape[0]:
                    image = cv2.circle(
                        image, p, radius=5, color=(255, 0, 0), thickness=2
                    )
        except Exception as e:
            print(f"Drawing error: {e}")
            return None, [], f"❌ Drawing error: {e}"

        depth_text = ""
        try:
            for i, p in enumerate(measure_points):
                if (
                    current_view["depth"] is not None
                    and 0 <= p[1] < current_view["depth"].shape[0]
                    and 0 <= p[0] < current_view["depth"].shape[1]
                ):
                    d = current_view["depth"][p[1], p[0]]
                    depth_text += f"- **P{i + 1} depth: {d:.2f}m**\n"
                else:
                    if (
                        points3d is not None
                        and 0 <= p[1] < points3d.shape[0]
                        and 0 <= p[0] < points3d.shape[1]
                    ):
                        z = points3d[p[1], p[0], 2]
                        depth_text += f"- **P{i + 1} Z coordinate: {z:.2f}m**\n"
        except Exception as e:
            print(f"Depth text error: {e}")
            depth_text = f"❌ Depth computation error: {e}\n"

        if len(measure_points) == 2:
            try:
                point1, point2 = measure_points
                if (
                    0 <= point1[0] < image.shape[1]
                    and 0 <= point1[1] < image.shape[0]
                    and 0 <= point2[0] < image.shape[1]
                    and 0 <= point2[1] < image.shape[0]
                ):
                    image = cv2.line(
                        image, point1, point2, color=(255, 0, 0), thickness=2
                    )

                distance_text = "- **Distance: not computable**"
                if (
                    points3d is not None
                    and 0 <= point1[1] < points3d.shape[0]
                    and 0 <= point1[0] < points3d.shape[1]
                    and 0 <= point2[1] < points3d.shape[0]
                    and 0 <= point2[0] < points3d.shape[1]
                ):
                    try:
                        p1_3d = points3d[point1[1], point1[0]]
                        p2_3d = points3d[point2[1], point2[0]]
                        distance = np.linalg.norm(p1_3d - p2_3d)
                        distance_text = f"- **Distance: {distance:.2f}m**"
                    except Exception as e:
                        print(f"Distance computation error: {e}")
                        distance_text = f"- **Distance computation error: {e}**"

                measure_points = []
                text = depth_text + distance_text
                print(f"Measurement complete: {text}")
                return image, measure_points, text
            except Exception as e:
                print(f"Final measurement error: {e}")
                return None, [], f"❌ Measurement error: {e}"
        else:
            print(f"Single-point measurement: {depth_text}")
            return image, measure_points, depth_text

    except Exception as e:
        print(f"Measure function error: {e}")
        return None, [], f"❌ Measure function error: {e}"

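
# NOTE (worked example): the two-click measurement above is a plain Euclidean
# distance between the per-pixel world points, e.g.:
#     p1_3d = np.array([0.1, 0.0, 2.0])   # world point under the first click
#     p2_3d = np.array([0.4, 0.4, 2.0])   # world point under the second click
#     np.linalg.norm(p1_3d - p2_3d)       # -> 0.5 (meters, for metric outputs)
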

def clear_fields():
    """Clear the 3D viewers."""
    return None, None


def update_log():
    """Show a status message while processing."""
    return "🔄 Loading and reconstructing..."


def update_visualization(
    target_dir,
    frame_filter,
    show_cam,
    is_example,
    conf_thres=None,
    filter_black_bg=False,
    filter_white_bg=False,
    show_mesh=True,
):
    """Re-export and reload the GLB scene when visualization options change."""
    if is_example == "True":
        return gr.update(), "❌ No reconstruction available. Please click Reconstruct first."

    if not target_dir or target_dir == "None" or not os.path.isdir(target_dir):
        return gr.update(), "❌ No reconstruction available. Please click Reconstruct first."

    predictions_path = os.path.join(target_dir, "predictions.npz")
    if not os.path.exists(predictions_path):
        return gr.update(), "❌ No reconstruction available. Please run Reconstruct first."

    loaded = np.load(predictions_path, allow_pickle=True)
    predictions = {key: loaded[key] for key in loaded.keys()}

    glbfile = os.path.join(
        target_dir,
        f"glbscene_{frame_filter.replace('.', '_').replace(':', '').replace(' ', '_')}_cam{show_cam}_mesh{show_mesh}_black{filter_black_bg}_white{filter_white_bg}.glb",
    )

    glbscene = predictions_to_glb(
        predictions,
        filter_by_frames=frame_filter,
        show_cam=show_cam,
        mask_black_bg=filter_black_bg,
        mask_white_bg=filter_white_bg,
        as_mesh=show_mesh,
        conf_percentile=conf_thres,
    )
    glbscene.export(file_obj=glbfile)

    return glbfile, "✅ Visualization updated."


def update_all_views_on_filter_change(
    target_dir,
    filter_black_bg,
    filter_white_bg,
    processed_data,
    depth_view_selector,
    normal_view_selector,
    measure_view_selector,
):
    """Update all individual view tabs when background-filter checkboxes change."""
    if not target_dir or target_dir == "None" or not os.path.isdir(target_dir):
        return processed_data, None, None, None, []

    predictions_path = os.path.join(target_dir, "predictions.npz")
    if not os.path.exists(predictions_path):
        return processed_data, None, None, None, []

    try:
        loaded = np.load(predictions_path, allow_pickle=True)
        predictions = {key: loaded[key] for key in loaded.keys()}

        image_folder_path = os.path.join(target_dir, "images")
        views = load_images(image_folder_path)

        new_processed_data = process_predictions_for_visualization(
            predictions, views, high_level_config, filter_black_bg, filter_white_bg
        )

        try:
            depth_view_idx = (
                int(depth_view_selector.split()[1]) - 1 if depth_view_selector else 0
            )
        except (AttributeError, IndexError, ValueError):
            depth_view_idx = 0

        try:
            normal_view_idx = (
                int(normal_view_selector.split()[1]) - 1 if normal_view_selector else 0
            )
        except (AttributeError, IndexError, ValueError):
            normal_view_idx = 0

        try:
            measure_view_idx = (
                int(measure_view_selector.split()[1]) - 1
                if measure_view_selector
                else 0
            )
        except (AttributeError, IndexError, ValueError):
            measure_view_idx = 0

        depth_vis = update_depth_view(new_processed_data, depth_view_idx)
        normal_vis = update_normal_view(new_processed_data, normal_view_idx)
        measure_img, _ = update_measure_view(new_processed_data, measure_view_idx)

        return new_processed_data, depth_vis, normal_vis, measure_img, []

    except Exception as e:
        print(f"Error updating views on filter change: {e}")
        return processed_data, None, None, None, []


# ============================================================================
# Example Scene Functions
# ============================================================================

def get_scene_info(examples_dir):
    """Collect scene folders and their images from the examples directory."""
    import glob

    scenes = []
    if not os.path.exists(examples_dir):
        return scenes

    for scene_folder in sorted(os.listdir(examples_dir)):
        scene_path = os.path.join(examples_dir, scene_folder)
        if os.path.isdir(scene_path):
            image_extensions = ["*.jpg", "*.jpeg", "*.png", "*.bmp", "*.tiff", "*.tif"]
            image_files = []
            for ext in image_extensions:
                image_files.extend(glob.glob(os.path.join(scene_path, ext)))
                image_files.extend(glob.glob(os.path.join(scene_path, ext.upper())))

            if image_files:
                image_files = sorted(image_files)
                first_image = image_files[0]
                num_images = len(image_files)

                scenes.append(
                    {
                        "name": scene_folder,
                        "path": scene_path,
                        "thumbnail": first_image,
                        "num_images": num_images,
                        "image_files": image_files,
                    }
                )

    return scenes


def load_example_scene(scene_name, examples_dir="examples"):
    """Load a scene from the examples directory."""
    scenes = get_scene_info(examples_dir)

    selected_scene = None
    for scene in scenes:
        if scene["name"] == scene_name:
            selected_scene = scene
            break

    if selected_scene is None:
        return None, None, None, None, "❌ Scene not found"

    file_objects = []
    for image_path in selected_scene["image_files"]:
        file_objects.append(image_path)

    target_dir, image_paths = handle_uploads(file_objects, 1.0)

    return (
        None,
        None,
        target_dir,
        image_paths,
        f"✅ Loaded scene '{scene_name}' ({selected_scene['num_images']} images). Click 'Reconstruct' to start 3D processing.",
    )


# ============================================================================
# Gradio UI
# ============================================================================

theme = get_gradio_theme()

# Custom CSS to prevent UI jitter
CUSTOM_CSS = GRADIO_CSS + """
/* Keep components from stretching the layout */
.gradio-container {
    max-width: 100% !important;
}

/* Fix the Gallery height */
.gallery-container {
    max-height: 350px !important;
    overflow-y: auto !important;
}

/* Fix the File component height */
.file-preview {
    max-height: 200px !important;
    overflow-y: auto !important;
}

/* Fix the Video component height */
.video-container {
    max-height: 300px !important;
}

/* Keep Textboxes from growing without bound */
.textbox-container {
    max-height: 100px !important;
}

/* Keep the Tabs content area stable */
.tab-content {
    min-height: 550px !important;
}
"""

with gr.Blocks(theme=theme, css=CUSTOM_CSS, title="MapAnything V2 - 3D Reconstruction & Object Segmentation") as demo:
    is_example = gr.Textbox(label="is_example", visible=False, value="None")
    processed_data_state = gr.State(value=None)
    measure_points_state = gr.State(value=[])

    # Page header
    gr.HTML("""
    <div style="text-align: center; margin: 20px 0;">
        <h2 style="color: #1976D2; margin-bottom: 10px;">MapAnything V2 - 3D Reconstruction & Object Segmentation</h2>
        <p style="color: #666; font-size: 16px;">DBSCAN-based object recognition | Multi-view fusion | Adaptive parameter tuning</p>
    </div>
    """)

    target_dir_output = gr.Textbox(label="Target Dir", visible=False, value="None")

    with gr.Row(equal_height=False):
        # Left: input area
        with gr.Column(scale=1, min_width=300):
            gr.Markdown("### 📤 Input")

            with gr.Tabs():
                with gr.Tab("📷 Images"):
                    input_images = gr.File(
                        file_count="multiple",
                        label="Upload multiple images (3-10 recommended)",
                        interactive=True,
                        height=200
                    )

                with gr.Tab("🎥 Video"):
                    input_video = gr.Video(
                        label="Upload a video",
                        interactive=True,
                        height=300
                    )
                    s_time_interval = gr.Slider(
                        minimum=0.1, maximum=5.0, value=1.0, step=0.1,
                        label="Frame sampling interval (s)", interactive=True
                    )

            image_gallery = gr.Gallery(
                label="Image preview", columns=3, height=350,
                show_download_button=True, object_fit="contain", preview=True
            )

            with gr.Row():
                submit_btn = gr.Button("🚀 Reconstruct", variant="primary", scale=2)
                clear_btn = gr.ClearButton(
                    [input_video, input_images, target_dir_output, image_gallery],
                    value="🗑️ Clear", scale=1
                )

        # Right: output area
        with gr.Column(scale=2, min_width=600):
            gr.Markdown("### 🎯 Output")

            with gr.Tabs():
                with gr.Tab("🏗️ Raw 3D"):
                    reconstruction_output = gr.Model3D(
                        height=550, zoom_speed=0.5, pan_speed=0.5,
                        clear_color=[0.0, 0.0, 0.0, 0.0]
                    )

                with gr.Tab("🎨 Segmented 3D"):
                    segmented_output = gr.Model3D(
                        height=550, zoom_speed=0.5, pan_speed=0.5,
                        clear_color=[0.0, 0.0, 0.0, 0.0]
                    )

                with gr.Tab("📊 Depth Maps"):
                    with gr.Row(elem_classes=["navigation-row"]):
                        prev_depth_btn = gr.Button("◀", size="sm", scale=1)
                        depth_view_selector = gr.Dropdown(
                            choices=["View 1"], value="View 1",
                            label="View", scale=3, interactive=True
                        )
                        next_depth_btn = gr.Button("▶", size="sm", scale=1)
                    depth_map = gr.Image(
                        type="numpy", label="", format="png", interactive=False,
                        height=500
                    )

                with gr.Tab("🧭 Normal Maps"):
                    with gr.Row(elem_classes=["navigation-row"]):
                        prev_normal_btn = gr.Button("◀", size="sm", scale=1)
                        normal_view_selector = gr.Dropdown(
                            choices=["View 1"], value="View 1",
                            label="View", scale=3, interactive=True
                        )
                        next_normal_btn = gr.Button("▶", size="sm", scale=1)
                    normal_map = gr.Image(
                        type="numpy", label="", format="png", interactive=False,
                        height=500
                    )

                with gr.Tab("📏 Measure"):
                    gr.Markdown("**Click the image twice to measure a distance**")
                    with gr.Row(elem_classes=["navigation-row"]):
                        prev_measure_btn = gr.Button("◀", size="sm", scale=1)
                        measure_view_selector = gr.Dropdown(
                            choices=["View 1"], value="View 1",
                            label="View", scale=3, interactive=True
                        )
                        next_measure_btn = gr.Button("▶", size="sm", scale=1)
                    measure_image = gr.Image(
                        type="numpy", show_label=False,
                        format="webp", interactive=False, sources=[],
                        height=500
                    )
                    measure_text = gr.Markdown("")

            log_output = gr.Textbox(
                value="📌 Please upload images or a video, then click 'Reconstruct'",
                label="Status",
                interactive=False,
                lines=1,
                max_lines=1
            )

    # Advanced options (collapsible)
    with gr.Accordion("⚙️ Advanced Options", open=False):
        with gr.Row(equal_height=False):
            with gr.Column(scale=1, min_width=300):
                gr.Markdown("#### Visualization parameters")
                frame_filter = gr.Dropdown(
                    choices=["All"], value="All", label="Frames to show"
                )
                conf_thres = gr.Slider(
                    minimum=0, maximum=100, value=0, step=0.1,
                    label="Confidence threshold (percentile)"
                )
                show_cam = gr.Checkbox(label="Show cameras", value=True)
                show_mesh = gr.Checkbox(label="Show mesh", value=True)
                filter_black_bg = gr.Checkbox(label="Filter black background", value=False)
                filter_white_bg = gr.Checkbox(label="Filter white background", value=False)

            with gr.Column(scale=1, min_width=300):
                gr.Markdown("#### Reconstruction parameters")
                apply_mask_checkbox = gr.Checkbox(
                    label="Apply depth mask", value=True
                )

                gr.Markdown("#### Segmentation parameters")
                enable_segmentation = gr.Checkbox(
                    label="Enable semantic segmentation", value=False
                )

                text_prompt = gr.Textbox(
                    value=DEFAULT_TEXT_PROMPT,
                    label="Objects to detect (separated by .)",
                    placeholder="e.g. chair . table . sofa",
                    lines=2,
                    max_lines=2
                )

                with gr.Row():
                    detect_all_btn = gr.Button("🔍 Detect all", size="sm")
                    restore_default_btn = gr.Button("↻ Default", size="sm")

    # Example scenes (collapsible)
    with gr.Accordion("🖼️ Example Scenes", open=False):
        scenes = get_scene_info("examples")
        if scenes:
            for i in range(0, len(scenes), 4):
                with gr.Row(equal_height=True):
                    for j in range(4):
                        scene_idx = i + j
                        if scene_idx < len(scenes):
                            scene = scenes[scene_idx]
                            with gr.Column(scale=1, min_width=150):
                                scene_img = gr.Image(
                                    value=scene["thumbnail"],
                                    height=150,
                                    interactive=False,
                                    show_label=False,
                                    sources=[],
                                    container=False
                                )
                                gr.Markdown(
                                    f"**{scene['name']}** ({scene['num_images']} images)",
                                    elem_classes=["text-center"]
                                )
                                scene_img.select(
                                    fn=lambda name=scene["name"]: load_example_scene(name),
                                    outputs=[
                                        reconstruction_output, segmented_output,
                                        target_dir_output, image_gallery, log_output
                                    ]
                                )

    # === Event bindings ===

    # Segmentation option buttons
    detect_all_btn.click(
        fn=lambda: COMMON_OBJECTS_PROMPT,
        outputs=[text_prompt]
    )
    restore_default_btn.click(
        fn=lambda: DEFAULT_TEXT_PROMPT,
        outputs=[text_prompt]
    )

    # Auto-update the gallery on upload
    def update_gallery_on_unified_upload(files_video, files_images, interval):
        if not files_video and not files_images:
            return None, None, None, None
        # Combine both inputs into a single file list
        all_files = []
        if files_video:
            all_files.append(files_video)
        if files_images:
            all_files.extend(files_images)
        target_dir, image_paths = handle_uploads(all_files, interval)
        return (
            None,
            target_dir,
            image_paths,
            "✅ Upload complete. Click 'Reconstruct' to begin 3D processing.",
        )

    input_video.change(
        fn=update_gallery_on_unified_upload,
        inputs=[input_video, input_images, s_time_interval],
        outputs=[segmented_output, target_dir_output, image_gallery, log_output]
    )
    input_images.change(
        fn=update_gallery_on_unified_upload,
        inputs=[input_video, input_images, s_time_interval],
        outputs=[segmented_output, target_dir_output, image_gallery, log_output]
    )

    # Reconstruct button
    submit_btn.click(
        fn=clear_fields,
        outputs=[reconstruction_output, segmented_output]
    ).then(
        fn=update_log,
        outputs=[log_output]
    ).then(
        fn=gradio_demo,
        inputs=[
            target_dir_output, frame_filter, show_cam,
            filter_black_bg, filter_white_bg, conf_thres,
            apply_mask_checkbox, show_mesh,
            enable_segmentation, text_prompt
        ],
        outputs=[
            reconstruction_output, segmented_output, log_output, frame_filter,
            processed_data_state, depth_map, normal_map, measure_image,
            measure_text, depth_view_selector, normal_view_selector, measure_view_selector
        ]
    ).then(
        fn=lambda: "False",
        outputs=[is_example]
    )

    # Clear button
    clear_btn.add([reconstruction_output, segmented_output, log_output])

    # Live updates for visualization parameters
    for component in [frame_filter, show_cam, conf_thres, show_mesh]:
        component.change(
            fn=update_visualization,
            inputs=[
                target_dir_output, frame_filter, show_cam, is_example,
                conf_thres, filter_black_bg, filter_white_bg, show_mesh
            ],
            outputs=[reconstruction_output, log_output]
        )

    # Background filters refresh all view tabs
    for bg_filter in [filter_black_bg, filter_white_bg]:
        bg_filter.change(
            fn=update_all_views_on_filter_change,
            inputs=[
                target_dir_output, filter_black_bg, filter_white_bg, processed_data_state,
                depth_view_selector, normal_view_selector, measure_view_selector
            ],
            outputs=[processed_data_state, depth_map, normal_map, measure_image, measure_points_state]
        )

    # Depth map navigation
    prev_depth_btn.click(
        fn=lambda pd, cs: navigate_depth_view(pd, cs, -1),
        inputs=[processed_data_state, depth_view_selector],
        outputs=[depth_view_selector, depth_map]
    )
    next_depth_btn.click(
        fn=lambda pd, cs: navigate_depth_view(pd, cs, 1),
        inputs=[processed_data_state, depth_view_selector],
        outputs=[depth_view_selector, depth_map]
    )
    depth_view_selector.change(
        fn=lambda pd, sv: update_depth_view(pd, int(sv.split()[1]) - 1) if sv else None,
        inputs=[processed_data_state, depth_view_selector],
        outputs=[depth_map]
    )

    # Normal map navigation
    prev_normal_btn.click(
        fn=lambda pd, cs: navigate_normal_view(pd, cs, -1),
        inputs=[processed_data_state, normal_view_selector],
        outputs=[normal_view_selector, normal_map]
    )
    next_normal_btn.click(
        fn=lambda pd, cs: navigate_normal_view(pd, cs, 1),
        inputs=[processed_data_state, normal_view_selector],
        outputs=[normal_view_selector, normal_map]
    )
    normal_view_selector.change(
        fn=lambda pd, sv: update_normal_view(pd, int(sv.split()[1]) - 1) if sv else None,
        inputs=[processed_data_state, normal_view_selector],
        outputs=[normal_map]
    )

    # Measurement
    measure_image.select(
        fn=measure,
        inputs=[processed_data_state, measure_points_state, measure_view_selector],
        outputs=[measure_image, measure_points_state, measure_text]
    )
    prev_measure_btn.click(
        fn=lambda pd, cs: navigate_measure_view(pd, cs, -1),
        inputs=[processed_data_state, measure_view_selector],
        outputs=[measure_view_selector, measure_image, measure_points_state]
    )
    next_measure_btn.click(
        fn=lambda pd, cs: navigate_measure_view(pd, cs, 1),
        inputs=[processed_data_state, measure_view_selector],
        outputs=[measure_view_selector, measure_image, measure_points_state]
    )
    measure_view_selector.change(
        fn=lambda pd, sv: update_measure_view(pd, int(sv.split()[1]) - 1) if sv else (None, []),
        inputs=[processed_data_state, measure_view_selector],
        outputs=[measure_image, measure_points_state]
    )

# Startup banner
print("\n" + "=" * 60)
print("🚀 MapAnything V2 - 3D Reconstruction & Object Segmentation")
print("=" * 60)
print("📊 Core techniques: adaptive DBSCAN clustering + multi-view fusion")
print(f"🔧 Quality control: confidence >= {MIN_DETECTION_CONFIDENCE} | area >= {MIN_MASK_AREA}px")
print(f"🎯 Clustering radii: sofa {DBSCAN_EPS_CONFIG['sofa']}m | table {DBSCAN_EPS_CONFIG['table']}m | window {DBSCAN_EPS_CONFIG['window']}m | default {DBSCAN_EPS_CONFIG['default']}m")
print("=" * 60 + "\n")

demo.queue(max_size=20).launch(show_error=True, share=True, ssr_mode=False)

configs/calibration_benchmark.yaml
ADDED
@@ -0,0 +1,23 @@
defaults:
  - machine: aws
  - model: default
  - dataset: default
  - _self_

output_dir: ${hydra:run.dir}
root_data_dir: ${machine.root_data_dir}
mapanything_dataset_metadata_dir: ${machine.mapanything_dataset_metadata_dir}
root_pretrained_checkpoints_dir: ${machine.root_pretrained_checkpoints_dir}
root_experiments_dir: ${machine.root_experiments_dir}
root_uniception_pretrained_checkpoints_dir: ${machine.root_uniception_pretrained_checkpoints_dir}

### Benchmarking args
seed: 0
# Disable CUDNN benchmark (disable for variable-resolution & variable number-of-view training)
disable_cudnn_benchmark: true
# Batch size for inference (metrics are computed per multi-view set and averaged, not per batch of multi-view sets)
batch_size: 20
# Use mixed precision for inference
amp: 1
# Floating point type to use for mixed precision
amp_dtype: "bf16"
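
(For orientation: these configs compose via Hydra, with `${...}` interpolations resolved from the `machine`, `model`, and `dataset` groups. A minimal sketch of consuming them, assuming Hydra/OmegaConf are installed and `configs/` is the config root; the entry-point name is illustrative:)

import hydra
from omegaconf import DictConfig, OmegaConf

@hydra.main(config_path="configs", config_name="calibration_benchmark", version_base=None)
def main(cfg: DictConfig) -> None:
    # Interpolations like ${machine.root_data_dir} resolve on access
    print(OmegaConf.to_yaml(cfg))

if __name__ == "__main__":
    main()
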
configs/dataset/ase_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
defaults:
  - train: default
  - val: default
configs/dataset/ase_wai/train/default.yaml
ADDED
@@ -0,0 +1,26 @@
dataset_str:
  "ASEWAI(
  split='${dataset.ase_wai.train.split}',
  resolution=${dataset.ase_wai.train.dataset_resolution},
  principal_point_centered=${dataset.ase_wai.train.principal_point_centered},
  aug_crop=${dataset.ase_wai.train.aug_crop},
  transform='${dataset.ase_wai.train.transform}',
  data_norm_type='${dataset.ase_wai.train.data_norm_type}',
  ROOT='${dataset.ase_wai.train.ROOT}',
  dataset_metadata_dir='${dataset.ase_wai.train.dataset_metadata_dir}',
  overfit_num_sets=${dataset.ase_wai.train.overfit_num_sets},
  variable_num_views=${dataset.ase_wai.train.variable_num_views},
  num_views=${dataset.ase_wai.train.num_views},
  covisibility_thres=${dataset.ase_wai.train.covisibility_thres})"
split: 'train'
dataset_resolution: ${dataset.resolution_train}
principal_point_centered: ${dataset.principal_point_centered}
aug_crop: 16
transform: 'colorjitter+grayscale+gaublur'
data_norm_type: ${model.data_norm_type}
ROOT: ${root_data_dir}/ase
dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
overfit_num_sets: null
variable_num_views: ${dataset.train.variable_num_views}
num_views: ${dataset.num_views}
covisibility_thres: 0.25
configs/dataset/ase_wai/val/default.yaml
ADDED
@@ -0,0 +1,26 @@
dataset_str:
  "ASEWAI(
  split='${dataset.ase_wai.val.split}',
  resolution=${dataset.ase_wai.val.dataset_resolution},
  principal_point_centered=${dataset.ase_wai.val.principal_point_centered},
  seed=${dataset.ase_wai.val.seed},
  transform='${dataset.ase_wai.val.transform}',
  data_norm_type='${dataset.ase_wai.val.data_norm_type}',
  ROOT='${dataset.ase_wai.val.ROOT}',
  dataset_metadata_dir='${dataset.ase_wai.val.dataset_metadata_dir}',
  overfit_num_sets=${dataset.ase_wai.val.overfit_num_sets},
  variable_num_views=${dataset.ase_wai.val.variable_num_views},
  num_views=${dataset.ase_wai.val.num_views},
  covisibility_thres=${dataset.ase_wai.val.covisibility_thres})"
split: 'val'
dataset_resolution: ${dataset.resolution_val_ase}
principal_point_centered: ${dataset.principal_point_centered}
seed: 777
transform: 'imgnorm'
data_norm_type: ${model.data_norm_type}
ROOT: ${root_data_dir}/ase
dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
overfit_num_sets: null
variable_num_views: ${dataset.val.variable_num_views}
num_views: ${dataset.num_views}
covisibility_thres: 0.25
configs/dataset/bedlam_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
defaults:
  - train: default
  - val: default
configs/dataset/bedlam_wai/train/default.yaml
ADDED
@@ -0,0 +1,26 @@
dataset_str:
  "BedlamWAI(
  split='${dataset.bedlam_wai.train.split}',
  resolution=${dataset.bedlam_wai.train.dataset_resolution},
  principal_point_centered=${dataset.bedlam_wai.train.principal_point_centered},
  aug_crop=${dataset.bedlam_wai.train.aug_crop},
  transform='${dataset.bedlam_wai.train.transform}',
  data_norm_type='${dataset.bedlam_wai.train.data_norm_type}',
  ROOT='${dataset.bedlam_wai.train.ROOT}',
  dataset_metadata_dir='${dataset.bedlam_wai.train.dataset_metadata_dir}',
  overfit_num_sets=${dataset.bedlam_wai.train.overfit_num_sets},
  variable_num_views=${dataset.bedlam_wai.train.variable_num_views},
  num_views=${dataset.bedlam_wai.train.num_views},
  covisibility_thres=${dataset.bedlam_wai.train.covisibility_thres})"
split: 'train'
dataset_resolution: ${dataset.resolution_train}
principal_point_centered: ${dataset.principal_point_centered}
aug_crop: 16
transform: 'colorjitter+grayscale+gaublur'
data_norm_type: ${model.data_norm_type}
ROOT: ${root_data_dir}/bedlam
dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
overfit_num_sets: null
variable_num_views: ${dataset.train.variable_num_views}
num_views: ${dataset.num_views}
covisibility_thres: 0.25
configs/dataset/bedlam_wai/val/default.yaml
ADDED
@@ -0,0 +1,26 @@
dataset_str:
  "BedlamWAI(
  split='${dataset.bedlam_wai.val.split}',
  resolution=${dataset.bedlam_wai.val.dataset_resolution},
  principal_point_centered=${dataset.bedlam_wai.val.principal_point_centered},
  seed=${dataset.bedlam_wai.val.seed},
  transform='${dataset.bedlam_wai.val.transform}',
  data_norm_type='${dataset.bedlam_wai.val.data_norm_type}',
  ROOT='${dataset.bedlam_wai.val.ROOT}',
  dataset_metadata_dir='${dataset.bedlam_wai.val.dataset_metadata_dir}',
  overfit_num_sets=${dataset.bedlam_wai.val.overfit_num_sets},
  variable_num_views=${dataset.bedlam_wai.val.variable_num_views},
  num_views=${dataset.bedlam_wai.val.num_views},
  covisibility_thres=${dataset.bedlam_wai.val.covisibility_thres})"
split: 'val'
dataset_resolution: ${dataset.resolution_val_bedlam}
principal_point_centered: ${dataset.principal_point_centered}
seed: 777
transform: 'imgnorm'
data_norm_type: ${model.data_norm_type}
ROOT: ${root_data_dir}/bedlam
dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
overfit_num_sets: null
variable_num_views: ${dataset.val.variable_num_views}
num_views: ${dataset.num_views}
covisibility_thres: 0.25
configs/dataset/benchmark_512_eth3d_snpp_tav2.yaml
ADDED
@@ -0,0 +1,20 @@
defaults:
  - default

# Number of views parameter for the multi-view datasets
num_views: 2

# Test Resolution
resolution_test_eth3d: ${dataset.resolution_options.512_1_52_ar}
resolution_test_scannetpp: ${dataset.resolution_options.512_1_52_ar}
resolution_test_tav2_wb: ${dataset.resolution_options.512_1_00_ar}

# Test Set
# Sample 10 multi-view sets from each scene
# ETH3D: 13 scenes
# ScanNet++V2: 30 scenes
# TartanAirV2-WB: 5 scenes
test_dataset:
  "+ 130 @ ${dataset.eth3d_wai.test.dataset_str}
  + 300 @ ${dataset.scannetpp_wai.test.dataset_str}
  + 50 @ ${dataset.tav2_wb_wai.test.dataset_str}"
configs/dataset/benchmark_512_snpp_tav2.yaml
ADDED
@@ -0,0 +1,17 @@
defaults:
  - default

# Number of views parameter for the multi-view datasets
num_views: 2

# Test Resolution
resolution_test_scannetpp: ${dataset.resolution_options.512_1_52_ar}
resolution_test_tav2_wb: ${dataset.resolution_options.512_1_00_ar}

# Test Set
# Sample 10 multi-view sets from each scene
# ScanNet++V2: 30 scenes
# TartanAirV2-WB: 5 scenes
test_dataset:
  "+ 300 @ ${dataset.scannetpp_wai.test.dataset_str}
  + 50 @ ${dataset.tav2_wb_wai.test.dataset_str}"
configs/dataset/benchmark_518_eth3d_snpp_tav2.yaml
ADDED
@@ -0,0 +1,20 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 2
+
+# Test Resolution
+resolution_test_eth3d: ${dataset.resolution_options.518_1_52_ar}
+resolution_test_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_test_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+
+# Test Set
+# Sample 10 multi-view sets from each scene
+# ETH3D: 13 scenes
+# ScanNet++V2: 30 scenes
+# TartanAirV2-WB: 5 scenes
+test_dataset:
+  "+ 130 @ ${dataset.eth3d_wai.test.dataset_str}
+  + 300 @ ${dataset.scannetpp_wai.test.dataset_str}
+  + 50 @ ${dataset.tav2_wb_wai.test.dataset_str}"
configs/dataset/benchmark_518_snpp_tav2.yaml
ADDED
@@ -0,0 +1,17 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 2
+
+# Test Resolution
+resolution_test_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_test_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+
+# Test Set
+# Sample 10 multi-view sets from each scene
+# ScanNet++V2: 30 scenes
+# TartanAirV2-WB: 5 scenes
+test_dataset:
+  "+ 300 @ ${dataset.scannetpp_wai.test.dataset_str}
+  + 50 @ ${dataset.tav2_wb_wai.test.dataset_str}"
configs/dataset/benchmark_sv_calib_518_many_ar_eth3d_snpp_tav2.yaml
ADDED
@@ -0,0 +1,20 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 1
+
+# Test Resolution
+resolution_test_eth3d: ${dataset.resolution_options.518_many_ar}
+resolution_test_scannetpp: ${dataset.resolution_options.518_many_ar}
+resolution_test_tav2_wb: ${dataset.resolution_options.518_many_ar}
+
+# Test Set
+# Sample 20 frames from each scene
+# ETH3D: 13 scenes
+# ScanNet++V2: 30 scenes
+# TartanAirV2-WB: 5 scenes
+test_dataset:
+  "+ 260 @ ${dataset.eth3d_wai.test.dataset_str}
+  + 600 @ ${dataset.scannetpp_wai.test.dataset_str}
+  + 100 @ ${dataset.tav2_wb_wai.test.dataset_str}"
configs/dataset/blendedmvs_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
+defaults:
+  - train: default
+  - val: default
configs/dataset/blendedmvs_wai/train/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "BlendedMVSWAI(
+  split='${dataset.blendedmvs_wai.train.split}',
+  resolution=${dataset.blendedmvs_wai.train.dataset_resolution},
+  principal_point_centered=${dataset.blendedmvs_wai.train.principal_point_centered},
+  aug_crop=${dataset.blendedmvs_wai.train.aug_crop},
+  transform='${dataset.blendedmvs_wai.train.transform}',
+  data_norm_type='${dataset.blendedmvs_wai.train.data_norm_type}',
+  ROOT='${dataset.blendedmvs_wai.train.ROOT}',
+  dataset_metadata_dir='${dataset.blendedmvs_wai.train.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.blendedmvs_wai.train.overfit_num_sets},
+  variable_num_views=${dataset.blendedmvs_wai.train.variable_num_views},
+  num_views=${dataset.blendedmvs_wai.train.num_views},
+  covisibility_thres=${dataset.blendedmvs_wai.train.covisibility_thres})"
+split: 'train'
+dataset_resolution: ${dataset.resolution_train}
+principal_point_centered: ${dataset.principal_point_centered}
+aug_crop: 16
+transform: 'colorjitter+grayscale+gaublur'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/blendedmvs
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.train.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
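Across these files, train splits request `transform: 'colorjitter+grayscale+gaublur'` while val/test splits use plain `'imgnorm'`. A guess at how such a `+`-separated spec could map to torchvision ops is sketched below; the token-to-op mapping and all parameters are assumptions, and the repo's actual transform parser may differ.

```python
import torchvision.transforms as T

# Hypothetical mapping from transform-spec tokens to torchvision ops;
# parameter values here are guesses, not the repo's.
OPS = {
    "colorjitter": T.ColorJitter(0.3, 0.3, 0.3, 0.1),
    "grayscale": T.RandomGrayscale(p=0.05),
    "gaublur": T.GaussianBlur(kernel_size=3),
}

def build_transform(spec: str) -> T.Compose:
    # Split the '+'-separated spec and chain the corresponding ops.
    return T.Compose([OPS[token] for token in spec.split("+")])

train_tf = build_transform("colorjitter+grayscale+gaublur")
```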
configs/dataset/blendedmvs_wai/val/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "BlendedMVSWAI(
+  split='${dataset.blendedmvs_wai.val.split}',
+  resolution=${dataset.blendedmvs_wai.val.dataset_resolution},
+  principal_point_centered=${dataset.blendedmvs_wai.val.principal_point_centered},
+  seed=${dataset.blendedmvs_wai.val.seed},
+  transform='${dataset.blendedmvs_wai.val.transform}',
+  data_norm_type='${dataset.blendedmvs_wai.val.data_norm_type}',
+  ROOT='${dataset.blendedmvs_wai.val.ROOT}',
+  dataset_metadata_dir='${dataset.blendedmvs_wai.val.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.blendedmvs_wai.val.overfit_num_sets},
+  variable_num_views=${dataset.blendedmvs_wai.val.variable_num_views},
+  num_views=${dataset.blendedmvs_wai.val.num_views},
+  covisibility_thres=${dataset.blendedmvs_wai.val.covisibility_thres})"
+split: 'val'
+dataset_resolution: ${dataset.resolution_val_blendedmvs}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/blendedmvs
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.val.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/default.yaml
ADDED
@@ -0,0 +1,45 @@
+defaults:
+  - resolution_options: default
+  - ase_wai: default
+  - bedlam_wai: default
+  - blendedmvs_wai: default
+  - dl3dv_wai: default
+  - dtu_wai: default
+  - dynamicreplica_wai: default
+  - eth3d_wai: default
+  - gta_sfm_wai: default
+  - matrixcity_wai: default
+  - megadepth_wai: default
+  - mpsd_wai: default
+  - mvs_synth_wai: default
+  - paralleldomain4d_wai: default
+  - sailvos3d_wai: default
+  - scannetpp_wai: default
+  - spring_wai: default
+  - structured3d_wai: default
+  - tav2_wb_wai: default
+  - unrealstereo4k_wai: default
+  - xrooms_wai: default
+
+# Training Set, For example: BlendedMVS(split='train', resolution=(512, 384), transform=...)
+train_dataset: ???
+# Validation Set
+test_dataset: "[null]"
+# Number of workers for dataloader
+num_workers: 12
+# Default resolution for training
+resolution_train: ???
+# Default resolution for validation
+resolution_val: ???
+# Number of views parameter for multi-view datasets
+num_views: 2
+# Use a centered principal point for all images
+principal_point_centered: false
+# Default config for multi-view datasets
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+val:
+  variable_num_views: false
+test:
+  variable_num_views: false
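In this base config, `???` is OmegaConf's MISSING marker: `train_dataset`, `resolution_train`, and `resolution_val` are deliberately left mandatory, and a derived config (such as the `megatrain_*.yaml` files below) or a command-line override must supply them. A small sketch of that behavior:

```python
from omegaconf import OmegaConf
from omegaconf.errors import MissingMandatoryValue

# '???' marks a mandatory value with no default.
base = OmegaConf.create({"train_dataset": "???", "num_workers": 12})

try:
    _ = base.train_dataset  # accessing a MISSING key raises
except MissingMandatoryValue:
    print("train_dataset must be provided by a derived config or override")

merged = OmegaConf.merge(base, {"train_dataset": "+ 100 @ SomeDatasetWAI(...)"})
print(merged.train_dataset)  # + 100 @ SomeDatasetWAI(...)
```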
configs/dataset/dl3dv_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
+defaults:
+  - train: default
+  - val: default
configs/dataset/dl3dv_wai/train/default.yaml
ADDED
@@ -0,0 +1,28 @@
+dataset_str:
+  "DL3DVWAI(
+  split='${dataset.dl3dv_wai.train.split}',
+  resolution=${dataset.dl3dv_wai.train.dataset_resolution},
+  principal_point_centered=${dataset.dl3dv_wai.train.principal_point_centered},
+  aug_crop=${dataset.dl3dv_wai.train.aug_crop},
+  transform='${dataset.dl3dv_wai.train.transform}',
+  data_norm_type='${dataset.dl3dv_wai.train.data_norm_type}',
+  ROOT='${dataset.dl3dv_wai.train.ROOT}',
+  dataset_metadata_dir='${dataset.dl3dv_wai.train.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.dl3dv_wai.train.overfit_num_sets},
+  variable_num_views=${dataset.dl3dv_wai.train.variable_num_views},
+  num_views=${dataset.dl3dv_wai.train.num_views},
+  covisibility_thres=${dataset.dl3dv_wai.train.covisibility_thres},
+  mvs_confidence_filter_thres=${dataset.dl3dv_wai.train.mvs_confidence_filter_thres})"
+split: 'train'
+dataset_resolution: ${dataset.resolution_train}
+principal_point_centered: ${dataset.principal_point_centered}
+aug_crop: 16
+transform: 'colorjitter+grayscale+gaublur'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/dl3dv
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.train.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
+mvs_confidence_filter_thres: 0.25
configs/dataset/dl3dv_wai/val/default.yaml
ADDED
@@ -0,0 +1,28 @@
+dataset_str:
+  "DL3DVWAI(
+  split='${dataset.dl3dv_wai.val.split}',
+  resolution=${dataset.dl3dv_wai.val.dataset_resolution},
+  principal_point_centered=${dataset.dl3dv_wai.val.principal_point_centered},
+  seed=${dataset.dl3dv_wai.val.seed},
+  transform='${dataset.dl3dv_wai.val.transform}',
+  data_norm_type='${dataset.dl3dv_wai.val.data_norm_type}',
+  ROOT='${dataset.dl3dv_wai.val.ROOT}',
+  dataset_metadata_dir='${dataset.dl3dv_wai.val.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.dl3dv_wai.val.overfit_num_sets},
+  variable_num_views=${dataset.dl3dv_wai.val.variable_num_views},
+  num_views=${dataset.dl3dv_wai.val.num_views},
+  covisibility_thres=${dataset.dl3dv_wai.val.covisibility_thres},
+  mvs_confidence_filter_thres=${dataset.dl3dv_wai.val.mvs_confidence_filter_thres})"
+split: 'val'
+dataset_resolution: ${dataset.resolution_val_dl3dv}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/dl3dv
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.val.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
+mvs_confidence_filter_thres: 0.25
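DL3DV is the only dataset so far with an extra `mvs_confidence_filter_thres` key, plausibly because its depth comes from multi-view stereo rather than a sensor or renderer. The sketch below is a guess at the gating such a threshold implies; the helper name is hypothetical and the actual dataloader may mask or weight differently.

```python
import numpy as np

def filter_mvs_depth(depth: np.ndarray, confidence: np.ndarray,
                     thres: float = 0.25) -> np.ndarray:
    """Invalidate depth where MVS confidence falls below `thres`.

    Hypothetical sketch of what mvs_confidence_filter_thres likely gates.
    """
    out = depth.copy()
    out[confidence < thres] = 0.0  # 0 = no depth supervision at this pixel
    return out
```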
configs/dataset/dtu_wai/default.yaml
ADDED
@@ -0,0 +1,2 @@
+defaults:
+  - test: default
configs/dataset/dtu_wai/test/default.yaml
ADDED
@@ -0,0 +1,22 @@
+dataset_str:
+  "DTUWAI(
+  resolution=${dataset.dtu_wai.test.dataset_resolution},
+  principal_point_centered=${dataset.dtu_wai.test.principal_point_centered},
+  seed=${dataset.dtu_wai.test.seed},
+  transform='${dataset.dtu_wai.test.transform}',
+  data_norm_type='${dataset.dtu_wai.test.data_norm_type}',
+  ROOT='${dataset.dtu_wai.test.ROOT}',
+  dataset_metadata_dir='${dataset.dtu_wai.test.dataset_metadata_dir}',
+  variable_num_views=${dataset.dtu_wai.test.variable_num_views},
+  num_views=${dataset.dtu_wai.test.num_views},
+  covisibility_thres=${dataset.dtu_wai.test.covisibility_thres})"
+dataset_resolution: ${dataset.resolution_test_dtu}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/dtu
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+variable_num_views: ${dataset.test.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/dynamicreplica_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
+defaults:
+  - train: default
+  - val: default
configs/dataset/dynamicreplica_wai/train/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "DynamicReplicaWAI(
+  split='${dataset.dynamicreplica_wai.train.split}',
+  resolution=${dataset.dynamicreplica_wai.train.dataset_resolution},
+  principal_point_centered=${dataset.dynamicreplica_wai.train.principal_point_centered},
+  aug_crop=${dataset.dynamicreplica_wai.train.aug_crop},
+  transform='${dataset.dynamicreplica_wai.train.transform}',
+  data_norm_type='${dataset.dynamicreplica_wai.train.data_norm_type}',
+  ROOT='${dataset.dynamicreplica_wai.train.ROOT}',
+  dataset_metadata_dir='${dataset.dynamicreplica_wai.train.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.dynamicreplica_wai.train.overfit_num_sets},
+  variable_num_views=${dataset.dynamicreplica_wai.train.variable_num_views},
+  num_views=${dataset.dynamicreplica_wai.train.num_views},
+  covisibility_thres=${dataset.dynamicreplica_wai.train.covisibility_thres})"
+split: 'train'
+dataset_resolution: ${dataset.resolution_train}
+principal_point_centered: ${dataset.principal_point_centered}
+aug_crop: 16
+transform: 'colorjitter+grayscale+gaublur'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/dynamicreplica
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.train.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/dynamicreplica_wai/val/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "DynamicReplicaWAI(
+  split='${dataset.dynamicreplica_wai.val.split}',
+  resolution=${dataset.dynamicreplica_wai.val.dataset_resolution},
+  principal_point_centered=${dataset.dynamicreplica_wai.val.principal_point_centered},
+  seed=${dataset.dynamicreplica_wai.val.seed},
+  transform='${dataset.dynamicreplica_wai.val.transform}',
+  data_norm_type='${dataset.dynamicreplica_wai.val.data_norm_type}',
+  ROOT='${dataset.dynamicreplica_wai.val.ROOT}',
+  dataset_metadata_dir='${dataset.dynamicreplica_wai.val.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.dynamicreplica_wai.val.overfit_num_sets},
+  variable_num_views=${dataset.dynamicreplica_wai.val.variable_num_views},
+  num_views=${dataset.dynamicreplica_wai.val.num_views},
+  covisibility_thres=${dataset.dynamicreplica_wai.val.covisibility_thres})"
+split: 'val'
+dataset_resolution: ${dataset.resolution_val_dynamicreplica}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/dynamicreplica
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.val.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/eth3d_wai/default.yaml
ADDED
@@ -0,0 +1,2 @@
+defaults:
+  - test: default
configs/dataset/eth3d_wai/test/default.yaml
ADDED
@@ -0,0 +1,22 @@
+dataset_str:
+  "ETH3DWAI(
+  resolution=${dataset.eth3d_wai.test.dataset_resolution},
+  principal_point_centered=${dataset.eth3d_wai.test.principal_point_centered},
+  seed=${dataset.eth3d_wai.test.seed},
+  transform='${dataset.eth3d_wai.test.transform}',
+  data_norm_type='${dataset.eth3d_wai.test.data_norm_type}',
+  ROOT='${dataset.eth3d_wai.test.ROOT}',
+  dataset_metadata_dir='${dataset.eth3d_wai.test.dataset_metadata_dir}',
+  variable_num_views=${dataset.eth3d_wai.test.variable_num_views},
+  num_views=${dataset.eth3d_wai.test.num_views},
+  covisibility_thres=${dataset.eth3d_wai.test.covisibility_thres})"
+dataset_resolution: ${dataset.resolution_test_eth3d}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/eth3d
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+variable_num_views: ${dataset.test.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.025
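Note the ETH3D threshold is `0.025`, an order of magnitude below the `0.25` used by most other datasets; ETH3D's sparse laser-scan ground truth plausibly yields much lower pairwise covisibility scores. The sketch below illustrates the role such a threshold plays when sampling view sets; the helper is hypothetical, not the repo's sampler.

```python
import numpy as np

def covisible_partners(covis: np.ndarray, anchor: int, thres: float) -> list[int]:
    """Indices of views whose covisibility with `anchor` meets `thres`.

    `covis` is an (N, N) matrix of pairwise covisibility scores in [0, 1].
    """
    return [j for j in range(covis.shape[0])
            if j != anchor and covis[anchor, j] >= thres]

covis = np.array([[1.0, 0.03, 0.3],
                  [0.03, 1.0, 0.5],
                  [0.3, 0.5, 1.0]])
print(covisible_partners(covis, anchor=0, thres=0.25))   # [2]
print(covisible_partners(covis, anchor=0, thres=0.025))  # [1, 2]
```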
configs/dataset/gta_sfm_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
+defaults:
+  - train: default
+  - val: default
configs/dataset/gta_sfm_wai/train/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "GTASfMWAI(
+  split='${dataset.gta_sfm_wai.train.split}',
+  resolution=${dataset.gta_sfm_wai.train.dataset_resolution},
+  principal_point_centered=${dataset.gta_sfm_wai.train.principal_point_centered},
+  aug_crop=${dataset.gta_sfm_wai.train.aug_crop},
+  transform='${dataset.gta_sfm_wai.train.transform}',
+  data_norm_type='${dataset.gta_sfm_wai.train.data_norm_type}',
+  ROOT='${dataset.gta_sfm_wai.train.ROOT}',
+  dataset_metadata_dir='${dataset.gta_sfm_wai.train.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.gta_sfm_wai.train.overfit_num_sets},
+  variable_num_views=${dataset.gta_sfm_wai.train.variable_num_views},
+  num_views=${dataset.gta_sfm_wai.train.num_views},
+  covisibility_thres=${dataset.gta_sfm_wai.train.covisibility_thres})"
+split: 'train'
+dataset_resolution: ${dataset.resolution_train}
+principal_point_centered: ${dataset.principal_point_centered}
+aug_crop: 16
+transform: 'colorjitter+grayscale+gaublur'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/gta_sfm
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.train.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/gta_sfm_wai/val/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "GTASfMWAI(
+  split='${dataset.gta_sfm_wai.val.split}',
+  resolution=${dataset.gta_sfm_wai.val.dataset_resolution},
+  principal_point_centered=${dataset.gta_sfm_wai.val.principal_point_centered},
+  seed=${dataset.gta_sfm_wai.val.seed},
+  transform='${dataset.gta_sfm_wai.val.transform}',
+  data_norm_type='${dataset.gta_sfm_wai.val.data_norm_type}',
+  ROOT='${dataset.gta_sfm_wai.val.ROOT}',
+  dataset_metadata_dir='${dataset.gta_sfm_wai.val.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.gta_sfm_wai.val.overfit_num_sets},
+  variable_num_views=${dataset.gta_sfm_wai.val.variable_num_views},
+  num_views=${dataset.gta_sfm_wai.val.num_views},
+  covisibility_thres=${dataset.gta_sfm_wai.val.covisibility_thres})"
+split: 'val'
+dataset_resolution: ${dataset.resolution_val_gta_sfm}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/gta_sfm
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.val.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/matrixcity_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
+defaults:
+  - train: default
+  - val: default
configs/dataset/matrixcity_wai/train/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "MatrixCityWAI(
+  split='${dataset.matrixcity_wai.train.split}',
+  resolution=${dataset.matrixcity_wai.train.dataset_resolution},
+  principal_point_centered=${dataset.matrixcity_wai.train.principal_point_centered},
+  aug_crop=${dataset.matrixcity_wai.train.aug_crop},
+  transform='${dataset.matrixcity_wai.train.transform}',
+  data_norm_type='${dataset.matrixcity_wai.train.data_norm_type}',
+  ROOT='${dataset.matrixcity_wai.train.ROOT}',
+  dataset_metadata_dir='${dataset.matrixcity_wai.train.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.matrixcity_wai.train.overfit_num_sets},
+  variable_num_views=${dataset.matrixcity_wai.train.variable_num_views},
+  num_views=${dataset.matrixcity_wai.train.num_views},
+  covisibility_thres=${dataset.matrixcity_wai.train.covisibility_thres})"
+split: 'train'
+dataset_resolution: ${dataset.resolution_train}
+principal_point_centered: ${dataset.principal_point_centered}
+aug_crop: 16
+transform: 'colorjitter+grayscale+gaublur'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/matrixcity
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.train.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/matrixcity_wai/val/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "MatrixCityWAI(
+  split='${dataset.matrixcity_wai.val.split}',
+  resolution=${dataset.matrixcity_wai.val.dataset_resolution},
+  principal_point_centered=${dataset.matrixcity_wai.val.principal_point_centered},
+  seed=${dataset.matrixcity_wai.val.seed},
+  transform='${dataset.matrixcity_wai.val.transform}',
+  data_norm_type='${dataset.matrixcity_wai.val.data_norm_type}',
+  ROOT='${dataset.matrixcity_wai.val.ROOT}',
+  dataset_metadata_dir='${dataset.matrixcity_wai.val.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.matrixcity_wai.val.overfit_num_sets},
+  variable_num_views=${dataset.matrixcity_wai.val.variable_num_views},
+  num_views=${dataset.matrixcity_wai.val.num_views},
+  covisibility_thres=${dataset.matrixcity_wai.val.covisibility_thres})"
+split: 'val'
+dataset_resolution: ${dataset.resolution_val_matrixcity}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/matrixcity
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.val.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/megadepth_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
+defaults:
+  - train: default
+  - val: default
configs/dataset/megadepth_wai/train/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "MegaDepthWAI(
+  split='${dataset.megadepth_wai.train.split}',
+  resolution=${dataset.megadepth_wai.train.dataset_resolution},
+  principal_point_centered=${dataset.megadepth_wai.train.principal_point_centered},
+  aug_crop=${dataset.megadepth_wai.train.aug_crop},
+  transform='${dataset.megadepth_wai.train.transform}',
+  data_norm_type='${dataset.megadepth_wai.train.data_norm_type}',
+  ROOT='${dataset.megadepth_wai.train.ROOT}',
+  dataset_metadata_dir='${dataset.megadepth_wai.train.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.megadepth_wai.train.overfit_num_sets},
+  variable_num_views=${dataset.megadepth_wai.train.variable_num_views},
+  num_views=${dataset.megadepth_wai.train.num_views},
+  covisibility_thres=${dataset.megadepth_wai.train.covisibility_thres})"
+split: 'train'
+dataset_resolution: ${dataset.resolution_train}
+principal_point_centered: ${dataset.principal_point_centered}
+aug_crop: 16
+transform: 'colorjitter+grayscale+gaublur'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/megadepth
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.train.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/megadepth_wai/val/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "MegaDepthWAI(
+  split='${dataset.megadepth_wai.val.split}',
+  resolution=${dataset.megadepth_wai.val.dataset_resolution},
+  principal_point_centered=${dataset.megadepth_wai.val.principal_point_centered},
+  seed=${dataset.megadepth_wai.val.seed},
+  transform='${dataset.megadepth_wai.val.transform}',
+  data_norm_type='${dataset.megadepth_wai.val.data_norm_type}',
+  ROOT='${dataset.megadepth_wai.val.ROOT}',
+  dataset_metadata_dir='${dataset.megadepth_wai.val.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.megadepth_wai.val.overfit_num_sets},
+  variable_num_views=${dataset.megadepth_wai.val.variable_num_views},
+  num_views=${dataset.megadepth_wai.val.num_views},
+  covisibility_thres=${dataset.megadepth_wai.val.covisibility_thres})"
+split: 'val'
+dataset_resolution: ${dataset.resolution_val_megadepth}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/megadepth
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.val.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.25
configs/dataset/megatrain_11d_se_518_many_ar_48ipg_64g.yaml
ADDED
@@ -0,0 +1,53 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 4
+
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+
+# Train Resolution
+resolution_train: ${dataset.resolution_options.518_many_ar}
+
+# Validation Resolution
+resolution_val_ase: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_dl3dv: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_dynamicreplica: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_mpsd: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_mvs_synth: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_paralleldomain4d: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_sailvos3d: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_spring: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_unrealstereo4k: ${dataset.resolution_options.518_1_77_ar}
+
+# Training Set
+train_dataset:
+  "+ 2_450_000 @ ${dataset.ase_wai.train.dataset_str}
+  + 250_000 @ ${dataset.dl3dv_wai.train.dataset_str}
+  + 12_400 @ ${dataset.dynamicreplica_wai.train.dataset_str}
+  + 1_675_000 @ ${dataset.mpsd_wai.train.dataset_str}
+  + 3_000 @ ${dataset.mvs_synth_wai.train.dataset_str}
+  + 36_000 @ ${dataset.paralleldomain4d_wai.train.dataset_str}
+  + 4_000 @ ${dataset.sailvos3d_wai.train.dataset_str}
+  + 22_600 @ ${dataset.scannetpp_wai.train.dataset_str}
+  + 800 @ ${dataset.spring_wai.train.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.train.dataset_str}
+  + 200 @ ${dataset.unrealstereo4k_wai.train.dataset_str}"
+
+# Validation Set
+test_dataset:
+  "+ 4_000 @ ${dataset.ase_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dl3dv_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dynamicreplica_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mpsd_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mvs_synth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.paralleldomain4d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.sailvos3d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.scannetpp_wai.val.dataset_str}
+  + 500 @ ${dataset.spring_wai.val.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.val.dataset_str}
+  + 500 @ ${dataset.unrealstereo4k_wai.val.dataset_str}"
configs/dataset/megatrain_12d_518_many_ar_24ipg_16g.yaml
ADDED
@@ -0,0 +1,56 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 4
+
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+
+# Train Resolution
+resolution_train: ${dataset.resolution_options.518_many_ar}
+
+# Validation Resolution
+resolution_val_ase: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_blendedmvs: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_dynamicreplica: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_megadepth: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_mpsd: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_mvs_synth: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_paralleldomain4d: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_sailvos3d: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_spring: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_unrealstereo4k: ${dataset.resolution_options.518_1_77_ar}
+
+# Training Set
+train_dataset:
+  "+ 58_000 @ ${dataset.ase_wai.train.dataset_str}
+  + 58_000 @ ${dataset.blendedmvs_wai.train.dataset_str}
+  + 45_000 @ ${dataset.dynamicreplica_wai.train.dataset_str}
+  + 58_000 @ ${dataset.megadepth_wai.train.dataset_str}
+  + 58_000 @ ${dataset.mpsd_wai.train.dataset_str}
+  + 58_000 @ ${dataset.mvs_synth_wai.train.dataset_str}
+  + 58_000 @ ${dataset.paralleldomain4d_wai.train.dataset_str}
+  + 58_000 @ ${dataset.sailvos3d_wai.train.dataset_str}
+  + 58_000 @ ${dataset.scannetpp_wai.train.dataset_str}
+  + 2_000 @ ${dataset.spring_wai.train.dataset_str}
+  + 58_000 @ ${dataset.tav2_wb_wai.train.dataset_str}
+  + 5_500 @ ${dataset.unrealstereo4k_wai.train.dataset_str}"
+
+# Validation Set
+test_dataset:
+  "+ 4_000 @ ${dataset.ase_wai.val.dataset_str}
+  + 4_000 @ ${dataset.blendedmvs_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dynamicreplica_wai.val.dataset_str}
+  + 4_000 @ ${dataset.megadepth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mpsd_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mvs_synth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.paralleldomain4d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.sailvos3d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.scannetpp_wai.val.dataset_str}
+  + 500 @ ${dataset.spring_wai.val.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.val.dataset_str}
+  + 500 @ ${dataset.unrealstereo4k_wai.val.dataset_str}"
configs/dataset/megatrain_13d_512_many_ar_24ipg_16g.yaml
ADDED
@@ -0,0 +1,59 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 4
+
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+
+# Train Resolution
+resolution_train: ${dataset.resolution_options.512_many_ar}
+
+# Validation Resolution
+resolution_val_ase: ${dataset.resolution_options.512_1_00_ar}
+resolution_val_blendedmvs: ${dataset.resolution_options.512_1_33_ar}
+resolution_val_dl3dv: ${dataset.resolution_options.512_1_77_ar}
+resolution_val_dynamicreplica: ${dataset.resolution_options.512_1_77_ar}
+resolution_val_megadepth: ${dataset.resolution_options.512_1_52_ar}
+resolution_val_mpsd: ${dataset.resolution_options.512_1_77_ar}
+resolution_val_mvs_synth: ${dataset.resolution_options.512_1_77_ar}
+resolution_val_paralleldomain4d: ${dataset.resolution_options.512_1_33_ar}
+resolution_val_sailvos3d: ${dataset.resolution_options.512_1_52_ar}
+resolution_val_scannetpp: ${dataset.resolution_options.512_1_52_ar}
+resolution_val_spring: ${dataset.resolution_options.512_1_77_ar}
+resolution_val_tav2_wb: ${dataset.resolution_options.512_1_00_ar}
+resolution_val_unrealstereo4k: ${dataset.resolution_options.512_1_77_ar}
+
+# Training Set
+train_dataset:
+  "+ 52_500 @ ${dataset.ase_wai.train.dataset_str}
+  + 52_500 @ ${dataset.blendedmvs_wai.train.dataset_str}
+  + 52_500 @ ${dataset.dl3dv_wai.train.dataset_str}
+  + 40_000 @ ${dataset.dynamicreplica_wai.train.dataset_str}
+  + 52_500 @ ${dataset.megadepth_wai.train.dataset_str}
+  + 52_500 @ ${dataset.mpsd_wai.train.dataset_str}
+  + 52_500 @ ${dataset.mvs_synth_wai.train.dataset_str}
+  + 52_500 @ ${dataset.paralleldomain4d_wai.train.dataset_str}
+  + 52_500 @ ${dataset.sailvos3d_wai.train.dataset_str}
+  + 52_500 @ ${dataset.scannetpp_wai.train.dataset_str}
+  + 2_000 @ ${dataset.spring_wai.train.dataset_str}
+  + 52_500 @ ${dataset.tav2_wb_wai.train.dataset_str}
+  + 5_500 @ ${dataset.unrealstereo4k_wai.train.dataset_str}"
+
+# Validation Set
+test_dataset:
+  "+ 4_000 @ ${dataset.ase_wai.val.dataset_str}
+  + 4_000 @ ${dataset.blendedmvs_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dl3dv_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dynamicreplica_wai.val.dataset_str}
+  + 4_000 @ ${dataset.megadepth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mpsd_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mvs_synth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.paralleldomain4d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.sailvos3d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.scannetpp_wai.val.dataset_str}
+  + 500 @ ${dataset.spring_wai.val.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.val.dataset_str}
+  + 500 @ ${dataset.unrealstereo4k_wai.val.dataset_str}"
configs/dataset/megatrain_13d_518_many_ar_24ipg_16g.yaml
ADDED
@@ -0,0 +1,59 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 4
+
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+
+# Train Resolution
+resolution_train: ${dataset.resolution_options.518_many_ar}
+
+# Validation Resolution
+resolution_val_ase: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_blendedmvs: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_dl3dv: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_dynamicreplica: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_megadepth: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_mpsd: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_mvs_synth: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_paralleldomain4d: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_sailvos3d: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_spring: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_unrealstereo4k: ${dataset.resolution_options.518_1_77_ar}
+
+# Training Set
+train_dataset:
+  "+ 52_500 @ ${dataset.ase_wai.train.dataset_str}
+  + 52_500 @ ${dataset.blendedmvs_wai.train.dataset_str}
+  + 52_500 @ ${dataset.dl3dv_wai.train.dataset_str}
+  + 40_000 @ ${dataset.dynamicreplica_wai.train.dataset_str}
+  + 52_500 @ ${dataset.megadepth_wai.train.dataset_str}
+  + 52_500 @ ${dataset.mpsd_wai.train.dataset_str}
+  + 52_500 @ ${dataset.mvs_synth_wai.train.dataset_str}
+  + 52_500 @ ${dataset.paralleldomain4d_wai.train.dataset_str}
+  + 52_500 @ ${dataset.sailvos3d_wai.train.dataset_str}
+  + 52_500 @ ${dataset.scannetpp_wai.train.dataset_str}
+  + 2_000 @ ${dataset.spring_wai.train.dataset_str}
+  + 52_500 @ ${dataset.tav2_wb_wai.train.dataset_str}
+  + 5_500 @ ${dataset.unrealstereo4k_wai.train.dataset_str}"
+
+# Validation Set
+test_dataset:
+  "+ 4_000 @ ${dataset.ase_wai.val.dataset_str}
+  + 4_000 @ ${dataset.blendedmvs_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dl3dv_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dynamicreplica_wai.val.dataset_str}
+  + 4_000 @ ${dataset.megadepth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mpsd_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mvs_synth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.paralleldomain4d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.sailvos3d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.scannetpp_wai.val.dataset_str}
+  + 500 @ ${dataset.spring_wai.val.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.val.dataset_str}
+  + 500 @ ${dataset.unrealstereo4k_wai.val.dataset_str}"
configs/dataset/megatrain_13d_518_many_ar_48ipg_64g.yaml
ADDED
@@ -0,0 +1,59 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 4
+
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+
+# Train Resolution
+resolution_train: ${dataset.resolution_options.518_many_ar}
+
+# Validation Resolution
+resolution_val_ase: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_blendedmvs: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_dl3dv: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_dynamicreplica: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_megadepth: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_mpsd: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_mvs_synth: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_paralleldomain4d: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_sailvos3d: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_spring: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_unrealstereo4k: ${dataset.resolution_options.518_1_77_ar}
+
+# Training Set
+train_dataset:
+  "+ 420_000 @ ${dataset.ase_wai.train.dataset_str}
+  + 420_000 @ ${dataset.blendedmvs_wai.train.dataset_str}
+  + 420_000 @ ${dataset.dl3dv_wai.train.dataset_str}
+  + 320_000 @ ${dataset.dynamicreplica_wai.train.dataset_str}
+  + 420_000 @ ${dataset.megadepth_wai.train.dataset_str}
+  + 420_000 @ ${dataset.mpsd_wai.train.dataset_str}
+  + 420_000 @ ${dataset.mvs_synth_wai.train.dataset_str}
+  + 420_000 @ ${dataset.paralleldomain4d_wai.train.dataset_str}
+  + 420_000 @ ${dataset.sailvos3d_wai.train.dataset_str}
+  + 420_000 @ ${dataset.scannetpp_wai.train.dataset_str}
+  + 16_000 @ ${dataset.spring_wai.train.dataset_str}
+  + 420_000 @ ${dataset.tav2_wb_wai.train.dataset_str}
+  + 44_000 @ ${dataset.unrealstereo4k_wai.train.dataset_str}"
+
+# Validation Set
+test_dataset:
+  "+ 4_000 @ ${dataset.ase_wai.val.dataset_str}
+  + 4_000 @ ${dataset.blendedmvs_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dl3dv_wai.val.dataset_str}
+  + 4_000 @ ${dataset.dynamicreplica_wai.val.dataset_str}
+  + 4_000 @ ${dataset.megadepth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mpsd_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mvs_synth_wai.val.dataset_str}
+  + 4_000 @ ${dataset.paralleldomain4d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.sailvos3d_wai.val.dataset_str}
+  + 4_000 @ ${dataset.scannetpp_wai.val.dataset_str}
+  + 500 @ ${dataset.spring_wai.val.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.val.dataset_str}
+  + 500 @ ${dataset.unrealstereo4k_wai.val.dataset_str}"
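The filename suffixes appear to encode images per GPU and GPU count (`24ipg_16g` vs `48ipg_64g`), and the per-dataset sample counts in the two `megatrain_13d_518` variants scale by exactly the ratio of total images per step: (48 x 64) / (24 x 16) = 8, matching 52_500 -> 420_000 above. A quick arithmetic check:

```python
# Per-dataset epoch sizes from the 24ipg_16g config...
small = {"ase": 52_500, "dynamicreplica": 40_000,
         "spring": 2_000, "unrealstereo4k": 5_500}

# ...scaled by the ratio of total images per step between the two setups.
scale = (48 * 64) // (24 * 16)  # = 8
large = {name: count * scale for name, count in small.items()}
print(large)
# {'ase': 420000, 'dynamicreplica': 320000, 'spring': 16000, 'unrealstereo4k': 44000}
```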
configs/dataset/megatrain_6d_518_many_ar_48ipg_64g.yaml
ADDED
@@ -0,0 +1,38 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 4
+
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+
+# Train Resolution
+resolution_train: ${dataset.resolution_options.518_many_ar}
+
+# Validation Resolution
+resolution_val_blendedmvs: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_mpsd: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_spring: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_unrealstereo4k: ${dataset.resolution_options.518_1_77_ar}
+
+# Training Set
+train_dataset:
+  "+ 1_120_000 @ ${dataset.blendedmvs_wai.train.dataset_str}
+  + 1_120_000 @ ${dataset.mpsd_wai.train.dataset_str}
+  + 1_120_000 @ ${dataset.scannetpp_wai.train.dataset_str}
+  + 44_000 @ ${dataset.spring_wai.train.dataset_str}
+  + 1_120_000 @ ${dataset.tav2_wb_wai.train.dataset_str}
+  + 116_000 @ ${dataset.unrealstereo4k_wai.train.dataset_str}"
+
+# Validation Set
+test_dataset:
+  "+ 4_000 @ ${dataset.blendedmvs_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mpsd_wai.val.dataset_str}
+  + 4_000 @ ${dataset.scannetpp_wai.val.dataset_str}
+  + 500 @ ${dataset.spring_wai.val.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.val.dataset_str}
+  + 500 @ ${dataset.unrealstereo4k_wai.val.dataset_str}"
configs/dataset/megatrain_6d_518_many_ar_48ipg_8g.yaml
ADDED
@@ -0,0 +1,38 @@
+defaults:
+  - default
+
+# Number of views parameter for the multi-view datasets
+num_views: 4
+
+train:
+  # If True, the number of views can vary from batch to batch. The maximum number of views is num_views and minimum is 2. (On by default for N-view training)
+  variable_num_views: true
+
+# Train Resolution
+resolution_train: ${dataset.resolution_options.518_many_ar}
+
+# Validation Resolution
+resolution_val_blendedmvs: ${dataset.resolution_options.518_1_33_ar}
+resolution_val_mpsd: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_scannetpp: ${dataset.resolution_options.518_1_52_ar}
+resolution_val_spring: ${dataset.resolution_options.518_1_77_ar}
+resolution_val_tav2_wb: ${dataset.resolution_options.518_1_00_ar}
+resolution_val_unrealstereo4k: ${dataset.resolution_options.518_1_77_ar}
+
+# Training Set
+train_dataset:
+  "+ 140_000 @ ${dataset.blendedmvs_wai.train.dataset_str}
+  + 140_000 @ ${dataset.mpsd_wai.train.dataset_str}
+  + 140_000 @ ${dataset.scannetpp_wai.train.dataset_str}
+  + 5_500 @ ${dataset.spring_wai.train.dataset_str}
+  + 140_000 @ ${dataset.tav2_wb_wai.train.dataset_str}
+  + 14_500 @ ${dataset.unrealstereo4k_wai.train.dataset_str}"
+
+# Validation Set
+test_dataset:
+  "+ 4_000 @ ${dataset.blendedmvs_wai.val.dataset_str}
+  + 4_000 @ ${dataset.mpsd_wai.val.dataset_str}
+  + 4_000 @ ${dataset.scannetpp_wai.val.dataset_str}
+  + 500 @ ${dataset.spring_wai.val.dataset_str}
+  + 4_000 @ ${dataset.tav2_wb_wai.val.dataset_str}
+  + 500 @ ${dataset.unrealstereo4k_wai.val.dataset_str}"
configs/dataset/mpsd_wai/default.yaml
ADDED
@@ -0,0 +1,3 @@
+defaults:
+  - train: default
+  - val: default
configs/dataset/mpsd_wai/train/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "MPSDWAI(
+  split='${dataset.mpsd_wai.train.split}',
+  resolution=${dataset.mpsd_wai.train.dataset_resolution},
+  principal_point_centered=${dataset.mpsd_wai.train.principal_point_centered},
+  aug_crop=${dataset.mpsd_wai.train.aug_crop},
+  transform='${dataset.mpsd_wai.train.transform}',
+  data_norm_type='${dataset.mpsd_wai.train.data_norm_type}',
+  ROOT='${dataset.mpsd_wai.train.ROOT}',
+  dataset_metadata_dir='${dataset.mpsd_wai.train.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.mpsd_wai.train.overfit_num_sets},
+  variable_num_views=${dataset.mpsd_wai.train.variable_num_views},
+  num_views=${dataset.mpsd_wai.train.num_views},
+  covisibility_thres=${dataset.mpsd_wai.train.covisibility_thres})"
+split: 'train'
+dataset_resolution: ${dataset.resolution_train}
+principal_point_centered: ${dataset.principal_point_centered}
+aug_crop: 16
+transform: 'colorjitter+grayscale+gaublur'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/mpsd
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.train.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.15
configs/dataset/mpsd_wai/val/default.yaml
ADDED
@@ -0,0 +1,26 @@
+dataset_str:
+  "MPSDWAI(
+  split='${dataset.mpsd_wai.val.split}',
+  resolution=${dataset.mpsd_wai.val.dataset_resolution},
+  principal_point_centered=${dataset.mpsd_wai.val.principal_point_centered},
+  seed=${dataset.mpsd_wai.val.seed},
+  transform='${dataset.mpsd_wai.val.transform}',
+  data_norm_type='${dataset.mpsd_wai.val.data_norm_type}',
+  ROOT='${dataset.mpsd_wai.val.ROOT}',
+  dataset_metadata_dir='${dataset.mpsd_wai.val.dataset_metadata_dir}',
+  overfit_num_sets=${dataset.mpsd_wai.val.overfit_num_sets},
+  variable_num_views=${dataset.mpsd_wai.val.variable_num_views},
+  num_views=${dataset.mpsd_wai.val.num_views},
+  covisibility_thres=${dataset.mpsd_wai.val.covisibility_thres})"
+split: 'val'
+dataset_resolution: ${dataset.resolution_val_mpsd}
+principal_point_centered: ${dataset.principal_point_centered}
+seed: 777
+transform: 'imgnorm'
+data_norm_type: ${model.data_norm_type}
+ROOT: ${root_data_dir}/mpsd
+dataset_metadata_dir: ${mapanything_dataset_metadata_dir}
+overfit_num_sets: null
+variable_num_views: ${dataset.val.variable_num_views}
+num_views: ${dataset.num_views}
+covisibility_thres: 0.15