Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes.
- .gitattributes +2 -0
- .gitignore +192 -0
- 1gpu.yaml +15 -0
- LICENSE +661 -0
- README.md +206 -7
- README_zh.md +203 -0
- assets/fig_teaser.png +3 -0
- configs/mvdiffusion-joint-ortho-6views.yaml +42 -0
- docker/Dockerfile +56 -0
- docker/README.md +57 -0
- docker/requirements.txt +36 -0
- example_images/14_10_29_489_Tiger_1__1.png +0 -0
- example_images/box.png +0 -0
- example_images/bread.png +0 -0
- example_images/cat.png +0 -0
- example_images/cat_head.png +0 -0
- example_images/chili.png +0 -0
- example_images/duola.png +0 -0
- example_images/halloween.png +0 -0
- example_images/head.png +0 -0
- example_images/kettle.png +0 -0
- example_images/kunkun.png +0 -0
- example_images/milk.png +0 -0
- example_images/owl.png +0 -0
- example_images/poro.png +0 -0
- example_images/pumpkin.png +0 -0
- example_images/skull.png +0 -0
- example_images/stone.png +0 -0
- example_images/teapot.png +0 -0
- example_images/tiger-head-3d-model-obj-stl.png +0 -0
- gradio_app_mv.py +439 -0
- gradio_app_recon.py +438 -0
- instant-nsr-pl/README.md +122 -0
- instant-nsr-pl/configs/neuralangelo-ortho-wmask.yaml +145 -0
- instant-nsr-pl/datasets/__init__.py +16 -0
- instant-nsr-pl/datasets/blender.py +135 -0
- instant-nsr-pl/datasets/colmap.py +332 -0
- instant-nsr-pl/datasets/colmap_utils.py +295 -0
- instant-nsr-pl/datasets/dtu.py +201 -0
- instant-nsr-pl/datasets/fixed_poses/000_back_RT.txt +3 -0
- instant-nsr-pl/datasets/fixed_poses/000_back_left_RT.txt +3 -0
- instant-nsr-pl/datasets/fixed_poses/000_back_right_RT.txt +3 -0
- instant-nsr-pl/datasets/fixed_poses/000_front_RT.txt +3 -0
- instant-nsr-pl/datasets/fixed_poses/000_front_left_RT.txt +3 -0
- instant-nsr-pl/datasets/fixed_poses/000_front_right_RT.txt +3 -0
- instant-nsr-pl/datasets/fixed_poses/000_left_RT.txt +3 -0
- instant-nsr-pl/datasets/fixed_poses/000_right_RT.txt +3 -0
- instant-nsr-pl/datasets/fixed_poses/000_top_RT.txt +3 -0
- instant-nsr-pl/datasets/ortho.py +287 -0
- instant-nsr-pl/datasets/utils.py +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/fig_teaser.png filter=lfs diff=lfs merge=lfs -text
+triton-2.0.0-cp310-cp310-win_amd64.whl filter=lfs diff=lfs merge=lfs -text
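Entries like these are typically produced with `git lfs track`, which appends the corresponding filter lines to `.gitattributes`; as a sketch:

```bash
# Track the two new large files with Git LFS (writes the two added lines above)
git lfs track "assets/fig_teaser.png" "triton-2.0.0-cp310-cp310-win_amd64.whl"
```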
.gitignore
ADDED
@@ -0,0 +1,192 @@
+# Initially taken from Github's Python gitignore file
+
+ckpts
+sam_pt
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# tests and logs
+tests/fixtures/cached_*_text.txt
+logs/
+lightning_logs/
+lang_code_data/
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# vscode
+.vs
+.vscode
+
+# Pycharm
+.idea
+
+# TF code
+tensorflow_code
+
+# Models
+proc_data
+
+# examples
+runs
+/runs_old
+/wandb
+/examples/runs
+/examples/**/*.args
+/examples/rag/sweep
+
+# data
+/data
+serialization_dir
+
+# emacs
+*.*~
+debug.env
+
+# vim
+.*.swp
+
+#ctags
+tags
+
+# pre-commit
+.pre-commit*
+
+# .lock
+*.lock
+
+# DS_Store (MacOS)
+.DS_Store
+# RL pipelines may produce mp4 outputs
+*.mp4
+
+# dependencies
+/transformers
+
+# ruff
+.ruff_cache
+
+# ckpts
+*.ckpt
+
+outputs/*
+
+NeuS/exp/*
+NeuS/test_scenes/*
+NeuS/mesh2tex/*
+neus_configs
+vast/*
+render_results
+experiments/*
+ckpts/*
+neus/*
+instant-nsr-pl/exp/*
1gpu.yaml
ADDED
@@ -0,0 +1,15 @@
+compute_environment: LOCAL_MACHINE
+distributed_type: 'NO'
+downcast_bf16: 'no'
+gpu_ids: '0'
+machine_rank: 0
+main_training_function: main
+mixed_precision: 'no'
+num_machines: 1
+num_processes: 1
+rdzv_backend: static
+same_network: true
+tpu_env: []
+tpu_use_cluster: false
+tpu_use_sudo: false
+use_cpu: false
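This is the single-GPU `accelerate` launcher config consumed by the inference commands in the README below; for example (command reproduced from the README's example):

```bash
accelerate launch --config_file 1gpu.yaml test_mvdiffusion_seq.py \
    --config configs/mvdiffusion-joint-ortho-6views.yaml validation_dataset.root_dir=./example_images \
    validation_dataset.filepaths=['owl.png'] save_dir=./outputs
```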
LICENSE
ADDED
@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[... the remainder of this 661-line file is the standard, unmodified GNU
+AGPL-3.0 license text, through "END OF TERMS AND CONDITIONS" and "How to
+Apply These Terms to Your New Programs"; see <https://www.gnu.org/licenses/>. ...]
README.md
CHANGED
@@ -1,12 +1,211 @@
 ---
 title: Wonder3D
-
-colorFrom: blue
-colorTo: pink
+app_file: gradio_app_mv.py
 sdk: gradio
-sdk_version:
-app_file: app.py
-pinned: false
+sdk_version: 3.50.2
 ---
+**Chinese version: [README_zh.md](README_zh.md)**
+# Wonder3D
+Single Image to 3D using Cross-Domain Diffusion
+## [Paper](https://arxiv.org/abs/2310.15008) | [Project page](https://www.xxlong.site/Wonder3D/) | [Hugging Face Demo](https://huggingface.co/spaces/flamehaze1115/Wonder3D-demo) | [Colab from @camenduru](https://github.com/camenduru/Wonder3D-colab)
 
-
+![](assets/fig_teaser.png)
+
+Wonder3D reconstructs highly detailed textured meshes from a single-view image in only 2~3 minutes. Wonder3D first generates consistent multi-view normal maps with corresponding color images via a cross-domain diffusion model, and then leverages a novel normal-fusion method to achieve fast and high-quality reconstruction.
+
+## Usage
+```python
+# First clone the repo, then run the following from the repo root.
+
+import torch
+import requests
+from PIL import Image
+import numpy as np
+from torchvision.utils import make_grid, save_image
+from diffusers import DiffusionPipeline  # only tested on diffusers[torch]==0.19.3; newer versions of diffusers may conflict
+
+def load_wonder3d_pipeline():
+    pipeline = DiffusionPipeline.from_pretrained(
+        'flamehaze1115/wonder3d-v1.0',  # or use the local checkpoint './ckpts'
+        custom_pipeline='flamehaze1115/wonder3d-pipeline',
+        torch_dtype=torch.float16
+    )
+
+    # enable xformers
+    pipeline.unet.enable_xformers_memory_efficient_attention()
+
+    if torch.cuda.is_available():
+        pipeline.to('cuda:0')
+    return pipeline
+
+pipeline = load_wonder3d_pipeline()
+
+# Download an example image.
+cond = Image.open(requests.get("https://d.skis.ltd/nrp/sample-data/lysol.png", stream=True).raw)
+
+# The object should be located in the center and resized to 80% of image height.
+cond = Image.fromarray(np.array(cond)[:, :, :3])
+
+# Run the pipeline!
+images = pipeline(cond, num_inference_steps=20, output_type='pt', guidance_scale=1.0).images
+
+# Lay the outputs out as 2 rows of 6 views. Note: make_grid has no `ncol`
+# argument; with nrow=6 the second row is implied.
+result = make_grid(images, nrow=6, padding=0)
+
+save_image(result, 'result.png')
+```
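As a minimal follow-up sketch for separating the two generated domains (this assumes the pipeline returns the 6 normal maps followed by the 6 color views, matching the 6-view cross-domain setup described above; check the ordering of your outputs):

```python
# Assumption: `images` holds 12 tensors -- 6 normal maps, then 6 color views.
normals, colors = images[:6], images[6:]
save_image(make_grid(normals, nrow=6, padding=0), 'normals.png')
save_image(make_grid(colors, nrow=6, padding=0), 'colors.png')
```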
+
+## Collaborations
+Our overarching mission is to enhance the speed, affordability, and quality of 3D AIGC, making the creation of 3D content accessible to all. While significant progress has been achieved in recent years, we acknowledge there is still a substantial journey ahead. We enthusiastically invite you to engage in discussions and explore potential collaborations in any capacity. <span style="color:red">**If you're interested in connecting or partnering with us, please don't hesitate to reach out via email (xxlong@connect.hku.hk)**</span>.
+
+## More features
+
+The repo is still under construction; thanks for your patience.
+- [x] Local gradio demo.
+- [x] Detailed tutorial.
+- [x] GUI demo for mesh reconstruction.
+- [x] Windows support.
+- [x] Docker support.
+
+## Schedule
+- [x] Inference code and pretrained models.
+- [x] Huggingface demo.
+- [ ] New model with higher resolution.
+
+
+### Preparation for inference
+
+#### Linux System Setup
+```bash
+conda create -n wonder3d
+conda activate wonder3d
+pip install -r requirements.txt
+pip install git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch
+```
+#### Windows System Setup
+
+Please switch to the `main-windows` branch for details of the Windows setup.
+
+#### Docker Setup
+See [docker/README.md](docker/README.md).
+
+### Inference
+1. Optional. If you have trouble connecting to Hugging Face, make sure you have downloaded the following models.
+Download the [checkpoints](https://connecthkuhk-my.sharepoint.com/:f:/g/personal/xxlong_connect_hku_hk/Ej7fMT1PwXtKvsELTvDuzuMBebQXEkmf2IwhSjBWtKAJiA) and put them into the root folder.
+
+If you are in mainland China, you may download via [aliyun](https://www.alipan.com/s/T4rLUNAVq6V).
+
+```bash
+Wonder3D
+|-- ckpts
+    |-- unet
+    |-- scheduler
+    |-- vae
+    ...
+```
+Then modify the file ./configs/mvdiffusion-joint-ortho-6views.yaml, setting `pretrained_model_name_or_path="./ckpts"`.
+
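As a sketch, the edited line in that YAML would look like this (only this key changes; everything else in the config stays as shipped):

```yaml
# configs/mvdiffusion-joint-ortho-6views.yaml (excerpt)
pretrained_model_name_or_path: "./ckpts"  # point at the local checkpoint folder
```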
+2. Download the [SAM](https://huggingface.co/spaces/abhishek/StableSAM/blob/main/sam_vit_h_4b8939.pth) model and put it into the `sam_pt` folder:
+```
+Wonder3D
+|-- sam_pt
+    |-- sam_vit_h_4b8939.pth
+```
115 |
+
3. Predict foreground mask as the alpha channel. We use [Clipdrop](https://clipdrop.co/remove-background) to segment the foreground object interactively.
|
116 |
+
You may also use `rembg` to remove the backgrounds.
|
117 |
+
```bash
|
118 |
+
# !pip install rembg
|
119 |
+
import rembg
|
120 |
+
result = rembg.remove(result)
|
121 |
+
result.show()
|
122 |
+
```
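`rembg` also ships a command-line interface, which may be more convenient for batches of images; assuming a standard `rembg` install:

```bash
rembg i input.png output.png
```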
4. Run Wonder3D to produce multiview-consistent normal maps and color images, then check the results in the folder `./outputs`. (We use `rembg` to remove the backgrounds of the results, but the segmentations are not always perfect. Consider using [Clipdrop](https://clipdrop.co/remove-background) to get masks for the generated normal maps and color images, since the quality of the masks significantly influences the reconstructed mesh quality.)
```bash
accelerate launch --config_file 1gpu.yaml test_mvdiffusion_seq.py \
            --config configs/mvdiffusion-joint-ortho-6views.yaml validation_dataset.root_dir={your_data_path} \
            validation_dataset.filepaths=['your_img_file'] save_dir={your_save_path}
```

See an example:

```bash
accelerate launch --config_file 1gpu.yaml test_mvdiffusion_seq.py \
            --config configs/mvdiffusion-joint-ortho-6views.yaml validation_dataset.root_dir=./example_images \
            validation_dataset.filepaths=['owl.png'] save_dir=./outputs
```
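Judging from the demo code in this repo (the gradio apps and the instant-nsr-pl command below), the results land in a layout roughly like this; treat it as a sketch rather than a guarantee:

```
outputs/
|-- cropsize-192-cfg1.0/
    |-- owl/                   # one folder per scene
        |-- rgb_000_front.png  # six color views, one per view name
        |-- normals/           # six normal maps
        |-- masked_colors/     # color views with backgrounds removed
```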

#### Interactive inference: run your local gradio demo (only generates normals and colors, without reconstruction)
```bash
python gradio_app_mv.py  # generate multi-view normals and colors
```

5. Mesh Extraction

#### Instant-NSR Mesh Extraction

```bash
cd ./instant-nsr-pl
python launch.py --config configs/neuralangelo-ortho-wmask.yaml --gpu 0 --train dataset.root_dir=../{your_save_path}/cropsize-{crop_size}-cfg{guidance_scale:.1f}/ dataset.scene={scene}
```

See an example:

```bash
cd ./instant-nsr-pl
python launch.py --config configs/neuralangelo-ortho-wmask.yaml --gpu 0 --train dataset.root_dir=../outputs/cropsize-192-cfg1.0/ dataset.scene=owl
```
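The extracted mesh is written under the experiment folder; the reconstruction gradio demo in this repo locates it with the glob `instant-nsr-pl/exp/{scene}/*/save/*.obj`, so for the example above you can find it with:

```bash
ls instant-nsr-pl/exp/owl/*/save/*.obj
```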

Our generated normals and color images are defined in orthographic views, so the reconstructed mesh is also in orthographic camera space. If you use MeshLab to view the meshes, click `Toggle Orthographic Camera` in the `View` tab.

#### Interactive inference: run your local gradio demo (first generates normals and colors, then does the reconstruction; no need to run gradio_app_mv.py first)
```bash
python gradio_app_recon.py
```

#### NeuS-based Mesh Extraction

Since there have been many complaints about the Windows setup of instant-nsr-pl, we also provide a NeuS-based reconstruction, which may avoid those dependency problems.

NeuS consumes less GPU memory and favors smooth surfaces without parameter tuning. However, NeuS takes more time and its texture may be less sharp. If you are not sensitive to runtime, we recommend NeuS for optimization due to its robustness.

```bash
cd ./NeuS
bash run.sh output_folder_path scene_name
```
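As a concrete instance — assuming `run.sh` takes the multi-view output folder and the scene name in the same way the instant-nsr-pl example above does (this pairing is our reading of the arguments, not documented behavior):

```bash
cd ./NeuS
bash run.sh ../outputs/cropsize-192-cfg1.0 owl
```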

## Common questions
Q: Tips to get better results.
1. Wonder3D is sensitive to the facing direction of the input image. Experiments show that front-facing images usually lead to good reconstructions.
2. Limited by resources, the current implementation only supports a limited number of views (6) and low resolution (256x256). Any image is first resized to 256x256 for generation, so images that still keep clear and sharp features after such downsampling will lead to good results.
3. Images with occlusions cause worse reconstructions, since 6 views cannot cover the complete object. Images with fewer occlusions lead to better results.
4. Increase the optimization steps in instant-nsr-pl: change `trainer.max_steps: 3000` in `instant-nsr-pl/configs/neuralangelo-ortho-wmask.yaml` to more steps, like `trainer.max_steps: 10000`. Longer optimization leads to better texture. (See the snippet after this list.)
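The change from tip 4 is a one-line edit, shown here in nested YAML form:

```yaml
# instant-nsr-pl/configs/neuralangelo-ortho-wmask.yaml
trainer:
  max_steps: 10000  # default: 3000; more steps -> better texture, longer optimization
```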

Q: What are the elevation and azimuth degrees of the generated views?

A: Unlike prior works such as Zero123, SyncDreamer, and One2345, which adopt the object world system, our views are defined in the camera system of the input image. The six views lie in the plane with 0 elevation degree in the camera system of the input image, so we don't need to estimate an elevation degree for the input image. The azimuth degrees of the six views are 0, 45, 90, 180, -90, and -45, respectively.
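For readers who want to lay the views out themselves, here is a minimal sketch we added for illustration (not code from this repo); it assumes a right-handed camera frame with y pointing up, so an azimuth is a rotation about the camera's vertical axis:

```python
import numpy as np

# Azimuths (degrees) of the six generated views, all at 0 elevation,
# defined in the camera frame of the input image.
AZIMUTHS = [0, 45, 90, 180, -90, -45]

def azimuth_rotation(azimuth_deg: float) -> np.ndarray:
    """Rotation matrix about the camera's vertical (y) axis."""
    a = np.deg2rad(azimuth_deg)
    return np.array([
        [ np.cos(a), 0.0, np.sin(a)],
        [ 0.0,       1.0, 0.0      ],
        [-np.sin(a), 0.0, np.cos(a)],
    ])

view_rotations = {az: azimuth_rotation(az) for az in AZIMUTHS}
print(view_rotations[45].round(3))
```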

Q: What is the focal length of the generated views?

A: We assume the input images are captured by an orthographic camera, so the generated views are also in orthographic space. This design lets our model generalize strongly to unreal images, but it may sometimes suffer from lens distortion on real captured images.

## Acknowledgement
We have borrowed code extensively from the following repositories. Many thanks to the authors for sharing their code.
- [stable diffusion](https://github.com/CompVis/stable-diffusion)
- [zero123](https://github.com/cvlab-columbia/zero123)
- [NeuS](https://github.com/Totoro97/NeuS)
- [SyncDreamer](https://github.com/liuyuan-pal/SyncDreamer)
- [instant-nsr-pl](https://github.com/bennyguo/instant-nsr-pl)

## License
Wonder3D is under [AGPL-3.0](https://www.gnu.org/licenses/agpl-3.0.en.html), so any downstream solutions and products (including cloud services) that include Wonder3D code or a trained model (either pretrained or custom trained) should be open-sourced to comply with the AGPL conditions. If you have any questions about the usage of Wonder3D, please contact us first.

## Citation
If you find this repository useful in your project, please cite the following work. :)
```
@article{long2023wonder3d,
  title={Wonder3D: Single Image to 3D using Cross-Domain Diffusion},
  author={Long, Xiaoxiao and Guo, Yuan-Chen and Lin, Cheng and Liu, Yuan and Dou, Zhiyang and Liu, Lingjie and Ma, Yuexin and Zhang, Song-Hai and Habermann, Marc and Theobalt, Christian and others},
  journal={arXiv preprint arXiv:2310.15008},
  year={2023}
}
```
README_zh.md
ADDED
@@ -0,0 +1,203 @@
**Other language versions: [English](README.md)**

# Wonder3D
Single Image to 3D using Cross-Domain Diffusion
## [Paper](https://arxiv.org/abs/2310.15008) | [Project page](https://www.xxlong.site/Wonder3D/) | [Hugging Face Demo](https://huggingface.co/spaces/flamehaze1115/Wonder3D-demo) | [Colab from @camenduru](https://github.com/camenduru/Wonder3D-colab)

![](assets/fig_teaser.png)

Wonder3D reconstructs highly detailed textured meshes from a single-view image in only 2 to 3 minutes. It first generates consistent multi-view normal maps and corresponding color images with a cross-domain diffusion model, and then reconstructs quickly and at high quality with a novel normal-fusion method.

## Usage
```python
import torch
import requests
from PIL import Image
import numpy as np
from torchvision.utils import make_grid, save_image
from diffusers import DiffusionPipeline  # only tested on diffusers[torch]==0.19.3, may conflict with newer versions

def load_wonder3d_pipeline():
    pipeline = DiffusionPipeline.from_pretrained(
        'flamehaze1115/wonder3d-v1.0',  # or use local checkpoint './ckpts'
        custom_pipeline='flamehaze1115/wonder3d-pipeline',
        torch_dtype=torch.float16
    )

    # enable xformers
    pipeline.unet.enable_xformers_memory_efficient_attention()

    if torch.cuda.is_available():
        pipeline.to('cuda:0')
    return pipeline

pipeline = load_wonder3d_pipeline()

# Download an example image.
cond = Image.open(requests.get("https://d.skis.ltd/nrp/sample-data/lysol.png", stream=True).raw)

# The object should be located in the center and resized to 80% of image height.
cond = Image.fromarray(np.array(cond)[:, :, :3])

# Run the pipeline!
images = pipeline(cond, num_inference_steps=20, output_type='pt', guidance_scale=1.0).images

# make_grid has no 'ncol' argument; with 12 frames, nrow=6 already yields a 6x2 grid.
result = make_grid(images, nrow=6, padding=0, value_range=(0, 1))

save_image(result, 'result.png')
```

## Collaborations
Our overarching mission is to improve the speed, affordability, and quality of 3D AIGC so that everyone can easily create 3D content. Although significant progress has been made in recent years, we acknowledge there is still a long way to go. We warmly invite you to join the discussion and explore potential collaborations in any capacity. <span style="color:red">**If you are interested in connecting or partnering with us, please don't hesitate to reach out via email (xxlong@connect.hku.hk)**</span>.

## More features

The repo is still under construction; thanks for your patience.
- [x] Local gradio demo.
- [x] Detailed tutorial.
- [x] GUI demo for mesh reconstruction
- [x] Windows support
- [x] Docker support

## Schedule
- [x] Inference code and pretrained models.
- [x] Huggingface demo.
- [ ] New model with higher resolution.


### Preparation for inference

#### Linux System Setup
```bash
conda create -n wonder3d
conda activate wonder3d
pip install -r requirements.txt
pip install git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch
```
#### Windows System Setup

Please switch to the `main-windows` branch for details of the Windows setup.

#### Docker Setup
See [docker/README.md](docker/README.md).

### Inference
1. Optional. If you have trouble connecting to Hugging Face, make sure you have downloaded the following models.
Download the [checkpoints](https://connecthkuhk-my.sharepoint.com/:f:/g/personal/xxlong_connect_hku_hk/Ej7fMT1PwXtKvsELTvDuzuMBebQXEkmf2IwhSjBWtKAJiA) and put them into the root folder.

Users in mainland China can download via [Aliyun Drive](https://www.alipan.com/s/T4rLUNAVq6V).

```bash
Wonder3D
|-- ckpts
    |-- unet
    |-- scheduler
    |-- vae
    ...
```
Then modify the file ./configs/mvdiffusion-joint-ortho-6views.yaml and set `pretrained_model_name_or_path="./ckpts"`.

2. Download the [SAM](https://huggingface.co/spaces/abhishek/StableSAM/blob/main/sam_vit_h_4b8939.pth) model and put it into the ``sam_pt`` folder.
```
Wonder3D
|-- sam_pt
    |-- sam_vit_h_4b8939.pth
```
3. Predict the foreground mask as the alpha channel. We use [Clipdrop](https://clipdrop.co/remove-background) to segment the foreground object interactively.
You may also use `rembg` to remove the backgrounds:
```python
# pip install rembg
import rembg
from PIL import Image

img = Image.open('example_images/owl.png')
result = rembg.remove(img)  # returns an RGBA image whose alpha channel is the foreground mask
result.show()
```
4. Run Wonder3D to produce multi-view consistent normal maps and color images, then check the results in the folder `./outputs`. (We use `rembg` to remove the backgrounds of the results, but the segmentations are not always perfect. Consider using [Clipdrop](https://clipdrop.co/remove-background) to get masks for the generated normal maps and color images, since mask quality significantly influences the reconstructed mesh quality.)
```bash
accelerate launch --config_file 1gpu.yaml test_mvdiffusion_seq.py \
            --config configs/mvdiffusion-joint-ortho-6views.yaml validation_dataset.root_dir={your_data_path} \
            validation_dataset.filepaths=['your_img_file'] save_dir={your_save_path}
```

Example:

```bash
accelerate launch --config_file 1gpu.yaml test_mvdiffusion_seq.py \
            --config configs/mvdiffusion-joint-ortho-6views.yaml validation_dataset.root_dir=./example_images \
            validation_dataset.filepaths=['owl.png'] save_dir=./outputs
```

#### Run your local gradio demo (only generates normals and colors, no reconstruction)
```bash
python gradio_app_mv.py  # generate multi-view normals and colors
```

5. Mesh Extraction

#### Instant-NSR Mesh Extraction

```bash
cd ./instant-nsr-pl
python launch.py --config configs/neuralangelo-ortho-wmask.yaml --gpu 0 --train dataset.root_dir=../{your_save_path}/cropsize-{crop_size}-cfg{guidance_scale:.1f}/ dataset.scene={scene}
```

Example:

```bash
cd ./instant-nsr-pl
python launch.py --config configs/neuralangelo-ortho-wmask.yaml --gpu 0 --train dataset.root_dir=../outputs/cropsize-192-cfg1.0/ dataset.scene=owl
```

Our generated normal maps and color images are defined in orthographic views, so the reconstructed mesh is also in orthographic camera space. If you use MeshLab to view the meshes, click `Toggle Orthographic Camera` in the `View` tab.

#### Run your local gradio demo (first generates normals and colors, then reconstructs; no need to run `gradio_app_mv.py` first)
```bash
python gradio_app_recon.py
```

#### NeuS-based Mesh Extraction

Since many users reported problems with the Windows setup of instant-nsr-pl, we provide a NeuS-based reconstruction, which may avoid those dependency problems.

NeuS consumes less GPU memory and favors smooth surfaces without parameter tuning. However, NeuS takes more time and its texture may be less sharp. If you are not sensitive to runtime, we recommend NeuS for optimization due to its robustness.

```bash
cd ./NeuS
bash run.sh output_folder_path scene_name
```

## Common questions
**Tips to get better results:**
1. **Facing direction matters:** Wonder3D is sensitive to the facing direction of the input image. Experiments show that front-facing images usually lead to good reconstructions.
2. **Image resolution:** Limited by resources, the current implementation only supports a limited number of views (6) and low resolution (256x256). Any image is first resized to 256x256 for generation, so images that keep clear and sharp features after such downsampling lead to good results.
3. **Occlusions:** Images with occlusions cause worse reconstructions, since 6 views cannot cover the complete object. Images with fewer occlusions usually produce better results.
4. **Increase the optimization steps in instant-nsr-pl:** change `trainer.max_steps: 3000` in `instant-nsr-pl/configs/neuralangelo-ortho-wmask.yaml` to more steps, e.g. `trainer.max_steps: 10000`. Longer optimization leads to better texture.

**Generated view information:**
- **Elevation and azimuth degrees:** Unlike prior works such as Zero123, SyncDreamer, and One2345, which adopt the object world system, our views are defined in the camera system of the input image. The six views lie in the 0-elevation plane of the input image's camera system, so we do not need to estimate an elevation for the input image. The azimuths of the six views are 0, 45, 90, 180, -90, and -45 degrees, respectively.

**Focal length of the generated views:**
- We assume the input images are captured by an orthographic camera, so the generated views are also in orthographic space. This design lets the model generalize strongly to unreal images, but it may sometimes suffer from lens distortion on real captured images.

## Acknowledgement
We have borrowed code extensively from the following repositories. Many thanks to the authors for sharing their code.
- [stable diffusion](https://github.com/CompVis/stable-diffusion)
- [zero123](https://github.com/cvlab-columbia/zero123)
- [NeuS](https://github.com/Totoro97/NeuS)
- [SyncDreamer](https://github.com/liuyuan-pal/SyncDreamer)
- [instant-nsr-pl](https://github.com/bennyguo/instant-nsr-pl)

## License
Wonder3D is under [AGPL-3.0](https://www.gnu.org/licenses/agpl-3.0.en.html), so any downstream solutions and products (including cloud services) that include Wonder3D code or a trained model (either pretrained or custom trained) should be open-sourced to comply with the AGPL conditions. If you have any questions about the usage of Wonder3D, please contact us first.

## Citation
If you find this repository useful in your project, please cite the following work. :)
```
@article{long2023wonder3d,
  title={Wonder3D: Single Image to 3D using Cross-Domain Diffusion},
  author={Long, Xiaoxiao and Guo, Yuan-Chen and Lin, Cheng and Liu, Yuan and Dou, Zhiyang and Liu, Lingjie and Ma, Yuexin and Zhang, Song-Hai and Habermann, Marc and Theobalt, Christian and others},
  journal={arXiv preprint arXiv:2310.15008},
  year={2023}
}
```
assets/fig_teaser.png
ADDED
Git LFS Details
configs/mvdiffusion-joint-ortho-6views.yaml
ADDED
@@ -0,0 +1,42 @@
pretrained_model_name_or_path: 'flamehaze1115/wonder3d-v1.0' # or './ckpts'
revision: null
validation_dataset:
  root_dir: "./example_images"  # folder that stores the test images
  num_views: 6
  bg_color: 'white'
  img_wh: [256, 256]
  num_validation_samples: 1000
  crop_size: 192
  filepaths: ['owl.png']  # test image names; leave empty to test all images in the folder

save_dir: 'outputs/'

pred_type: 'joint'
seed: 42
validation_batch_size: 1
dataloader_num_workers: 64

local_rank: -1

pipe_kwargs:
  camera_embedding_type: 'e_de_da_sincos'
  num_views: 6

validation_guidance_scales: [1.0]
pipe_validation_kwargs:
  eta: 1.0
validation_grid_nrow: 6

unet_from_pretrained_kwargs:
  camera_embedding_type: 'e_de_da_sincos'
  projection_class_embeddings_input_dim: 10
  num_views: 6
  sample_size: 32
  cd_attention_mid: true
  zero_init_conv_in: false
  zero_init_camera_projection: false

num_views: 6
camera_embedding_type: 'e_de_da_sincos'

enable_xformers_memory_efficient_attention: true
|
docker/Dockerfile
ADDED
@@ -0,0 +1,56 @@
# get the development image from nvidia cuda 11.7
FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu20.04

LABEL name="Wonder3D" \
      maintainer="Tiancheng <athinkingneal@gmail.com>" \
      lastupdate="2024-01-05"

# create workspace folder and set it as working directory
RUN mkdir -p /workspace
WORKDIR /workspace

# set the timezone
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
    apt-get install -y tzdata && \
    ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
    dpkg-reconfigure --frontend noninteractive tzdata

# update package lists and install git, wget, vim, libgl1-mesa-glx, libglib2.0-0 and unzip
RUN apt-get update && \
    apt-get install -y git wget vim libgl1-mesa-glx libglib2.0-0 unzip

# install conda
RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
    chmod +x Miniconda3-latest-Linux-x86_64.sh && \
    ./Miniconda3-latest-Linux-x86_64.sh -b -p /workspace/miniconda3 && \
    rm Miniconda3-latest-Linux-x86_64.sh

# update PATH environment variable
ENV PATH="/workspace/miniconda3/bin:${PATH}"

# initialize conda
RUN conda init bash

# create and activate conda environment
RUN conda create -n wonder3d python=3.8 && echo "source activate wonder3d" > ~/.bashrc
ENV PATH /workspace/miniconda3/envs/wonder3d/bin:$PATH

# clone the repository
RUN git clone https://github.com/xxlong0/Wonder3D.git

# change the working directory to the repository
WORKDIR /workspace/Wonder3D

# install pytorch 1.13.1, torchvision and the other python dependencies
RUN pip install -r docker/requirements.txt

# install the specific version of nerfacc corresponding to torch 1.13.0 and cuda 11.7,
# otherwise nerfacc will freeze during its cuda setup
RUN pip install nerfacc==0.3.3 -f https://nerfacc-bucket.s3.us-west-2.amazonaws.com/whl/torch-1.13.0_cu117.html

# installing tiny-cuda-nn during the docker build causes an error; install it
# manually inside the running container instead:
# RUN pip install git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch
docker/README.md
ADDED
@@ -0,0 +1,57 @@
# Docker setup

This docker setup is tested on Ubuntu 20.04.

Make sure you are in the directory `yourworkspace/Wonder3D/`.

Run

`docker build --no-cache -t wonder3d/deploy:cuda11.7 -f docker/Dockerfile .`

then run

`docker run --gpus all -it wonder3d/deploy:cuda11.7 bash`


## Nvidia Container Toolkit setup

You will have trouble enabling the GPU for docker if you haven't installed the **NVIDIA Container Toolkit** on your local machine before. You can skip this section if you have already installed it. Follow the instructions on this website to install it:

https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html

or run the following commands to install it with apt:

1. Configure the production repository:

```bash
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
  && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
    sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
    sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
```

2. Optionally enable experimental packages, then update the package list from the repository:

`sudo sed -i -e '/experimental/ s/^#//g' /etc/apt/sources.list.d/nvidia-container-toolkit.list`

`sudo apt-get update`

3. Install the NVIDIA Container Toolkit packages:

`sudo apt-get install -y nvidia-container-toolkit`

Remember to restart docker:

`sudo systemctl restart docker`

Now you can run the following command:

`docker run --gpus all -it wonder3d/deploy:cuda11.7 bash`


## Install Tiny CUDA NN

After you start the container, run the following command to install tiny-cuda-nn. Somehow this pip installation cannot be done during the docker build, so you have to do it manually after the container is started:

`pip install git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch`


Now you should be good to go. Good luck and have fun :)
docker/requirements.txt
ADDED
@@ -0,0 +1,36 @@
--extra-index-url https://download.pytorch.org/whl/cu117

# nerfacc==0.3.3 needs to be installed from a specific location;
# see the installation part of https://github.com/nerfstudio-project/nerfacc

torch==1.13.1+cu117
torchvision==0.14.1+cu117
diffusers[torch]==0.19.3
xformers==0.0.16
transformers>=4.25.1
bitsandbytes==0.35.4
decord==0.6.0
pytorch-lightning<2
omegaconf==2.2.3
trimesh==3.9.8
pyhocon==0.3.57
icecream==2.1.0
PyMCubes==0.1.2
accelerate
modelcards
einops
ftfy
piq
matplotlib
opencv-python
imageio
imageio-ffmpeg
scipy
pyransac3d
torch_efficient_distloss
tensorboard
rembg
segment_anything
gradio==3.50.2
triton
rich
example_images/14_10_29_489_Tiger_1__1.png
ADDED
example_images/box.png
ADDED
example_images/bread.png
ADDED
example_images/cat.png
ADDED
example_images/cat_head.png
ADDED
example_images/chili.png
ADDED
example_images/duola.png
ADDED
example_images/halloween.png
ADDED
example_images/head.png
ADDED
example_images/kettle.png
ADDED
example_images/kunkun.png
ADDED
example_images/milk.png
ADDED
example_images/owl.png
ADDED
example_images/poro.png
ADDED
example_images/pumpkin.png
ADDED
example_images/skull.png
ADDED
example_images/stone.png
ADDED
example_images/teapot.png
ADDED
example_images/tiger-head-3d-model-obj-stl.png
ADDED
gradio_app_mv.py
ADDED
@@ -0,0 +1,439 @@
import os
import torch
import fire
import gradio as gr
from PIL import Image
from functools import partial

import cv2
import time
import subprocess
import numpy as np
from datetime import datetime
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, List

from rembg import remove
from segment_anything import sam_model_registry, SamPredictor
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import AutoencoderKL, DDPMScheduler, DDIMScheduler
from einops import rearrange

from mvdiffusion.models.unet_mv2d_condition import UNetMV2DConditionModel
from mvdiffusion.data.single_image_dataset import SingleImageDataset
from mvdiffusion.pipelines.pipeline_mvdiffusion_image import MVDiffusionImagePipeline


def save_image(tensor):
    # Convert a CHW float tensor in [0, 1] to an HWC uint8 numpy array.
    ndarr = tensor.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy()
    return ndarr


def save_image_to_disk(tensor, fp):
    ndarr = tensor.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy()
    im = Image.fromarray(ndarr)
    im.save(fp)
    return ndarr


def save_image_numpy(ndarr, fp):
    im = Image.fromarray(ndarr)
    im.save(fp)


weight_dtype = torch.float16

_TITLE = '''Wonder3D: Single Image to 3D using Cross-Domain Diffusion'''
_DESCRIPTION = '''
<div>
Generate consistent multi-view normal maps and color images.
<a style="display:inline-block; margin-left: .5em" href='https://github.com/xxlong0/Wonder3D/'><img src='https://img.shields.io/github/stars/xxlong0/Wonder3D?style=social' /></a>
</div>
<div>
The demo does not include the mesh reconstruction part; please visit <a href="https://github.com/xxlong0/Wonder3D/">our github repo</a> to get a textured mesh.
</div>
'''
_GPU_ID = 0


if not hasattr(Image, 'Resampling'):
    # Pillow < 9.1 compatibility: expose the old resampling constants.
    Image.Resampling = Image


def sam_init():
    sam_checkpoint = os.path.join(os.path.dirname(__file__), "sam_pt", "sam_vit_h_4b8939.pth")
    model_type = "vit_h"

    sam = sam_model_registry[model_type](checkpoint=sam_checkpoint).to(device=f"cuda:{_GPU_ID}")
    predictor = SamPredictor(sam)
    return predictor


def sam_segment(predictor, input_image, *bbox_coords):
    bbox = np.array(bbox_coords)
    image = np.asarray(input_image)

    start_time = time.time()
    predictor.set_image(image)

    masks_bbox, scores_bbox, logits_bbox = predictor.predict(box=bbox, multimask_output=True)

    print(f"SAM Time: {time.time() - start_time:.3f}s")
    out_image = np.zeros((image.shape[0], image.shape[1], 4), dtype=np.uint8)
    out_image[:, :, :3] = image
    out_image_bbox = out_image.copy()
    # Use the last (highest-resolution) mask as the alpha channel.
    out_image_bbox[:, :, 3] = masks_bbox[-1].astype(np.uint8) * 255
    torch.cuda.empty_cache()
    return Image.fromarray(out_image_bbox, mode='RGBA')


def expand2square(pil_img, background_color):
    width, height = pil_img.size
    if width == height:
        return pil_img
    elif width > height:
        result = Image.new(pil_img.mode, (width, width), background_color)
        result.paste(pil_img, (0, (width - height) // 2))
        return result
    else:
        result = Image.new(pil_img.mode, (height, height), background_color)
        result.paste(pil_img, ((height - width) // 2, 0))
        return result


def preprocess(predictor, input_image, chk_group=None, segment=True, rescale=False):
    RES = 1024
    input_image.thumbnail([RES, RES], Image.Resampling.LANCZOS)
    if chk_group is not None:
        segment = "Background Removal" in chk_group
        rescale = "Rescale" in chk_group
    if segment:
        # Coarse background removal with rembg, then refine with SAM using the
        # bounding box of the non-zero alpha region.
        image_rem = input_image.convert('RGBA')
        image_nobg = remove(image_rem, alpha_matting=True)
        arr = np.asarray(image_nobg)[:, :, -1]
        x_nonzero = np.nonzero(arr.sum(axis=0))
        y_nonzero = np.nonzero(arr.sum(axis=1))
        x_min = int(x_nonzero[0].min())
        y_min = int(y_nonzero[0].min())
        x_max = int(x_nonzero[0].max())
        y_max = int(y_nonzero[0].max())
        input_image = sam_segment(predictor, input_image.convert('RGB'), x_min, y_min, x_max, y_max)
    # Rescale and recenter
    if rescale:
        image_arr = np.array(input_image)
        in_w, in_h = image_arr.shape[:2]
        out_res = min(RES, max(in_w, in_h))
        ret, mask = cv2.threshold(np.array(input_image.split()[-1]), 0, 255, cv2.THRESH_BINARY)
        x, y, w, h = cv2.boundingRect(mask)
        max_size = max(w, h)
        ratio = 0.75
        side_len = int(max_size / ratio)
        padded_image = np.zeros((side_len, side_len, 4), dtype=np.uint8)
        center = side_len // 2
        padded_image[center - h // 2 : center - h // 2 + h, center - w // 2 : center - w // 2 + w] = image_arr[y : y + h, x : x + w]
        rgba = Image.fromarray(padded_image).resize((out_res, out_res), Image.LANCZOS)

        rgba_arr = np.array(rgba) / 255.0
        rgb = rgba_arr[..., :3] * rgba_arr[..., -1:] + (1 - rgba_arr[..., -1:])
        input_image = Image.fromarray((rgb * 255).astype(np.uint8))
    else:
        input_image = expand2square(input_image, (127, 127, 127, 0))
    return input_image, input_image.resize((320, 320), Image.Resampling.LANCZOS)


def load_wonder3d_pipeline(cfg):
    pipeline = MVDiffusionImagePipeline.from_pretrained(
        cfg.pretrained_model_name_or_path,
        torch_dtype=weight_dtype
    )

    pipeline.unet.enable_xformers_memory_efficient_attention()

    if torch.cuda.is_available():
        pipeline.to('cuda:0')
    return pipeline


def prepare_data(single_image, crop_size):
    dataset = SingleImageDataset(root_dir='', num_views=6, img_wh=[256, 256], bg_color='white', crop_size=crop_size, single_image=single_image)
    return dataset[0]


scene = 'scene'


def run_pipeline(pipeline, cfg, single_image, guidance_scale, steps, seed, crop_size, chk_group=None):
    global scene

    write_image = False
    if chk_group is not None:
        write_image = "Write Results" in chk_group

    batch = prepare_data(single_image, crop_size)

    pipeline.set_progress_bar_config(disable=True)
    seed = int(seed)
    generator = torch.Generator(device=pipeline.unet.device).manual_seed(seed)

    # repeat (2B, Nv, 3, H, W): one copy for the normal branch, one for the color branch
    imgs_in = torch.cat([batch['imgs_in']] * 2, dim=0).to(weight_dtype)

    # (2B, Nv, Nce)
    camera_embeddings = torch.cat([batch['camera_embeddings']] * 2, dim=0).to(weight_dtype)

    task_embeddings = torch.cat([batch['normal_task_embeddings'], batch['color_task_embeddings']], dim=0).to(weight_dtype)

    camera_embeddings = torch.cat([camera_embeddings, task_embeddings], dim=-1).to(weight_dtype)

    # (B*Nv, 3, H, W)
    imgs_in = rearrange(imgs_in, "Nv C H W -> (Nv) C H W")

    out = pipeline(
        imgs_in,
        # camera_embeddings,
        generator=generator,
        guidance_scale=guidance_scale,
        num_inference_steps=steps,
        output_type='pt',
        num_images_per_prompt=1,
        **cfg.pipe_validation_kwargs,
    ).images

    # The first half of the batch holds the normal maps, the second half the colors.
    bsz = out.shape[0] // 2
    normals_pred = out[:bsz]
    images_pred = out[bsz:]
    num_views = 6
    if write_image:
        VIEWS = ['front', 'front_right', 'right', 'back', 'left', 'front_left']
        cur_dir = os.path.join("./outputs", f"cropsize-{int(crop_size)}-cfg{guidance_scale:.1f}")

        scene = 'scene' + datetime.now().strftime('@%Y%m%d-%H%M%S')
        scene_dir = os.path.join(cur_dir, scene)
        normal_dir = os.path.join(scene_dir, "normals")
        masked_colors_dir = os.path.join(scene_dir, "masked_colors")
        os.makedirs(normal_dir, exist_ok=True)
        os.makedirs(masked_colors_dir, exist_ok=True)
        for j in range(num_views):
            view = VIEWS[j]
            normal = normals_pred[j]
            color = images_pred[j]

            normal_filename = f"normals_000_{view}.png"
            rgb_filename = f"rgb_000_{view}.png"
            normal = save_image_to_disk(normal, os.path.join(normal_dir, normal_filename))
            color = save_image_to_disk(color, os.path.join(scene_dir, rgb_filename))

    normals_pred = [save_image(normals_pred[i]) for i in range(bsz)]
    images_pred = [save_image(images_pred[i]) for i in range(bsz)]

    out = images_pred + normals_pred
    return out


def process_3d(mode, data_dir, guidance_scale, crop_size):
    dir = None
    global scene

    cur_dir = os.path.dirname(os.path.abspath(__file__))

    subprocess.run(
        f'cd instant-nsr-pl && python launch.py --config configs/neuralangelo-ortho-wmask.yaml --gpu 0 --train dataset.root_dir=../{data_dir}/cropsize-{int(crop_size)}-cfg{guidance_scale:.1f}/ dataset.scene={scene} && cd ..',
        shell=True,
    )
    import glob

    obj_files = glob.glob(f'{cur_dir}/instant-nsr-pl/exp/{scene}/*/save/*.obj', recursive=True)
    print(obj_files)
    if obj_files:
        dir = obj_files[0]
    return dir


@dataclass
class TestConfig:
    pretrained_model_name_or_path: str
    pretrained_unet_path: str
    revision: Optional[str]
    validation_dataset: Dict
    save_dir: str
    seed: Optional[int]
    validation_batch_size: int
    dataloader_num_workers: int

    local_rank: int

    pipe_kwargs: Dict
    pipe_validation_kwargs: Dict
    unet_from_pretrained_kwargs: Dict
    validation_guidance_scales: List[float]
    validation_grid_nrow: int
    camera_embedding_lr_mult: float

    num_views: int
    camera_embedding_type: str

    pred_type: str  # joint, or ablation

    enable_xformers_memory_efficient_attention: bool

    cond_on_normals: bool
    cond_on_colors: bool


def run_demo():
    from utils.misc import load_config
    from omegaconf import OmegaConf

    # parse YAML config to OmegaConf
    cfg = load_config("./configs/mvdiffusion-joint-ortho-6views.yaml")
    schema = OmegaConf.structured(TestConfig)
    cfg = OmegaConf.merge(schema, cfg)

    pipeline = load_wonder3d_pipeline(cfg)
    torch.set_grad_enabled(False)
    pipeline.to(f'cuda:{_GPU_ID}')

    predictor = sam_init()

    custom_theme = gr.themes.Soft(primary_hue="blue").set(
        button_secondary_background_fill="*neutral_100", button_secondary_background_fill_hover="*neutral_200"
    )
    custom_css = '''#disp_image {
        text-align: center; /* Horizontally center the content */
    }'''

    with gr.Blocks(title=_TITLE, theme=custom_theme, css=custom_css) as demo:
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown('# ' + _TITLE)
        gr.Markdown(_DESCRIPTION)
        with gr.Row(variant='panel'):
            with gr.Column(scale=1):
                input_image = gr.Image(type='pil', image_mode='RGBA', height=320, label='Input image', tool=None)

            with gr.Column(scale=1):
                processed_image = gr.Image(
                    type='pil',
                    label="Processed Image",
                    interactive=False,
                    height=320,
                    tool=None,
                    image_mode='RGBA',
                    elem_id="disp_image",
                    visible=True,
                )
        processed_image_highres = gr.Image(type='pil', image_mode='RGBA', visible=False, tool=None)
        with gr.Row(variant='panel'):
            with gr.Column(scale=1):
                example_folder = os.path.join(os.path.dirname(__file__), "./example_images")
                example_fns = [os.path.join(example_folder, example) for example in os.listdir(example_folder)]
                gr.Examples(
                    examples=example_fns,
                    inputs=[input_image],
                    outputs=[input_image],
                    cache_examples=False,
                    label='Examples (click one of the images below to start)',
                    examples_per_page=30,
                )
            with gr.Column(scale=1):
                with gr.Accordion('Advanced options', open=True):
                    with gr.Row():
                        with gr.Column():
                            input_processing = gr.CheckboxGroup(
                                ['Background Removal'],
                                label='Input Image Preprocessing',
                                value=['Background Removal'],
                                info='untick this if the image is already masked with an alpha channel',
                            )
                        with gr.Column():
                            output_processing = gr.CheckboxGroup(
                                ['Write Results'], label='write the results to the ./outputs folder', value=['Write Results']
                            )
                    with gr.Row():
                        with gr.Column():
                            scale_slider = gr.Slider(1, 5, value=1, step=1, label='Classifier Free Guidance Scale')
                        with gr.Column():
                            steps_slider = gr.Slider(15, 100, value=50, step=1, label='Number of Diffusion Inference Steps')
                    with gr.Row():
                        with gr.Column():
                            seed = gr.Number(42, label='Seed')
                        with gr.Column():
                            crop_size = gr.Number(192, label='Crop size')

                    mode = gr.Textbox('train', visible=False)
                    data_dir = gr.Textbox('outputs', visible=False)
                run_btn = gr.Button('Generate Normals and Colors', variant='primary', interactive=True)

        with gr.Row():
            view_1 = gr.Image(interactive=False, height=240, show_label=False)
            view_2 = gr.Image(interactive=False, height=240, show_label=False)
            view_3 = gr.Image(interactive=False, height=240, show_label=False)
            view_4 = gr.Image(interactive=False, height=240, show_label=False)
            view_5 = gr.Image(interactive=False, height=240, show_label=False)
            view_6 = gr.Image(interactive=False, height=240, show_label=False)
        with gr.Row():
            normal_1 = gr.Image(interactive=False, height=240, show_label=False)
            normal_2 = gr.Image(interactive=False, height=240, show_label=False)
            normal_3 = gr.Image(interactive=False, height=240, show_label=False)
            normal_4 = gr.Image(interactive=False, height=240, show_label=False)
            normal_5 = gr.Image(interactive=False, height=240, show_label=False)
            normal_6 = gr.Image(interactive=False, height=240, show_label=False)

        run_btn.click(
            fn=partial(preprocess, predictor), inputs=[input_image, input_processing], outputs=[processed_image_highres, processed_image], queue=True
        ).success(
            fn=partial(run_pipeline, pipeline, cfg),
            inputs=[processed_image_highres, scale_slider, steps_slider, seed, crop_size, output_processing],
            outputs=[view_1, view_2, view_3, view_4, view_5, view_6, normal_1, normal_2, normal_3, normal_4, normal_5, normal_6],
        )

    demo.queue().launch(share=True, max_threads=80)


if __name__ == '__main__':
    fire.Fire(run_demo)
gradio_app_recon.py
ADDED
@@ -0,0 +1,438 @@
import os
import torch
import fire
import gradio as gr
from PIL import Image
from functools import partial

import cv2
import time
import subprocess
import numpy as np
from datetime import datetime
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, List

from rembg import remove
from segment_anything import sam_model_registry, SamPredictor
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import AutoencoderKL, DDPMScheduler, DDIMScheduler
from einops import rearrange

from mvdiffusion.models.unet_mv2d_condition import UNetMV2DConditionModel
from mvdiffusion.data.single_image_dataset import SingleImageDataset
from mvdiffusion.pipelines.pipeline_mvdiffusion_image import MVDiffusionImagePipeline


def save_image(tensor):
    # Convert a CHW float tensor in [0, 1] to an HWC uint8 numpy array.
    ndarr = tensor.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy()
    return ndarr


def save_image_to_disk(tensor, fp):
    ndarr = tensor.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy()
    im = Image.fromarray(ndarr)
    im.save(fp)
    return ndarr


def save_image_numpy(ndarr, fp):
    im = Image.fromarray(ndarr)
    im.save(fp)


weight_dtype = torch.float16

_TITLE = '''Wonder3D: Single Image to 3D using Cross-Domain Diffusion'''
_DESCRIPTION = '''
<div>
Generate consistent multi-view normal maps and color images.
<a style="display:inline-block; margin-left: .5em" href='https://github.com/xxlong0/Wonder3D/'><img src='https://img.shields.io/github/stars/xxlong0/Wonder3D?style=social' /></a>
</div>
<div>
The demo does not include the mesh reconstruction part; please visit <a href="https://github.com/xxlong0/Wonder3D/">our github repo</a> to get a textured mesh.
</div>
'''
_GPU_ID = 0


if not hasattr(Image, 'Resampling'):
    # Pillow < 9.1 compatibility: expose the old resampling constants.
    Image.Resampling = Image


def sam_init():
    sam_checkpoint = os.path.join(os.path.dirname(__file__), "sam_pt", "sam_vit_h_4b8939.pth")
    model_type = "vit_h"

    sam = sam_model_registry[model_type](checkpoint=sam_checkpoint).to(device=f"cuda:{_GPU_ID}")
    predictor = SamPredictor(sam)
    return predictor


def sam_segment(predictor, input_image, *bbox_coords):
    bbox = np.array(bbox_coords)
    image = np.asarray(input_image)

    start_time = time.time()
    predictor.set_image(image)

    masks_bbox, scores_bbox, logits_bbox = predictor.predict(box=bbox, multimask_output=True)

    print(f"SAM Time: {time.time() - start_time:.3f}s")
    out_image = np.zeros((image.shape[0], image.shape[1], 4), dtype=np.uint8)
    out_image[:, :, :3] = image
    out_image_bbox = out_image.copy()
    # Use the last (highest-resolution) mask as the alpha channel.
    out_image_bbox[:, :, 3] = masks_bbox[-1].astype(np.uint8) * 255
    torch.cuda.empty_cache()
    return Image.fromarray(out_image_bbox, mode='RGBA')


def expand2square(pil_img, background_color):
    width, height = pil_img.size
    if width == height:
        return pil_img
    elif width > height:
        result = Image.new(pil_img.mode, (width, width), background_color)
        result.paste(pil_img, (0, (width - height) // 2))
        return result
    else:
        result = Image.new(pil_img.mode, (height, height), background_color)
        result.paste(pil_img, ((height - width) // 2, 0))
        return result


def preprocess(predictor, input_image, chk_group=None, segment=True, rescale=False):
    RES = 1024
    input_image.thumbnail([RES, RES], Image.Resampling.LANCZOS)
    if chk_group is not None:
        segment = "Background Removal" in chk_group
        rescale = "Rescale" in chk_group
    if segment:
        # Coarse background removal with rembg, then refine with SAM using the
        # bounding box of the non-zero alpha region.
        image_rem = input_image.convert('RGBA')
        image_nobg = remove(image_rem, alpha_matting=True)
        arr = np.asarray(image_nobg)[:, :, -1]
        x_nonzero = np.nonzero(arr.sum(axis=0))
        y_nonzero = np.nonzero(arr.sum(axis=1))
        x_min = int(x_nonzero[0].min())
        y_min = int(y_nonzero[0].min())
        x_max = int(x_nonzero[0].max())
        y_max = int(y_nonzero[0].max())
        input_image = sam_segment(predictor, input_image.convert('RGB'), x_min, y_min, x_max, y_max)
    # Rescale and recenter
    if rescale:
        image_arr = np.array(input_image)
        in_w, in_h = image_arr.shape[:2]
        out_res = min(RES, max(in_w, in_h))
        ret, mask = cv2.threshold(np.array(input_image.split()[-1]), 0, 255, cv2.THRESH_BINARY)
        x, y, w, h = cv2.boundingRect(mask)
        max_size = max(w, h)
        ratio = 0.75
        side_len = int(max_size / ratio)
        padded_image = np.zeros((side_len, side_len, 4), dtype=np.uint8)
        center = side_len // 2
        padded_image[center - h // 2 : center - h // 2 + h, center - w // 2 : center - w // 2 + w] = image_arr[y : y + h, x : x + w]
        rgba = Image.fromarray(padded_image).resize((out_res, out_res), Image.LANCZOS)

        rgba_arr = np.array(rgba) / 255.0
        rgb = rgba_arr[..., :3] * rgba_arr[..., -1:] + (1 - rgba_arr[..., -1:])
        input_image = Image.fromarray((rgb * 255).astype(np.uint8))
    else:
        input_image = expand2square(input_image, (127, 127, 127, 0))
    return input_image, input_image.resize((320, 320), Image.Resampling.LANCZOS)


def load_wonder3d_pipeline(cfg):
    pipeline = MVDiffusionImagePipeline.from_pretrained(
        cfg.pretrained_model_name_or_path,
        torch_dtype=weight_dtype
    )

    pipeline.unet.enable_xformers_memory_efficient_attention()

    if torch.cuda.is_available():
        pipeline.to('cuda:0')
    return pipeline


def prepare_data(single_image, crop_size):
    dataset = SingleImageDataset(root_dir='', num_views=6, img_wh=[256, 256], bg_color='white', crop_size=crop_size, single_image=single_image)
    return dataset[0]


scene = 'scene'


def run_pipeline(pipeline, cfg, single_image, guidance_scale, steps, seed, crop_size, chk_group=None):
    global scene

    write_image = False
    if chk_group is not None:
        write_image = "Write Results" in chk_group

    batch = prepare_data(single_image, crop_size)

    pipeline.set_progress_bar_config(disable=True)
    seed = int(seed)
    generator = torch.Generator(device=pipeline.unet.device).manual_seed(seed)

    # repeat (2B, Nv, 3, H, W): one copy for the normal branch, one for the color branch
    imgs_in = torch.cat([batch['imgs_in']] * 2, dim=0).to(weight_dtype)

    # (2B, Nv, Nce)
    camera_embeddings = torch.cat([batch['camera_embeddings']] * 2, dim=0).to(weight_dtype)

    task_embeddings = torch.cat([batch['normal_task_embeddings'], batch['color_task_embeddings']], dim=0).to(weight_dtype)

    camera_embeddings = torch.cat([camera_embeddings, task_embeddings], dim=-1).to(weight_dtype)

    # (B*Nv, 3, H, W)
    imgs_in = rearrange(imgs_in, "Nv C H W -> (Nv) C H W")

    out = pipeline(
        imgs_in,
        camera_embeddings,
        generator=generator,
        guidance_scale=guidance_scale,
        num_inference_steps=steps,
        output_type='pt',
        num_images_per_prompt=1,
        **cfg.pipe_validation_kwargs,
    ).images

    # The first half of the batch holds the normal maps, the second half the colors.
    bsz = out.shape[0] // 2
    normals_pred = out[:bsz]
    images_pred = out[bsz:]
    num_views = 6
    if write_image:
        VIEWS = ['front', 'front_right', 'right', 'back', 'left', 'front_left']
        cur_dir = os.path.join("./outputs", f"cropsize-{int(crop_size)}-cfg{guidance_scale:.1f}")

        scene = 'scene' + datetime.now().strftime('@%Y%m%d-%H%M%S')
        scene_dir = os.path.join(cur_dir, scene)
        normal_dir = os.path.join(scene_dir, "normals")
        masked_colors_dir = os.path.join(scene_dir, "masked_colors")
        os.makedirs(normal_dir, exist_ok=True)
        os.makedirs(masked_colors_dir, exist_ok=True)
        for j in range(num_views):
            view = VIEWS[j]
            normal = normals_pred[j]
            color = images_pred[j]

            normal_filename = f"normals_000_{view}.png"
            rgb_filename = f"rgb_000_{view}.png"
            normal = save_image_to_disk(normal, os.path.join(normal_dir, normal_filename))
            color = save_image_to_disk(color, os.path.join(scene_dir, rgb_filename))

            # Remove the backgrounds of the generated views and save the masked versions.
            rm_normal = remove(normal)
            rm_color = remove(color)

            save_image_numpy(rm_normal, os.path.join(scene_dir, normal_filename))
            save_image_numpy(rm_color, os.path.join(masked_colors_dir, rgb_filename))

    normals_pred = [save_image(normals_pred[i]) for i in range(bsz)]
    images_pred = [save_image(images_pred[i]) for i in range(bsz)]

    out = images_pred + normals_pred
    return out


def process_3d(mode, data_dir, guidance_scale, crop_size):
    dir = None
    global scene

    cur_dir = os.path.dirname(os.path.abspath(__file__))

    subprocess.run(
        f'cd instant-nsr-pl && python launch.py --config configs/neuralangelo-ortho-wmask.yaml --gpu 0 --train dataset.root_dir=../{data_dir}/cropsize-{int(crop_size)}-cfg{guidance_scale:.1f}/ dataset.scene={scene} && cd ..',
        shell=True,
    )
    import glob

    obj_files = glob.glob(f'{cur_dir}/instant-nsr-pl/exp/{scene}/*/save/*.obj', recursive=True)
    print(obj_files)
    if obj_files:
        dir = obj_files[0]
    return dir


@dataclass
class TestConfig:
    pretrained_model_name_or_path: str
    pretrained_unet_path: str
    revision: Optional[str]
    validation_dataset: Dict
    save_dir: str
    seed: Optional[int]
    validation_batch_size: int
    dataloader_num_workers: int

    local_rank: int

    pipe_kwargs: Dict
    pipe_validation_kwargs: Dict
    unet_from_pretrained_kwargs: Dict
    validation_guidance_scales: List[float]
    validation_grid_nrow: int
    camera_embedding_lr_mult: float

    num_views: int
    camera_embedding_type: str

    pred_type: str  # joint, or ablation

    enable_xformers_memory_efficient_attention: bool

    cond_on_normals: bool
    cond_on_colors: bool


def run_demo():
    from utils.misc import load_config
    from omegaconf import OmegaConf

    # parse YAML config to OmegaConf
    cfg = load_config("./configs/mvdiffusion-joint-ortho-6views.yaml")
    schema = OmegaConf.structured(TestConfig)
    cfg = OmegaConf.merge(schema, cfg)

    pipeline = load_wonder3d_pipeline(cfg)
    torch.set_grad_enabled(False)
    pipeline.to(f'cuda:{_GPU_ID}')

    predictor = sam_init()

    custom_theme = gr.themes.Soft(primary_hue="blue").set(
        button_secondary_background_fill="*neutral_100", button_secondary_background_fill_hover="*neutral_200"
    )
    custom_css = '''#disp_image {
        text-align: center; /* Horizontally center the content */
    }'''

    with gr.Blocks(title=_TITLE, theme=custom_theme, css=custom_css) as demo:
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown('# ' + _TITLE)
        gr.Markdown(_DESCRIPTION)
        with gr.Row(variant='panel'):
            with gr.Column(scale=1):
                input_image = gr.Image(type='pil', image_mode='RGBA', height=320, label='Input image', tool=None)

            with gr.Column(scale=1):
                processed_image = gr.Image(
                    type='pil',
                    label="Processed Image",
                    interactive=False,
                    height=320,
                    tool=None,
                    image_mode='RGBA',
                    elem_id="disp_image",
                    visible=True,
                )
            with gr.Column(scale=1):
|
356 |
+
## add 3D Model
|
357 |
+
obj_3d = gr.Model3D(
|
358 |
+
# clear_color=[0.0, 0.0, 0.0, 0.0],
|
359 |
+
label="3D Model", height=320,
|
360 |
+
# camera_position=[0,0,2.0]
|
361 |
+
)
|
362 |
+
processed_image_highres = gr.Image(type='pil', image_mode='RGBA', visible=False, tool=None)
|
363 |
+
with gr.Row(variant='panel'):
|
364 |
+
with gr.Column(scale=1):
|
365 |
+
example_folder = os.path.join(os.path.dirname(__file__), "./example_images")
|
366 |
+
example_fns = [os.path.join(example_folder, example) for example in os.listdir(example_folder)]
|
367 |
+
gr.Examples(
|
368 |
+
examples=example_fns,
|
369 |
+
inputs=[input_image],
|
370 |
+
outputs=[input_image],
|
371 |
+
cache_examples=False,
|
372 |
+
label='Examples (click one of the images below to start)',
|
373 |
+
examples_per_page=30,
|
374 |
+
)
|
375 |
+
with gr.Column(scale=1):
|
376 |
+
with gr.Accordion('Advanced options', open=True):
|
377 |
+
with gr.Row():
|
378 |
+
with gr.Column():
|
379 |
+
input_processing = gr.CheckboxGroup(
|
380 |
+
['Background Removal'],
|
381 |
+
label='Input Image Preprocessing',
|
382 |
+
value=['Background Removal'],
|
383 |
+
info='untick this, if masked image with alpha channel',
|
384 |
+
)
|
385 |
+
with gr.Column():
|
386 |
+
output_processing = gr.CheckboxGroup(
|
387 |
+
['Write Results'], label='write the results in ./outputs folder', value=['Write Results']
|
388 |
+
)
|
389 |
+
with gr.Row():
|
390 |
+
with gr.Column():
|
391 |
+
scale_slider = gr.Slider(1, 5, value=1, step=1, label='Classifier Free Guidance Scale')
|
392 |
+
with gr.Column():
|
393 |
+
steps_slider = gr.Slider(15, 100, value=50, step=1, label='Number of Diffusion Inference Steps')
|
394 |
+
with gr.Row():
|
395 |
+
with gr.Column():
|
396 |
+
seed = gr.Number(42, label='Seed')
|
397 |
+
with gr.Column():
|
398 |
+
crop_size = gr.Number(192, label='Crop size')
|
399 |
+
|
400 |
+
mode = gr.Textbox('train', visible=False)
|
401 |
+
data_dir = gr.Textbox('outputs', visible=False)
|
402 |
+
# crop_size = 192
|
403 |
+
# with gr.Row():
|
404 |
+
# method = gr.Radio(choices=['instant-nsr-pl', 'NeuS'], label='Method (Default: instant-nsr-pl)', value='instant-nsr-pl')
|
405 |
+
# run_btn = gr.Button('Generate Normals and Colors', variant='primary', interactive=True)
|
406 |
+
run_btn = gr.Button('Reconstruct 3D model', variant='primary', interactive=True)
|
407 |
+
gr.Markdown("<span style='color:red'> Reconstruction may cost several minutes. Check results in instant-nsr-pl/exp/scene@{current-time}/ </span>")
|
408 |
+
|
409 |
+
with gr.Row():
|
410 |
+
view_1 = gr.Image(interactive=False, height=240, show_label=False)
|
411 |
+
view_2 = gr.Image(interactive=False, height=240, show_label=False)
|
412 |
+
view_3 = gr.Image(interactive=False, height=240, show_label=False)
|
413 |
+
view_4 = gr.Image(interactive=False, height=240, show_label=False)
|
414 |
+
view_5 = gr.Image(interactive=False, height=240, show_label=False)
|
415 |
+
view_6 = gr.Image(interactive=False, height=240, show_label=False)
|
416 |
+
with gr.Row():
|
417 |
+
normal_1 = gr.Image(interactive=False, height=240, show_label=False)
|
418 |
+
normal_2 = gr.Image(interactive=False, height=240, show_label=False)
|
419 |
+
normal_3 = gr.Image(interactive=False, height=240, show_label=False)
|
420 |
+
normal_4 = gr.Image(interactive=False, height=240, show_label=False)
|
421 |
+
normal_5 = gr.Image(interactive=False, height=240, show_label=False)
|
422 |
+
normal_6 = gr.Image(interactive=False, height=240, show_label=False)
|
423 |
+
|
424 |
+
run_btn.click(
|
425 |
+
fn=partial(preprocess, predictor), inputs=[input_image, input_processing], outputs=[processed_image_highres, processed_image], queue=True
|
426 |
+
).success(
|
427 |
+
fn=partial(run_pipeline, pipeline, cfg),
|
428 |
+
inputs=[processed_image_highres, scale_slider, steps_slider, seed, crop_size, output_processing],
|
429 |
+
outputs=[view_1, view_2, view_3, view_4, view_5, view_6, normal_1, normal_2, normal_3, normal_4, normal_5, normal_6],
|
430 |
+
).success(
|
431 |
+
process_3d, inputs=[mode, data_dir, scale_slider, crop_size], outputs=[obj_3d]
|
432 |
+
)
|
433 |
+
|
434 |
+
demo.queue().launch(share=True, max_threads=80)
|
435 |
+
|
436 |
+
|
437 |
+
if __name__ == '__main__':
|
438 |
+
fire.Fire(run_demo)
|
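A note on the batching trick in `run_pipeline` above: the six input views are stacked twice along the batch dimension, once paired with the normal-task embeddings and once with the color-task embeddings, so a single diffusion call predicts both normal maps and color images; the first half of the output batch is then sliced off as normals and the second half as colors. A minimal, self-contained sketch of that split (shapes are illustrative, not tied to the actual checkpoint):

```python
import torch

# Illustrative shapes: 6 views of 3-channel 256x256 images.
Nv, C, H, W = 6, 3, 256, 256
imgs = torch.rand(Nv, C, H, W)

# Duplicate the views so one call can serve both tasks:
# first half -> normal prediction, second half -> color prediction.
imgs_in = torch.cat([imgs] * 2, dim=0)            # (2*Nv, C, H, W)
out = torch.rand(imgs_in.shape[0], C, H, W)       # stand-in for the pipeline output

bsz = out.shape[0] // 2
normals_pred, images_pred = out[:bsz], out[bsz:]  # (Nv, C, H, W) each
assert normals_pred.shape == images_pred.shape == imgs.shape
```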
instant-nsr-pl/README.md
ADDED
@@ -0,0 +1,122 @@
+# Instant Neural Surface Reconstruction
+
+This repository contains a concise and extensible implementation of NeRF and NeuS for neural surface reconstruction based on Instant-NGP and the PyTorch-Lightning framework. **Training on a NeRF-Synthetic scene takes ~5 min for NeRF and ~10 min for NeuS on a single RTX 3090.**
+
+||NeRF in 5 min|NeuS in 10 min|
+|---|---|---|
+|Rendering|![rendering-nerf](https://user-images.githubusercontent.com/19284678/199078178-b719676b-7e60-47f1-813b-c0b533f5480d.png)|![rendering-neus](https://user-images.githubusercontent.com/19284678/199078300-ebcf249d-b05e-431f-b035-da354705d8db.png)|
+|Mesh|![mesh-nerf](https://user-images.githubusercontent.com/19284678/199078661-b5cd569a-c22b-4220-9c11-d5fd13a52fb8.png)|![mesh-neus](https://user-images.githubusercontent.com/19284678/199078481-164e36a6-6d55-45cc-aaf3-795a114e4a38.png)|
+
+
+## Features
+**This repository aims to provide a highly efficient yet customizable boilerplate for research projects based on NeRF or NeuS.**
+
+- acceleration techniques from [Instant-NGP](https://github.com/NVlabs/instant-ngp): multiresolution hash encoding and fully fused networks via [tiny-cuda-nn](https://github.com/NVlabs/tiny-cuda-nn), occupancy grid pruning and rendering via [nerfacc](https://github.com/KAIR-BAIR/nerfacc)
+- out-of-the-box multi-GPU and mixed-precision training via [PyTorch-Lightning](https://github.com/Lightning-AI/lightning)
+- hierarchical project layout designed to be easily customized and extended, with flexible experiment configuration via [OmegaConf](https://github.com/omry/omegaconf)
+
+**Please subscribe to [#26](https://github.com/bennyguo/instant-nsr-pl/issues/26) for our latest findings on quality improvements!**
+
+## News
+
+🔥🔥🔥 Check out my new project on 3D content generation: https://github.com/threestudio-project/threestudio 🔥🔥🔥
+
+- 06/03/2023: Added an implementation of [Neuralangelo](https://research.nvidia.com/labs/dir/neuralangelo/). See [here](https://github.com/bennyguo/instant-nsr-pl#training-on-DTU) for details.
+- 03/31/2023: The NeuS model now supports background modeling. You can try it on the DTU dataset provided by [NeuS](https://drive.google.com/drive/folders/1Nlzejs4mfPuJYORLbDEUDWlc9IZIbU0C?usp=sharing) or [IDR](https://www.dropbox.com/sh/5tam07ai8ch90pf/AADniBT3dmAexvm_J1oL__uoa) following [the instructions here](https://github.com/bennyguo/instant-nsr-pl#training-on-DTU).
+- 02/11/2023: The NeRF model now supports unbounded 360° scenes with a learned background. You can try it on the [MipNeRF 360 data](http://storage.googleapis.com/gresearch/refraw360/360_v2.zip) following [the COLMAP configuration](https://github.com/bennyguo/instant-nsr-pl#training-on-custom-colmap-data).
+
+## Requirements
+**Note:**
+- To use the multiresolution hash encoding or fully fused networks provided by tiny-cuda-nn, you need at least an RTX 2080 Ti; see [https://github.com/NVlabs/tiny-cuda-nn#requirements](https://github.com/NVlabs/tiny-cuda-nn#requirements) for more details.
+- Multi-GPU training is currently not supported on Windows (see [#4](https://github.com/bennyguo/instant-nsr-pl/issues/4)).
+### Environments
+- Install PyTorch>=1.10 [here](https://pytorch.org/get-started/locally/) based on the package manager you use and your CUDA version (older PyTorch versions may work but have not been tested)
+- Install the tiny-cuda-nn PyTorch extension: `pip install git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch`
+- `pip install -r requirements.txt`
+
+
+## Run
+### Training on NeRF-Synthetic
+Download the NeRF-Synthetic data [here](https://drive.google.com/drive/folders/128yBriW1IG_3NJ5Rp7APSTZsJqdJdfc1) and put it under `load/`. The file structure should look like `load/nerf_synthetic/lego`.
+
+Run the launch script with `--train`, specifying the config file, the GPU(s) to be used (GPU 0 is used by default), and the scene name:
+```bash
+# train NeRF
+python launch.py --config configs/nerf-blender.yaml --gpu 0 --train dataset.scene=lego tag=example
+
+# train NeuS with mask
+python launch.py --config configs/neus-blender.yaml --gpu 0 --train dataset.scene=lego tag=example
+# train NeuS without mask
+python launch.py --config configs/neus-blender.yaml --gpu 0 --train dataset.scene=lego tag=example system.loss.lambda_mask=0.0
+```
+Code snapshots, checkpoints and experiment outputs are saved to `exp/[name]/[tag]@[timestamp]`, and TensorBoard logs can be found at `runs/[name]/[tag]@[timestamp]`. You can change any configuration in the YAML file by specifying arguments without `--`, for example:
+```bash
+python launch.py --config configs/nerf-blender.yaml --gpu 0 --train dataset.scene=lego tag=iter50k seed=0 trainer.max_steps=50000
+```
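For reference, dotted overrides such as `dataset.scene=lego` are plain OmegaConf dot-list syntax. A standalone sketch of how such overrides merge into a loaded config (this is generic OmegaConf usage, not the project's actual `load_config`):

```python
from omegaconf import OmegaConf

base = OmegaConf.create({"dataset": {"scene": "chair"}, "trainer": {"max_steps": 20000}})
overrides = OmegaConf.from_dotlist(["dataset.scene=lego", "trainer.max_steps=50000"])
cfg = OmegaConf.merge(base, overrides)

assert cfg.dataset.scene == "lego"
assert cfg.trainer.max_steps == 50000
```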
+### Training on DTU
+Download the preprocessed DTU data provided by [NeuS](https://drive.google.com/drive/folders/1Nlzejs4mfPuJYORLbDEUDWlc9IZIbU0C?usp=sharing) or [IDR](https://www.dropbox.com/sh/5tam07ai8ch90pf/AADniBT3dmAexvm_J1oL__uoa). The provided config files assume the NeuS DTU data. If you are using the IDR DTU data, please set `dataset.cameras_file=cameras.npz`. You may also need to adjust `dataset.root_dir` to point to your downloaded data location.
+```bash
+# train NeuS on DTU without mask
+python launch.py --config configs/neus-dtu.yaml --gpu 0 --train
+# train NeuS on DTU with mask
+python launch.py --config configs/neus-dtu-wmask.yaml --gpu 0 --train
+# train NeuS on DTU with mask using tricks from Neuralangelo (experimental)
+python launch.py --config configs/neuralangelo-dtu-wmask.yaml --gpu 0 --train
+```
+Notes:
+- PSNR in the testing stage is meaningless, as we simply compare to pure white images in testing.
+- The results of Neuralangelo do not yet reach those in the original paper. Some potential improvements: more iterations; a larger `system.geometry.xyz_encoding_config.update_steps`; a larger `system.geometry.xyz_encoding_config.n_features_per_level`; a larger `system.geometry.xyz_encoding_config.log2_hashmap_size`; adopting the curvature loss.
+
+### Training on Custom COLMAP Data
+To get COLMAP data from custom images, you should have COLMAP installed (see [here](https://colmap.github.io/install.html) for installation instructions). Then put your images in the `images/` folder and run `scripts/imgs2poses.py`, specifying the path containing the `images/` folder. For example:
+```bash
+python scripts/imgs2poses.py ./load/bmvs_dog  # images are in ./load/bmvs_dog/images
+```
+Existing data following this file structure also works, as long as images are stored in `images/` and there is a `sparse/` folder for the COLMAP output, for example [the data provided by MipNeRF 360](http://storage.googleapis.com/gresearch/refraw360/360_v2.zip). An optional `masks/` folder can be provided for object mask supervision. To train on COLMAP data, please refer to the example config files `config/*-colmap.yaml`. Some notes:
+- Adapt the `root_dir` and `img_wh` (or `img_downscale`) options in the config file to your data;
+- The scene is normalized so that cameras have a minimum distance of `1.0` to the center of the scene. Setting `model.radius=1.0` works in most cases. If not, try setting a smaller radius that wraps tightly around your foreground object.
+- There are three choices to determine the scene center: `dataset.center_est_method=camera` uses the center of all camera positions as the scene center; `dataset.center_est_method=lookat` assumes the cameras are looking at the same point and calculates an approximate look-at point as the scene center; `dataset.center_est_method=point` uses the center of all points (reconstructed by COLMAP) that are bounded by cameras as the scene center. Please choose an appropriate method according to your capture.
+- PSNR in the testing stage is meaningless, as we simply compare to pure white images in testing.
+
+### Testing
+The training procedure is by default followed by testing, which computes metrics on test data, generates animations, and exports the geometry as triangular meshes. If you want to run testing alone, just resume the pretrained model and replace `--train` with `--test`, for example:
+```bash
+python launch.py --config path/to/your/exp/config/parsed.yaml --resume path/to/your/exp/ckpt/epoch=0-step=20000.ckpt --gpu 0 --test
+```
+
+
+## Benchmarks
+All experiments are conducted on a single NVIDIA RTX 3090.
+
+|PSNR|Chair|Drums|Ficus|Hotdog|Lego|Materials|Mic|Ship|Avg.|
+|---|---|---|---|---|---|---|---|---|---|
+|NeRF Paper|33.00|25.01|30.13|36.18|32.54|29.62|32.91|28.65|31.01|
+|NeRF Ours (20k)|34.80|26.04|33.89|37.42|35.33|29.46|35.22|31.17|32.92|
+|NeuS Ours (20k, with masks)|34.04|25.26|32.47|35.94|33.78|27.67|33.43|29.50|31.51|
+
+|Training Time (mm:ss)|Chair|Drums|Ficus|Hotdog|Lego|Materials|Mic|Ship|Avg.|
+|---|---|---|---|---|---|---|---|---|---|
+|NeRF Ours (20k)|04:34|04:35|04:18|04:46|04:39|04:35|04:26|05:41|04:42|
+|NeuS Ours (20k, with masks)|11:25|10:34|09:51|12:11|11:37|11:46|09:59|16:25|11:44|
+
+
+## TODO
+- [x] Support more dataset formats, like COLMAP outputs and DTU
+- [x] Support simple background model
+- [ ] Support GUI training and interaction
+- [ ] More illustrations about the framework
+
+## Related Projects
+- [ngp_pl](https://github.com/kwea123/ngp_pl): Great Instant-NGP implementation in PyTorch-Lightning! Background model and GUI supported.
+- [Instant-NSR](https://github.com/zhaofuq/Instant-NSR): NeuS implementation using multiresolution hash encoding.
+
+## Citation
+If you find this codebase useful, please consider citing:
+```
+@misc{instant-nsr-pl,
+    Author = {Yuan-Chen Guo},
+    Year = {2022},
+    Note = {https://github.com/bennyguo/instant-nsr-pl},
+    Title = {Instant Neural Surface Reconstruction}
+}
+```
instant-nsr-pl/configs/neuralangelo-ortho-wmask.yaml
ADDED
@@ -0,0 +1,145 @@
+name: ${basename:${dataset.scene}}
+tag: ""
+seed: 42
+
+dataset:
+  name: ortho
+  root_dir: /home/xiaoxiao/Workplace/wonder3Dplus/outputs/joint-twice/aigc/cropsize-224-cfg1.0
+  cam_pose_dir: null
+  scene: scene_name
+  imSize: [1024, 1024] # use a large resolution, otherwise the exported mesh has wrong colors
+  camera_type: ortho
+  apply_mask: true
+  camera_params: null
+  view_weights: [1.0, 0.8, 0.2, 1.0, 0.4, 0.7] # ['front', 'front_right', 'right', 'back', 'left', 'front_left']
+  # view_weights: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
+
+model:
+  name: neus
+  radius: 1.0
+  num_samples_per_ray: 1024
+  train_num_rays: 256
+  max_train_num_rays: 8192
+  grid_prune: true
+  grid_prune_occ_thre: 0.001
+  dynamic_ray_sampling: true
+  batch_image_sampling: true
+  randomized: true
+  ray_chunk: 2048
+  cos_anneal_end: 20000
+  learned_background: false
+  background_color: black
+  variance:
+    init_val: 0.3
+    modulate: false
+  geometry:
+    name: volume-sdf
+    radius: ${model.radius}
+    feature_dim: 13
+    grad_type: finite_difference
+    finite_difference_eps: progressive
+    isosurface:
+      method: mc
+      resolution: 192
+      chunk: 2097152
+      threshold: 0.
+    xyz_encoding_config:
+      otype: ProgressiveBandHashGrid
+      n_levels: 10 # 12 modify
+      n_features_per_level: 2
+      log2_hashmap_size: 19
+      base_resolution: 32
+      per_level_scale: 1.3195079107728942
+      include_xyz: true
+      start_level: 4
+      start_step: 0
+      update_steps: 1000
+    mlp_network_config:
+      otype: VanillaMLP
+      activation: ReLU
+      output_activation: none
+      n_neurons: 64
+      n_hidden_layers: 1
+      sphere_init: true
+      sphere_init_radius: 0.5
+      weight_norm: true
+  texture:
+    name: volume-radiance
+    input_feature_dim: ${add:${model.geometry.feature_dim},3} # surface normal as additional input
+    dir_encoding_config:
+      otype: SphericalHarmonics
+      degree: 4
+    mlp_network_config:
+      otype: VanillaMLP
+      activation: ReLU
+      output_activation: none
+      n_neurons: 64
+      n_hidden_layers: 2
+    color_activation: sigmoid
+
+system:
+  name: ortho-neus-system
+  loss:
+    lambda_rgb_mse: 0.5
+    lambda_rgb_l1: 0.
+    lambda_mask: 1.0
+    lambda_eikonal: 0.2 # cannot be too large; will cause holes in thin objects
+    lambda_normal: 1.0 # cannot be too large
+    lambda_3d_normal_smooth: 1.0
+    # lambda_curvature: [0, 0.0, 1.e-4, 1000] # topology warmup
+    lambda_curvature: 0.
+    lambda_sparsity: 0.5
+    lambda_distortion: 0.0
+    lambda_distortion_bg: 0.0
+    lambda_opaque: 0.0
+    sparsity_scale: 100.0
+    geo_aware: true
+    rgb_p_ratio: 0.8
+    normal_p_ratio: 0.8
+    mask_p_ratio: 0.9
+  optimizer:
+    name: AdamW
+    args:
+      lr: 0.01
+      betas: [0.9, 0.99]
+      eps: 1.e-15
+    params:
+      geometry:
+        lr: 0.001
+      texture:
+        lr: 0.01
+      variance:
+        lr: 0.001
+  constant_steps: 500
+  scheduler:
+    name: SequentialLR
+    interval: step
+    milestones:
+      - ${system.constant_steps}
+    schedulers:
+      - name: ConstantLR
+        args:
+          factor: 1.0
+          total_iters: ${system.constant_steps}
+      - name: ExponentialLR
+        args:
+          gamma: ${calc_exp_lr_decay_rate:0.1,${sub:${trainer.max_steps},${system.constant_steps}}}
+
+checkpoint:
+  save_top_k: -1
+  every_n_train_steps: ${trainer.max_steps}
+
+export:
+  chunk_size: 2097152
+  export_vertex_color: True
+  ortho_scale: 1.35 # modify
+
+trainer:
+  max_steps: 3000
+  log_every_n_steps: 100
+  num_sanity_val_steps: 0
+  val_check_interval: 4000
+  limit_train_batches: 1.0
+  limit_val_batches: 2
+  enable_progress_bar: true
+  precision: 16
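The `${basename:...}`, `${add:...}`, `${sub:...}` and `${calc_exp_lr_decay_rate:...}` interpolations in this config are custom OmegaConf resolvers, presumably registered by the project's config-loading utilities. A minimal sketch of how resolvers like these can be registered; the names mirror the config, but the implementations here are assumptions:

```python
import os
from omegaconf import OmegaConf

# Assumed implementations matching the resolver names used in the YAML above.
OmegaConf.register_new_resolver("basename", lambda p: os.path.basename(str(p).rstrip("/")))
OmegaConf.register_new_resolver("add", lambda a, b: a + b)
OmegaConf.register_new_resolver("sub", lambda a, b: a - b)
# A decay rate g with g**num_steps == factor, e.g. decay to 0.1x over the remaining steps.
OmegaConf.register_new_resolver(
    "calc_exp_lr_decay_rate", lambda factor, num_steps: factor ** (1.0 / num_steps)
)

cfg = OmegaConf.create({"a": 13, "b": "${add:${a},3}"})
print(cfg.b)  # 16
```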
instant-nsr-pl/datasets/__init__.py
ADDED
@@ -0,0 +1,16 @@
+datasets = {}
+
+
+def register(name):
+    def decorator(cls):
+        datasets[name] = cls
+        return cls
+    return decorator
+
+
+def make(name, config):
+    dataset = datasets[name](config)
+    return dataset
+
+
+from . import blender, colmap, dtu, ortho
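`datasets/__init__.py` above is a small name-to-class registry: `@register('blender')` stores a datamodule class under a string key, and `make(name, config)` instantiates it from a config, so a new dataset plugs in without touching the launch code. A standalone sketch of the same pattern with a hypothetical `DummyDataset` (not part of the repo):

```python
datasets = {}

def register(name):
    def decorator(cls):
        datasets[name] = cls  # map the string key to the class
        return cls
    return decorator

def make(name, config):
    return datasets[name](config)

@register('dummy')
class DummyDataset:
    def __init__(self, config):
        self.config = config

ds = make('dummy', {'root_dir': './load'})
print(type(ds).__name__)  # DummyDataset
```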
instant-nsr-pl/datasets/blender.py
ADDED
@@ -0,0 +1,135 @@
+import os
+import json
+import math
+import numpy as np
+from PIL import Image
+
+import torch
+from torch.utils.data import Dataset, DataLoader, IterableDataset
+import torchvision.transforms.functional as TF
+
+import pytorch_lightning as pl
+
+import datasets
+from models.ray_utils import get_ray_directions
+from utils.misc import get_rank
+
+
+class BlenderDatasetBase():
+    def setup(self, config, split):
+        self.config = config
+        self.split = split
+        self.rank = get_rank()
+
+        self.has_mask = True
+        self.apply_mask = True
+
+        with open(os.path.join(self.config.root_dir, f"transforms_{self.split}.json"), 'r') as f:
+            meta = json.load(f)
+
+        if 'w' in meta and 'h' in meta:
+            W, H = int(meta['w']), int(meta['h'])
+        else:
+            W, H = 800, 800
+
+        if 'img_wh' in self.config:
+            w, h = self.config.img_wh
+            assert round(W / w * h) == H
+        elif 'img_downscale' in self.config:
+            w, h = W // self.config.img_downscale, H // self.config.img_downscale
+        else:
+            raise KeyError("Either img_wh or img_downscale should be specified.")
+
+        self.w, self.h = w, h
+        self.img_wh = (self.w, self.h)
+
+        self.near, self.far = self.config.near_plane, self.config.far_plane
+
+        self.focal = 0.5 * w / math.tan(0.5 * meta['camera_angle_x'])  # scaled focal length
+
+        # ray directions for all pixels, same for all images (same H, W, focal)
+        self.directions = \
+            get_ray_directions(self.w, self.h, self.focal, self.focal, self.w//2, self.h//2).to(self.rank)  # (h, w, 3)
+
+        self.all_c2w, self.all_images, self.all_fg_masks = [], [], []
+
+        for i, frame in enumerate(meta['frames']):
+            c2w = torch.from_numpy(np.array(frame['transform_matrix'])[:3, :4])
+            self.all_c2w.append(c2w)
+
+            img_path = os.path.join(self.config.root_dir, f"{frame['file_path']}.png")
+            img = Image.open(img_path)
+            img = img.resize(self.img_wh, Image.BICUBIC)
+            img = TF.to_tensor(img).permute(1, 2, 0)  # (4, h, w) => (h, w, 4)
+
+            self.all_fg_masks.append(img[..., -1])  # (h, w)
+            self.all_images.append(img[..., :3])
+
+        self.all_c2w, self.all_images, self.all_fg_masks = \
+            torch.stack(self.all_c2w, dim=0).float().to(self.rank), \
+            torch.stack(self.all_images, dim=0).float().to(self.rank), \
+            torch.stack(self.all_fg_masks, dim=0).float().to(self.rank)
+
+
+class BlenderDataset(Dataset, BlenderDatasetBase):
+    def __init__(self, config, split):
+        self.setup(config, split)
+
+    def __len__(self):
+        return len(self.all_images)
+
+    def __getitem__(self, index):
+        return {
+            'index': index
+        }
+
+
+class BlenderIterableDataset(IterableDataset, BlenderDatasetBase):
+    def __init__(self, config, split):
+        self.setup(config, split)
+
+    def __iter__(self):
+        while True:
+            yield {}
+
+
+@datasets.register('blender')
+class BlenderDataModule(pl.LightningDataModule):
+    def __init__(self, config):
+        super().__init__()
+        self.config = config
+
+    def setup(self, stage=None):
+        if stage in [None, 'fit']:
+            self.train_dataset = BlenderIterableDataset(self.config, self.config.train_split)
+        if stage in [None, 'fit', 'validate']:
+            self.val_dataset = BlenderDataset(self.config, self.config.val_split)
+        if stage in [None, 'test']:
+            self.test_dataset = BlenderDataset(self.config, self.config.test_split)
+        if stage in [None, 'predict']:
+            self.predict_dataset = BlenderDataset(self.config, self.config.train_split)
+
+    def prepare_data(self):
+        pass
+
+    def general_loader(self, dataset, batch_size):
+        sampler = None
+        return DataLoader(
+            dataset,
+            num_workers=os.cpu_count(),
+            batch_size=batch_size,
+            pin_memory=True,
+            sampler=sampler
+        )
+
+    def train_dataloader(self):
+        return self.general_loader(self.train_dataset, batch_size=1)
+
+    def val_dataloader(self):
+        return self.general_loader(self.val_dataset, batch_size=1)
+
+    def test_dataloader(self):
+        return self.general_loader(self.test_dataset, batch_size=1)
+
+    def predict_dataloader(self):
+        return self.general_loader(self.predict_dataset, batch_size=1)
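One step in `BlenderDatasetBase.setup` worth spelling out: the focal length is recovered from the horizontal field of view via the pinhole relation tan(FOV_x / 2) = (W / 2) / f, then used for both axes with the principal point at the image center. A quick numeric check (the FOV value is the one commonly found in NeRF-Synthetic `transforms_*.json` files):

```python
import math

w = 800                     # image width in pixels
camera_angle_x = 0.6911112  # horizontal FOV in radians

# tan(FOV_x / 2) = (w / 2) / focal  =>  focal = 0.5 * w / tan(0.5 * FOV_x)
focal = 0.5 * w / math.tan(0.5 * camera_angle_x)
print(round(focal, 2))  # ~1111.11
```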
instant-nsr-pl/datasets/colmap.py
ADDED
@@ -0,0 +1,332 @@
+import os
+import math
+import numpy as np
+from PIL import Image
+
+import torch
+import torch.nn.functional as F
+from torch.utils.data import Dataset, DataLoader, IterableDataset
+import torchvision.transforms.functional as TF
+
+import pytorch_lightning as pl
+
+import datasets
+from datasets.colmap_utils import \
+    read_cameras_binary, read_images_binary, read_points3d_binary
+from models.ray_utils import get_ray_directions
+from utils.misc import get_rank
+
+
+def get_center(pts):
+    center = pts.mean(0)
+    dis = (pts - center[None, :]).norm(p=2, dim=-1)
+    mean, std = dis.mean(), dis.std()
+    q25, q75 = torch.quantile(dis, 0.25), torch.quantile(dis, 0.75)
+    # keep points whose distance to the mean center is within 1.5 std and 1.5 IQR
+    valid = (dis > mean - 1.5 * std) & (dis < mean + 1.5 * std) & (dis > mean - (q75 - q25) * 1.5) & (dis < mean + (q75 - q25) * 1.5)
+    center = pts[valid].mean(0)
+    return center
+
+
+def normalize_poses(poses, pts, up_est_method, center_est_method):
+    if center_est_method == 'camera':
+        # estimate the scene center as the average of all camera positions
+        center = poses[..., 3].mean(0)
+    elif center_est_method == 'lookat':
+        # estimate the scene center as the average of the intersections of selected pairs of camera rays
+        cams_ori = poses[..., 3]
+        cams_dir = poses[:, :3, :3] @ torch.as_tensor([0., 0., -1.])
+        cams_dir = F.normalize(cams_dir, dim=-1)
+        A = torch.stack([cams_dir, -cams_dir.roll(1, 0)], dim=-1)
+        b = -cams_ori + cams_ori.roll(1, 0)
+        t = torch.linalg.lstsq(A, b).solution
+        center = (torch.stack([cams_dir, cams_dir.roll(1, 0)], dim=-1) * t[:, None, :] + torch.stack([cams_ori, cams_ori.roll(1, 0)], dim=-1)).mean((0, 2))
+    elif center_est_method == 'point':
+        # first estimate the scene center as the average of all camera positions
+        # later we'll use the center of all points bounded by the cameras as the final scene center
+        center = poses[..., 3].mean(0)
+    else:
+        raise NotImplementedError(f'Unknown center estimation method: {center_est_method}')
+
+    if up_est_method == 'ground':
+        # estimate the up direction as the normal of the estimated ground plane
+        # use RANSAC to estimate the ground plane in the point cloud
+        import pyransac3d as pyrsc
+        ground = pyrsc.Plane()
+        plane_eq, inliers = ground.fit(pts.numpy(), thresh=0.01)  # TODO: determine thresh based on scene scale
+        plane_eq = torch.as_tensor(plane_eq)  # A, B, C, D in Ax + By + Cz + D = 0
+        z = F.normalize(plane_eq[:3], dim=-1)  # plane normal as up direction
+        signed_distance = (torch.cat([pts, torch.ones_like(pts[..., 0:1])], dim=-1) * plane_eq).sum(-1)
+        if signed_distance.mean() < 0:
+            z = -z  # flip the direction if points lie under the plane
+    elif up_est_method == 'camera':
+        # estimate the up direction as the average of all camera up directions
+        z = F.normalize((poses[..., 3] - center).mean(0), dim=0)
+    else:
+        raise NotImplementedError(f'Unknown up estimation method: {up_est_method}')
+
+    # new axes
+    y_ = torch.as_tensor([z[1], -z[0], 0.])
+    x = F.normalize(y_.cross(z), dim=0)
+    y = z.cross(x)
+
+    if center_est_method == 'point':
+        # rotation
+        Rc = torch.stack([x, y, z], dim=1)
+        R = Rc.T
+        poses_homo = torch.cat([poses, torch.as_tensor([[[0., 0., 0., 1.]]]).expand(poses.shape[0], -1, -1)], dim=1)
+        inv_trans = torch.cat([torch.cat([R, torch.as_tensor([[0., 0., 0.]]).T], dim=1), torch.as_tensor([[0., 0., 0., 1.]])], dim=0)
+        poses_norm = (inv_trans @ poses_homo)[:, :3]
+        pts = (inv_trans @ torch.cat([pts, torch.ones_like(pts[:, 0:1])], dim=-1)[..., None])[:, :3, 0]
+
+        # translation and scaling
+        poses_min, poses_max = poses_norm[..., 3].min(0)[0], poses_norm[..., 3].max(0)[0]
+        pts_fg = pts[(poses_min[0] < pts[:, 0]) & (pts[:, 0] < poses_max[0]) & (poses_min[1] < pts[:, 1]) & (pts[:, 1] < poses_max[1])]
+        center = get_center(pts_fg)
+        tc = center.reshape(3, 1)
+        t = -tc
+        poses_homo = torch.cat([poses_norm, torch.as_tensor([[[0., 0., 0., 1.]]]).expand(poses_norm.shape[0], -1, -1)], dim=1)
+        inv_trans = torch.cat([torch.cat([torch.eye(3), t], dim=1), torch.as_tensor([[0., 0., 0., 1.]])], dim=0)
+        poses_norm = (inv_trans @ poses_homo)[:, :3]
+        scale = poses_norm[..., 3].norm(p=2, dim=-1).min()
+        poses_norm[..., 3] /= scale
+        pts = (inv_trans @ torch.cat([pts, torch.ones_like(pts[:, 0:1])], dim=-1)[..., None])[:, :3, 0]
+        pts = pts / scale
+    else:
+        # rotation and translation
+        Rc = torch.stack([x, y, z], dim=1)
+        tc = center.reshape(3, 1)
+        R, t = Rc.T, -Rc.T @ tc
+        poses_homo = torch.cat([poses, torch.as_tensor([[[0., 0., 0., 1.]]]).expand(poses.shape[0], -1, -1)], dim=1)
+        inv_trans = torch.cat([torch.cat([R, t], dim=1), torch.as_tensor([[0., 0., 0., 1.]])], dim=0)
+        poses_norm = (inv_trans @ poses_homo)[:, :3]  # (N_images, 3, 4)
+
+        # scaling
+        scale = poses_norm[..., 3].norm(p=2, dim=-1).min()
+        poses_norm[..., 3] /= scale
+
+        # apply the transformation to the point cloud
+        pts = (inv_trans @ torch.cat([pts, torch.ones_like(pts[:, 0:1])], dim=-1)[..., None])[:, :3, 0]
+        pts = pts / scale
+
+    return poses_norm, pts
+
+
+def create_spheric_poses(cameras, n_steps=120):
+    center = torch.as_tensor([0., 0., 0.], dtype=cameras.dtype, device=cameras.device)
+    mean_d = (cameras - center[None, :]).norm(p=2, dim=-1).mean()
+    mean_h = cameras[:, 2].mean()
+    r = (mean_d**2 - mean_h**2).sqrt()
+    up = torch.as_tensor([0., 0., 1.], dtype=center.dtype, device=center.device)
+
+    all_c2w = []
+    for theta in torch.linspace(0, 2 * math.pi, n_steps):
+        cam_pos = torch.stack([r * theta.cos(), r * theta.sin(), mean_h])
+        l = F.normalize(center - cam_pos, p=2, dim=0)
+        s = F.normalize(l.cross(up), p=2, dim=0)
+        u = F.normalize(s.cross(l), p=2, dim=0)
+        c2w = torch.cat([torch.stack([s, u, -l], dim=1), cam_pos[:, None]], axis=1)
+        all_c2w.append(c2w)
+
+    all_c2w = torch.stack(all_c2w, dim=0)
+
+    return all_c2w
+
+
+class ColmapDatasetBase():
+    # the data only has to be processed once
+    initialized = False
+    properties = {}
+
+    def setup(self, config, split):
+        self.config = config
+        self.split = split
+        self.rank = get_rank()
+
+        if not ColmapDatasetBase.initialized:
+            camdata = read_cameras_binary(os.path.join(self.config.root_dir, 'sparse/0/cameras.bin'))
+
+            H = int(camdata[1].height)
+            W = int(camdata[1].width)
+
+            if 'img_wh' in self.config:
+                w, h = self.config.img_wh
+                assert round(W / w * h) == H
+            elif 'img_downscale' in self.config:
+                w, h = int(W / self.config.img_downscale + 0.5), int(H / self.config.img_downscale + 0.5)
+            else:
+                raise KeyError("Either img_wh or img_downscale should be specified.")
+
+            img_wh = (w, h)
+            factor = w / W
+
+            if camdata[1].model == 'SIMPLE_RADIAL':
+                fx = fy = camdata[1].params[0] * factor
+                cx = camdata[1].params[1] * factor
+                cy = camdata[1].params[2] * factor
+            elif camdata[1].model in ['PINHOLE', 'OPENCV']:
+                fx = camdata[1].params[0] * factor
+                fy = camdata[1].params[1] * factor
+                cx = camdata[1].params[2] * factor
+                cy = camdata[1].params[3] * factor
+            else:
+                raise ValueError(f"Please parse the intrinsics for camera model {camdata[1].model}!")
+
+            directions = get_ray_directions(w, h, fx, fy, cx, cy).to(self.rank)
+
+            imdata = read_images_binary(os.path.join(self.config.root_dir, 'sparse/0/images.bin'))
+
+            mask_dir = os.path.join(self.config.root_dir, 'masks')
+            has_mask = os.path.exists(mask_dir)  # TODO: support partial masks
+            apply_mask = has_mask and self.config.apply_mask
+
+            all_c2w, all_images, all_fg_masks = [], [], []
+
+            for i, d in enumerate(imdata.values()):
+                R = d.qvec2rotmat()
+                t = d.tvec.reshape(3, 1)
+                c2w = torch.from_numpy(np.concatenate([R.T, -R.T@t], axis=1)).float()
+                c2w[:, 1:3] *= -1.  # COLMAP => OpenGL
+                all_c2w.append(c2w)
+                if self.split in ['train', 'val']:
+                    img_path = os.path.join(self.config.root_dir, 'images', d.name)
+                    img = Image.open(img_path)
+                    img = img.resize(img_wh, Image.BICUBIC)
+                    img = TF.to_tensor(img).permute(1, 2, 0)[..., :3]
+                    img = img.to(self.rank) if self.config.load_data_on_gpu else img.cpu()
+                    if has_mask:
+                        mask_paths = [os.path.join(mask_dir, d.name), os.path.join(mask_dir, d.name[3:])]
+                        mask_paths = list(filter(os.path.exists, mask_paths))
+                        assert len(mask_paths) == 1
+                        mask = Image.open(mask_paths[0]).convert('L')  # (H, W, 1)
+                        mask = mask.resize(img_wh, Image.BICUBIC)
+                        mask = TF.to_tensor(mask)[0]
+                    else:
+                        mask = torch.ones_like(img[..., 0], device=img.device)
+                    all_fg_masks.append(mask)  # (h, w)
+                    all_images.append(img)
+
+            all_c2w = torch.stack(all_c2w, dim=0)
+
+            pts3d = read_points3d_binary(os.path.join(self.config.root_dir, 'sparse/0/points3D.bin'))
+            pts3d = torch.from_numpy(np.array([pts3d[k].xyz for k in pts3d])).float()
+            all_c2w, pts3d = normalize_poses(all_c2w, pts3d, up_est_method=self.config.up_est_method, center_est_method=self.config.center_est_method)
+
+            ColmapDatasetBase.properties = {
+                'w': w,
+                'h': h,
+                'img_wh': img_wh,
+                'factor': factor,
+                'has_mask': has_mask,
+                'apply_mask': apply_mask,
+                'directions': directions,
+                'pts3d': pts3d,
+                'all_c2w': all_c2w,
+                'all_images': all_images,
+                'all_fg_masks': all_fg_masks
+            }
+
+            ColmapDatasetBase.initialized = True
+
+        for k, v in ColmapDatasetBase.properties.items():
+            setattr(self, k, v)
+
+        if self.split == 'test':
+            self.all_c2w = create_spheric_poses(self.all_c2w[:, :, 3], n_steps=self.config.n_test_traj_steps)
+            self.all_images = torch.zeros((self.config.n_test_traj_steps, self.h, self.w, 3), dtype=torch.float32)
+            self.all_fg_masks = torch.zeros((self.config.n_test_traj_steps, self.h, self.w), dtype=torch.float32)
+        else:
+            self.all_images, self.all_fg_masks = torch.stack(self.all_images, dim=0).float(), torch.stack(self.all_fg_masks, dim=0).float()
+
+        """
+        # for debug use
+        from models.ray_utils import get_rays
+        rays_o, rays_d = get_rays(self.directions.cpu(), self.all_c2w, keepdim=True)
+        pts_out = []
+        pts_out.append('\n'.join([' '.join([str(p) for p in l]) + ' 1.0 0.0 0.0' for l in rays_o[:,0,0].reshape(-1, 3).tolist()]))
+
+        t_vals = torch.linspace(0, 1, 8)
+        z_vals = 0.05 * (1 - t_vals) + 0.5 * t_vals
+
+        ray_pts = (rays_o[:,0,0][..., None, :] + z_vals[..., None] * rays_d[:,0,0][..., None, :])
+        pts_out.append('\n'.join([' '.join([str(p) for p in l]) + ' 0.0 1.0 0.0' for l in ray_pts.view(-1, 3).tolist()]))
+
+        ray_pts = (rays_o[:,0,0][..., None, :] + z_vals[..., None] * rays_d[:,self.h-1,0][..., None, :])
+        pts_out.append('\n'.join([' '.join([str(p) for p in l]) + ' 0.0 0.0 1.0' for l in ray_pts.view(-1, 3).tolist()]))
+
+        ray_pts = (rays_o[:,0,0][..., None, :] + z_vals[..., None] * rays_d[:,0,self.w-1][..., None, :])
+        pts_out.append('\n'.join([' '.join([str(p) for p in l]) + ' 0.0 1.0 1.0' for l in ray_pts.view(-1, 3).tolist()]))
+
+        ray_pts = (rays_o[:,0,0][..., None, :] + z_vals[..., None] * rays_d[:,self.h-1,self.w-1][..., None, :])
+        pts_out.append('\n'.join([' '.join([str(p) for p in l]) + ' 1.0 1.0 1.0' for l in ray_pts.view(-1, 3).tolist()]))
+
+        open('cameras.txt', 'w').write('\n'.join(pts_out))
+        open('scene.txt', 'w').write('\n'.join([' '.join([str(p) for p in l]) + ' 0.0 0.0 0.0' for l in self.pts3d.view(-1, 3).tolist()]))
+
+        exit(1)
+        """
+
+        self.all_c2w = self.all_c2w.float().to(self.rank)
+        if self.config.load_data_on_gpu:
+            self.all_images = self.all_images.to(self.rank)
+            self.all_fg_masks = self.all_fg_masks.to(self.rank)
+
+
+class ColmapDataset(Dataset, ColmapDatasetBase):
+    def __init__(self, config, split):
+        self.setup(config, split)
+
+    def __len__(self):
+        return len(self.all_images)
+
+    def __getitem__(self, index):
+        return {
+            'index': index
+        }
+
+
+class ColmapIterableDataset(IterableDataset, ColmapDatasetBase):
+    def __init__(self, config, split):
+        self.setup(config, split)
+
+    def __iter__(self):
+        while True:
+            yield {}
+
+
+@datasets.register('colmap')
+class ColmapDataModule(pl.LightningDataModule):
+    def __init__(self, config):
+        super().__init__()
+        self.config = config
+
+    def setup(self, stage=None):
+        if stage in [None, 'fit']:
+            self.train_dataset = ColmapIterableDataset(self.config, 'train')
+        if stage in [None, 'fit', 'validate']:
+            self.val_dataset = ColmapDataset(self.config, self.config.get('val_split', 'train'))
+        if stage in [None, 'test']:
+            self.test_dataset = ColmapDataset(self.config, self.config.get('test_split', 'test'))
+        if stage in [None, 'predict']:
+            self.predict_dataset = ColmapDataset(self.config, 'train')
+
+    def prepare_data(self):
+        pass
+
+    def general_loader(self, dataset, batch_size):
+        sampler = None
+        return DataLoader(
+            dataset,
+            num_workers=os.cpu_count(),
+            batch_size=batch_size,
+            pin_memory=True,
+            sampler=sampler
+        )
+
+    def train_dataloader(self):
+        return self.general_loader(self.train_dataset, batch_size=1)
+
+    def val_dataloader(self):
+        return self.general_loader(self.val_dataset, batch_size=1)
+
+    def test_dataloader(self):
+        return self.general_loader(self.test_dataset, batch_size=1)
+
+    def predict_dataloader(self):
+        return self.general_loader(self.predict_dataset, batch_size=1)
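The `create_spheric_poses` helper above builds each camera-to-world matrix with a standard look-at construction: the forward direction `l` points at the scene center, the side direction is `s = normalize(l x up)`, the corrected up is `u = s x l`, and the rotation columns are `[s, u, -l]`, matching the OpenGL-style convention used after the COLMAP-to-OpenGL flip. A standalone numeric sketch of one such pose (positions are illustrative):

```python
import torch
import torch.nn.functional as F

cam_pos = torch.tensor([2.0, 0.0, 1.0])  # a camera position on the circular path
center = torch.zeros(3)                  # look-at target (scene origin)
up = torch.tensor([0.0, 0.0, 1.0])       # world up (+z)

l = F.normalize(center - cam_pos, dim=0)           # forward: camera -> target
s = F.normalize(torch.linalg.cross(l, up), dim=0)  # side (right)
u = F.normalize(torch.linalg.cross(s, l), dim=0)   # orthogonal up
c2w = torch.cat([torch.stack([s, u, -l], dim=1), cam_pos[:, None]], dim=1)

print(c2w.shape)  # torch.Size([3, 4])
```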
instant-nsr-pl/datasets/colmap_utils.py
ADDED
@@ -0,0 +1,295 @@
+# Copyright (c) 2018, ETH Zurich and UNC Chapel Hill.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#
+#     * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
+#       its contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: Johannes L. Schoenberger (jsch at inf.ethz.ch)
+
+import os
+import collections
+import numpy as np
+import struct
+
+
+CameraModel = collections.namedtuple(
+    "CameraModel", ["model_id", "model_name", "num_params"])
+Camera = collections.namedtuple(
+    "Camera", ["id", "model", "width", "height", "params"])
+BaseImage = collections.namedtuple(
+    "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])
+Point3D = collections.namedtuple(
+    "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])
+
+
+class Image(BaseImage):
+    def qvec2rotmat(self):
+        return qvec2rotmat(self.qvec)
+
+
+CAMERA_MODELS = {
+    CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3),
+    CameraModel(model_id=1, model_name="PINHOLE", num_params=4),
+    CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4),
+    CameraModel(model_id=3, model_name="RADIAL", num_params=5),
+    CameraModel(model_id=4, model_name="OPENCV", num_params=8),
+    CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8),
+    CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12),
+    CameraModel(model_id=7, model_name="FOV", num_params=5),
+    CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4),
+    CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5),
+    CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12)
+}
+CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model)
+                         for camera_model in CAMERA_MODELS])
+
+
+def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
+    """Read and unpack the next bytes from a binary file.
+    :param fid:
+    :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
+    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
+    :param endian_character: Any of {@, =, <, >, !}
+    :return: Tuple of read and unpacked values.
+    """
+    data = fid.read(num_bytes)
+    return struct.unpack(endian_character + format_char_sequence, data)
+
+
+def read_cameras_text(path):
+    """
+    see: src/base/reconstruction.cc
+        void Reconstruction::WriteCamerasText(const std::string& path)
+        void Reconstruction::ReadCamerasText(const std::string& path)
+    """
+    cameras = {}
+    with open(path, "r") as fid:
+        while True:
+            line = fid.readline()
+            if not line:
+                break
+            line = line.strip()
+            if len(line) > 0 and line[0] != "#":
+                elems = line.split()
+                camera_id = int(elems[0])
+                model = elems[1]
+                width = int(elems[2])
+                height = int(elems[3])
+                params = np.array(tuple(map(float, elems[4:])))
+                cameras[camera_id] = Camera(id=camera_id, model=model,
+                                            width=width, height=height,
+                                            params=params)
+    return cameras
+
+
+def read_cameras_binary(path_to_model_file):
+    """
+    see: src/base/reconstruction.cc
+        void Reconstruction::WriteCamerasBinary(const std::string& path)
+        void Reconstruction::ReadCamerasBinary(const std::string& path)
+    """
+    cameras = {}
+    with open(path_to_model_file, "rb") as fid:
+        num_cameras = read_next_bytes(fid, 8, "Q")[0]
+        for camera_line_index in range(num_cameras):
+            camera_properties = read_next_bytes(
+                fid, num_bytes=24, format_char_sequence="iiQQ")
+            camera_id = camera_properties[0]
+            model_id = camera_properties[1]
+            model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
+            width = camera_properties[2]
+            height = camera_properties[3]
+            num_params = CAMERA_MODEL_IDS[model_id].num_params
+            params = read_next_bytes(fid, num_bytes=8*num_params,
+                                     format_char_sequence="d"*num_params)
+            cameras[camera_id] = Camera(id=camera_id,
+                                        model=model_name,
+                                        width=width,
+                                        height=height,
+                                        params=np.array(params))
+        assert len(cameras) == num_cameras
+    return cameras
+
+
+def read_images_text(path):
+    """
+    see: src/base/reconstruction.cc
+        void Reconstruction::ReadImagesText(const std::string& path)
+        void Reconstruction::WriteImagesText(const std::string& path)
+    """
+    images = {}
+    with open(path, "r") as fid:
+        while True:
+            line = fid.readline()
+            if not line:
+                break
+            line = line.strip()
+            if len(line) > 0 and line[0] != "#":
+                elems = line.split()
+                image_id = int(elems[0])
+                qvec = np.array(tuple(map(float, elems[1:5])))
+                tvec = np.array(tuple(map(float, elems[5:8])))
+                camera_id = int(elems[8])
+                image_name = elems[9]
+                elems = fid.readline().split()
+                xys = np.column_stack([tuple(map(float, elems[0::3])),
+                                       tuple(map(float, elems[1::3]))])
+                point3D_ids = np.array(tuple(map(int, elems[2::3])))
+                images[image_id] = Image(
+                    id=image_id, qvec=qvec, tvec=tvec,
+                    camera_id=camera_id, name=image_name,
+                    xys=xys, point3D_ids=point3D_ids)
+    return images
+
+
+def read_images_binary(path_to_model_file):
+    """
+    see: src/base/reconstruction.cc
+        void Reconstruction::ReadImagesBinary(const std::string& path)
+        void Reconstruction::WriteImagesBinary(const std::string& path)
+    """
+    images = {}
+    with open(path_to_model_file, "rb") as fid:
+        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
+        for image_index in range(num_reg_images):
+            binary_image_properties = read_next_bytes(
+                fid, num_bytes=64, format_char_sequence="idddddddi")
+            image_id = binary_image_properties[0]
+            qvec = np.array(binary_image_properties[1:5])
+            tvec = np.array(binary_image_properties[5:8])
+            camera_id = binary_image_properties[8]
+            image_name = ""
+            current_char = read_next_bytes(fid, 1, "c")[0]
+            while current_char != b"\x00":  # look for the ASCII 0 entry
+                image_name += current_char.decode("utf-8")
+                current_char = read_next_bytes(fid, 1, "c")[0]
+            num_points2D = read_next_bytes(fid, num_bytes=8,
+                                           format_char_sequence="Q")[0]
+            x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
+                                       format_char_sequence="ddq"*num_points2D)
+            xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
+                                   tuple(map(float, x_y_id_s[1::3]))])
+            point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
+            images[image_id] = Image(
+                id=image_id, qvec=qvec, tvec=tvec,
+                camera_id=camera_id, name=image_name,
+                xys=xys, point3D_ids=point3D_ids)
+    return images
+
+
+def read_points3D_text(path):
+    """
+    see: src/base/reconstruction.cc
+        void Reconstruction::ReadPoints3DText(const std::string& path)
+        void Reconstruction::WritePoints3DText(const std::string& path)
+    """
+    points3D = {}
+    with open(path, "r") as fid:
+        while True:
+            line = fid.readline()
+            if not line:
+                break
+            line = line.strip()
+            if len(line) > 0 and line[0] != "#":
+                elems = line.split()
+                point3D_id = int(elems[0])
+                xyz = np.array(tuple(map(float, elems[1:4])))
+                rgb = np.array(tuple(map(int, elems[4:7])))
+                error = float(elems[7])
+                image_ids = np.array(tuple(map(int, elems[8::2])))
+                point2D_idxs = np.array(tuple(map(int, elems[9::2])))
+                points3D[point3D_id] = Point3D(id=point3D_id, xyz=xyz, rgb=rgb,
+                                               error=error, image_ids=image_ids,
+                                               point2D_idxs=point2D_idxs)
+    return points3D
+
+
+def read_points3d_binary(path_to_model_file):
+    """
+    see: src/base/reconstruction.cc
+        void Reconstruction::ReadPoints3DBinary(const std::string& path)
+        void Reconstruction::WritePoints3DBinary(const std::string& path)
+    """
+    points3D = {}
+    with open(path_to_model_file, "rb") as fid:
+        num_points = read_next_bytes(fid, 8, "Q")[0]
+        for point_line_index in range(num_points):
+            binary_point_line_properties = read_next_bytes(
+                fid, num_bytes=43, format_char_sequence="QdddBBBd")
+            point3D_id = binary_point_line_properties[0]
+            xyz = np.array(binary_point_line_properties[1:4])
+            rgb = np.array(binary_point_line_properties[4:7])
+            error = np.array(binary_point_line_properties[7])
+            track_length = read_next_bytes(
+                fid, num_bytes=8, format_char_sequence="Q")[0]
+            track_elems = read_next_bytes(
+                fid, num_bytes=8*track_length,
+                format_char_sequence="ii"*track_length)
+            image_ids = np.array(tuple(map(int, track_elems[0::2])))
+            point2D_idxs = np.array(tuple(map(int, track_elems[1::2])))
+            points3D[point3D_id] = Point3D(
+                id=point3D_id, xyz=xyz, rgb=rgb,
+                error=error, image_ids=image_ids,
+                point2D_idxs=point2D_idxs)
+    return points3D
+
+
+def read_model(path, ext):
+    if ext == ".txt":
+        cameras = read_cameras_text(os.path.join(path, "cameras" + ext))
+        images = read_images_text(os.path.join(path, "images" + ext))
+        points3D = read_points3D_text(os.path.join(path, "points3D") + ext)
+    else:
+        cameras = read_cameras_binary(os.path.join(path, "cameras" + ext))
+        images = read_images_binary(os.path.join(path, "images" + ext))
+        points3D = read_points3d_binary(os.path.join(path, "points3D") + ext)
+    return cameras, images, points3D
+
+
+def qvec2rotmat(qvec):
+    return np.array([
+        [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
+         2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
+         2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
+        [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
+         1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
|
278 |
+
2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
|
279 |
+
[2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
|
280 |
+
2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
|
281 |
+
1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])
|
282 |
+
|
283 |
+
|
284 |
+
def rotmat2qvec(R):
|
285 |
+
Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
|
286 |
+
K = np.array([
|
287 |
+
[Rxx - Ryy - Rzz, 0, 0, 0],
|
288 |
+
[Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
|
289 |
+
[Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
|
290 |
+
[Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0
|
291 |
+
eigvals, eigvecs = np.linalg.eigh(K)
|
292 |
+
qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
|
293 |
+
if qvec[0] < 0:
|
294 |
+
qvec *= -1
|
295 |
+
return qvec
|
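A minimal usage sketch for the readers above: load a sparse COLMAP reconstruction and turn each image's quaternion and translation into a camera-to-world pose. The "sparse/0" directory and running with instant-nsr-pl on the import path are assumptions about the local layout, not part of this module.

import numpy as np
from datasets.colmap_utils import read_model, qvec2rotmat, rotmat2qvec

# read cameras.bin / images.bin / points3D.bin from a reconstruction folder
cameras, images, points3D = read_model("sparse/0", ext=".bin")

for image_id, image in images.items():
    w2c = np.eye(4)
    w2c[:3, :3] = qvec2rotmat(image.qvec)  # qvec is (w, x, y, z)
    w2c[:3, 3] = image.tvec                # COLMAP stores world-to-camera
    c2w = np.linalg.inv(w2c)
    print(image.name, c2w[:3, 3])          # camera center in world coordinates

    # qvec2rotmat / rotmat2qvec invert each other up to quaternion sign
    q = image.qvec / np.linalg.norm(image.qvec)
    assert abs(np.dot(rotmat2qvec(qvec2rotmat(q)), q)) > 1 - 1e-6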
instant-nsr-pl/datasets/dtu.py
ADDED
@@ -0,0 +1,201 @@
import os
import json
import math
import numpy as np
from PIL import Image
import cv2

import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, IterableDataset
import torchvision.transforms.functional as TF

import pytorch_lightning as pl

import datasets
from models.ray_utils import get_ray_directions
from utils.misc import get_rank


def load_K_Rt_from_P(P=None):
    out = cv2.decomposeProjectionMatrix(P)
    K = out[0]
    R = out[1]
    t = out[2]

    K = K / K[2, 2]
    intrinsics = np.eye(4)
    intrinsics[:3, :3] = K

    pose = np.eye(4, dtype=np.float32)
    pose[:3, :3] = R.transpose()
    pose[:3, 3] = (t[:3] / t[3])[:, 0]

    return intrinsics, pose

def create_spheric_poses(cameras, n_steps=120):
    center = torch.as_tensor([0.,0.,0.], dtype=cameras.dtype, device=cameras.device)
    cam_center = F.normalize(cameras.mean(0), p=2, dim=-1) * cameras.mean(0).norm(2)
    eigvecs = torch.linalg.eig(cameras.T @ cameras).eigenvectors
    rot_axis = F.normalize(eigvecs[:,1].real.float(), p=2, dim=-1)
    up = rot_axis
    rot_dir = torch.cross(rot_axis, cam_center)
    max_angle = (F.normalize(cameras, p=2, dim=-1) * F.normalize(cam_center, p=2, dim=-1)).sum(-1).acos().max()

    all_c2w = []
    for theta in torch.linspace(-max_angle, max_angle, n_steps):
        cam_pos = cam_center * math.cos(theta) + rot_dir * math.sin(theta)
        l = F.normalize(center - cam_pos, p=2, dim=0)
        s = F.normalize(l.cross(up), p=2, dim=0)
        u = F.normalize(s.cross(l), p=2, dim=0)
        c2w = torch.cat([torch.stack([s, u, -l], dim=1), cam_pos[:,None]], axis=1)
        all_c2w.append(c2w)

    all_c2w = torch.stack(all_c2w, dim=0)

    return all_c2w

class DTUDatasetBase():
    def setup(self, config, split):
        self.config = config
        self.split = split
        self.rank = get_rank()

        cams = np.load(os.path.join(self.config.root_dir, self.config.cameras_file))

        img_sample = cv2.imread(os.path.join(self.config.root_dir, 'image', '000000.png'))
        H, W = img_sample.shape[0], img_sample.shape[1]

        if 'img_wh' in self.config:
            w, h = self.config.img_wh
            assert round(W / w * h) == H
        elif 'img_downscale' in self.config:
            w, h = int(W / self.config.img_downscale + 0.5), int(H / self.config.img_downscale + 0.5)
        else:
            raise KeyError("Either img_wh or img_downscale should be specified.")

        self.w, self.h = w, h
        self.img_wh = (w, h)
        self.factor = w / W

        mask_dir = os.path.join(self.config.root_dir, 'mask')
        self.has_mask = True
        self.apply_mask = self.config.apply_mask

        self.directions = []
        self.all_c2w, self.all_images, self.all_fg_masks = [], [], []

        n_images = max([int(k.split('_')[-1]) for k in cams.keys()]) + 1

        for i in range(n_images):
            world_mat, scale_mat = cams[f'world_mat_{i}'], cams[f'scale_mat_{i}']
            P = (world_mat @ scale_mat)[:3,:4]
            K, c2w = load_K_Rt_from_P(P)
            fx, fy, cx, cy = K[0,0] * self.factor, K[1,1] * self.factor, K[0,2] * self.factor, K[1,2] * self.factor
            directions = get_ray_directions(w, h, fx, fy, cx, cy)
            self.directions.append(directions)

            c2w = torch.from_numpy(c2w).float()

            # blender follows opengl camera coordinates (right up back)
            # NeuS DTU data coordinate system (right down front) is different from blender
            # https://github.com/Totoro97/NeuS/issues/9
            # for c2w, flip the sign of input camera coordinate yz
            c2w_ = c2w.clone()
            c2w_[:3,1:3] *= -1. # flip input sign
            self.all_c2w.append(c2w_[:3,:4])

            if self.split in ['train', 'val']:
                img_path = os.path.join(self.config.root_dir, 'image', f'{i:06d}.png')
                img = Image.open(img_path)
                img = img.resize(self.img_wh, Image.BICUBIC)
                img = TF.to_tensor(img).permute(1, 2, 0)[...,:3]

                mask_path = os.path.join(mask_dir, f'{i:03d}.png')
                mask = Image.open(mask_path).convert('L') # (H, W, 1)
                mask = mask.resize(self.img_wh, Image.BICUBIC)
                mask = TF.to_tensor(mask)[0]

                self.all_fg_masks.append(mask) # (h, w)
                self.all_images.append(img)

        self.all_c2w = torch.stack(self.all_c2w, dim=0)

        if self.split == 'test':
            self.all_c2w = create_spheric_poses(self.all_c2w[:,:,3], n_steps=self.config.n_test_traj_steps)
            self.all_images = torch.zeros((self.config.n_test_traj_steps, self.h, self.w, 3), dtype=torch.float32)
            self.all_fg_masks = torch.zeros((self.config.n_test_traj_steps, self.h, self.w), dtype=torch.float32)
            self.directions = self.directions[0]
        else:
            self.all_images, self.all_fg_masks = torch.stack(self.all_images, dim=0), torch.stack(self.all_fg_masks, dim=0)
            self.directions = torch.stack(self.directions, dim=0)

        self.directions = self.directions.float().to(self.rank)
        self.all_c2w, self.all_images, self.all_fg_masks = \
            self.all_c2w.float().to(self.rank), \
            self.all_images.float().to(self.rank), \
            self.all_fg_masks.float().to(self.rank)


class DTUDataset(Dataset, DTUDatasetBase):
    def __init__(self, config, split):
        self.setup(config, split)

    def __len__(self):
        return len(self.all_images)

    def __getitem__(self, index):
        return {
            'index': index
        }


class DTUIterableDataset(IterableDataset, DTUDatasetBase):
    def __init__(self, config, split):
        self.setup(config, split)

    def __iter__(self):
        while True:
            yield {}


@datasets.register('dtu')
class DTUDataModule(pl.LightningDataModule):
    def __init__(self, config):
        super().__init__()
        self.config = config

    def setup(self, stage=None):
        if stage in [None, 'fit']:
            self.train_dataset = DTUIterableDataset(self.config, 'train')
        if stage in [None, 'fit', 'validate']:
            self.val_dataset = DTUDataset(self.config, self.config.get('val_split', 'train'))
        if stage in [None, 'test']:
            self.test_dataset = DTUDataset(self.config, self.config.get('test_split', 'test'))
        if stage in [None, 'predict']:
            self.predict_dataset = DTUDataset(self.config, 'train')

    def prepare_data(self):
        pass

    def general_loader(self, dataset, batch_size):
        sampler = None
        return DataLoader(
            dataset,
            num_workers=os.cpu_count(),
            batch_size=batch_size,
            pin_memory=True,
            sampler=sampler
        )

    def train_dataloader(self):
        return self.general_loader(self.train_dataset, batch_size=1)

    def val_dataloader(self):
        return self.general_loader(self.val_dataset, batch_size=1)

    def test_dataloader(self):
        return self.general_loader(self.test_dataset, batch_size=1)

    def predict_dataloader(self):
        return self.general_loader(self.predict_dataset, batch_size=1)
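A hedged sketch of the projection-matrix decomposition that DTUDatasetBase.setup relies on, assuming a NeuS-style cameras.npz for the scan; the "load/DTU/scan63" path is hypothetical.

import numpy as np
from datasets.dtu import load_K_Rt_from_P

cams = np.load("load/DTU/scan63/cameras.npz")            # hypothetical path
P = (cams["world_mat_0"] @ cams["scale_mat_0"])[:3, :4]  # per-view projection
K, c2w = load_K_Rt_from_P(P)
print(K[:3, :3])    # pinhole intrinsics, normalized so K[2, 2] == 1
print(c2w[:3, 3])   # camera center in the scale-normalized world frame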
instant-nsr-pl/datasets/fixed_poses/000_back_RT.txt
ADDED
@@ -0,0 +1,3 @@
-1.000000238418579102e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
0.000000000000000000e+00 -1.343588564850506373e-07 1.000000119209289551e+00 1.746665105883948854e-07
0.000000000000000000e+00 1.000000119209289551e+00 -1.343588564850506373e-07 -1.300000071525573730e+00
instant-nsr-pl/datasets/fixed_poses/000_back_left_RT.txt
ADDED
@@ -0,0 +1,3 @@
-7.071069478988647461e-01 -7.071068286895751953e-01 0.000000000000000000e+00 -1.192092895507812500e-07
0.000000000000000000e+00 -7.587616579485256807e-08 1.000000119209289551e+00 9.863901340168013121e-08
-7.071068286895751953e-01 7.071068286895751953e-01 -7.587616579485256807e-08 -1.838477730751037598e+00
instant-nsr-pl/datasets/fixed_poses/000_back_right_RT.txt
ADDED
@@ -0,0 +1,3 @@
-7.071069478988647461e-01 7.071068286895751953e-01 0.000000000000000000e+00 1.192092895507812500e-07
0.000000000000000000e+00 -7.587616579485256807e-08 1.000000119209289551e+00 9.863901340168013121e-08
7.071068286895751953e-01 7.071068286895751953e-01 -7.587616579485256807e-08 -1.838477730751037598e+00
instant-nsr-pl/datasets/fixed_poses/000_front_RT.txt
ADDED
@@ -0,0 +1,3 @@
1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
0.000000000000000000e+00 -1.343588564850506373e-07 1.000000119209289551e+00 -1.746665105883948854e-07
0.000000000000000000e+00 -1.000000119209289551e+00 -1.343588564850506373e-07 -1.300000071525573730e+00
instant-nsr-pl/datasets/fixed_poses/000_front_left_RT.txt
ADDED
@@ -0,0 +1,3 @@
7.071067690849304199e-01 -7.071068286895751953e-01 0.000000000000000000e+00 -1.192092895507812500e-07
0.000000000000000000e+00 -7.587616579485256807e-08 1.000000119209289551e+00 -9.863901340168013121e-08
-7.071068286895751953e-01 -7.071068286895751953e-01 -7.587616579485256807e-08 -1.838477730751037598e+00
instant-nsr-pl/datasets/fixed_poses/000_front_right_RT.txt
ADDED
@@ -0,0 +1,3 @@
7.071067690849304199e-01 7.071068286895751953e-01 0.000000000000000000e+00 1.192092895507812500e-07
0.000000000000000000e+00 -7.587616579485256807e-08 1.000000119209289551e+00 -9.863901340168013121e-08
7.071068286895751953e-01 -7.071068286895751953e-01 -7.587616579485256807e-08 -1.838477730751037598e+00
instant-nsr-pl/datasets/fixed_poses/000_left_RT.txt
ADDED
@@ -0,0 +1,3 @@
-2.220446049250313081e-16 -1.000000000000000000e+00 0.000000000000000000e+00 -2.886579758146288598e-16
0.000000000000000000e+00 -2.220446049250313081e-16 1.000000000000000000e+00 0.000000000000000000e+00
-1.000000000000000000e+00 0.000000000000000000e+00 -2.220446049250313081e-16 -1.299999952316284180e+00
instant-nsr-pl/datasets/fixed_poses/000_right_RT.txt
ADDED
@@ -0,0 +1,3 @@
-2.220446049250313081e-16 1.000000000000000000e+00 0.000000000000000000e+00 2.886579758146288598e-16
0.000000000000000000e+00 -2.220446049250313081e-16 1.000000000000000000e+00 0.000000000000000000e+00
1.000000000000000000e+00 0.000000000000000000e+00 -2.220446049250313081e-16 -1.299999952316284180e+00
instant-nsr-pl/datasets/fixed_poses/000_top_RT.txt
ADDED
@@ -0,0 +1,3 @@
1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
0.000000000000000000e+00 1.000000000000000000e+00 0.000000000000000000e+00 0.000000000000000000e+00
0.000000000000000000e+00 0.000000000000000000e+00 1.000000000000000000e+00 -1.299999952316284180e+00
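Each fixed-pose file above stores a 3x4 world-to-camera matrix [R|t] in the OpenGL/Blender convention consumed by the ortho dataset below. A small sanity-check sketch, assuming the instant-nsr-pl root as working directory:

import numpy as np

RT = np.loadtxt("datasets/fixed_poses/000_front_RT.txt")  # shape (3, 4)
R, t = RT[:3, :3], RT[:3, 3]
assert np.allclose(R @ R.T, np.eye(3), atol=1e-6)  # R is (numerically) a rotation
cam_center = -R.T @ t                              # camera position in world space
print(np.linalg.norm(cam_center))                  # distance to the origin (1.3 here)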
instant-nsr-pl/datasets/ortho.py
ADDED
@@ -0,0 +1,287 @@
import os
import json
import math
import numpy as np
from PIL import Image
import cv2

import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, IterableDataset
import torchvision.transforms.functional as TF

import pytorch_lightning as pl

import datasets
from models.ray_utils import get_ortho_ray_directions_origins, get_ortho_rays, get_ray_directions
from utils.misc import get_rank

from glob import glob
import PIL.Image


def camNormal2worldNormal(rot_c2w, camNormal):
    H,W,_ = camNormal.shape
    normal_img = np.matmul(rot_c2w[None, :, :], camNormal.reshape(-1,3)[:, :, None]).reshape([H, W, 3])

    return normal_img

def worldNormal2camNormal(rot_w2c, worldNormal):
    H,W,_ = worldNormal.shape
    normal_img = np.matmul(rot_w2c[None, :, :], worldNormal.reshape(-1,3)[:, :, None]).reshape([H, W, 3])

    return normal_img

def trans_normal(normal, RT_w2c, RT_w2c_target):

    normal_world = camNormal2worldNormal(np.linalg.inv(RT_w2c[:3,:3]), normal)
    normal_target_cam = worldNormal2camNormal(RT_w2c_target[:3,:3], normal_world)

    return normal_target_cam

def img2normal(img):
    return (img/255.)*2-1

def normal2img(normal):
    return np.uint8((normal*0.5+0.5)*255)

def norm_normalize(normal, dim=-1):

    normal = normal/(np.linalg.norm(normal, axis=dim, keepdims=True)+1e-6)

    return normal

def RT_opengl2opencv(RT):
    # Build the coordinate transform matrix from world to computer vision camera
    # R_world2cv = R_bcam2cv @ R_world2bcam
    # T_world2cv = R_bcam2cv @ T_world2bcam

    R = RT[:3, :3]
    t = RT[:3, 3]

    R_bcam2cv = np.asarray([[1, 0, 0], [0, -1, 0], [0, 0, -1]], np.float32)

    R_world2cv = R_bcam2cv @ R
    t_world2cv = R_bcam2cv @ t

    RT = np.concatenate([R_world2cv, t_world2cv[:,None]], 1)

    return RT

def normal_opengl2opencv(normal):
    H,W,C = np.shape(normal)
    # normal_img = np.reshape(normal, (H*W,C))
    R_bcam2cv = np.array([1, -1, -1], np.float32)
    normal_cv = normal * R_bcam2cv[None, None, :]

    print(np.shape(normal_cv))

    return normal_cv

def inv_RT(RT):
    RT_h = np.concatenate([RT, np.array([[0,0,0,1]])], axis=0)
    RT_inv = np.linalg.inv(RT_h)

    return RT_inv[:3, :]


def load_a_prediction(root_dir, test_object, imSize, view_types, load_color=False, cam_pose_dir=None,
                      normal_system='front', erode_mask=True, camera_type='ortho', cam_params=None):

    all_images = []
    all_normals = []
    all_normals_world = []
    all_masks = []
    all_color_masks = []
    all_poses = []
    all_w2cs = []
    directions = []
    ray_origins = []

    RT_front = np.loadtxt(glob(os.path.join(cam_pose_dir, '*_%s_RT.txt' % 'front'))[0])  # world2cam matrix
    RT_front_cv = RT_opengl2opencv(RT_front)  # convert the front pose from OpenGL to OpenCV convention
    for idx, view in enumerate(view_types):
        print(os.path.join(root_dir, test_object))
        normal_filepath = os.path.join(root_dir, test_object, 'normals_000_%s.png' % view)
        # Load key frame
        if load_color:  # use bgr
            image = np.array(PIL.Image.open(normal_filepath.replace("normals", "rgb")).resize(imSize))[:, :, :3]

        normal = np.array(PIL.Image.open(normal_filepath).resize(imSize))
        mask = normal[:, :, 3]
        normal = normal[:, :, :3]

        color_mask = np.array(PIL.Image.open(os.path.join(root_dir, test_object, 'masked_colors/rgb_000_%s.png' % view)).resize(imSize))[:, :, 3]
        invalid_color_mask = color_mask < 255*0.5
        threshold = np.ones_like(image[:, :, 0]) * 250
        invalid_white_mask = (image[:, :, 0] > threshold) & (image[:, :, 1] > threshold) & (image[:, :, 2] > threshold)
        invalid_color_mask_final = invalid_color_mask & invalid_white_mask
        color_mask = (1 - invalid_color_mask_final) > 0

        # if erode_mask:
        #     kernel = np.ones((3, 3), np.uint8)
        #     mask = cv2.erode(mask, kernel, iterations=1)

        RT = np.loadtxt(os.path.join(cam_pose_dir, '000_%s_RT.txt' % view))  # world2cam matrix

        normal = img2normal(normal)

        normal[mask == 0] = [0, 0, 0]
        mask = mask > (0.5*255)
        if load_color:
            all_images.append(image)

        all_masks.append(mask)
        all_color_masks.append(color_mask)
        RT_cv = RT_opengl2opencv(RT)  # convert the pose from OpenGL to OpenCV convention
        all_poses.append(inv_RT(RT_cv))  # cam2world
        all_w2cs.append(RT_cv)

        # convert the normal map from the OpenGL to the OpenCV camera convention
        normal_cam_cv = normal_opengl2opencv(normal)

        if normal_system == 'front':
            print("the loaded normals are defined in the front-view camera system")
            normal_world = camNormal2worldNormal(inv_RT(RT_front_cv)[:3, :3], normal_cam_cv)
        elif normal_system == 'self':
            print("the loaded normals are defined in their own camera systems")
            normal_world = camNormal2worldNormal(inv_RT(RT_cv)[:3, :3], normal_cam_cv)
        all_normals.append(normal_cam_cv)
        all_normals_world.append(normal_world)

        if camera_type == 'ortho':
            origins, dirs = get_ortho_ray_directions_origins(W=imSize[0], H=imSize[1])
        elif camera_type == 'pinhole':
            dirs = get_ray_directions(W=imSize[0], H=imSize[1],
                                      fx=cam_params[0], fy=cam_params[1], cx=cam_params[2], cy=cam_params[3])
            origins = dirs  # placeholder; per-pixel origins are unused for pinhole cameras
        else:
            raise Exception("unsupported camera type")
        ray_origins.append(origins)
        directions.append(dirs)


    if not load_color:
        all_images = [normal2img(x) for x in all_normals_world]


    return np.stack(all_images), np.stack(all_masks), np.stack(all_normals), \
        np.stack(all_normals_world), np.stack(all_poses), np.stack(all_w2cs), np.stack(ray_origins), np.stack(directions), np.stack(all_color_masks)


class OrthoDatasetBase():
    def setup(self, config, split):
        self.config = config
        self.split = split
        self.rank = get_rank()

        self.data_dir = self.config.root_dir
        self.object_name = self.config.scene
        self.scene = self.config.scene
        self.imSize = self.config.imSize
        self.load_color = True
        self.img_wh = [self.imSize[0], self.imSize[1]]
        self.w = self.img_wh[0]
        self.h = self.img_wh[1]
        self.camera_type = self.config.camera_type
        self.camera_params = self.config.camera_params  # [fx, fy, cx, cy]

        self.view_types = ['front', 'front_right', 'right', 'back', 'left', 'front_left']

        self.view_weights = torch.from_numpy(np.array(self.config.view_weights)).float().to(self.rank).view(-1)
        self.view_weights = self.view_weights.view(-1,1,1).repeat(1, self.h, self.w)

        if self.config.cam_pose_dir is None:
            self.cam_pose_dir = "./datasets/fixed_poses"
        else:
            self.cam_pose_dir = self.config.cam_pose_dir

        self.images_np, self.masks_np, self.normals_cam_np, self.normals_world_np, \
            self.pose_all_np, self.w2c_all_np, self.origins_np, self.directions_np, self.rgb_masks_np = load_a_prediction(
                self.data_dir, self.object_name, self.imSize, self.view_types,
                self.load_color, self.cam_pose_dir, normal_system='front',
                camera_type=self.camera_type, cam_params=self.camera_params)

        self.has_mask = True
        self.apply_mask = self.config.apply_mask

        self.all_c2w = torch.from_numpy(self.pose_all_np)
        self.all_images = torch.from_numpy(self.images_np) / 255.
        self.all_fg_masks = torch.from_numpy(self.masks_np)
        self.all_rgb_masks = torch.from_numpy(self.rgb_masks_np)
        self.all_normals_world = torch.from_numpy(self.normals_world_np)
        self.origins = torch.from_numpy(self.origins_np)
        self.directions = torch.from_numpy(self.directions_np)

        self.directions = self.directions.float().to(self.rank)
        self.origins = self.origins.float().to(self.rank)
        self.all_rgb_masks = self.all_rgb_masks.float().to(self.rank)
        self.all_c2w, self.all_images, self.all_fg_masks, self.all_normals_world = \
            self.all_c2w.float().to(self.rank), \
            self.all_images.float().to(self.rank), \
            self.all_fg_masks.float().to(self.rank), \
            self.all_normals_world.float().to(self.rank)


class OrthoDataset(Dataset, OrthoDatasetBase):
    def __init__(self, config, split):
        self.setup(config, split)

    def __len__(self):
        return len(self.all_images)

    def __getitem__(self, index):
        return {
            'index': index
        }


class OrthoIterableDataset(IterableDataset, OrthoDatasetBase):
    def __init__(self, config, split):
        self.setup(config, split)

    def __iter__(self):
        while True:
            yield {}


@datasets.register('ortho')
class OrthoDataModule(pl.LightningDataModule):
    def __init__(self, config):
        super().__init__()
        self.config = config

    def setup(self, stage=None):
        if stage in [None, 'fit']:
            self.train_dataset = OrthoIterableDataset(self.config, 'train')
        if stage in [None, 'fit', 'validate']:
            self.val_dataset = OrthoDataset(self.config, self.config.get('val_split', 'train'))
        if stage in [None, 'test']:
            self.test_dataset = OrthoDataset(self.config, self.config.get('test_split', 'test'))
        if stage in [None, 'predict']:
            self.predict_dataset = OrthoDataset(self.config, 'train')

    def prepare_data(self):
        pass

    def general_loader(self, dataset, batch_size):
        sampler = None
        return DataLoader(
            dataset,
            num_workers=os.cpu_count(),
            batch_size=batch_size,
            pin_memory=True,
            sampler=sampler
        )

    def train_dataloader(self):
        return self.general_loader(self.train_dataset, batch_size=1)

    def val_dataloader(self):
        return self.general_loader(self.val_dataset, batch_size=1)

    def test_dataloader(self):
        return self.general_loader(self.test_dataset, batch_size=1)

    def predict_dataloader(self):
        return self.general_loader(self.predict_dataset, batch_size=1)
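The normal-map helpers at the top of ortho.py are small enough to sanity-check in isolation. A hedged sketch, assuming the instant-nsr-pl root is on PYTHONPATH so datasets.ortho imports cleanly:

import numpy as np
from datasets.ortho import img2normal, normal2img, normal_opengl2opencv

n = np.random.randn(4, 4, 3).astype(np.float32)
n /= np.linalg.norm(n, axis=-1, keepdims=True)     # unit normals in [-1, 1]
# flipping the y and z components twice is the identity
assert np.allclose(normal_opengl2opencv(normal_opengl2opencv(n)), n)
# the uint8 round trip loses at most ~1/255 per channel
img = normal2img(n)
assert np.abs(img2normal(img) - n).max() < 2 / 255 + 1e-6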
instant-nsr-pl/datasets/utils.py
ADDED
File without changes