jingyangcarl committed on
Commit 0b56dc2 · 1 Parent(s): 80378ac
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. .gitignore +2 -1
  2. app_texnet.py +5 -3
  3. install.sh +17 -0
  4. model.py +103 -49
  5. requirements.txt +9 -1
  6. settings.py +2 -1
  7. text2tex/lib/__init__.py +0 -0
  8. text2tex/lib/camera_helper.py +231 -0
  9. text2tex/lib/constants.py +648 -0
  10. text2tex/lib/diffusion_helper.py +189 -0
  11. text2tex/lib/io_helper.py +78 -0
  12. text2tex/lib/mesh_helper.py +148 -0
  13. text2tex/lib/projection_helper.py +464 -0
  14. text2tex/lib/render_helper.py +108 -0
  15. text2tex/lib/shading_helper.py +45 -0
  16. text2tex/lib/vis_helper.py +209 -0
  17. text2tex/models/ControlNet/.gitignore +143 -0
  18. text2tex/models/ControlNet/LICENSE +201 -0
  19. text2tex/models/ControlNet/README.md +234 -0
  20. text2tex/models/ControlNet/annotator/canny/__init__.py +5 -0
  21. text2tex/models/ControlNet/annotator/ckpts/ckpts.txt +1 -0
  22. text2tex/models/ControlNet/annotator/hed/__init__.py +127 -0
  23. text2tex/models/ControlNet/annotator/midas/__init__.py +36 -0
  24. text2tex/models/ControlNet/annotator/midas/api.py +165 -0
  25. text2tex/models/ControlNet/annotator/midas/midas/__init__.py +0 -0
  26. text2tex/models/ControlNet/annotator/midas/midas/base_model.py +16 -0
  27. text2tex/models/ControlNet/annotator/midas/midas/blocks.py +342 -0
  28. text2tex/models/ControlNet/annotator/midas/midas/dpt_depth.py +109 -0
  29. text2tex/models/ControlNet/annotator/midas/midas/midas_net.py +76 -0
  30. text2tex/models/ControlNet/annotator/midas/midas/midas_net_custom.py +128 -0
  31. text2tex/models/ControlNet/annotator/midas/midas/transforms.py +234 -0
  32. text2tex/models/ControlNet/annotator/midas/midas/vit.py +491 -0
  33. text2tex/models/ControlNet/annotator/midas/utils.py +189 -0
  34. text2tex/models/ControlNet/annotator/mlsd/__init__.py +30 -0
  35. text2tex/models/ControlNet/annotator/mlsd/models/mbv2_mlsd_large.py +292 -0
  36. text2tex/models/ControlNet/annotator/mlsd/models/mbv2_mlsd_tiny.py +275 -0
  37. text2tex/models/ControlNet/annotator/mlsd/utils.py +580 -0
  38. text2tex/models/ControlNet/annotator/openpose/__init__.py +29 -0
  39. text2tex/models/ControlNet/annotator/openpose/body.py +219 -0
  40. text2tex/models/ControlNet/annotator/openpose/hand.py +86 -0
  41. text2tex/models/ControlNet/annotator/openpose/model.py +219 -0
  42. text2tex/models/ControlNet/annotator/openpose/util.py +164 -0
  43. text2tex/models/ControlNet/annotator/uniformer/__init__.py +13 -0
  44. text2tex/models/ControlNet/annotator/uniformer/configs/_base_/datasets/ade20k.py +54 -0
  45. text2tex/models/ControlNet/annotator/uniformer/configs/_base_/datasets/chase_db1.py +59 -0
  46. text2tex/models/ControlNet/annotator/uniformer/configs/_base_/datasets/cityscapes.py +54 -0
  47. text2tex/models/ControlNet/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py +35 -0
  48. text2tex/models/ControlNet/annotator/uniformer/configs/_base_/datasets/drive.py +59 -0
  49. text2tex/models/ControlNet/annotator/uniformer/configs/_base_/datasets/hrf.py +59 -0
  50. text2tex/models/ControlNet/annotator/uniformer/configs/_base_/datasets/pascal_context.py +60 -0
.gitignore CHANGED
@@ -3,4 +3,5 @@ data
 # examples
 .gradio
 model_cache
-output
+output
+test.png
app_texnet.py CHANGED
@@ -45,7 +45,7 @@ def apply_texture(src_mesh:str, texture:str, tag:str)->str:
 
     return mesh_copy
 
-def image_to_temp_path(img_like, tag):
+def image_to_temp_path(img_like, tag, out_dir=None):
    """
    Convert various image-like objects (str, PIL.Image, list, tuple) to temp PNG path.
    Returns the path to the saved image file.
@@ -62,13 +62,15 @@ def image_to_temp_path(img_like, tag):
 
    # If it's a PIL Image
    if isinstance(img_like, Image.Image):
-        temp_path = os.path.join(tempfile.mkdtemp(), f"{tag}.png")
+        temp_path = os.path.join(tempfile.mkdtemp() if out_dir is None else out_dir, f"{tag}.png")
+        os.makedirs(os.path.dirname(temp_path), exist_ok=True)
        img_like.save(temp_path)
        return temp_path
 
    # if it's numpy array
    if isinstance(img_like, np.ndarray):
-        temp_path = os.path.join(tempfile.mkdtemp(), f"{tag}.png")
+        temp_path = os.path.join(tempfile.mkdtemp() if out_dir is None else out_dir, f"{tag}.png")
+        os.makedirs(os.path.dirname(temp_path), exist_ok=True)
        img_like = Image.fromarray(img_like)
        img_like.save(temp_path)
        return temp_path
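For reference, a minimal usage sketch of the updated helper. The gray placeholder image and the output/intrinsic directory below are illustrative, not taken from this commit; it only assumes app_texnet is importable from the repo root.

# sketch: image_to_temp_path with and without a pinned output directory
from PIL import Image
import numpy as np
from app_texnet import image_to_temp_path

img = Image.new("RGB", (64, 64), color=(128, 128, 128))  # placeholder image
# default behaviour: save into a fresh temporary directory
tmp_png = image_to_temp_path(img, "base_color")
# new behaviour: out_dir pins the destination and is created if missing
fixed_png = image_to_temp_path(np.array(img), "base_color", out_dir="output/intrinsic")
print(tmp_png, fixed_png)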
install.sh ADDED
@@ -0,0 +1,17 @@
+conda activate base
+conda remove -n matgen-plus --all
+
+
+conda create -n matgen-plus python=3.11
+conda activate matgen-plus
+
+pip install diffusers["torch"] transformers accelerate xformers
+pip install gradio
+pip install controlnet-aux
+
+# text2tex
+conda install pytorch3d -c pytorch -c conda-forge
+conda install -c conda-forge open-clip-torch pytorch-lightning
+pip install trimesh xatlas scikit-learn opencv-python omegaconf
+
+bash run.sh
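An optional sanity check, not part of install.sh, that the conda/pip mix above produced a working environment; the package list mirrors the installs above and the script name is ours. Run it inside the matgen-plus environment.

# sanity_check.py -- verify the key packages installed by install.sh import cleanly
import importlib

for name in ["torch", "diffusers", "transformers", "gradio",
             "pytorch3d", "trimesh", "xatlas", "cv2", "omegaconf"]:
    try:
        mod = importlib.import_module(name)
        print(f"{name:12s} OK  ({getattr(mod, '__version__', 'no __version__')})")
    except ImportError as err:
        print(f"{name:12s} MISSING -> {err}")

import torch
print("CUDA available:", torch.cuda.is_available())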
model.py CHANGED
@@ -30,6 +30,8 @@ from app_texnet import image_to_temp_path
 import os
 import time
 import tempfile
+from text2tex.scripts.generate_texture import text2tex_call, init_args
+from glob import glob
 
 CONTROLNET_MODEL_IDS = {
     # "Openpose": "lllyasviel/control_v11p_sd15_openpose",
@@ -202,53 +204,97 @@ class Model:
             raise ValueError
         if num_images > MAX_NUM_IMAGES:
             raise ValueError
+
+        prompt_nospace = prompt.replace(' ', '_')
 
-        self.preprocessor.load("texnet")
-        control_image = self.preprocessor(
-            image=image, low_threshold=low_threshold, high_threshold=high_threshold, image_resolution=image_resolution, output_type="pil"
-        )
-
-        self.load_controlnet_weight("texnet")
-        tex_coarse = self.run_pipe(
-            prompt=self.get_prompt(prompt, additional_prompt),
-            negative_prompt=negative_prompt,
-            control_image=control_image,
-            num_images=num_images,
-            num_steps=num_steps,
-            guidance_scale=guidance_scale,
-            seed=seed,
-        )
-
-        # use img2img pipeline
-        self.pipe_backup = self.pipe
-        self.pipe = self.pipe_base
-
-        # refine
+        # self.preprocessor.load("texnet")
+        # control_image = self.preprocessor(
+        #     image=image, low_threshold=low_threshold, high_threshold=high_threshold, image_resolution=image_resolution, output_type="pil"
+        # )
+
+        # self.load_controlnet_weight("texnet")
+        # tex_coarse = self.run_pipe(
+        #     prompt=self.get_prompt(prompt, additional_prompt),
+        #     negative_prompt=negative_prompt,
+        #     control_image=control_image,
+        #     num_images=num_images,
+        #     num_steps=num_steps,
+        #     guidance_scale=guidance_scale,
+        #     seed=seed,
+        # )
+
+        # # use img2img pipeline
+        # self.pipe_backup = self.pipe
+        # self.pipe = self.pipe_base
+
+        # # refine
         tex_fine = []
-        for result_coarse in tex_coarse:
-            # clean up GPU cache
-            torch.cuda.empty_cache()
-            gc.collect()
-
-            # masking
-            mask = (np.array(control_image).sum(axis=-1) == 0)[...,None]
-            image_masked = PIL.Image.fromarray(np.where(mask, control_image, result_coarse))
-            image_blurry = transforms.GaussianBlur(kernel_size=5, sigma=1)(image_masked)
-            result_fine = self.run_pipe(
-                # prompt=prompt,
-                prompt=self.get_prompt(prompt, additional_prompt),
-                negative_prompt=negative_prompt,
-                control_image=image_blurry,
-                num_images=1,
-                num_steps=num_steps,
-                guidance_scale=guidance_scale,
-                seed=seed,
-            )[0]
-            result_fine = PIL.Image.fromarray(np.where(mask, control_image, result_fine))
-            tex_fine.append(result_fine)
+        mesh_fine = []
+        # for result_coarse in tex_coarse:
+        #     # clean up GPU cache
+        #     torch.cuda.empty_cache()
+        #     gc.collect()
+
+        #     # masking
+        #     mask = (np.array(control_image).sum(axis=-1) == 0)[...,None]
+        #     image_masked = PIL.Image.fromarray(np.where(mask, control_image, result_coarse))
+        #     image_blurry = transforms.GaussianBlur(kernel_size=5, sigma=1)(image_masked)
+        #     result_fine = self.run_pipe(
+        #         # prompt=prompt,
+        #         prompt=self.get_prompt(prompt, additional_prompt),
+        #         negative_prompt=negative_prompt,
+        #         control_image=image_blurry,
+        #         num_images=1,
+        #         num_steps=num_steps,
+        #         guidance_scale=guidance_scale,
+        #         seed=seed,
+        #     )[0]
+        #     result_fine = PIL.Image.fromarray(np.where(mask, control_image, result_fine))
+        #     tex_fine.append(result_fine)
+
+        temp_out_path = tempfile.mkdtemp()
+        temp_out_path = 'output'
+
+        # put text2tex here,
+        args = init_args()
+        args.input_dir = f'examples/{obj_name}/'
+        args.output_dir = os.path.join(temp_out_path, f'{obj_name}/{prompt_nospace}')
+        args.obj_name = obj_name
+        args.obj_file = 'mesh.obj'
+        args.prompt = f'{prompt} {obj_name}'
+        args.add_view_to_prompt = True
+        args.ddim_steps = 5
+        # args.ddim_steps = 50
+        args.new_strength = 1.0
+        args.update_strength = 0.3
+        args.view_threshold = 0.1
+        args.blend = 0
+        args.dist = 1
+        args.num_viewpoints = 2
+        # args.num_viewpoints = 36
+        args.viewpoint_mode = 'predefined'
+        args.use_principle = True
+        args.update_steps = 2
+        # args.update_steps = 20
+        args.update_mode = 'heuristic'
+        args.seed = 42
+        args.post_process = True
+        args.device = '2080'
+        args.uv_size = 1000
+        args.image_size = 512
+        # args.image_size = 768
+        args.use_objaverse = True # assume the mesh is normalized with y-axis as up
+        output_dir = text2tex_call(args)
+
+        # get the texture and mesh with underscore '_post', which is the id of the last mesh, should be good for the visual
+        post_idx = glob(os.path.join(output_dir, 'update', 'mesh', "*_post.png"))[0].split('/')[-1].split('_')[0]
+
+        tex_fine.append(PIL.Image.open(os.path.join(output_dir, 'update', 'mesh', f"{post_idx}.png")).convert("RGB"))
+        mesh_fine.append(os.path.join(output_dir, 'update', 'mesh', f"{post_idx}.obj"))
+        torch.cuda.empty_cache()
 
         # restore the original pipe
-        self.pipe = self.pipe_backup
+        # self.pipe = self.pipe_backup
 
     # use rgb2x for now for generating the texture
     def rgb2x(
@@ -315,11 +361,18 @@ class Model:
         # Load rgb2x pipeline
         _, preds, prompts = rgb2x(self.pipe_rgb2x, torchvision.transforms.PILToTensor()(tex_fine[0]).to(self.pipe.device), inference_step=num_steps, num_samples=num_images)
 
-        base_color_path = image_to_temp_path(tex_fine[0].rotate(90), "base_color")
-        normal_map_path = image_to_temp_path(preds[0].rotate(90), "normal_map")
-        roughness_path = image_to_temp_path(preds[1].rotate(90), "roughness")
-        metallic_path = image_to_temp_path(preds[2].rotate(90), "metallic")
-        prompt_nospace = prompt.replace(' ', '_')
+        intrinsic_dir = os.path.join(output_dir, 'intrinsic')
+        use_text2tex = True
+        if use_text2tex:
+            base_color_path = image_to_temp_path(tex_fine[0], "base_color", out_dir=intrinsic_dir)
+            normal_map_path = image_to_temp_path(preds[0], "normal_map", out_dir=intrinsic_dir)
+            roughness_path = image_to_temp_path(preds[1], "roughness", out_dir=intrinsic_dir)
+            metallic_path = image_to_temp_path(preds[2], "metallic", out_dir=intrinsic_dir)
+        else:
+            base_color_path = image_to_temp_path(tex_fine[0].rotate(90), "base_color", out_dir=intrinsic_dir)
+            normal_map_path = image_to_temp_path(preds[0].rotate(90), "normal_map", out_dir=intrinsic_dir)
+            roughness_path = image_to_temp_path(preds[1].rotate(90), "roughness", out_dir=intrinsic_dir)
+            metallic_path = image_to_temp_path(preds[2].rotate(90), "metallic", out_dir=intrinsic_dir)
         current_timecode = time.strftime("%Y%m%d_%H%M%S")
         # output_blend_path = os.path.join(os.getcwd(), "output", f"{obj_name}_{prompt_nospace}_{current_timecode}.blend") # replace with desired output path
         output_blend_path = os.path.join(tempfile.mkdtemp(), f"{obj_name}_{prompt_nospace}_{current_timecode}.blend") # replace with desired output path
@@ -345,7 +398,8 @@ class Model:
         run_blend_generation(
             blender_path=self.blender_path,
             generate_script_path="rgb2x/generate_blend.py",
-            obj_path=f"examples/{obj_name}/mesh.obj", # replace with actual mesh path
+            # obj_path=f"examples/{obj_name}/mesh.obj", # replace with actual mesh path
+            obj_path=mesh_fine[0], # replace with actual mesh path
             base_color_path=base_color_path,
             normal_map_path=normal_map_path,
             roughness_path=roughness_path,
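The added code finds the post-processed texture by globbing for *_post.png and splitting the path on '/'. A slightly more portable sketch of the same lookup (same assumed update/mesh layout; the helper name find_post_outputs is ours) is:

import os
from glob import glob

def find_post_outputs(output_dir):
    """Return (texture_path, obj_path) for the post-processed mesh written by text2tex."""
    mesh_dir = os.path.join(output_dir, "update", "mesh")
    post_pngs = glob(os.path.join(mesh_dir, "*_post.png"))
    if not post_pngs:
        raise FileNotFoundError(f"no *_post.png under {mesh_dir}")
    # e.g. ".../update/mesh/20_post.png" -> view id "20"
    post_idx = os.path.basename(post_pngs[0]).split("_")[0]
    return (os.path.join(mesh_dir, f"{post_idx}.png"),
            os.path.join(mesh_dir, f"{post_idx}.obj"))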
requirements.txt CHANGED
@@ -5,4 +5,12 @@ torch
 transformers
 xformers
 controlnet-aux # for controlnet
-spaces # no need to specify here
+spaces # no need to specify here
+
+# for text2tex
+pytorch3d
+trimesh
+scikit-learn
+opencv-python
+matplotlib
+omegaconf
settings.py CHANGED
@@ -19,4 +19,5 @@ MAX_SEED = np.iinfo(np.int32).max
 # setup CUDA
 # disable the following when deployting to hugging face
 # if os.getenv("CUDA_VISIBLE_DEVICES") is None:
-#     os.environ["CUDA_VISIBLE_DEVICES"] = "7"
+#     os.environ["CUDA_VISIBLE_DEVICES"] = "7"
+#     os.environ["GRADIO_SERVER_PORT"] = "7864"
text2tex/lib/__init__.py ADDED
File without changes
text2tex/lib/camera_helper.py ADDED
@@ -0,0 +1,231 @@
1
+ import torch
2
+
3
+ import numpy as np
4
+
5
+ from sklearn.metrics.pairwise import cosine_similarity
6
+
7
+ from pytorch3d.renderer import (
8
+ PerspectiveCameras,
9
+ look_at_view_transform
10
+ )
11
+
12
+ # customized
13
+ import sys
14
+ sys.path.append(".")
15
+
16
+ from lib.constants import VIEWPOINTS
17
+
18
+ # ---------------- UTILS ----------------------
19
+
20
+ def degree_to_radian(d):
21
+ return d * np.pi / 180
22
+
23
+ def radian_to_degree(r):
24
+ return 180 * r / np.pi
25
+
26
+ def xyz_to_polar(xyz):
27
+ """ assume y-axis is the up axis """
28
+
29
+ x, y, z = xyz
30
+
31
+ theta = 180 * np.arccos(z) / np.pi
32
+ phi = 180 * np.arccos(y) / np.pi
33
+
34
+ return theta, phi
35
+
36
+ def polar_to_xyz(theta, phi, dist):
37
+ """ assume y-axis is the up axis """
38
+
39
+ theta = degree_to_radian(theta)
40
+ phi = degree_to_radian(phi)
41
+
42
+ x = np.sin(phi) * np.sin(theta) * dist
43
+ y = np.cos(phi) * dist
44
+ z = np.sin(phi) * np.cos(theta) * dist
45
+
46
+ return [x, y, z]
47
+
48
+
49
+ # ---------------- VIEWPOINTS ----------------------
50
+
51
+
52
+ def filter_viewpoints(pre_viewpoints: dict, viewpoints: dict):
53
+ """ return the binary mask of viewpoints to be filtered """
54
+
55
+ filter_mask = [0 for _ in viewpoints.keys()]
56
+ for i, v in viewpoints.items():
57
+ x_v, y_v, z_v = polar_to_xyz(v["azim"], 90 - v["elev"], v["dist"])
58
+
59
+ for _, pv in pre_viewpoints.items():
60
+ x_pv, y_pv, z_pv = polar_to_xyz(pv["azim"], 90 - pv["elev"], pv["dist"])
61
+ sim = cosine_similarity(
62
+ np.array([[x_v, y_v, z_v]]),
63
+ np.array([[x_pv, y_pv, z_pv]])
64
+ )[0, 0]
65
+
66
+ if sim > 0.9:
67
+ filter_mask[i] = 1
68
+
69
+ return filter_mask
70
+
71
+
72
+ def init_viewpoints(mode, sample_space, init_dist, init_elev, principle_directions,
73
+ use_principle=True, use_shapenet=False, use_objaverse=False):
74
+
75
+ if mode == "predefined":
76
+
77
+ (
78
+ dist_list,
79
+ elev_list,
80
+ azim_list,
81
+ sector_list
82
+ ) = init_predefined_viewpoints(sample_space, init_dist, init_elev)
83
+
84
+ elif mode == "hemisphere":
85
+
86
+ (
87
+ dist_list,
88
+ elev_list,
89
+ azim_list,
90
+ sector_list
91
+ ) = init_hemisphere_viewpoints(sample_space, init_dist)
92
+
93
+ else:
94
+ raise NotImplementedError()
95
+
96
+ # punishments for views -> in case always selecting the same view
97
+ view_punishments = [1 for _ in range(len(dist_list))]
98
+
99
+ if use_principle:
100
+
101
+ (
102
+ dist_list,
103
+ elev_list,
104
+ azim_list,
105
+ sector_list,
106
+ view_punishments
107
+ ) = init_principle_viewpoints(
108
+ principle_directions,
109
+ dist_list,
110
+ elev_list,
111
+ azim_list,
112
+ sector_list,
113
+ view_punishments,
114
+ use_shapenet,
115
+ use_objaverse
116
+ )
117
+
118
+ return dist_list, elev_list, azim_list, sector_list, view_punishments
119
+
120
+
121
+ def init_principle_viewpoints(
122
+ principle_directions,
123
+ dist_list,
124
+ elev_list,
125
+ azim_list,
126
+ sector_list,
127
+ view_punishments,
128
+ use_shapenet=False,
129
+ use_objaverse=False
130
+ ):
131
+
132
+ if use_shapenet:
133
+ key = "shapenet"
134
+
135
+ pre_elev_list = [v for v in VIEWPOINTS[key]["elev"]]
136
+ pre_azim_list = [v for v in VIEWPOINTS[key]["azim"]]
137
+ pre_sector_list = [v for v in VIEWPOINTS[key]["sector"]]
138
+
139
+ num_principle = 10
140
+ pre_dist_list = [dist_list[0] for _ in range(num_principle)]
141
+ pre_view_punishments = [0 for _ in range(num_principle)]
142
+
143
+ elif use_objaverse:
144
+ key = "objaverse"
145
+
146
+ pre_elev_list = [v for v in VIEWPOINTS[key]["elev"]]
147
+ pre_azim_list = [v for v in VIEWPOINTS[key]["azim"]]
148
+ pre_sector_list = [v for v in VIEWPOINTS[key]["sector"]]
149
+
150
+ num_principle = 10
151
+ pre_dist_list = [dist_list[0] for _ in range(num_principle)]
152
+ pre_view_punishments = [0 for _ in range(num_principle)]
153
+ else:
154
+ num_principle = 6
155
+ pre_elev_list = [v for v in VIEWPOINTS[num_principle]["elev"]]
156
+ pre_azim_list = [v for v in VIEWPOINTS[num_principle]["azim"]]
157
+ pre_sector_list = [v for v in VIEWPOINTS[num_principle]["sector"]]
158
+ pre_dist_list = [dist_list[0] for _ in range(num_principle)]
159
+ pre_view_punishments = [0 for _ in range(num_principle)]
160
+
161
+ dist_list = pre_dist_list + dist_list
162
+ elev_list = pre_elev_list + elev_list
163
+ azim_list = pre_azim_list + azim_list
164
+ sector_list = pre_sector_list + sector_list
165
+ view_punishments = pre_view_punishments + view_punishments
166
+
167
+ return dist_list, elev_list, azim_list, sector_list, view_punishments
168
+
169
+
170
+ def init_predefined_viewpoints(sample_space, init_dist, init_elev):
171
+
172
+ viewpoints = VIEWPOINTS[sample_space]
173
+
174
+ assert sample_space == len(viewpoints["sector"])
175
+
176
+ dist_list = [init_dist for _ in range(sample_space)] # always the same dist
177
+ elev_list = [viewpoints["elev"][i] for i in range(sample_space)]
178
+ azim_list = [viewpoints["azim"][i] for i in range(sample_space)]
179
+ sector_list = [viewpoints["sector"][i] for i in range(sample_space)]
180
+
181
+ return dist_list, elev_list, azim_list, sector_list
182
+
183
+
184
+ def init_hemisphere_viewpoints(sample_space, init_dist):
185
+ """
186
+ y is up-axis
187
+ """
188
+
189
+ num_points = 2 * sample_space
190
+ ga = np.pi * (3. - np.sqrt(5.)) # golden angle in radians
191
+
192
+ flags = []
193
+ elev_list = [] # degree
194
+ azim_list = [] # degree
195
+
196
+ for i in range(num_points):
197
+ y = 1 - (i / float(num_points - 1)) * 2 # y goes from 1 to -1
198
+
199
+ # only take the north hemisphere
200
+ if y >= 0:
201
+ flags.append(True)
202
+ else:
203
+ flags.append(False)
204
+
205
+ theta = ga * i # golden angle increment
206
+
207
+ elev_list.append(radian_to_degree(np.arcsin(y)))
208
+ azim_list.append(radian_to_degree(theta))
209
+
210
+ radius = np.sqrt(1 - y * y) # radius at y
211
+ x = np.cos(theta) * radius
212
+ z = np.sin(theta) * radius
213
+
214
+ elev_list = [elev_list[i] for i in range(len(elev_list)) if flags[i]]
215
+ azim_list = [azim_list[i] for i in range(len(azim_list)) if flags[i]]
216
+
217
+ dist_list = [init_dist for _ in elev_list]
218
+ sector_list = ["good" for _ in elev_list] # HACK don't define sector names for now
219
+
220
+ return dist_list, elev_list, azim_list, sector_list
221
+
222
+
223
+ # ---------------- CAMERAS ----------------------
224
+
225
+
226
+ def init_camera(dist, elev, azim, image_size, device):
227
+ R, T = look_at_view_transform(dist, elev, azim)
228
+ image_size = torch.tensor([image_size, image_size]).unsqueeze(0)
229
+ cameras = PerspectiveCameras(R=R, T=T, device=device, image_size=image_size)
230
+
231
+ return cameras
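A minimal usage sketch of the viewpoint and camera helpers above, assuming it is run from the text2tex root so that lib.* is importable (as the module's own sys.path.append(".") expects); the sample_space and image_size values are illustrative.

# sketch: sample predefined viewpoints and build a PyTorch3D camera for the first one
import torch
from lib.camera_helper import init_viewpoints, init_camera

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

dist_list, elev_list, azim_list, sector_list, view_punishments = init_viewpoints(
    mode="predefined", sample_space=6, init_dist=1.0, init_elev=0.0,
    principle_directions=None, use_principle=True, use_objaverse=True)

cameras = init_camera(dist_list[0], elev_list[0], azim_list[0], image_size=512, device=device)
print(sector_list[0], cameras.get_camera_center())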
text2tex/lib/constants.py ADDED
@@ -0,0 +1,648 @@
1
+ PALETTE = {
2
+ 0: [255, 255, 255], # white - background
3
+ 1: [204, 50, 50], # red - old
4
+ 2: [231, 180, 22], # yellow - update
5
+ 3: [45, 201, 55] # green - new
6
+ }
7
+
8
+ QUAD_WEIGHTS = {
9
+ 0: 0, # background
10
+ 1: 0.1, # old
11
+ 2: 0.5, # update
12
+ 3: 1 # new
13
+ }
14
+
15
+ VIEWPOINTS = {
16
+ 1: {
17
+ "azim": [
18
+ 0
19
+ ],
20
+ "elev": [
21
+ 0
22
+ ],
23
+ "sector": [
24
+ "front"
25
+ ]
26
+ },
27
+ 2: {
28
+ "azim": [
29
+ 0,
30
+ 30
31
+ ],
32
+ "elev": [
33
+ 0,
34
+ 0
35
+ ],
36
+ "sector": [
37
+ "front",
38
+ "front"
39
+ ]
40
+ },
41
+ 4: {
42
+ "azim": [
43
+ 45,
44
+ 315,
45
+ 135,
46
+ 225,
47
+ ],
48
+ "elev": [
49
+ 0,
50
+ 0,
51
+ 0,
52
+ 0,
53
+ ],
54
+ "sector": [
55
+ "front right",
56
+ "front left",
57
+ "back right",
58
+ "back left",
59
+ ]
60
+ },
61
+ 6: {
62
+ "azim": [
63
+ 0,
64
+ 90,
65
+ 270,
66
+ 0,
67
+ 180,
68
+ 0
69
+ ],
70
+ "elev": [
71
+ 0,
72
+ 0,
73
+ 0,
74
+ 90,
75
+ 0,
76
+ -90
77
+ ],
78
+ "sector": [
79
+ "front",
80
+ "right",
81
+ "left",
82
+ "top",
83
+ "back",
84
+ "bottom",
85
+ ]
86
+ },
87
+ "shapenet": {
88
+ "azim": [
89
+ 270,
90
+ 315,
91
+ 225,
92
+ 0,
93
+ 180,
94
+ 45,
95
+ 135,
96
+ 90,
97
+ 270,
98
+ 270
99
+ ],
100
+ "elev": [
101
+ 15,
102
+ 15,
103
+ 15,
104
+ 15,
105
+ 15,
106
+ 15,
107
+ 15,
108
+ 15,
109
+ 90,
110
+ -90
111
+ ],
112
+ "sector": [
113
+ "front",
114
+ "front right",
115
+ "front left",
116
+ "right",
117
+ "left",
118
+ "back right",
119
+ "back left",
120
+ "back",
121
+ "top",
122
+ "bottom",
123
+ ]
124
+ },
125
+ "objaverse": {
126
+ "azim": [
127
+ 0,
128
+ 45,
129
+ 315,
130
+ 90,
131
+ 270,
132
+ 135,
133
+ 225,
134
+ 180,
135
+ 0,
136
+ 0
137
+ ],
138
+ "elev": [
139
+ 15,
140
+ 15,
141
+ 15,
142
+ 15,
143
+ 15,
144
+ 15,
145
+ 15,
146
+ 15,
147
+ 90,
148
+ -90
149
+ ],
150
+ "sector": [
151
+ "front",
152
+ "front right",
153
+ "front left",
154
+ "right",
155
+ "left",
156
+ "back right",
157
+ "back left",
158
+ "back",
159
+ "top",
160
+ "bottom",
161
+ ]
162
+ },
163
+ 12: {
164
+ "azim": [
165
+ 45,
166
+ 315,
167
+ 135,
168
+ 225,
169
+
170
+ 0,
171
+ 45,
172
+ 315,
173
+ 90,
174
+ 270,
175
+ 135,
176
+ 225,
177
+ 180,
178
+ ],
179
+ "elev": [
180
+ 0,
181
+ 0,
182
+ 0,
183
+ 0,
184
+
185
+ 45,
186
+ 45,
187
+ 45,
188
+ 45,
189
+ 45,
190
+ 45,
191
+ 45,
192
+ 45,
193
+ ],
194
+ "sector": [
195
+ "front right",
196
+ "front left",
197
+ "back right",
198
+ "back left",
199
+
200
+ "front",
201
+ "front right",
202
+ "front left",
203
+ "right",
204
+ "left",
205
+ "back right",
206
+ "back left",
207
+ "back",
208
+ ]
209
+ },
210
+ 20: {
211
+ "azim": [
212
+ 45,
213
+ 315,
214
+ 135,
215
+ 225,
216
+
217
+ 0,
218
+ 45,
219
+ 315,
220
+ 90,
221
+ 270,
222
+ 135,
223
+ 225,
224
+ 180,
225
+
226
+ 0,
227
+ 45,
228
+ 315,
229
+ 90,
230
+ 270,
231
+ 135,
232
+ 225,
233
+ 180,
234
+ ],
235
+ "elev": [
236
+ 0,
237
+ 0,
238
+ 0,
239
+ 0,
240
+
241
+ 30,
242
+ 30,
243
+ 30,
244
+ 30,
245
+ 30,
246
+ 30,
247
+ 30,
248
+ 30,
249
+
250
+ 60,
251
+ 60,
252
+ 60,
253
+ 60,
254
+ 60,
255
+ 60,
256
+ 60,
257
+ 60,
258
+ ],
259
+ "sector": [
260
+ "front right",
261
+ "front left",
262
+ "back right",
263
+ "back left",
264
+
265
+ "front",
266
+ "front right",
267
+ "front left",
268
+ "right",
269
+ "left",
270
+ "back right",
271
+ "back left",
272
+ "back",
273
+
274
+ "front",
275
+ "front right",
276
+ "front left",
277
+ "right",
278
+ "left",
279
+ "back right",
280
+ "back left",
281
+ "back",
282
+ ]
283
+ },
284
+ 36: {
285
+ "azim": [
286
+ 45,
287
+ 315,
288
+ 135,
289
+ 225,
290
+
291
+ 0,
292
+ 45,
293
+ 315,
294
+ 90,
295
+ 270,
296
+ 135,
297
+ 225,
298
+ 180,
299
+
300
+ 0,
301
+ 45,
302
+ 315,
303
+ 90,
304
+ 270,
305
+ 135,
306
+ 225,
307
+ 180,
308
+
309
+ 22.5,
310
+ 337.5,
311
+ 67.5,
312
+ 292.5,
313
+ 112.5,
314
+ 247.5,
315
+ 157.5,
316
+ 202.5,
317
+
318
+ 22.5,
319
+ 337.5,
320
+ 67.5,
321
+ 292.5,
322
+ 112.5,
323
+ 247.5,
324
+ 157.5,
325
+ 202.5,
326
+ ],
327
+ "elev": [
328
+ 0,
329
+ 0,
330
+ 0,
331
+ 0,
332
+
333
+ 30,
334
+ 30,
335
+ 30,
336
+ 30,
337
+ 30,
338
+ 30,
339
+ 30,
340
+ 30,
341
+
342
+ 60,
343
+ 60,
344
+ 60,
345
+ 60,
346
+ 60,
347
+ 60,
348
+ 60,
349
+ 60,
350
+
351
+ 15,
352
+ 15,
353
+ 15,
354
+ 15,
355
+ 15,
356
+ 15,
357
+ 15,
358
+ 15,
359
+
360
+ 45,
361
+ 45,
362
+ 45,
363
+ 45,
364
+ 45,
365
+ 45,
366
+ 45,
367
+ 45,
368
+ ],
369
+ "sector": [
370
+ "front right",
371
+ "front left",
372
+ "back right",
373
+ "back left",
374
+
375
+ "front",
376
+ "front right",
377
+ "front left",
378
+ "right",
379
+ "left",
380
+ "back right",
381
+ "back left",
382
+ "back",
383
+
384
+ "top front",
385
+ "top right",
386
+ "top left",
387
+ "top right",
388
+ "top left",
389
+ "top right",
390
+ "top left",
391
+ "top back",
392
+
393
+ "front right",
394
+ "front left",
395
+ "front right",
396
+ "front left",
397
+ "back right",
398
+ "back left",
399
+ "back right",
400
+ "back left",
401
+
402
+ "front right",
403
+ "front left",
404
+ "front right",
405
+ "front left",
406
+ "back right",
407
+ "back left",
408
+ "back right",
409
+ "back left",
410
+ ]
411
+ },
412
+ 68: {
413
+ "azim": [
414
+ 45,
415
+ 315,
416
+ 135,
417
+ 225,
418
+
419
+ 0,
420
+ 45,
421
+ 315,
422
+ 90,
423
+ 270,
424
+ 135,
425
+ 225,
426
+ 180,
427
+
428
+ 0,
429
+ 45,
430
+ 315,
431
+ 90,
432
+ 270,
433
+ 135,
434
+ 225,
435
+ 180,
436
+
437
+ 22.5,
438
+ 337.5,
439
+ 67.5,
440
+ 292.5,
441
+ 112.5,
442
+ 247.5,
443
+ 157.5,
444
+ 202.5,
445
+
446
+ 22.5,
447
+ 337.5,
448
+ 67.5,
449
+ 292.5,
450
+ 112.5,
451
+ 247.5,
452
+ 157.5,
453
+ 202.5,
454
+
455
+ 0,
456
+ 45,
457
+ 315,
458
+ 90,
459
+ 270,
460
+ 135,
461
+ 225,
462
+ 180,
463
+
464
+ 0,
465
+ 45,
466
+ 315,
467
+ 90,
468
+ 270,
469
+ 135,
470
+ 225,
471
+ 180,
472
+
473
+ 22.5,
474
+ 337.5,
475
+ 67.5,
476
+ 292.5,
477
+ 112.5,
478
+ 247.5,
479
+ 157.5,
480
+ 202.5,
481
+
482
+ 22.5,
483
+ 337.5,
484
+ 67.5,
485
+ 292.5,
486
+ 112.5,
487
+ 247.5,
488
+ 157.5,
489
+ 202.5
490
+ ],
491
+ "elev": [
492
+ 0,
493
+ 0,
494
+ 0,
495
+ 0,
496
+
497
+ 30,
498
+ 30,
499
+ 30,
500
+ 30,
501
+ 30,
502
+ 30,
503
+ 30,
504
+ 30,
505
+
506
+ 60,
507
+ 60,
508
+ 60,
509
+ 60,
510
+ 60,
511
+ 60,
512
+ 60,
513
+ 60,
514
+
515
+ 15,
516
+ 15,
517
+ 15,
518
+ 15,
519
+ 15,
520
+ 15,
521
+ 15,
522
+ 15,
523
+
524
+ 45,
525
+ 45,
526
+ 45,
527
+ 45,
528
+ 45,
529
+ 45,
530
+ 45,
531
+ 45,
532
+
533
+ -30,
534
+ -30,
535
+ -30,
536
+ -30,
537
+ -30,
538
+ -30,
539
+ -30,
540
+ -30,
541
+
542
+ -60,
543
+ -60,
544
+ -60,
545
+ -60,
546
+ -60,
547
+ -60,
548
+ -60,
549
+ -60,
550
+
551
+ -15,
552
+ -15,
553
+ -15,
554
+ -15,
555
+ -15,
556
+ -15,
557
+ -15,
558
+ -15,
559
+
560
+ -45,
561
+ -45,
562
+ -45,
563
+ -45,
564
+ -45,
565
+ -45,
566
+ -45,
567
+ -45,
568
+ ],
569
+ "sector": [
570
+ "front right",
571
+ "front left",
572
+ "back right",
573
+ "back left",
574
+
575
+ "front",
576
+ "front right",
577
+ "front left",
578
+ "right",
579
+ "left",
580
+ "back right",
581
+ "back left",
582
+ "back",
583
+
584
+ "top front",
585
+ "top right",
586
+ "top left",
587
+ "top right",
588
+ "top left",
589
+ "top right",
590
+ "top left",
591
+ "top back",
592
+
593
+ "front right",
594
+ "front left",
595
+ "front right",
596
+ "front left",
597
+ "back right",
598
+ "back left",
599
+ "back right",
600
+ "back left",
601
+
602
+ "front right",
603
+ "front left",
604
+ "front right",
605
+ "front left",
606
+ "back right",
607
+ "back left",
608
+ "back right",
609
+ "back left",
610
+
611
+ "front",
612
+ "front right",
613
+ "front left",
614
+ "right",
615
+ "left",
616
+ "back right",
617
+ "back left",
618
+ "back",
619
+
620
+ "bottom front",
621
+ "bottom right",
622
+ "bottom left",
623
+ "bottom right",
624
+ "bottom left",
625
+ "bottom right",
626
+ "bottom left",
627
+ "bottom back",
628
+
629
+ "bottom front right",
630
+ "bottom front left",
631
+ "bottom front right",
632
+ "bottom front left",
633
+ "bottom back right",
634
+ "bottom back left",
635
+ "bottom back right",
636
+ "bottom back left",
637
+
638
+ "bottom front right",
639
+ "bottom front left",
640
+ "bottom front right",
641
+ "bottom front left",
642
+ "bottom back right",
643
+ "bottom back left",
644
+ "bottom back right",
645
+ "bottom back left",
646
+ ]
647
+ }
648
+ }
text2tex/lib/diffusion_helper.py ADDED
@@ -0,0 +1,189 @@
1
+ import torch
2
+
3
+ import cv2
4
+ import numpy as np
5
+
6
+ from PIL import Image
7
+ from torchvision import transforms
8
+
9
+ # Stable Diffusion 2
10
+ from diffusers import (
11
+ StableDiffusionInpaintPipeline,
12
+ StableDiffusionPipeline,
13
+ EulerDiscreteScheduler
14
+ )
15
+
16
+ # customized
17
+ import sys
18
+ sys.path.append(".")
19
+
20
+ from models.ControlNet.gradio_depth2image import init_model, process
21
+
22
+
23
+ def get_controlnet_depth():
24
+ print("=> initializing ControlNet Depth...")
25
+ model, ddim_sampler = init_model()
26
+
27
+ return model, ddim_sampler
28
+
29
+
30
+ def get_inpainting(device):
31
+ print("=> initializing Inpainting...")
32
+
33
+ model = StableDiffusionInpaintPipeline.from_pretrained(
34
+ "stabilityai/stable-diffusion-2-inpainting",
35
+ torch_dtype=torch.float16,
36
+ ).to(device)
37
+
38
+ return model
39
+
40
+ def get_text2image(device):
41
+ print("=> initializing Inpainting...")
42
+
43
+ model_id = "stabilityai/stable-diffusion-2"
44
+ scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
45
+ model = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16).to(device)
46
+
47
+ return model
48
+
49
+
50
+ @torch.no_grad()
51
+ def apply_controlnet_depth(model, ddim_sampler,
52
+ init_image, prompt, strength, ddim_steps,
53
+ generate_mask_image, keep_mask_image, depth_map_np,
54
+ a_prompt, n_prompt, guidance_scale, seed, eta, num_samples,
55
+ device, blend=0, save_memory=False):
56
+ """
57
+ Use Stable Diffusion 2 to generate image
58
+
59
+ Arguments:
60
+ args: input arguments
61
+ model: Stable Diffusion 2 model
62
+ init_image_tensor: input image, torch.FloatTensor of shape (1, H, W, 3)
63
+ mask_tensor: depth map of the input image, torch.FloatTensor of shape (1, H, W, 1)
64
+ depth_map_np: depth map of the input image, torch.FloatTensor of shape (1, H, W)
65
+ """
66
+
67
+ print("=> generating ControlNet Depth RePaint image...")
68
+
69
+
70
+ # Stable Diffusion 2 receives PIL.Image
71
+ # NOTE Stable Diffusion 2 returns a PIL.Image object
72
+ # image and mask_image should be PIL images.
73
+ # The mask structure is white for inpainting and black for keeping as is
74
+ diffused_image_np = process(
75
+ model, ddim_sampler,
76
+ np.array(init_image), prompt, a_prompt, n_prompt, num_samples,
77
+ ddim_steps, guidance_scale, seed, eta,
78
+ strength=strength, detected_map=depth_map_np, unknown_mask=np.array(generate_mask_image), save_memory=save_memory
79
+ )[0]
80
+
81
+ init_image = init_image.convert("RGB")
82
+ diffused_image = Image.fromarray(diffused_image_np).convert("RGB")
83
+
84
+ if blend > 0 and transforms.ToTensor()(keep_mask_image).sum() > 0:
85
+ print("=> blending the generated region...")
86
+ kernel_size = 3
87
+ kernel = np.ones((kernel_size, kernel_size), np.uint8)
88
+
89
+ keep_image_np = np.array(init_image).astype(np.uint8)
90
+ keep_image_np_dilate = cv2.dilate(keep_image_np, kernel, iterations=1)
91
+
92
+ keep_mask_np = np.array(keep_mask_image).astype(np.uint8)
93
+ keep_mask_np_dilate = cv2.dilate(keep_mask_np, kernel, iterations=1)
94
+
95
+ generate_image_np = np.array(diffused_image).astype(np.uint8)
96
+
97
+ overlap_mask_np = np.array(generate_mask_image).astype(np.uint8)
98
+ overlap_mask_np *= keep_mask_np_dilate
99
+ print("=> blending {} pixels...".format(np.sum(overlap_mask_np)))
100
+
101
+ overlap_keep = keep_image_np_dilate[overlap_mask_np == 1]
102
+ overlap_generate = generate_image_np[overlap_mask_np == 1]
103
+
104
+ overlap_np = overlap_keep * blend + overlap_generate * (1 - blend)
105
+
106
+ generate_image_np[overlap_mask_np == 1] = overlap_np
107
+
108
+ diffused_image = Image.fromarray(generate_image_np.astype(np.uint8)).convert("RGB")
109
+
110
+ init_image_masked = init_image
111
+ diffused_image_masked = diffused_image
112
+
113
+ return diffused_image, init_image_masked, diffused_image_masked
114
+
115
+
116
+ @torch.no_grad()
117
+ def apply_inpainting(model,
118
+ init_image, mask_image_tensor, prompt, height, width, device):
119
+ """
120
+ Use Stable Diffusion 2 to generate image
121
+
122
+ Arguments:
123
+ args: input arguments
124
+ model: Stable Diffusion 2 model
125
+ init_image_tensor: input image, torch.FloatTensor of shape (1, H, W, 3)
126
+ mask_tensor: depth map of the input image, torch.FloatTensor of shape (1, H, W, 1)
127
+ depth_map_tensor: depth map of the input image, torch.FloatTensor of shape (1, H, W)
128
+ """
129
+
130
+ print("=> generating Inpainting image...")
131
+
132
+ mask_image = mask_image_tensor[0].cpu()
133
+ mask_image = mask_image.permute(2, 0, 1)
134
+ mask_image = transforms.ToPILImage()(mask_image).convert("L")
135
+
136
+ # NOTE Stable Diffusion 2 returns a PIL.Image object
137
+ # image and mask_image should be PIL images.
138
+ # The mask structure is white for inpainting and black for keeping as is
139
+ diffused_image = model(
140
+ prompt=prompt,
141
+ image=init_image.resize((512, 512)),
142
+ mask_image=mask_image.resize((512, 512)),
143
+ height=512,
144
+ width=512
145
+ ).images[0].resize((height, width))
146
+
147
+ return diffused_image
148
+
149
+
150
+ @torch.no_grad()
151
+ def apply_inpainting_postprocess(model,
152
+ init_image, mask_image_tensor, prompt, height, width, device):
153
+ """
154
+ Use Stable Diffusion 2 to generate image
155
+
156
+ Arguments:
157
+ args: input arguments
158
+ model: Stable Diffusion 2 model
159
+ init_image_tensor: input image, torch.FloatTensor of shape (1, H, W, 3)
160
+ mask_tensor: depth map of the input image, torch.FloatTensor of shape (1, H, W, 1)
161
+ depth_map_tensor: depth map of the input image, torch.FloatTensor of shape (1, H, W)
162
+ """
163
+
164
+ print("=> generating Inpainting image...")
165
+
166
+ mask_image = mask_image_tensor[0].cpu()
167
+ mask_image = mask_image.permute(2, 0, 1)
168
+ mask_image = transforms.ToPILImage()(mask_image).convert("L")
169
+
170
+ # NOTE Stable Diffusion 2 returns a PIL.Image object
171
+ # image and mask_image should be PIL images.
172
+ # The mask structure is white for inpainting and black for keeping as is
173
+ diffused_image = model(
174
+ prompt=prompt,
175
+ image=init_image.resize((512, 512)),
176
+ mask_image=mask_image.resize((512, 512)),
177
+ height=512,
178
+ width=512
179
+ ).images[0].resize((height, width))
180
+
181
+ diffused_image_tensor = torch.from_numpy(np.array(diffused_image)).to(device)
182
+
183
+ init_images_tensor = torch.from_numpy(np.array(init_image)).to(device)
184
+
185
+ init_images_tensor = diffused_image_tensor * mask_image_tensor[0] + init_images_tensor * (1 - mask_image_tensor[0])
186
+ init_image = Image.fromarray(init_images_tensor.cpu().numpy().astype(np.uint8)).convert("RGB")
187
+
188
+ return init_image
189
+
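A minimal usage sketch of the inpainting helpers above, assuming a CUDA device and the text2tex root as working directory (the module imports models.ControlNet at load time); the placeholder image, mask region, and prompt are illustrative.

# sketch: inpaint the white region of a mask with the Stable Diffusion 2 inpainting helper
import torch
from PIL import Image
from lib.diffusion_helper import get_inpainting, apply_inpainting

device = torch.device("cuda")
model = get_inpainting(device)

init_image = Image.new("RGB", (512, 512), (128, 128, 128))   # placeholder render
mask = torch.zeros(1, 512, 512, 1)                           # 1 = generate, 0 = keep
mask[:, 100:200, 100:200, :] = 1.0

result = apply_inpainting(model, init_image, mask, prompt="brick wall",
                          height=512, width=512, device=device)
result.save("inpainted.png")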
text2tex/lib/io_helper.py ADDED
@@ -0,0 +1,78 @@
+# common utils
+import os
+import json
+
+# numpy
+import numpy as np
+
+# visualization
+import matplotlib
+import matplotlib.cm as cm
+import matplotlib.pyplot as plt
+
+matplotlib.use("Agg")
+
+from pytorch3d.io import save_obj
+
+from torchvision import transforms
+
+
+def save_depth(fragments, output_dir, init_image, view_idx):
+    print("=> saving depth...")
+    width, height = init_image.size
+    dpi = 100
+    figsize = width / float(dpi), height / float(dpi)
+
+    depth_np = fragments.zbuf[0].cpu().numpy()
+
+    fig = plt.figure(figsize=figsize)
+    ax = fig.add_axes([0, 0, 1, 1])
+    # Hide spines, ticks, etc.
+    ax.axis('off')
+    # Display the image.
+    ax.imshow(depth_np, cmap='gray')
+
+    plt.savefig(os.path.join(output_dir, "{}.png".format(view_idx)), bbox_inches='tight', pad_inches=0)
+    np.save(os.path.join(output_dir, "{}.npy".format(view_idx)), depth_np[..., 0])
+
+
+def save_backproject_obj(output_dir, obj_name,
+    verts, faces, verts_uvs, faces_uvs, projected_texture,
+    device):
+    print("=> saving OBJ file...")
+    texture_map = transforms.ToTensor()(projected_texture).to(device)
+    texture_map = texture_map.permute(1, 2, 0)
+    obj_path = os.path.join(output_dir, obj_name)
+
+    save_obj(
+        obj_path,
+        verts=verts,
+        faces=faces,
+        decimal_places=5,
+        verts_uvs=verts_uvs,
+        faces_uvs=faces_uvs,
+        texture_map=texture_map
+    )
+
+
+def save_args(args, output_dir):
+    with open(os.path.join(output_dir, "args.json"), "w") as f:
+        json.dump(
+            {k: v for k, v in vars(args).items()},
+            f,
+            indent=4
+        )
+
+
+def save_viewpoints(args, output_dir, dist_list, elev_list, azim_list, view_list):
+    with open(os.path.join(output_dir, "viewpoints.json"), "w") as f:
+        json.dump(
+            {
+                "dist": dist_list,
+                "elev": elev_list,
+                "azim": azim_list,
+                "view": view_list
+            },
+            f,
+            indent=4
+        )
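A small usage sketch of the save helpers above, assuming the text2tex root as working directory; the argparse.Namespace fields and output location are illustrative.

# sketch: persist run arguments and selected viewpoints next to the outputs
import argparse
from lib.io_helper import save_args, save_viewpoints

args = argparse.Namespace(prompt="brick house", uv_size=1000, seed=42)  # illustrative args
save_args(args, ".")                                                    # writes ./args.json
save_viewpoints(args, ".", [1.0, 1.0], [0, 15], [0, 45], [0, 1])        # writes ./viewpoints.json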
text2tex/lib/mesh_helper.py ADDED
@@ -0,0 +1,148 @@
1
+ import os
2
+ import torch
3
+ import trimesh
4
+ import xatlas
5
+
6
+ import numpy as np
7
+
8
+ from sklearn.decomposition import PCA
9
+
10
+ from torchvision import transforms
11
+
12
+ from tqdm import tqdm
13
+
14
+ from pytorch3d.io import (
15
+ load_obj,
16
+ load_objs_as_meshes
17
+ )
18
+
19
+
20
+ def compute_principle_directions(model_path, num_points=20000):
21
+ mesh = trimesh.load_mesh(model_path, force="mesh")
22
+ pc, _ = trimesh.sample.sample_surface_even(mesh, num_points)
23
+
24
+ pc -= np.mean(pc, axis=0, keepdims=True)
25
+
26
+ principle_directions = PCA(n_components=3).fit(pc).components_
27
+
28
+ return principle_directions
29
+
30
+
31
+ def init_mesh(input_path, cache_path, device):
32
+ print("=> parameterizing target mesh...")
33
+
34
+ mesh = trimesh.load_mesh(input_path, force='mesh')
35
+ try:
36
+ vertices, faces = mesh.vertices, mesh.faces
37
+ except AttributeError:
38
+ print("multiple materials in {} are not supported".format(input_path))
39
+ exit()
40
+
41
+ vmapping, indices, uvs = xatlas.parametrize(vertices, faces)
42
+ xatlas.export(str(cache_path), vertices[vmapping], indices, uvs)
43
+
44
+ print("=> loading target mesh...")
45
+
46
+ # principle_directions = compute_principle_directions(cache_path)
47
+ principle_directions = None
48
+
49
+ _, faces, aux = load_obj(cache_path, device=device)
50
+ mesh = load_objs_as_meshes([cache_path], device=device)
51
+
52
+ num_verts = mesh.verts_packed().shape[0]
53
+
54
+ # make sure mesh center is at origin
55
+ bbox = mesh.get_bounding_boxes()
56
+ mesh_center = bbox.mean(dim=2).repeat(num_verts, 1)
57
+ mesh = apply_offsets_to_mesh(mesh, -mesh_center)
58
+
59
+ # make sure mesh size is normalized
60
+ box_size = bbox[..., 1] - bbox[..., 0]
61
+ box_max = box_size.max(dim=1, keepdim=True)[0].repeat(num_verts, 3)
62
+ mesh = apply_scale_to_mesh(mesh, 1 / box_max)
63
+
64
+ return mesh, mesh.verts_packed(), faces, aux, principle_directions, mesh_center, box_max
65
+
66
+
67
+ def apply_offsets_to_mesh(mesh, offsets):
68
+ new_mesh = mesh.offset_verts(offsets)
69
+
70
+ return new_mesh
71
+
72
+ def apply_scale_to_mesh(mesh, scale):
73
+ new_mesh = mesh.scale_verts(scale)
74
+
75
+ return new_mesh
76
+
77
+
78
+ def adjust_uv_map(faces, aux, init_texture, uv_size):
79
+ """
80
+ adjust UV map to be compatiable with multiple textures.
81
+ UVs for different materials will be decomposed and placed horizontally
82
+
83
+ +-----+-----+-----+--
84
+ | 1 | 2 | 3 |
85
+ +-----+-----+-----+--
86
+
87
+ """
88
+
89
+ textures_ids = faces.textures_idx
90
+ materials_idx = faces.materials_idx
91
+ verts_uvs = aux.verts_uvs
92
+
93
+ num_materials = torch.unique(materials_idx).shape[0]
94
+
95
+ new_verts_uvs = verts_uvs.clone()
96
+ for material_id in range(num_materials):
97
+ # apply offsets to horizontal axis
98
+ faces_ids = textures_ids[materials_idx == material_id].unique()
99
+ new_verts_uvs[faces_ids, 0] += material_id
100
+
101
+ new_verts_uvs[:, 0] /= num_materials
102
+
103
+ init_texture_tensor = transforms.ToTensor()(init_texture)
104
+ init_texture_tensor = torch.cat([init_texture_tensor for _ in range(num_materials)], dim=-1)
105
+ init_texture = transforms.ToPILImage()(init_texture_tensor).resize((uv_size, uv_size))
106
+
107
+ return new_verts_uvs, init_texture
108
+
109
+
110
+ @torch.no_grad()
111
+ def update_face_angles(mesh, cameras, fragments):
112
+ def get_angle(x, y):
113
+ x = torch.nn.functional.normalize(x)
114
+ y = torch.nn.functional.normalize(y)
115
+ inner_product = (x * y).sum(dim=1)
116
+ x_norm = x.pow(2).sum(dim=1).pow(0.5)
117
+ y_norm = y.pow(2).sum(dim=1).pow(0.5)
118
+ cos = inner_product / (x_norm * y_norm)
119
+ angle = torch.acos(cos)
120
+ angle = angle * 180 / 3.14159
121
+
122
+ return angle
123
+
124
+ # face normals
125
+ face_normals = mesh.faces_normals_padded()[0]
126
+
127
+ # view vector (object center -> camera center)
128
+ camera_center = cameras.get_camera_center()
129
+
130
+ face_angles = get_angle(
131
+ face_normals,
132
+ camera_center.repeat(face_normals.shape[0], 1)
133
+ ) # (F)
134
+
135
+ face_angles_rev = get_angle(
136
+ face_normals,
137
+ -camera_center.repeat(face_normals.shape[0], 1)
138
+ ) # (F)
139
+
140
+ face_angles = torch.minimum(face_angles, face_angles_rev)
141
+
142
+ # Indices of unique visible faces
143
+ visible_map = fragments.pix_to_face.unique() # (num_visible_faces)
144
+ invisible_mask = torch.ones_like(face_angles)
145
+ invisible_mask[visible_map] = 0
146
+ face_angles[invisible_mask == 1] = 10000. # angles of invisible faces are ignored
147
+
148
+ return face_angles
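A minimal usage sketch of init_mesh, assuming the text2tex root as working directory; the example paths are hypothetical. The helper re-parameterizes the mesh with xatlas, writes the result to cache_path, then loads it centered at the origin and scaled into the unit box.

# sketch: parameterize and load a normalized mesh
import torch
from lib.mesh_helper import init_mesh

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
mesh, verts, faces, aux, principle_directions, mesh_center, scale = init_mesh(
    input_path="examples/house/mesh.obj",                    # hypothetical input path
    cache_path="examples/house/mesh_parameterized.obj",      # hypothetical cache path
    device=device)
print(verts.shape, mesh.get_bounding_boxes())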
text2tex/lib/projection_helper.py ADDED
@@ -0,0 +1,464 @@
1
+ import os
2
+ import torch
3
+
4
+ import cv2
5
+ import random
6
+
7
+ import numpy as np
8
+
9
+ from torchvision import transforms
10
+
11
+ from pytorch3d.renderer import TexturesUV
12
+ from pytorch3d.ops import interpolate_face_attributes
13
+
14
+ from PIL import Image
15
+
16
+ from tqdm import tqdm
17
+
18
+ # customized
19
+ import sys
20
+ sys.path.append(".")
21
+
22
+ from lib.camera_helper import init_camera
23
+ from lib.render_helper import init_renderer, render
24
+ from lib.shading_helper import (
25
+ BlendParams,
26
+ init_soft_phong_shader,
27
+ init_flat_texel_shader,
28
+ )
29
+ from lib.vis_helper import visualize_outputs, visualize_quad_mask
30
+ from lib.constants import *
31
+
32
+
33
+ def get_all_4_locations(values_y, values_x):
34
+ y_0 = torch.floor(values_y)
35
+ y_1 = torch.ceil(values_y)
36
+ x_0 = torch.floor(values_x)
37
+ x_1 = torch.ceil(values_x)
38
+
39
+ return torch.cat([y_0, y_0, y_1, y_1], 0).long(), torch.cat([x_0, x_1, x_0, x_1], 0).long()
40
+
41
+
42
+ def compose_quad_mask(new_mask_image, update_mask_image, old_mask_image, device):
43
+ """
44
+ compose quad mask:
45
+ -> 0: background
46
+ -> 1: old
47
+ -> 2: update
48
+ -> 3: new
49
+ """
50
+
51
+ new_mask_tensor = transforms.ToTensor()(new_mask_image).to(device)
52
+ update_mask_tensor = transforms.ToTensor()(update_mask_image).to(device)
53
+ old_mask_tensor = transforms.ToTensor()(old_mask_image).to(device)
54
+
55
+ all_mask_tensor = new_mask_tensor + update_mask_tensor + old_mask_tensor
56
+
57
+ quad_mask_tensor = torch.zeros_like(all_mask_tensor)
58
+ quad_mask_tensor[old_mask_tensor == 1] = 1
59
+ quad_mask_tensor[update_mask_tensor == 1] = 2
60
+ quad_mask_tensor[new_mask_tensor == 1] = 3
61
+
62
+ return old_mask_tensor, update_mask_tensor, new_mask_tensor, all_mask_tensor, quad_mask_tensor
63
+
64
+
65
+ def compute_view_heat(similarity_tensor, quad_mask_tensor):
66
+ num_total_pixels = quad_mask_tensor.reshape(-1).shape[0]
67
+ heat = 0
68
+ for idx in QUAD_WEIGHTS:
69
+ heat += (quad_mask_tensor == idx).sum() * QUAD_WEIGHTS[idx] / num_total_pixels
70
+
71
+ return heat
72
+
73
+
74
+ def select_viewpoint(selected_view_ids, view_punishments,
75
+ mode, dist_list, elev_list, azim_list, sector_list, view_idx,
76
+ similarity_texture_cache, exist_texture,
77
+ mesh, faces, verts_uvs,
78
+ image_size, faces_per_pixel,
79
+ init_image_dir, mask_image_dir, normal_map_dir, depth_map_dir, similarity_map_dir,
80
+ device, use_principle=False
81
+ ):
82
+ if mode == "sequential":
83
+
84
+ num_views = len(dist_list)
85
+
86
+ dist = dist_list[view_idx % num_views]
87
+ elev = elev_list[view_idx % num_views]
88
+ azim = azim_list[view_idx % num_views]
89
+ sector = sector_list[view_idx % num_views]
90
+
91
+ selected_view_ids.append(view_idx % num_views)
92
+
93
+ elif mode == "heuristic":
94
+
95
+ if use_principle and view_idx < 6:
96
+
97
+ selected_view_idx = view_idx
98
+
99
+ else:
100
+
101
+ selected_view_idx = None
102
+ max_heat = 0
103
+
104
+ print("=> selecting next view...")
105
+ view_heat_list = []
106
+ for sample_idx in tqdm(range(len(dist_list))):
107
+
108
+ view_heat, *_ = render_one_view_and_build_masks(dist_list[sample_idx], elev_list[sample_idx], azim_list[sample_idx],
109
+ sample_idx, sample_idx, view_punishments,
110
+ similarity_texture_cache, exist_texture,
111
+ mesh, faces, verts_uvs,
112
+ image_size, faces_per_pixel,
113
+ init_image_dir, mask_image_dir, normal_map_dir, depth_map_dir, similarity_map_dir,
114
+ device)
115
+
116
+ if view_heat > max_heat:
117
+ selected_view_idx = sample_idx
118
+ max_heat = view_heat
119
+
120
+ view_heat_list.append(view_heat.item())
121
+
122
+ print(view_heat_list)
123
+ print("select view {} with heat {}".format(selected_view_idx, max_heat))
124
+
125
+
126
+ dist = dist_list[selected_view_idx]
127
+ elev = elev_list[selected_view_idx]
128
+ azim = azim_list[selected_view_idx]
129
+ sector = sector_list[selected_view_idx]
130
+
131
+ selected_view_ids.append(selected_view_idx)
132
+
133
+ view_punishments[selected_view_idx] *= 0.01
134
+
135
+ elif mode == "random":
136
+
137
+ selected_view_idx = random.choice(range(len(dist_list)))
138
+
139
+ dist = dist_list[selected_view_idx]
140
+ elev = elev_list[selected_view_idx]
141
+ azim = azim_list[selected_view_idx]
142
+ sector = sector_list[selected_view_idx]
143
+
144
+ selected_view_ids.append(selected_view_idx)
145
+
146
+ else:
147
+ raise NotImplementedError()
148
+
149
+ return dist, elev, azim, sector, selected_view_ids, view_punishments
150
+
151
+
152
+ @torch.no_grad()
153
+ def build_backproject_mask(mesh, faces, verts_uvs,
154
+ cameras, reference_image, faces_per_pixel,
155
+ image_size, uv_size, device):
156
+ # construct pixel UVs
157
+ renderer_scaled = init_renderer(cameras,
158
+ shader=init_soft_phong_shader(
159
+ camera=cameras,
160
+ blend_params=BlendParams(),
161
+ device=device),
162
+ image_size=image_size,
163
+ faces_per_pixel=faces_per_pixel
164
+ )
165
+ fragments_scaled = renderer_scaled.rasterizer(mesh)
166
+
167
+ # get UV coordinates for each pixel
168
+ faces_verts_uvs = verts_uvs[faces.textures_idx]
169
+
170
+ pixel_uvs = interpolate_face_attributes(
171
+ fragments_scaled.pix_to_face, fragments_scaled.bary_coords, faces_verts_uvs
172
+ ) # NxHsxWsxKx2
173
+ pixel_uvs = pixel_uvs.permute(0, 3, 1, 2, 4).reshape(-1, 2)
174
+
175
+ texture_locations_y, texture_locations_x = get_all_4_locations(
176
+ (1 - pixel_uvs[:, 1]).reshape(-1) * (uv_size - 1),
177
+ pixel_uvs[:, 0].reshape(-1) * (uv_size - 1)
178
+ )
179
+
180
+ K = faces_per_pixel
181
+
182
+ texture_values = torch.from_numpy(np.array(reference_image.resize((image_size, image_size)))).float() / 255.
183
+ texture_values = texture_values.to(device).unsqueeze(0).expand([4, -1, -1, -1]).unsqueeze(0).expand([K, -1, -1, -1, -1])
184
+
185
+ # texture
186
+ texture_tensor = torch.zeros(uv_size, uv_size, 3).to(device)
187
+ texture_tensor[texture_locations_y, texture_locations_x, :] = texture_values.reshape(-1, 3)
188
+
189
+ return texture_tensor[:, :, 0]
190
+
191
+
192
+ @torch.no_grad()
193
+ def build_diffusion_mask(mesh_stuff,
194
+ renderer, exist_texture, similarity_texture_cache, target_value, device, image_size,
195
+ smooth_mask=False, view_threshold=0.01):
196
+
197
+ mesh, faces, verts_uvs = mesh_stuff
198
+ mask_mesh = mesh.clone() # NOTE in-place operation - DANGER!!!
199
+
200
+ # visible mask => the whole region
201
+ exist_texture_expand = exist_texture.unsqueeze(0).unsqueeze(-1).expand(-1, -1, -1, 3).to(device)
202
+ mask_mesh.textures = TexturesUV(
203
+ maps=torch.ones_like(exist_texture_expand),
204
+ faces_uvs=faces.textures_idx[None, ...],
205
+ verts_uvs=verts_uvs[None, ...],
206
+ sampling_mode="nearest"
207
+ )
208
+ # visible_mask_tensor, *_ = render(mask_mesh, renderer)
209
+ visible_mask_tensor, _, similarity_map_tensor, *_ = render(mask_mesh, renderer)
210
+ # faces that are too rotated away from the viewpoint will be treated as invisible
211
+ valid_mask_tensor = (similarity_map_tensor >= view_threshold).float()
212
+ visible_mask_tensor *= valid_mask_tensor
213
+
214
+ # nonexist mask <=> new mask
215
+ exist_texture_expand = exist_texture.unsqueeze(0).unsqueeze(-1).expand(-1, -1, -1, 3).to(device)
216
+ mask_mesh.textures = TexturesUV(
217
+ maps=1 - exist_texture_expand,
218
+ faces_uvs=faces.textures_idx[None, ...],
219
+ verts_uvs=verts_uvs[None, ...],
220
+ sampling_mode="nearest"
221
+ )
222
+ new_mask_tensor, *_ = render(mask_mesh, renderer)
223
+ new_mask_tensor *= valid_mask_tensor
224
+
225
+ # exist mask => visible mask - new mask
226
+ exist_mask_tensor = visible_mask_tensor - new_mask_tensor
227
+ exist_mask_tensor[exist_mask_tensor < 0] = 0 # NOTE dilate can lead to overflow
228
+
229
+ # all update mask
230
+ mask_mesh.textures = TexturesUV(
231
+ maps=(
232
+ similarity_texture_cache.argmax(0) == target_value
233
+ # # only consider the views that have already appeared before
234
+ # similarity_texture_cache[0:target_value+1].argmax(0) == target_value
235
+ ).float().unsqueeze(0).unsqueeze(-1).expand(-1, -1, -1, 3).to(device),
236
+ faces_uvs=faces.textures_idx[None, ...],
237
+ verts_uvs=verts_uvs[None, ...],
238
+ sampling_mode="nearest"
239
+ )
240
+ all_update_mask_tensor, *_ = render(mask_mesh, renderer)
241
+
242
+ # current update mask => intersection between all update mask and exist mask
243
+ update_mask_tensor = exist_mask_tensor * all_update_mask_tensor
244
+
245
+ # keep mask => exist mask - update mask
246
+ old_mask_tensor = exist_mask_tensor - update_mask_tensor
247
+
248
+ # convert
249
+ new_mask = new_mask_tensor[0].cpu().float().permute(2, 0, 1)
250
+ new_mask = transforms.ToPILImage()(new_mask).convert("L")
251
+
252
+ update_mask = update_mask_tensor[0].cpu().float().permute(2, 0, 1)
253
+ update_mask = transforms.ToPILImage()(update_mask).convert("L")
254
+
255
+ old_mask = old_mask_tensor[0].cpu().float().permute(2, 0, 1)
256
+ old_mask = transforms.ToPILImage()(old_mask).convert("L")
257
+
258
+ exist_mask = exist_mask_tensor[0].cpu().float().permute(2, 0, 1)
259
+ exist_mask = transforms.ToPILImage()(exist_mask).convert("L")
260
+
261
+ return new_mask, update_mask, old_mask, exist_mask
262
+
263
+
264
+ @torch.no_grad()
265
+ def render_one_view(mesh,
266
+ dist, elev, azim,
267
+ image_size, faces_per_pixel,
268
+ device):
269
+
270
+ # render the view
271
+ cameras = init_camera(
272
+ dist, elev, azim,
273
+ image_size, device
274
+ )
275
+ renderer = init_renderer(cameras,
276
+ shader=init_soft_phong_shader(
277
+ camera=cameras,
278
+ blend_params=BlendParams(),
279
+ device=device),
280
+ image_size=image_size,
281
+ faces_per_pixel=faces_per_pixel
282
+ )
283
+
284
+ init_images_tensor, normal_maps_tensor, similarity_tensor, depth_maps_tensor, fragments = render(mesh, renderer)
285
+
286
+ return (
287
+ cameras, renderer,
288
+ init_images_tensor, normal_maps_tensor, similarity_tensor, depth_maps_tensor, fragments
289
+ )
290
+
291
+
292
+ @torch.no_grad()
293
+ def build_similarity_texture_cache_for_all_views(mesh, faces, verts_uvs,
294
+ dist_list, elev_list, azim_list,
295
+ image_size, image_size_scaled, uv_size, faces_per_pixel,
296
+ device):
297
+
298
+ num_candidate_views = len(dist_list)
299
+ similarity_texture_cache = torch.zeros(num_candidate_views, uv_size, uv_size).to(device)
300
+
301
+ print("=> building similarity texture cache for all views...")
302
+ for i in tqdm(range(num_candidate_views)):
303
+ cameras, _, _, _, similarity_tensor, _, _ = render_one_view(mesh,
304
+ dist_list[i], elev_list[i], azim_list[i],
305
+ image_size, faces_per_pixel, device)
306
+
307
+ similarity_texture_cache[i] = build_backproject_mask(mesh, faces, verts_uvs,
308
+ cameras, transforms.ToPILImage()(similarity_tensor[0, :, :, 0]).convert("RGB"), faces_per_pixel,
309
+ image_size_scaled, uv_size, device)
310
+
311
+ return similarity_texture_cache
312
+
313
+
314
+ @torch.no_grad()
315
+ def render_one_view_and_build_masks(dist, elev, azim,
316
+ selected_view_idx, view_idx, view_punishments,
317
+ similarity_texture_cache, exist_texture,
318
+ mesh, faces, verts_uvs,
319
+ image_size, faces_per_pixel,
320
+ init_image_dir, mask_image_dir, normal_map_dir, depth_map_dir, similarity_map_dir,
321
+ device, save_intermediate=False, smooth_mask=False, view_threshold=0.01):
322
+
323
+ # render the view
324
+ (
325
+ cameras, renderer,
326
+ init_images_tensor, normal_maps_tensor, similarity_tensor, depth_maps_tensor, fragments
327
+ ) = render_one_view(mesh,
328
+ dist, elev, azim,
329
+ image_size, faces_per_pixel,
330
+ device
331
+ )
332
+
333
+ init_image = init_images_tensor[0].cpu()
334
+ init_image = init_image.permute(2, 0, 1)
335
+ init_image = transforms.ToPILImage()(init_image).convert("RGB")
336
+
337
+ normal_map = normal_maps_tensor[0].cpu()
338
+ normal_map = normal_map.permute(2, 0, 1)
339
+ normal_map = transforms.ToPILImage()(normal_map).convert("RGB")
340
+
341
+ depth_map = depth_maps_tensor[0].cpu().numpy()
342
+ depth_map = Image.fromarray(depth_map).convert("L")
343
+
344
+ similarity_map = similarity_tensor[0, :, :, 0].cpu()
345
+ similarity_map = transforms.ToPILImage()(similarity_map).convert("L")
346
+
347
+
348
+ flat_renderer = init_renderer(cameras,
349
+ shader=init_flat_texel_shader(
350
+ camera=cameras,
351
+ device=device),
352
+ image_size=image_size,
353
+ faces_per_pixel=faces_per_pixel
354
+ )
355
+ new_mask_image, update_mask_image, old_mask_image, exist_mask_image = build_diffusion_mask(
356
+ (mesh, faces, verts_uvs),
357
+ flat_renderer, exist_texture, similarity_texture_cache, selected_view_idx, device, image_size,
358
+ smooth_mask=smooth_mask, view_threshold=view_threshold
359
+ )
360
+ # NOTE the view idx is the absolute idx in the sample space (i.e. `selected_view_idx`)
361
+ # it should match with `similarity_texture_cache`
362
+
363
+ (
364
+ old_mask_tensor,
365
+ update_mask_tensor,
366
+ new_mask_tensor,
367
+ all_mask_tensor,
368
+ quad_mask_tensor
369
+ ) = compose_quad_mask(new_mask_image, update_mask_image, old_mask_image, device)
370
+
371
+ view_heat = compute_view_heat(similarity_tensor, quad_mask_tensor)
372
+ view_heat *= view_punishments[selected_view_idx]
373
+
374
+ # save intermediate results
375
+ if save_intermediate:
376
+ init_image.save(os.path.join(init_image_dir, "{}.png".format(view_idx)))
377
+ normal_map.save(os.path.join(normal_map_dir, "{}.png".format(view_idx)))
378
+ depth_map.save(os.path.join(depth_map_dir, "{}.png".format(view_idx)))
379
+ similarity_map.save(os.path.join(similarity_map_dir, "{}.png".format(view_idx)))
380
+
381
+ new_mask_image.save(os.path.join(mask_image_dir, "{}_new.png".format(view_idx)))
382
+ update_mask_image.save(os.path.join(mask_image_dir, "{}_update.png".format(view_idx)))
383
+ old_mask_image.save(os.path.join(mask_image_dir, "{}_old.png".format(view_idx)))
384
+ exist_mask_image.save(os.path.join(mask_image_dir, "{}_exist.png".format(view_idx)))
385
+
386
+ visualize_quad_mask(mask_image_dir, quad_mask_tensor, view_idx, view_heat, device)
387
+
388
+ return (
389
+ view_heat,
390
+ renderer, cameras, fragments,
391
+ init_image, normal_map, depth_map,
392
+ init_images_tensor, normal_maps_tensor, depth_maps_tensor, similarity_tensor,
393
+ old_mask_image, update_mask_image, new_mask_image,
394
+ old_mask_tensor, update_mask_tensor, new_mask_tensor, all_mask_tensor, quad_mask_tensor
395
+ )
396
+
397
+
398
+
399
+ @torch.no_grad()
400
+ def backproject_from_image(mesh, faces, verts_uvs, cameras,
401
+ reference_image, new_mask_image, update_mask_image,
402
+ init_texture, exist_texture,
403
+ image_size, uv_size, faces_per_pixel,
404
+ device):
405
+
406
+ # construct pixel UVs
407
+ renderer_scaled = init_renderer(cameras,
408
+ shader=init_soft_phong_shader(
409
+ camera=cameras,
410
+ blend_params=BlendParams(),
411
+ device=device),
412
+ image_size=image_size,
413
+ faces_per_pixel=faces_per_pixel
414
+ )
415
+ fragments_scaled = renderer_scaled.rasterizer(mesh)
416
+
417
+ # get UV coordinates for each pixel
418
+ faces_verts_uvs = verts_uvs[faces.textures_idx]
419
+
420
+ pixel_uvs = interpolate_face_attributes(
421
+ fragments_scaled.pix_to_face, fragments_scaled.bary_coords, faces_verts_uvs
422
+ ) # NxHsxWsxKx2
423
+ pixel_uvs = pixel_uvs.permute(0, 3, 1, 2, 4).reshape(pixel_uvs.shape[-2], pixel_uvs.shape[1], pixel_uvs.shape[2], 2)
424
+
425
+ # the update mask has to be on top of the diffusion mask
426
+ new_mask_image_tensor = transforms.ToTensor()(new_mask_image).to(device).unsqueeze(-1)
427
+ update_mask_image_tensor = transforms.ToTensor()(update_mask_image).to(device).unsqueeze(-1)
428
+
429
+ project_mask_image_tensor = torch.logical_or(update_mask_image_tensor, new_mask_image_tensor).float()
430
+ project_mask_image = project_mask_image_tensor * 255.
431
+ project_mask_image = Image.fromarray(project_mask_image[0, :, :, 0].cpu().numpy().astype(np.uint8))
432
+
433
+ project_mask_image_scaled = project_mask_image.resize(
434
+ (image_size, image_size),
435
+ Image.Resampling.NEAREST
436
+ )
437
+ project_mask_image_tensor_scaled = transforms.ToTensor()(project_mask_image_scaled).to(device)
438
+
439
+ pixel_uvs_masked = pixel_uvs[project_mask_image_tensor_scaled == 1]
440
+
441
+ texture_locations_y, texture_locations_x = get_all_4_locations(
442
+ (1 - pixel_uvs_masked[:, 1]).reshape(-1) * (uv_size - 1),
443
+ pixel_uvs_masked[:, 0].reshape(-1) * (uv_size - 1)
444
+ )
445
+
446
+ K = pixel_uvs.shape[0]
447
+ project_mask_image_tensor_scaled = project_mask_image_tensor_scaled[:, None, :, :, None].repeat(1, 4, 1, 1, 3)
448
+
449
+ texture_values = torch.from_numpy(np.array(reference_image.resize((image_size, image_size))))
450
+ texture_values = texture_values.to(device).unsqueeze(0).expand([4, -1, -1, -1]).unsqueeze(0).expand([K, -1, -1, -1, -1])
451
+
452
+ texture_values_masked = texture_values.reshape(-1, 3)[project_mask_image_tensor_scaled.reshape(-1, 3) == 1].reshape(-1, 3)
453
+
454
+ # texture
455
+ texture_tensor = torch.from_numpy(np.array(init_texture)).to(device)
456
+ texture_tensor[texture_locations_y, texture_locations_x, :] = texture_values_masked
457
+
458
+ init_texture = Image.fromarray(texture_tensor.cpu().numpy().astype(np.uint8))
459
+
460
+ # update texture cache
461
+ exist_texture[texture_locations_y, texture_locations_x] = 1
462
+
463
+ return init_texture, project_mask_image, exist_texture
464
+
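The two backprojection routines above splat each masked pixel's colour onto the four integer texels surrounding its fractional UV coordinate via `get_all_4_locations` (defined earlier in this file), which is why `texture_values` is expanded by a factor of 4 before the scatter. As a rough illustration only, here is a minimal sketch of such a helper, assuming plain floor/ceil splatting and not necessarily matching the exact implementation in this commit:

    import torch

    def get_all_4_locations_sketch(values_y: torch.Tensor, values_x: torch.Tensor):
        # four (y, x) neighbours of each fractional texel coordinate:
        # (floor, floor), (floor, ceil), (ceil, floor), (ceil, ceil)
        y0, y1 = torch.floor(values_y), torch.ceil(values_y)
        x0, x1 = torch.floor(values_x), torch.ceil(values_x)
        return (
            torch.cat([y0, y0, y1, y1], 0).long(),
            torch.cat([x0, x1, x0, x1], 0).long(),
        )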
text2tex/lib/render_helper.py ADDED
@@ -0,0 +1,108 @@
1
+ import os
2
+ import torch
3
+
4
+ import cv2
5
+
6
+ import numpy as np
7
+
8
+ from PIL import Image
9
+
10
+ from torchvision import transforms
11
+ from pytorch3d.ops import interpolate_face_attributes
12
+ from pytorch3d.renderer import (
13
+ RasterizationSettings,
14
+ MeshRendererWithFragments,
15
+ MeshRasterizer,
16
+ )
17
+
18
+ # customized
19
+ import sys
20
+ sys.path.append(".")
21
+
22
+
23
+ def init_renderer(camera, shader, image_size, faces_per_pixel):
24
+ raster_settings = RasterizationSettings(image_size=image_size, faces_per_pixel=faces_per_pixel)
25
+ renderer = MeshRendererWithFragments(
26
+ rasterizer=MeshRasterizer(
27
+ cameras=camera,
28
+ raster_settings=raster_settings
29
+ ),
30
+ shader=shader
31
+ )
32
+
33
+ return renderer
34
+
35
+
36
+ @torch.no_grad()
37
+ def render(mesh, renderer, pad_value=10):
38
+ def phong_normal_shading(meshes, fragments) -> torch.Tensor:
39
+ faces = meshes.faces_packed() # (F, 3)
40
+ vertex_normals = meshes.verts_normals_packed() # (V, 3)
41
+ faces_normals = vertex_normals[faces]
42
+ pixel_normals = interpolate_face_attributes(
43
+ fragments.pix_to_face, fragments.bary_coords, faces_normals
44
+ )
45
+
46
+ return pixel_normals
47
+
48
+ def similarity_shading(meshes, fragments):
49
+ faces = meshes.faces_packed() # (F, 3)
50
+ vertex_normals = meshes.verts_normals_packed() # (V, 3)
51
+ faces_normals = vertex_normals[faces]
52
+ vertices = meshes.verts_packed() # (V, 3)
53
+ face_positions = vertices[faces]
54
+ view_directions = torch.nn.functional.normalize((renderer.shader.cameras.get_camera_center().reshape(1, 1, 3) - face_positions), p=2, dim=2)
55
+ cosine_similarity = torch.nn.CosineSimilarity(dim=2)(faces_normals, view_directions)
56
+ pixel_similarity = interpolate_face_attributes(
57
+ fragments.pix_to_face, fragments.bary_coords, cosine_similarity.unsqueeze(-1)
58
+ )
59
+
60
+ return pixel_similarity
61
+
62
+ def get_relative_depth_map(fragments, pad_value=pad_value):
63
+ absolute_depth = fragments.zbuf[..., 0] # B, H, W
64
+ no_depth = -1
65
+
66
+ depth_min, depth_max = absolute_depth[absolute_depth != no_depth].min(), absolute_depth[absolute_depth != no_depth].max()
67
+ target_min, target_max = 50, 255
68
+
69
+ depth_value = absolute_depth[absolute_depth != no_depth]
70
+ depth_value = depth_max - depth_value # reverse values
71
+
72
+ depth_value /= (depth_max - depth_min)
73
+ depth_value = depth_value * (target_max - target_min) + target_min
74
+
75
+ relative_depth = absolute_depth.clone()
76
+ relative_depth[absolute_depth != no_depth] = depth_value
77
+ relative_depth[absolute_depth == no_depth] = pad_value # not completely black
78
+
79
+ return relative_depth
80
+
81
+
82
+ images, fragments = renderer(mesh)
83
+ normal_maps = phong_normal_shading(mesh, fragments).squeeze(-2)
84
+ similarity_maps = similarity_shading(mesh, fragments).squeeze(-2) # -1 - 1
85
+ depth_maps = get_relative_depth_map(fragments)
86
+
87
+ # normalize similarity mask to 0 - 1
88
+ similarity_maps = torch.abs(similarity_maps) # 0 - 1
89
+
90
+ # HACK erode, eliminate isolated dots
91
+ non_zero_similarity = (similarity_maps > 0).float()
92
+ non_zero_similarity = (non_zero_similarity * 255.).cpu().numpy().astype(np.uint8)[0]
93
+ non_zero_similarity = cv2.erode(non_zero_similarity, kernel=np.ones((3, 3), np.uint8), iterations=2)
94
+ non_zero_similarity = torch.from_numpy(non_zero_similarity).to(similarity_maps.device).unsqueeze(0) / 255.
95
+ similarity_maps = non_zero_similarity.unsqueeze(-1) * similarity_maps
96
+
97
+ return images, normal_maps, similarity_maps, depth_maps, fragments
98
+
99
+
100
+ @torch.no_grad()
101
+ def check_visible_faces(mesh, fragments):
102
+ pix_to_face = fragments.pix_to_face
103
+
104
+ # Indices of unique visible faces
105
+ visible_map = pix_to_face.unique() # (num_visible_faces)
106
+
107
+ return visible_map
108
+
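For context on `get_relative_depth_map` above: valid z-buffer values are reversed (near becomes large), normalized by the visible depth range, and mapped into [50, 255], while empty pixels get the small `pad_value` so the background is not completely black. A standalone toy example of that mapping (illustration only, not part of the diff):

    import torch

    absolute_depth = torch.tensor([[-1.0, 2.0, 3.0, 4.0]])  # -1 marks "no geometry"
    no_depth, pad_value = -1, 10
    target_min, target_max = 50, 255

    valid = absolute_depth != no_depth
    d_min, d_max = absolute_depth[valid].min(), absolute_depth[valid].max()
    d = (d_max - absolute_depth[valid]) / (d_max - d_min)   # reverse and normalize
    d = d * (target_max - target_min) + target_min          # rescale to [50, 255]

    relative_depth = absolute_depth.clone()
    relative_depth[valid] = d
    relative_depth[~valid] = pad_value
    print(relative_depth)  # -> [[10., 255., 152.5, 50.]]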
text2tex/lib/shading_helper.py ADDED
@@ -0,0 +1,45 @@
1
+ from typing import NamedTuple, Sequence
2
+
3
+ from pytorch3d.renderer.mesh.shader import ShaderBase
4
+ from pytorch3d.renderer import (
5
+ AmbientLights,
6
+ SoftPhongShader
7
+ )
8
+
9
+
10
+ class BlendParams(NamedTuple):
11
+ sigma: float = 1e-4
12
+ gamma: float = 1e-4
13
+ background_color: Sequence = (1, 1, 1)
14
+
15
+
16
+ class FlatTexelShader(ShaderBase):
17
+
18
+ def __init__(self, device="cpu", cameras=None, lights=None, materials=None, blend_params=None):
19
+ super().__init__(device, cameras, lights, materials, blend_params)
20
+
21
+ def forward(self, fragments, meshes, **_kwargs):
22
+ texels = meshes.sample_textures(fragments)
23
+ texels[(fragments.pix_to_face == -1), :] = 0
24
+ return texels.squeeze(-2)
25
+
26
+
27
+ def init_soft_phong_shader(camera, blend_params, device):
28
+ lights = AmbientLights(device=device)
29
+ shader = SoftPhongShader(
30
+ cameras=camera,
31
+ lights=lights,
32
+ device=device,
33
+ blend_params=blend_params
34
+ )
35
+
36
+ return shader
37
+
38
+
39
+ def init_flat_texel_shader(camera, device):
40
+ shader=FlatTexelShader(
41
+ cameras=camera,
42
+ device=device
43
+ )
44
+
45
+ return shader
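These two shader factories are consumed through `init_renderer` from `render_helper.py`: the soft Phong shader drives the RGB/normal/similarity/depth renders, while the flat texel shader rasterizes raw texel values for the diffusion masks. Below is a sketch mirroring how `projection_helper.py` in this commit wires them up (the `make_renderers` wrapper is ours, added only for illustration):

    from lib.render_helper import init_renderer
    from lib.shading_helper import BlendParams, init_soft_phong_shader, init_flat_texel_shader

    def make_renderers(cameras, image_size, faces_per_pixel, device):
        # Phong renderer: RGB, normal, similarity and depth maps (see render_one_view).
        phong_renderer = init_renderer(
            cameras,
            shader=init_soft_phong_shader(camera=cameras, blend_params=BlendParams(), device=device),
            image_size=image_size,
            faces_per_pixel=faces_per_pixel,
        )
        # Flat texel renderer: raw texel colours, used to build the diffusion masks.
        flat_renderer = init_renderer(
            cameras,
            shader=init_flat_texel_shader(camera=cameras, device=device),
            image_size=image_size,
            faces_per_pixel=faces_per_pixel,
        )
        return phong_renderer, flat_renderer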
text2tex/lib/vis_helper.py ADDED
@@ -0,0 +1,209 @@
1
+ import os
2
+ import torch
3
+
4
+ import numpy as np
5
+
6
+ # visualization
7
+ import matplotlib
8
+ import matplotlib.cm as cm
9
+ import matplotlib.pyplot as plt
10
+
11
+ matplotlib.use("Agg")
12
+
13
+ from PIL import Image
14
+
15
+ # GIF
16
+ import imageio.v2 as imageio
17
+
18
+ # customized
19
+ import sys
20
+ sys.path.append(".")
21
+
22
+ from lib.constants import *
23
+ from lib.camera_helper import polar_to_xyz
24
+
25
+ def visualize_quad_mask(mask_image_dir, quad_mask_tensor, view_idx, view_score, device):
26
+ quad_mask_tensor = quad_mask_tensor.unsqueeze(-1).repeat(1, 1, 1, 3)
27
+ quad_mask_image_tensor = torch.zeros_like(quad_mask_tensor)
28
+
29
+ for idx in PALETTE:
30
+ selected = quad_mask_tensor[quad_mask_tensor == idx].reshape(-1, 3)
31
+ selected = torch.FloatTensor(PALETTE[idx]).to(device).unsqueeze(0).repeat(selected.shape[0], 1)
32
+
33
+ quad_mask_image_tensor[quad_mask_tensor == idx] = selected.reshape(-1)
34
+
35
+ quad_mask_image_np = quad_mask_image_tensor[0].cpu().numpy().astype(np.uint8)
36
+ quad_mask_image = Image.fromarray(quad_mask_image_np).convert("RGB")
37
+ quad_mask_image.save(os.path.join(mask_image_dir, "{}_quad_{:.5f}.png".format(view_idx, view_score)))
38
+
39
+
40
+ def visualize_outputs(output_dir, init_image_dir, mask_image_dir, inpainted_image_dir, num_views):
41
+ # subplot settings
42
+ num_col = 3
43
+ num_row = 1
44
+ subplot_size = 4
45
+
46
+ summary_image_dir = os.path.join(output_dir, "summary")
47
+ os.makedirs(summary_image_dir, exist_ok=True)
48
+
49
+ # graph settings
50
+ print("=> visualizing results...")
51
+ for view_idx in range(num_views):
52
+ plt.switch_backend("agg")
53
+ fig = plt.figure(dpi=100)
54
+ fig.set_size_inches(subplot_size * num_col, subplot_size * (num_row + 1))
55
+ fig.set_facecolor('white')
56
+
57
+ # rendering
58
+ plt.subplot2grid((num_row, num_col), (0, 0))
59
+ plt.imshow(Image.open(os.path.join(init_image_dir, "{}.png".format(view_idx))))
60
+ plt.text(0, 0, "Rendering", fontsize=16, color='black', backgroundcolor='white')
61
+ plt.axis('off')
62
+
63
+ # mask
64
+ plt.subplot2grid((num_row, num_col), (0, 1))
65
+ plt.imshow(Image.open(os.path.join(mask_image_dir, "{}_project.png".format(view_idx))))
66
+ plt.text(0, 0, "Project Mask", fontsize=16, color='black', backgroundcolor='white')
67
+ plt.set_cmap(cm.Greys_r)
68
+ plt.axis('off')
69
+
70
+ # inpainted
71
+ plt.subplot2grid((num_row, num_col), (0, 2))
72
+ plt.imshow(Image.open(os.path.join(inpainted_image_dir, "{}.png".format(view_idx))))
73
+ plt.text(0, 0, "Inpainted", fontsize=16, color='black', backgroundcolor='white')
74
+ plt.axis('off')
75
+
76
+
77
+ plt.savefig(os.path.join(summary_image_dir, "{}.png".format(view_idx)), bbox_inches="tight")
78
+ fig.clf()
79
+
80
+ # generate GIF
81
+ images = [imageio.imread(os.path.join(summary_image_dir, "{}.png".format(view_idx))) for view_idx in range(num_views)]
82
+ imageio.mimsave(os.path.join(summary_image_dir, "output.gif"), images, duration=1)
83
+
84
+ print("=> done!")
85
+
86
+
87
+ def visualize_principle_viewpoints(output_dir, dist_list, elev_list, azim_list):
88
+ theta_list = [e for e in azim_list]
89
+ phi_list = [90 - e for e in elev_list]
90
+ DIST = dist_list[0]
91
+
92
+ xyz_list = [polar_to_xyz(theta, phi, DIST) for theta, phi in zip(theta_list, phi_list)]
93
+
94
+ xyz_np = np.array(xyz_list)
95
+ color_np = np.array([[0, 0, 0]]).repeat(xyz_np.shape[0], 0)
96
+
97
+ fig = plt.figure()
98
+ ax = plt.axes(projection='3d')
99
+ SCALE = 0.8
100
+ ax.set_xlim((-DIST, DIST))
101
+ ax.set_ylim((-DIST, DIST))
102
+ ax.set_zlim((-SCALE * DIST, SCALE * DIST))
103
+
104
+ ax.scatter(xyz_np[:, 0], xyz_np[:, 2], xyz_np[:, 1], s=100, c=color_np, depthshade=True, label="Principle views")
105
+ ax.scatter([0], [0], [0], c=[[1, 0, 0]], s=100, depthshade=True, label="Object center")
106
+
107
+ # draw hemisphere
108
+ # theta inclination angle
109
+ # phi azimuthal angle
110
+ n_theta = 50 # number of values for theta
111
+ n_phi = 200 # number of values for phi
112
+ r = DIST #radius of sphere
113
+
114
+ # theta, phi = np.mgrid[0.0:0.5*np.pi:n_theta*1j, 0.0:2.0*np.pi:n_phi*1j]
115
+ theta, phi = np.mgrid[0.0:1*np.pi:n_theta*1j, 0.0:2.0*np.pi:n_phi*1j]
116
+
117
+ x = r*np.sin(theta)*np.cos(phi)
118
+ y = r*np.sin(theta)*np.sin(phi)
119
+ z = r*np.cos(theta)
120
+
121
+ ax.plot_surface(x, y, z, rstride=1, cstride=1, alpha=0.25, linewidth=1)
122
+
123
+ # Make the grid
124
+ ax.quiver(
125
+ xyz_np[:, 0],
126
+ xyz_np[:, 2],
127
+ xyz_np[:, 1],
128
+ -xyz_np[:, 0],
129
+ -xyz_np[:, 2],
130
+ -xyz_np[:, 1],
131
+ normalize=True,
132
+ length=0.3
133
+ )
134
+
135
+ ax.set_xlabel('X Label')
136
+ ax.set_ylabel('Z Label')
137
+ ax.set_zlabel('Y Label')
138
+
139
+ ax.view_init(30, 35)
140
+ ax.legend()
141
+
142
+ plt.show()
143
+
144
+ plt.savefig(os.path.join(output_dir, "principle_viewpoints.png"))
145
+
146
+
147
+
148
+ def visualize_refinement_viewpoints(output_dir, selected_view_ids, dist_list, elev_list, azim_list):
149
+ theta_list = [azim_list[i] for i in selected_view_ids]
150
+ phi_list = [90 - elev_list[i] for i in selected_view_ids]
151
+ DIST = dist_list[0]
152
+
153
+ xyz_list = [polar_to_xyz(theta, phi, DIST) for theta, phi in zip(theta_list, phi_list)]
154
+
155
+ xyz_np = np.array(xyz_list)
156
+ color_np = np.array([[0, 0, 0]]).repeat(xyz_np.shape[0], 0)
157
+
158
+ fig = plt.figure()
159
+ ax = plt.axes(projection='3d')
160
+ SCALE = 0.8
161
+ ax.set_xlim((-DIST, DIST))
162
+ ax.set_ylim((-DIST, DIST))
163
+ ax.set_zlim((-SCALE * DIST, SCALE * DIST))
164
+
165
+ ax.scatter(xyz_np[:, 0], xyz_np[:, 2], xyz_np[:, 1], c=color_np, depthshade=True, label="Refinement views")
166
+ ax.scatter([0], [0], [0], c=[[1, 0, 0]], s=100, depthshade=True, label="Object center")
167
+
168
+ # draw hemisphere
169
+ # theta inclination angle
170
+ # phi azimuthal angle
171
+ n_theta = 50 # number of values for theta
172
+ n_phi = 200 # number of values for phi
173
+ r = DIST #radius of sphere
174
+
175
+ # theta, phi = np.mgrid[0.0:0.5*np.pi:n_theta*1j, 0.0:2.0*np.pi:n_phi*1j]
176
+ theta, phi = np.mgrid[0.0:1*np.pi:n_theta*1j, 0.0:2.0*np.pi:n_phi*1j]
177
+
178
+ x = r*np.sin(theta)*np.cos(phi)
179
+ y = r*np.sin(theta)*np.sin(phi)
180
+ z = r*np.cos(theta)
181
+
182
+ ax.plot_surface(x, y, z, rstride=1, cstride=1, alpha=0.25, linewidth=1)
183
+
184
+ # Make the grid
185
+ ax.quiver(
186
+ xyz_np[:, 0],
187
+ xyz_np[:, 2],
188
+ xyz_np[:, 1],
189
+ -xyz_np[:, 0],
190
+ -xyz_np[:, 2],
191
+ -xyz_np[:, 1],
192
+ normalize=True,
193
+ length=0.3
194
+ )
195
+
196
+ ax.set_xlabel('X Label')
197
+ ax.set_ylabel('Z Label')
198
+ ax.set_zlabel('Y Label')
199
+
200
+ ax.view_init(30, 35)
201
+ ax.legend()
202
+
203
+ plt.show()
204
+
205
+ plt.savefig(os.path.join(output_dir, "refinement_viewpoints.png"))
206
+
207
+ fig.clear()
208
+
209
+
text2tex/models/ControlNet/.gitignore ADDED
@@ -0,0 +1,143 @@
1
+ .idea/
2
+
3
+ training/
4
+ lightning_logs/
5
+ image_log/
6
+
7
+ *.pth
8
+ *.pt
9
+ *.ckpt
10
+ *.safetensors
11
+
12
+ gradio_pose2image_private.py
13
+ gradio_canny2image_private.py
14
+
15
+ # Byte-compiled / optimized / DLL files
16
+ __pycache__/
17
+ *.py[cod]
18
+ *$py.class
19
+
20
+ # C extensions
21
+ *.so
22
+
23
+ # Distribution / packaging
24
+ .Python
25
+ build/
26
+ develop-eggs/
27
+ dist/
28
+ downloads/
29
+ eggs/
30
+ .eggs/
31
+ lib/
32
+ lib64/
33
+ parts/
34
+ sdist/
35
+ var/
36
+ wheels/
37
+ pip-wheel-metadata/
38
+ share/python-wheels/
39
+ *.egg-info/
40
+ .installed.cfg
41
+ *.egg
42
+ MANIFEST
43
+
44
+ # PyInstaller
45
+ # Usually these files are written by a python script from a template
46
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
47
+ *.manifest
48
+ *.spec
49
+
50
+ # Installer logs
51
+ pip-log.txt
52
+ pip-delete-this-directory.txt
53
+
54
+ # Unit test / coverage reports
55
+ htmlcov/
56
+ .tox/
57
+ .nox/
58
+ .coverage
59
+ .coverage.*
60
+ .cache
61
+ nosetests.xml
62
+ coverage.xml
63
+ *.cover
64
+ *.py,cover
65
+ .hypothesis/
66
+ .pytest_cache/
67
+
68
+ # Translations
69
+ *.mo
70
+ *.pot
71
+
72
+ # Django stuff:
73
+ *.log
74
+ local_settings.py
75
+ db.sqlite3
76
+ db.sqlite3-journal
77
+
78
+ # Flask stuff:
79
+ instance/
80
+ .webassets-cache
81
+
82
+ # Scrapy stuff:
83
+ .scrapy
84
+
85
+ # Sphinx documentation
86
+ docs/_build/
87
+
88
+ # PyBuilder
89
+ target/
90
+
91
+ # Jupyter Notebook
92
+ .ipynb_checkpoints
93
+
94
+ # IPython
95
+ profile_default/
96
+ ipython_config.py
97
+
98
+ # pyenv
99
+ .python-version
100
+
101
+ # pipenv
102
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
103
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
104
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
105
+ # install all needed dependencies.
106
+ #Pipfile.lock
107
+
108
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
109
+ __pypackages__/
110
+
111
+ # Celery stuff
112
+ celerybeat-schedule
113
+ celerybeat.pid
114
+
115
+ # SageMath parsed files
116
+ *.sage.py
117
+
118
+ # Environments
119
+ .env
120
+ .venv
121
+ env/
122
+ venv/
123
+ ENV/
124
+ env.bak/
125
+ venv.bak/
126
+
127
+ # Spyder project settings
128
+ .spyderproject
129
+ .spyproject
130
+
131
+ # Rope project settings
132
+ .ropeproject
133
+
134
+ # mkdocs documentation
135
+ /site
136
+
137
+ # mypy
138
+ .mypy_cache/
139
+ .dmypy.json
140
+ dmypy.json
141
+
142
+ # Pyre type checker
143
+ .pyre/
text2tex/models/ControlNet/LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
text2tex/models/ControlNet/README.md ADDED
@@ -0,0 +1,234 @@
1
+ # ControlNet
2
+
3
+ Official implementation of [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543).
4
+
5
+ ControlNet is a neural network structure to control diffusion models by adding extra conditions.
6
+
7
+ ![img](github_page/he.png)
8
+
9
+ It copies the weights of neural network blocks into a "locked" copy and a "trainable" copy.
10
+
11
+ The "trainable" one learns your condition. The "locked" one preserves your model.
12
+
13
+ Thanks to this, training with a small dataset of image pairs will not destroy the production-ready diffusion models.
14
+
15
+ The "zero convolution" is 1×1 convolution with both weight and bias initialized as zeros.
16
+
17
+ Before training, all zero convolutions output zeros, and ControlNet will not cause any distortion.
18
+
19
+ No layer is trained from scratch. You are still fine-tuning. Your original model is safe.
20
+
21
+ This allows training on small-scale or even personal devices.
22
+
23
+ This is also friendly to merge/replacement/offsetting of models/weights/blocks/layers.
24
+
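A zero convolution as described here is simply a 1×1 convolution whose weight and bias start at zero, so the ControlNet branch contributes nothing until training updates it. A minimal PyTorch sketch (our illustration, not code from this repository):

    import torch
    import torch.nn as nn

    def zero_conv(channels: int) -> nn.Conv2d:
        conv = nn.Conv2d(channels, channels, kernel_size=1, padding=0)
        nn.init.zeros_(conv.weight)
        nn.init.zeros_(conv.bias)
        return conv

    x = torch.randn(1, 320, 64, 64)
    assert zero_conv(320)(x).abs().sum().item() == 0.0  # before training, the branch adds nothing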
25
+ ### FAQ
26
+
27
+ **Q:** But wait, if the weight of a conv layer is zero, the gradient will also be zero, and the network will not learn anything. Why "zero convolution" works?
28
+
29
+ **A:** This is not true. [See an explanation here](docs/faq.md).
30
+
31
+ # Stable Diffusion + ControlNet
32
+
33
+ By repeating the above simple structure 14 times, we can control stable diffusion in this way:
34
+
35
+ ![img](github_page/sd.png)
36
+
37
+ Note that the way we connect layers is computationally efficient. The original SD encoder does not need to store gradients (the locked original SD Encoder Blocks 1-4 and Middle). The required GPU memory is not much larger than for the original SD, although many layers are added. Great!
38
+
39
+ # Production-Ready Pretrained Models
40
+
41
+ First create a new conda environment
42
+
43
+ conda env create -f environment.yaml
44
+ conda activate control
45
+
46
+ All models and detectors can be downloaded from [our Hugging Face page](https://huggingface.co/lllyasviel/ControlNet). Make sure that SD models are put in "ControlNet/models" and detectors are put in "ControlNet/annotator/ckpts". Make sure that you download all necessary pretrained weights and detector models from that Hugging Face page, including the HED edge detection model, the MiDaS depth estimation model, Openpose, and so on.
47
+
48
+ We provide 9 Gradio apps with these models.
49
+
50
+ All test images can be found at the folder "test_imgs".
51
+
52
+ ### News
53
+
54
+ 2023/02/12 - Now you can play with any community model by [Transferring the ControlNet](https://github.com/lllyasviel/ControlNet/discussions/12).
55
+
56
+ 2023/02/11 - [Low VRAM mode](docs/low_vram.md) is added. Please use this mode if you are using 8GB GPU(s) or if you want larger batch size.
57
+
58
+ ## ControlNet with Canny Edge
59
+
60
+ Stable Diffusion 1.5 + ControlNet (using simple Canny edge detection)
61
+
62
+ python gradio_canny2image.py
63
+
64
+ The Gradio app also allows you to change the Canny edge thresholds. Just try it for more details.
65
+
66
+ Prompt: "bird"
67
+ ![p](github_page/p1.png)
68
+
69
+ Prompt: "cute dog"
70
+ ![p](github_page/p2.png)
71
+
72
+ ## ControlNet with M-LSD Lines
73
+
74
+ Stable Diffusion 1.5 + ControlNet (using simple M-LSD straight line detection)
75
+
76
+ python gradio_hough2image.py
77
+
78
+ The Gradio app also allows you to change the M-LSD thresholds. Just try it for more details.
79
+
80
+ Prompt: "room"
81
+ ![p](github_page/p3.png)
82
+
83
+ Prompt: "building"
84
+ ![p](github_page/p4.png)
85
+
86
+ ## ControlNet with HED Boundary
87
+
88
+ Stable Diffusion 1.5 + ControlNet (using soft HED Boundary)
89
+
90
+ python gradio_hed2image.py
91
+
92
+ The soft HED Boundary will preserve many details in input images, making this app suitable for recoloring and stylizing. Just try it for more details.
93
+
94
+ Prompt: "oil painting of handsome old man, masterpiece"
95
+ ![p](github_page/p5.png)
96
+
97
+ Prompt: "Cyberpunk robot"
98
+ ![p](github_page/p6.png)
99
+
100
+ ## ControlNet with User Scribbles
101
+
102
+ Stable Diffusion 1.5 + ControlNet (using Scribbles)
103
+
104
+ python gradio_scribble2image.py
105
+
106
+ Note that the UI is based on Gradio, and Gradio is somewhat difficult to customize. Right now you need to draw scribbles outside the UI (using your favorite drawing software, for example, MS Paint) and then import the scribble image to Gradio.
107
+
108
+ Prompt: "turtle"
109
+ ![p](github_page/p7.png)
110
+
111
+ Prompt: "hot air balloon"
112
+ ![p](github_page/p8.png)
113
+
114
+ ### Interactive Interface
115
+
116
+ We actually provide an interactive interface
117
+
118
+ python gradio_scribble2image_interactive.py
119
+
120
+ However, because Gradio is very [buggy](https://github.com/gradio-app/gradio/issues/3166) and difficult to customize, right now users need to first set the canvas width and height and then click "Open drawing canvas" to get a drawing area. Please do not upload an image to that drawing canvas. Also, the drawing area is very small; it should be bigger. But I failed to find out how to make it larger. Again, Gradio is really buggy.
121
+
122
+ The dog sketch below was drawn by me. Perhaps we should draw a better dog for the showcase.
123
+
124
+ Prompt: "dog in a room"
125
+ ![p](github_page/p20.png)
126
+
127
+ ## ControlNet with Fake Scribbles
128
+
129
+ Stable Diffusion 1.5 + ControlNet (using fake scribbles)
130
+
131
+ python gradio_fake_scribble2image.py
132
+
133
+ Sometimes we are lazy, and we do not want to draw scribbles. This script uses exactly the same scribble-based model but uses a simple algorithm to synthesize scribbles from input images.
134
+
135
+ Prompt: "bag"
136
+ ![p](github_page/p9.png)
137
+
138
+ Prompt: "shose" (Note that "shose" is a typo; it should be "shoes". But it still seems to work.)
139
+ ![p](github_page/p10.png)
140
+
141
+ ## ControlNet with Human Pose
142
+
143
+ Stable Diffusion 1.5 + ControlNet (using human pose)
144
+
145
+ python gradio_pose2image.py
146
+
147
+ Apparently, this model deserves a better UI to directly manipulate the pose skeleton. However, again, Gradio is somewhat difficult to customize. Right now you need to input an image and then Openpose will detect the pose for you.
148
+
149
+ Prompt: "Chief in the kitchen"
150
+ ![p](github_page/p11.png)
151
+
152
+ Prompt: "An astronaut on the moon"
153
+ ![p](github_page/p12.png)
154
+
155
+ ## ControlNet with Semantic Segmentation
156
+
157
+ Stable Diffusion 1.5 + ControlNet (using semantic segmentation)
158
+
159
+ python gradio_seg2image.py
160
+
161
+ This model uses ADE20K's segmentation protocol. Again, this model deserves a better UI to directly draw the segmentations. However, again, Gradio is somewhat difficult to customize. Right now you need to input an image and then a model called Uniformer will detect the segmentations for you. Just try it for more details.
162
+
163
+ Prompt: "House"
164
+ ![p](github_page/p13.png)
165
+
166
+ Prompt: "River"
167
+ ![p](github_page/p14.png)
168
+
169
+ ## ControlNet with Depth
170
+
171
+ Stable Diffusion 1.5 + ControlNet (using depth map)
172
+
173
+ python gradio_depth2image.py
174
+
175
+ Great! Now SD 1.5 also has depth control. FINALLY. So many possibilities (considering SD 1.5 has many more community models than SD2).
176
+
177
+ Note that, unlike Stability's model, ControlNet receives the full 512×512 depth map rather than a 64×64 depth map. Note that Stability's SD2 depth model uses 64×64 depth maps. This means that ControlNet will preserve more details in the depth map.
178
+
179
+ This is always a strength because if users do not want to preserve more details, they can simply use another SD to post-process an i2i. But if they want to preserve more details, ControlNet becomes their only choice. Again, SD2 uses 64×64 depth, we use 512×512.
180
+
181
+ Prompt: "Stormtrooper's lecture"
182
+ ![p](github_page/p15.png)
183
+
184
+ ## ControlNet with Normal Map
185
+
186
+ Stable Diffusion 1.5 + ControlNet (using normal map)
187
+
188
+ python gradio_normal2image.py
189
+
190
+ This model uses a normal map. Right now in the app, the normal is computed from the MiDaS depth map and a user threshold (to determine how much of the area is background with an identity normal facing the viewer; tune the "Normal background threshold" in the Gradio app to get a feeling for it).
191
+
192
+ Prompt: "Cute toy"
193
+ ![p](github_page/p17.png)
194
+
195
+ Prompt: "Plaster statue of Abraham Lincoln"
196
+ ![p](github_page/p18.png)
197
+
198
+ Compared to the depth model, this model seems to be a bit better at preserving geometry. This is intuitive: minor details are not salient in depth maps, but they are salient in normal maps. Below is the depth result with the same inputs. You can see that the hairstyle of the man in the input image is modified by the depth model but preserved by the normal model.
199
+
200
+ Prompt: "Plaster statue of Abraham Lincoln"
201
+ ![p](github_page/p19.png)
202
+
203
+ ## ControlNet with Anime Line Drawing
204
+
205
+ We also trained a relatively simple ControlNet for anime line drawings. This tool may be useful for artistic creations. (Although the image details in the results are a bit modified, since it still diffuses latent images.)
206
+
207
+ This model is not available right now. We need to evaluate the potential risks before releasing this model. Nevertheless, you may be interested in [transferring the ControlNet to any community model](https://github.com/lllyasviel/ControlNet/discussions/12).
208
+
209
+ ![p](github_page/p21.png)
210
+
211
+ # Annotate Your Own Data
212
+
213
+ We provide simple python scripts to process images.
214
+
215
+ [See a gradio example here](docs/annotator.md).
216
+
217
+ # Train with Your Own Data
218
+
219
+ Training a ControlNet is as easy as (or even easier than) training a simple pix2pix.
220
+
221
+ [See the steps here](docs/train.md).
222
+
223
+ # Citation
224
+
225
+ @misc{zhang2023adding,
226
+ title={Adding Conditional Control to Text-to-Image Diffusion Models},
227
+ author={Lvmin Zhang and Maneesh Agrawala},
228
+ year={2023},
229
+ eprint={2302.05543},
230
+ archivePrefix={arXiv},
231
+ primaryClass={cs.CV}
232
+ }
233
+
234
+ [Arxiv Link](https://arxiv.org/abs/2302.05543)
text2tex/models/ControlNet/annotator/canny/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ import cv2
2
+
3
+
4
+ def apply_canny(img, low_threshold, high_threshold):
5
+ return cv2.Canny(img, low_threshold, high_threshold)
text2tex/models/ControlNet/annotator/ckpts/ckpts.txt ADDED
@@ -0,0 +1 @@
1
+ Weights here.
text2tex/models/ControlNet/annotator/hed/__init__.py ADDED
@@ -0,0 +1,127 @@
1
+ import numpy as np
2
+ import cv2
3
+ import torch
4
+ from einops import rearrange
5
+
6
+
7
+ class Network(torch.nn.Module):
8
+ def __init__(self):
9
+ super().__init__()
10
+
11
+ self.netVggOne = torch.nn.Sequential(
12
+ torch.nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1),
13
+ torch.nn.ReLU(inplace=False),
14
+ torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
15
+ torch.nn.ReLU(inplace=False)
16
+ )
17
+
18
+ self.netVggTwo = torch.nn.Sequential(
19
+ torch.nn.MaxPool2d(kernel_size=2, stride=2),
20
+ torch.nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
21
+ torch.nn.ReLU(inplace=False),
22
+ torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
23
+ torch.nn.ReLU(inplace=False)
24
+ )
25
+
26
+ self.netVggThr = torch.nn.Sequential(
27
+ torch.nn.MaxPool2d(kernel_size=2, stride=2),
28
+ torch.nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),
29
+ torch.nn.ReLU(inplace=False),
30
+ torch.nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
31
+ torch.nn.ReLU(inplace=False),
32
+ torch.nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1),
33
+ torch.nn.ReLU(inplace=False)
34
+ )
35
+
36
+ self.netVggFou = torch.nn.Sequential(
37
+ torch.nn.MaxPool2d(kernel_size=2, stride=2),
38
+ torch.nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1),
39
+ torch.nn.ReLU(inplace=False),
40
+ torch.nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
41
+ torch.nn.ReLU(inplace=False),
42
+ torch.nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
43
+ torch.nn.ReLU(inplace=False)
44
+ )
45
+
46
+ self.netVggFiv = torch.nn.Sequential(
47
+ torch.nn.MaxPool2d(kernel_size=2, stride=2),
48
+ torch.nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
49
+ torch.nn.ReLU(inplace=False),
50
+ torch.nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
51
+ torch.nn.ReLU(inplace=False),
52
+ torch.nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1),
53
+ torch.nn.ReLU(inplace=False)
54
+ )
55
+
56
+ self.netScoreOne = torch.nn.Conv2d(in_channels=64, out_channels=1, kernel_size=1, stride=1, padding=0)
57
+ self.netScoreTwo = torch.nn.Conv2d(in_channels=128, out_channels=1, kernel_size=1, stride=1, padding=0)
58
+ self.netScoreThr = torch.nn.Conv2d(in_channels=256, out_channels=1, kernel_size=1, stride=1, padding=0)
59
+ self.netScoreFou = torch.nn.Conv2d(in_channels=512, out_channels=1, kernel_size=1, stride=1, padding=0)
60
+ self.netScoreFiv = torch.nn.Conv2d(in_channels=512, out_channels=1, kernel_size=1, stride=1, padding=0)
61
+
62
+ self.netCombine = torch.nn.Sequential(
63
+ torch.nn.Conv2d(in_channels=5, out_channels=1, kernel_size=1, stride=1, padding=0),
64
+ torch.nn.Sigmoid()
65
+ )
66
+
67
+ self.load_state_dict({strKey.replace('module', 'net'): tenWeight for strKey, tenWeight in torch.load('./annotator/ckpts/network-bsds500.pth').items()})
68
+ # end
69
+
70
+ def forward(self, tenInput):
71
+ tenInput = tenInput * 255.0
72
+ tenInput = tenInput - torch.tensor(data=[104.00698793, 116.66876762, 122.67891434], dtype=tenInput.dtype, device=tenInput.device).view(1, 3, 1, 1)
73
+
74
+ tenVggOne = self.netVggOne(tenInput)
75
+ tenVggTwo = self.netVggTwo(tenVggOne)
76
+ tenVggThr = self.netVggThr(tenVggTwo)
77
+ tenVggFou = self.netVggFou(tenVggThr)
78
+ tenVggFiv = self.netVggFiv(tenVggFou)
79
+
80
+ tenScoreOne = self.netScoreOne(tenVggOne)
81
+ tenScoreTwo = self.netScoreTwo(tenVggTwo)
82
+ tenScoreThr = self.netScoreThr(tenVggThr)
83
+ tenScoreFou = self.netScoreFou(tenVggFou)
84
+ tenScoreFiv = self.netScoreFiv(tenVggFiv)
85
+
86
+ tenScoreOne = torch.nn.functional.interpolate(input=tenScoreOne, size=(tenInput.shape[2], tenInput.shape[3]), mode='bilinear', align_corners=False)
87
+ tenScoreTwo = torch.nn.functional.interpolate(input=tenScoreTwo, size=(tenInput.shape[2], tenInput.shape[3]), mode='bilinear', align_corners=False)
88
+ tenScoreThr = torch.nn.functional.interpolate(input=tenScoreThr, size=(tenInput.shape[2], tenInput.shape[3]), mode='bilinear', align_corners=False)
89
+ tenScoreFou = torch.nn.functional.interpolate(input=tenScoreFou, size=(tenInput.shape[2], tenInput.shape[3]), mode='bilinear', align_corners=False)
90
+ tenScoreFiv = torch.nn.functional.interpolate(input=tenScoreFiv, size=(tenInput.shape[2], tenInput.shape[3]), mode='bilinear', align_corners=False)
91
+
92
+ return self.netCombine(torch.cat([ tenScoreOne, tenScoreTwo, tenScoreThr, tenScoreFou, tenScoreFiv ], 1))
93
+ # end
94
+ # end
95
+
96
+
97
+ netNetwork = Network().cuda().eval()
98
+
99
+
100
+ def apply_hed(input_image):
101
+ assert input_image.ndim == 3
102
+ input_image = input_image[:, :, ::-1].copy()
103
+ with torch.no_grad():
104
+ image_hed = torch.from_numpy(input_image).float().cuda()
105
+ image_hed = image_hed / 255.0
106
+ image_hed = rearrange(image_hed, 'h w c -> 1 c h w')
107
+ edge = netNetwork(image_hed)[0]
108
+ edge = (edge.cpu().numpy() * 255.0).clip(0, 255).astype(np.uint8)
109
+ return edge[0]
110
+
111
+
112
+ def nms(x, t, s):
113
+ x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)
114
+
115
+ f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
116
+ f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
117
+ f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
118
+ f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)
119
+
120
+ y = np.zeros_like(x)
121
+
122
+ for f in [f1, f2, f3, f4]:
123
+ np.putmask(y, cv2.dilate(x, kernel=f) == x, x)
124
+
125
+ z = np.zeros_like(y, dtype=np.uint8)
126
+ z[y > t] = 255
127
+ return z
text2tex/models/ControlNet/annotator/midas/__init__.py ADDED
@@ -0,0 +1,36 @@
1
+ import cv2
2
+ import numpy as np
3
+ import torch
4
+
5
+ from einops import rearrange
6
+ from .api import MiDaSInference
7
+
8
+ model = MiDaSInference(model_type="dpt_hybrid").cuda()
9
+
10
+
11
+ def apply_midas(input_image, a=np.pi * 2.0, bg_th=0.1):
12
+ assert input_image.ndim == 3
13
+ image_depth = input_image
14
+ with torch.no_grad():
15
+ image_depth = torch.from_numpy(image_depth).float().cuda()
16
+ image_depth = image_depth / 127.5 - 1.0
17
+ image_depth = rearrange(image_depth, 'h w c -> 1 c h w')
18
+ depth = model(image_depth)[0]
19
+
20
+ depth_pt = depth.clone()
21
+ depth_pt -= torch.min(depth_pt)
22
+ depth_pt /= torch.max(depth_pt)
23
+ depth_pt = depth_pt.cpu().numpy()
24
+ depth_image = (depth_pt * 255.0).clip(0, 255).astype(np.uint8)
25
+
26
+ depth_np = depth.cpu().numpy()
27
+ x = cv2.Sobel(depth_np, cv2.CV_32F, 1, 0, ksize=3)
28
+ y = cv2.Sobel(depth_np, cv2.CV_32F, 0, 1, ksize=3)
29
+ z = np.ones_like(x) * a
30
+ x[depth_pt < bg_th] = 0
31
+ y[depth_pt < bg_th] = 0
32
+ normal = np.stack([x, y, z], axis=2)
33
+ normal /= np.sum(normal ** 2.0, axis=2, keepdims=True) ** 0.5
34
+ normal_image = (normal * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
35
+
36
+ return depth_image, normal_image
text2tex/models/ControlNet/annotator/midas/api.py ADDED
@@ -0,0 +1,165 @@
1
+ # based on https://github.com/isl-org/MiDaS
2
+
3
+ import cv2
4
+ import torch
5
+ import torch.nn as nn
6
+ from torchvision.transforms import Compose
7
+
8
+ from .midas.dpt_depth import DPTDepthModel
9
+ from .midas.midas_net import MidasNet
10
+ from .midas.midas_net_custom import MidasNet_small
11
+ from .midas.transforms import Resize, NormalizeImage, PrepareForNet
12
+
13
+ import os
14
+ import sys
15
+
16
+ BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../..")
17
+
18
+ ISL_PATHS = {
19
+ "dpt_large": BASE_DIR+"/annotator/ckpts/dpt_large-midas-2f21e586.pt",
20
+ "dpt_hybrid": BASE_DIR+"/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt",
21
+ "midas_v21": "",
22
+ "midas_v21_small": "",
23
+ }
24
+
25
+
26
+ def disabled_train(self, mode=True):
27
+ """Overwrite model.train with this function to make sure train/eval mode
28
+ does not change anymore."""
29
+ return self
30
+
31
+
32
+ def load_midas_transform(model_type):
33
+ # https://github.com/isl-org/MiDaS/blob/master/run.py
34
+ # load transform only
35
+ if model_type == "dpt_large": # DPT-Large
36
+ net_w, net_h = 384, 384
37
+ resize_mode = "minimal"
38
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
39
+
40
+ elif model_type == "dpt_hybrid": # DPT-Hybrid
41
+ net_w, net_h = 384, 384
42
+ resize_mode = "minimal"
43
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
44
+
45
+ elif model_type == "midas_v21":
46
+ net_w, net_h = 384, 384
47
+ resize_mode = "upper_bound"
48
+ normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
49
+
50
+ elif model_type == "midas_v21_small":
51
+ net_w, net_h = 256, 256
52
+ resize_mode = "upper_bound"
53
+ normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
54
+
55
+ else:
56
+ assert False, f"model_type '{model_type}' not implemented, use: --model_type large"
57
+
58
+ transform = Compose(
59
+ [
60
+ Resize(
61
+ net_w,
62
+ net_h,
63
+ resize_target=None,
64
+ keep_aspect_ratio=True,
65
+ ensure_multiple_of=32,
66
+ resize_method=resize_mode,
67
+ image_interpolation_method=cv2.INTER_CUBIC,
68
+ ),
69
+ normalization,
70
+ PrepareForNet(),
71
+ ]
72
+ )
73
+
74
+ return transform
75
+
76
+
77
+ def load_model(model_type):
78
+ # https://github.com/isl-org/MiDaS/blob/master/run.py
79
+ # load network
80
+ model_path = ISL_PATHS[model_type]
81
+ if model_type == "dpt_large": # DPT-Large
82
+ model = DPTDepthModel(
83
+ path=model_path,
84
+ backbone="vitl16_384",
85
+ non_negative=True,
86
+ )
87
+ net_w, net_h = 384, 384
88
+ resize_mode = "minimal"
89
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
90
+
91
+ elif model_type == "dpt_hybrid": # DPT-Hybrid
92
+ model = DPTDepthModel(
93
+ path=model_path,
94
+ backbone="vitb_rn50_384",
95
+ non_negative=True,
96
+ )
97
+ net_w, net_h = 384, 384
98
+ resize_mode = "minimal"
99
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
100
+
101
+ elif model_type == "midas_v21":
102
+ model = MidasNet(model_path, non_negative=True)
103
+ net_w, net_h = 384, 384
104
+ resize_mode = "upper_bound"
105
+ normalization = NormalizeImage(
106
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
107
+ )
108
+
109
+ elif model_type == "midas_v21_small":
110
+ model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
111
+ non_negative=True, blocks={'expand': True})
112
+ net_w, net_h = 256, 256
113
+ resize_mode = "upper_bound"
114
+ normalization = NormalizeImage(
115
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
116
+ )
117
+
118
+ else:
119
+ print(f"model_type '{model_type}' not implemented, use: --model_type large")
120
+ assert False
121
+
122
+ transform = Compose(
123
+ [
124
+ Resize(
125
+ net_w,
126
+ net_h,
127
+ resize_target=None,
128
+ keep_aspect_ratio=True,
129
+ ensure_multiple_of=32,
130
+ resize_method=resize_mode,
131
+ image_interpolation_method=cv2.INTER_CUBIC,
132
+ ),
133
+ normalization,
134
+ PrepareForNet(),
135
+ ]
136
+ )
137
+
138
+ return model.eval(), transform
139
+
140
+
141
+ class MiDaSInference(nn.Module):
142
+ MODEL_TYPES_TORCH_HUB = [
143
+ "DPT_Large",
144
+ "DPT_Hybrid",
145
+ "MiDaS_small"
146
+ ]
147
+ MODEL_TYPES_ISL = [
148
+ "dpt_large",
149
+ "dpt_hybrid",
150
+ "midas_v21",
151
+ "midas_v21_small",
152
+ ]
153
+
154
+ def __init__(self, model_type):
155
+ super().__init__()
156
+ assert (model_type in self.MODEL_TYPES_ISL)
157
+ model, _ = load_model(model_type)
158
+ self.model = model
159
+ self.model.train = disabled_train
160
+
161
+ def forward(self, x):
162
+ with torch.no_grad():
163
+ prediction = self.model(x)
164
+ return prediction
165
+
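A short sketch of the intended flow through load_midas_transform and MiDaSInference (the input resolution, the [0, 1] RGB range, and CPU execution are assumptions; the dpt_hybrid weights must exist at the ISL_PATHS location):

```python
# Minimal sketch: preprocess with the MiDaS transform, then run MiDaSInference.
import numpy as np
import torch

transform = load_midas_transform("dpt_hybrid")         # Resize -> NormalizeImage -> PrepareForNet
img = np.random.rand(480, 640, 3).astype(np.float32)   # RGB image in [0, 1]
sample = transform({"image": img})                      # CHW float32 under "image"
x = torch.from_numpy(sample["image"]).unsqueeze(0)      # 1 x 3 x 384 x 512 for this input

model = MiDaSInference("dpt_hybrid")                    # loads weights from ISL_PATHS["dpt_hybrid"]
with torch.no_grad():
    depth = model(x)                                    # 1 x 384 x 512 relative inverse depth
```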
text2tex/models/ControlNet/annotator/midas/midas/__init__.py ADDED
File without changes (new empty file)
text2tex/models/ControlNet/annotator/midas/midas/base_model.py ADDED
@@ -0,0 +1,16 @@
+ import torch
+
+
+ class BaseModel(torch.nn.Module):
+     def load(self, path):
+         """Load model from file.
+
+         Args:
+             path (str): file path
+         """
+         parameters = torch.load(path, map_location=torch.device('cpu'))
+
+         if "optimizer" in parameters:
+             parameters = parameters["model"]
+
+         self.load_state_dict(parameters)
text2tex/models/ControlNet/annotator/midas/midas/blocks.py ADDED
@@ -0,0 +1,342 @@
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+ from .vit import (
5
+ _make_pretrained_vitb_rn50_384,
6
+ _make_pretrained_vitl16_384,
7
+ _make_pretrained_vitb16_384,
8
+ forward_vit,
9
+ )
10
+
11
+ def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",):
12
+ if backbone == "vitl16_384":
13
+ pretrained = _make_pretrained_vitl16_384(
14
+ use_pretrained, hooks=hooks, use_readout=use_readout
15
+ )
16
+ scratch = _make_scratch(
17
+ [256, 512, 1024, 1024], features, groups=groups, expand=expand
18
+ ) # ViT-L/16 - 85.0% Top1 (backbone)
19
+ elif backbone == "vitb_rn50_384":
20
+ pretrained = _make_pretrained_vitb_rn50_384(
21
+ use_pretrained,
22
+ hooks=hooks,
23
+ use_vit_only=use_vit_only,
24
+ use_readout=use_readout,
25
+ )
26
+ scratch = _make_scratch(
27
+ [256, 512, 768, 768], features, groups=groups, expand=expand
28
+ ) # ViT-H/16 - 85.0% Top1 (backbone)
29
+ elif backbone == "vitb16_384":
30
+ pretrained = _make_pretrained_vitb16_384(
31
+ use_pretrained, hooks=hooks, use_readout=use_readout
32
+ )
33
+ scratch = _make_scratch(
34
+ [96, 192, 384, 768], features, groups=groups, expand=expand
35
+ ) # ViT-B/16 - 84.6% Top1 (backbone)
36
+ elif backbone == "resnext101_wsl":
37
+ pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
38
+ scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) # efficientnet_lite3
39
+ elif backbone == "efficientnet_lite3":
40
+ pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
41
+ scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3
42
+ else:
43
+ print(f"Backbone '{backbone}' not implemented")
44
+ assert False
45
+
46
+ return pretrained, scratch
47
+
48
+
49
+ def _make_scratch(in_shape, out_shape, groups=1, expand=False):
50
+ scratch = nn.Module()
51
+
52
+ out_shape1 = out_shape
53
+ out_shape2 = out_shape
54
+ out_shape3 = out_shape
55
+ out_shape4 = out_shape
56
+ if expand==True:
57
+ out_shape1 = out_shape
58
+ out_shape2 = out_shape*2
59
+ out_shape3 = out_shape*4
60
+ out_shape4 = out_shape*8
61
+
62
+ scratch.layer1_rn = nn.Conv2d(
63
+ in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
64
+ )
65
+ scratch.layer2_rn = nn.Conv2d(
66
+ in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
67
+ )
68
+ scratch.layer3_rn = nn.Conv2d(
69
+ in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
70
+ )
71
+ scratch.layer4_rn = nn.Conv2d(
72
+ in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
73
+ )
74
+
75
+ return scratch
76
+
77
+
78
+ def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
79
+ efficientnet = torch.hub.load(
80
+ "rwightman/gen-efficientnet-pytorch",
81
+ "tf_efficientnet_lite3",
82
+ pretrained=use_pretrained,
83
+ exportable=exportable
84
+ )
85
+ return _make_efficientnet_backbone(efficientnet)
86
+
87
+
88
+ def _make_efficientnet_backbone(effnet):
89
+ pretrained = nn.Module()
90
+
91
+ pretrained.layer1 = nn.Sequential(
92
+ effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]
93
+ )
94
+ pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3])
95
+ pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5])
96
+ pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9])
97
+
98
+ return pretrained
99
+
100
+
101
+ def _make_resnet_backbone(resnet):
102
+ pretrained = nn.Module()
103
+ pretrained.layer1 = nn.Sequential(
104
+ resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
105
+ )
106
+
107
+ pretrained.layer2 = resnet.layer2
108
+ pretrained.layer3 = resnet.layer3
109
+ pretrained.layer4 = resnet.layer4
110
+
111
+ return pretrained
112
+
113
+
114
+ def _make_pretrained_resnext101_wsl(use_pretrained):
115
+ resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
116
+ return _make_resnet_backbone(resnet)
117
+
118
+
119
+
120
+ class Interpolate(nn.Module):
121
+ """Interpolation module.
122
+ """
123
+
124
+ def __init__(self, scale_factor, mode, align_corners=False):
125
+ """Init.
126
+
127
+ Args:
128
+ scale_factor (float): scaling
129
+ mode (str): interpolation mode
130
+ """
131
+ super(Interpolate, self).__init__()
132
+
133
+ self.interp = nn.functional.interpolate
134
+ self.scale_factor = scale_factor
135
+ self.mode = mode
136
+ self.align_corners = align_corners
137
+
138
+ def forward(self, x):
139
+ """Forward pass.
140
+
141
+ Args:
142
+ x (tensor): input
143
+
144
+ Returns:
145
+ tensor: interpolated data
146
+ """
147
+
148
+ x = self.interp(
149
+ x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners
150
+ )
151
+
152
+ return x
153
+
154
+
155
+ class ResidualConvUnit(nn.Module):
156
+ """Residual convolution module.
157
+ """
158
+
159
+ def __init__(self, features):
160
+ """Init.
161
+
162
+ Args:
163
+ features (int): number of features
164
+ """
165
+ super().__init__()
166
+
167
+ self.conv1 = nn.Conv2d(
168
+ features, features, kernel_size=3, stride=1, padding=1, bias=True
169
+ )
170
+
171
+ self.conv2 = nn.Conv2d(
172
+ features, features, kernel_size=3, stride=1, padding=1, bias=True
173
+ )
174
+
175
+ self.relu = nn.ReLU(inplace=True)
176
+
177
+ def forward(self, x):
178
+ """Forward pass.
179
+
180
+ Args:
181
+ x (tensor): input
182
+
183
+ Returns:
184
+ tensor: output
185
+ """
186
+ out = self.relu(x)
187
+ out = self.conv1(out)
188
+ out = self.relu(out)
189
+ out = self.conv2(out)
190
+
191
+ return out + x
192
+
193
+
194
+ class FeatureFusionBlock(nn.Module):
195
+ """Feature fusion block.
196
+ """
197
+
198
+ def __init__(self, features):
199
+ """Init.
200
+
201
+ Args:
202
+ features (int): number of features
203
+ """
204
+ super(FeatureFusionBlock, self).__init__()
205
+
206
+ self.resConfUnit1 = ResidualConvUnit(features)
207
+ self.resConfUnit2 = ResidualConvUnit(features)
208
+
209
+ def forward(self, *xs):
210
+ """Forward pass.
211
+
212
+ Returns:
213
+ tensor: output
214
+ """
215
+ output = xs[0]
216
+
217
+ if len(xs) == 2:
218
+ output += self.resConfUnit1(xs[1])
219
+
220
+ output = self.resConfUnit2(output)
221
+
222
+ output = nn.functional.interpolate(
223
+ output, scale_factor=2, mode="bilinear", align_corners=True
224
+ )
225
+
226
+ return output
227
+
228
+
229
+
230
+
231
+ class ResidualConvUnit_custom(nn.Module):
232
+ """Residual convolution module.
233
+ """
234
+
235
+ def __init__(self, features, activation, bn):
236
+ """Init.
237
+
238
+ Args:
239
+ features (int): number of features
240
+ """
241
+ super().__init__()
242
+
243
+ self.bn = bn
244
+
245
+ self.groups=1
246
+
247
+ self.conv1 = nn.Conv2d(
248
+ features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
249
+ )
250
+
251
+ self.conv2 = nn.Conv2d(
252
+ features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
253
+ )
254
+
255
+ if self.bn==True:
256
+ self.bn1 = nn.BatchNorm2d(features)
257
+ self.bn2 = nn.BatchNorm2d(features)
258
+
259
+ self.activation = activation
260
+
261
+ self.skip_add = nn.quantized.FloatFunctional()
262
+
263
+ def forward(self, x):
264
+ """Forward pass.
265
+
266
+ Args:
267
+ x (tensor): input
268
+
269
+ Returns:
270
+ tensor: output
271
+ """
272
+
273
+ out = self.activation(x)
274
+ out = self.conv1(out)
275
+ if self.bn==True:
276
+ out = self.bn1(out)
277
+
278
+ out = self.activation(out)
279
+ out = self.conv2(out)
280
+ if self.bn==True:
281
+ out = self.bn2(out)
282
+
283
+ if self.groups > 1:
284
+ out = self.conv_merge(out)
285
+
286
+ return self.skip_add.add(out, x)
287
+
288
+ # return out + x
289
+
290
+
291
+ class FeatureFusionBlock_custom(nn.Module):
292
+ """Feature fusion block.
293
+ """
294
+
295
+ def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True):
296
+ """Init.
297
+
298
+ Args:
299
+ features (int): number of features
300
+ """
301
+ super(FeatureFusionBlock_custom, self).__init__()
302
+
303
+ self.deconv = deconv
304
+ self.align_corners = align_corners
305
+
306
+ self.groups=1
307
+
308
+ self.expand = expand
309
+ out_features = features
310
+ if self.expand==True:
311
+ out_features = features//2
312
+
313
+ self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
314
+
315
+ self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
316
+ self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
317
+
318
+ self.skip_add = nn.quantized.FloatFunctional()
319
+
320
+ def forward(self, *xs):
321
+ """Forward pass.
322
+
323
+ Returns:
324
+ tensor: output
325
+ """
326
+ output = xs[0]
327
+
328
+ if len(xs) == 2:
329
+ res = self.resConfUnit1(xs[1])
330
+ output = self.skip_add.add(output, res)
331
+ # output += res
332
+
333
+ output = self.resConfUnit2(output)
334
+
335
+ output = nn.functional.interpolate(
336
+ output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
337
+ )
338
+
339
+ output = self.out_conv(output)
340
+
341
+ return output
342
+
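To illustrate what the fusion blocks defined above do at the tensor level (the shapes are made up; each forward call optionally adds a skip branch, refines it, and upsamples by 2x):

```python
# Minimal sketch: FeatureFusionBlock_custom adds a skip branch and upsamples 2x.
import torch
import torch.nn as nn

block = FeatureFusionBlock_custom(64, nn.ReLU(False), bn=False, align_corners=True)
path = torch.randn(1, 64, 12, 12)   # feature from the previous decoder stage
skip = torch.randn(1, 64, 12, 12)   # encoder feature at the same resolution
out = block(path, skip)             # -> 1 x 64 x 24 x 24
```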
text2tex/models/ControlNet/annotator/midas/midas/dpt_depth.py ADDED
@@ -0,0 +1,109 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+
5
+ from .base_model import BaseModel
6
+ from .blocks import (
7
+ FeatureFusionBlock,
8
+ FeatureFusionBlock_custom,
9
+ Interpolate,
10
+ _make_encoder,
11
+ forward_vit,
12
+ )
13
+
14
+
15
+ def _make_fusion_block(features, use_bn):
16
+ return FeatureFusionBlock_custom(
17
+ features,
18
+ nn.ReLU(False),
19
+ deconv=False,
20
+ bn=use_bn,
21
+ expand=False,
22
+ align_corners=True,
23
+ )
24
+
25
+
26
+ class DPT(BaseModel):
27
+ def __init__(
28
+ self,
29
+ head,
30
+ features=256,
31
+ backbone="vitb_rn50_384",
32
+ readout="project",
33
+ channels_last=False,
34
+ use_bn=False,
35
+ ):
36
+
37
+ super(DPT, self).__init__()
38
+
39
+ self.channels_last = channels_last
40
+
41
+ hooks = {
42
+ "vitb_rn50_384": [0, 1, 8, 11],
43
+ "vitb16_384": [2, 5, 8, 11],
44
+ "vitl16_384": [5, 11, 17, 23],
45
+ }
46
+
47
+ # Instantiate backbone and reassemble blocks
48
+ self.pretrained, self.scratch = _make_encoder(
49
+ backbone,
50
+ features,
51
+ False, # Set to true of you want to train from scratch, uses ImageNet weights
52
+ groups=1,
53
+ expand=False,
54
+ exportable=False,
55
+ hooks=hooks[backbone],
56
+ use_readout=readout,
57
+ )
58
+
59
+ self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
60
+ self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
61
+ self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
62
+ self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
63
+
64
+ self.scratch.output_conv = head
65
+
66
+
67
+ def forward(self, x):
68
+ if self.channels_last == True:
69
+ x.contiguous(memory_format=torch.channels_last)
70
+
71
+ layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)
72
+
73
+ layer_1_rn = self.scratch.layer1_rn(layer_1)
74
+ layer_2_rn = self.scratch.layer2_rn(layer_2)
75
+ layer_3_rn = self.scratch.layer3_rn(layer_3)
76
+ layer_4_rn = self.scratch.layer4_rn(layer_4)
77
+
78
+ path_4 = self.scratch.refinenet4(layer_4_rn)
79
+ path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
80
+ path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
81
+ path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
82
+
83
+ out = self.scratch.output_conv(path_1)
84
+
85
+ return out
86
+
87
+
88
+ class DPTDepthModel(DPT):
89
+ def __init__(self, path=None, non_negative=True, **kwargs):
90
+ features = kwargs["features"] if "features" in kwargs else 256
91
+
92
+ head = nn.Sequential(
93
+ nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
94
+ Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
95
+ nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
96
+ nn.ReLU(True),
97
+ nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
98
+ nn.ReLU(True) if non_negative else nn.Identity(),
99
+ nn.Identity(),
100
+ )
101
+
102
+ super().__init__(head, **kwargs)
103
+
104
+ if path is not None:
105
+ self.load(path)
106
+
107
+ def forward(self, x):
108
+ return super().forward(x).squeeze(dim=1)
109
+
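A minimal instantiation sketch for DPTDepthModel (path=None skips loading MiDaS weights; the backbone is still created through timm, so a timm release that registers vit_base_resnet50_384 is assumed):

```python
# Minimal sketch: build the hybrid DPT depth model without pretrained MiDaS weights.
import torch

model = DPTDepthModel(path=None, backbone="vitb_rn50_384", non_negative=True).eval()
x = torch.randn(1, 3, 384, 384)     # the MiDaS transforms keep H and W multiples of 32
with torch.no_grad():
    depth = model(x)                # -> 1 x 384 x 384 relative inverse depth
```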
text2tex/models/ControlNet/annotator/midas/midas/midas_net.py ADDED
@@ -0,0 +1,76 @@
+ """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
+ This file contains code that is adapted from
+ https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
+ """
+ import torch
+ import torch.nn as nn
+
+ from .base_model import BaseModel
+ from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
+
+
+ class MidasNet(BaseModel):
+     """Network for monocular depth estimation.
+     """
+
+     def __init__(self, path=None, features=256, non_negative=True):
+         """Init.
+
+         Args:
+             path (str, optional): Path to saved model. Defaults to None.
+             features (int, optional): Number of features. Defaults to 256.
+             backbone (str, optional): Backbone network for encoder. Defaults to resnet50
+         """
+         print("Loading weights: ", path)
+
+         super(MidasNet, self).__init__()
+
+         use_pretrained = False if path is None else True
+
+         self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained)
+
+         self.scratch.refinenet4 = FeatureFusionBlock(features)
+         self.scratch.refinenet3 = FeatureFusionBlock(features)
+         self.scratch.refinenet2 = FeatureFusionBlock(features)
+         self.scratch.refinenet1 = FeatureFusionBlock(features)
+
+         self.scratch.output_conv = nn.Sequential(
+             nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
+             Interpolate(scale_factor=2, mode="bilinear"),
+             nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
+             nn.ReLU(True),
+             nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
+             nn.ReLU(True) if non_negative else nn.Identity(),
+         )
+
+         if path:
+             self.load(path)
+
+     def forward(self, x):
+         """Forward pass.
+
+         Args:
+             x (tensor): input data (image)
+
+         Returns:
+             tensor: depth
+         """
+
+         layer_1 = self.pretrained.layer1(x)
+         layer_2 = self.pretrained.layer2(layer_1)
+         layer_3 = self.pretrained.layer3(layer_2)
+         layer_4 = self.pretrained.layer4(layer_3)
+
+         layer_1_rn = self.scratch.layer1_rn(layer_1)
+         layer_2_rn = self.scratch.layer2_rn(layer_2)
+         layer_3_rn = self.scratch.layer3_rn(layer_3)
+         layer_4_rn = self.scratch.layer4_rn(layer_4)
+
+         path_4 = self.scratch.refinenet4(layer_4_rn)
+         path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
+         path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
+         path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
+
+         out = self.scratch.output_conv(path_1)
+
+         return torch.squeeze(out, dim=1)
text2tex/models/ControlNet/annotator/midas/midas/midas_net_custom.py ADDED
@@ -0,0 +1,128 @@
1
+ """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
2
+ This file contains code that is adapted from
3
+ https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
4
+ """
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+ from .base_model import BaseModel
9
+ from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
10
+
11
+
12
+ class MidasNet_small(BaseModel):
13
+ """Network for monocular depth estimation.
14
+ """
15
+
16
+ def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True,
17
+ blocks={'expand': True}):
18
+ """Init.
19
+
20
+ Args:
21
+ path (str, optional): Path to saved model. Defaults to None.
22
+ features (int, optional): Number of features. Defaults to 256.
23
+ backbone (str, optional): Backbone network for encoder. Defaults to resnet50
24
+ """
25
+ print("Loading weights: ", path)
26
+
27
+ super(MidasNet_small, self).__init__()
28
+
29
+ use_pretrained = False if path else True
30
+
31
+ self.channels_last = channels_last
32
+ self.blocks = blocks
33
+ self.backbone = backbone
34
+
35
+ self.groups = 1
36
+
37
+ features1=features
38
+ features2=features
39
+ features3=features
40
+ features4=features
41
+ self.expand = False
42
+ if "expand" in self.blocks and self.blocks['expand'] == True:
43
+ self.expand = True
44
+ features1=features
45
+ features2=features*2
46
+ features3=features*4
47
+ features4=features*8
48
+
49
+ self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)
50
+
51
+ self.scratch.activation = nn.ReLU(False)
52
+
53
+ self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
54
+ self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
55
+ self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
56
+ self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)
57
+
58
+
59
+ self.scratch.output_conv = nn.Sequential(
60
+ nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups),
61
+ Interpolate(scale_factor=2, mode="bilinear"),
62
+ nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1),
63
+ self.scratch.activation,
64
+ nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
65
+ nn.ReLU(True) if non_negative else nn.Identity(),
66
+ nn.Identity(),
67
+ )
68
+
69
+ if path:
70
+ self.load(path)
71
+
72
+
73
+ def forward(self, x):
74
+ """Forward pass.
75
+
76
+ Args:
77
+ x (tensor): input data (image)
78
+
79
+ Returns:
80
+ tensor: depth
81
+ """
82
+ if self.channels_last==True:
83
+ print("self.channels_last = ", self.channels_last)
84
+ x.contiguous(memory_format=torch.channels_last)
85
+
86
+
87
+ layer_1 = self.pretrained.layer1(x)
88
+ layer_2 = self.pretrained.layer2(layer_1)
89
+ layer_3 = self.pretrained.layer3(layer_2)
90
+ layer_4 = self.pretrained.layer4(layer_3)
91
+
92
+ layer_1_rn = self.scratch.layer1_rn(layer_1)
93
+ layer_2_rn = self.scratch.layer2_rn(layer_2)
94
+ layer_3_rn = self.scratch.layer3_rn(layer_3)
95
+ layer_4_rn = self.scratch.layer4_rn(layer_4)
96
+
97
+
98
+ path_4 = self.scratch.refinenet4(layer_4_rn)
99
+ path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
100
+ path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
101
+ path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
102
+
103
+ out = self.scratch.output_conv(path_1)
104
+
105
+ return torch.squeeze(out, dim=1)
106
+
107
+
108
+
109
+ def fuse_model(m):
110
+ prev_previous_type = nn.Identity()
111
+ prev_previous_name = ''
112
+ previous_type = nn.Identity()
113
+ previous_name = ''
114
+ for name, module in m.named_modules():
115
+ if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:
116
+ # print("FUSED ", prev_previous_name, previous_name, name)
117
+ torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)
118
+ elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:
119
+ # print("FUSED ", prev_previous_name, previous_name)
120
+ torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)
121
+ # elif previous_type == nn.Conv2d and type(module) == nn.ReLU:
122
+ # print("FUSED ", previous_name, name)
123
+ # torch.quantization.fuse_modules(m, [previous_name, name], inplace=True)
124
+
125
+ prev_previous_type = previous_type
126
+ prev_previous_name = previous_name
127
+ previous_type = type(module)
128
+ previous_name = name
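The fuse_model helper above is a standalone utility for quantization-friendly export; a rough sketch of how it might be applied (the efficientnet_lite3 backbone is fetched via torch.hub on first use, and fusion expects the model in eval mode):

```python
# Rough sketch: fold Conv2d + BatchNorm2d (+ ReLU) runs in-place before quantized export.
net = MidasNet_small(None, features=64, backbone="efficientnet_lite3",
                     exportable=True, non_negative=True, blocks={'expand': True}).eval()
fuse_model(net)   # applies torch.quantization.fuse_modules to eligible module runs
```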
text2tex/models/ControlNet/annotator/midas/midas/transforms.py ADDED
@@ -0,0 +1,234 @@
1
+ import numpy as np
2
+ import cv2
3
+ import math
4
+
5
+
6
+ def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
7
+ """Rezise the sample to ensure the given size. Keeps aspect ratio.
8
+
9
+ Args:
10
+ sample (dict): sample
11
+ size (tuple): image size
12
+
13
+ Returns:
14
+ tuple: new size
15
+ """
16
+ shape = list(sample["disparity"].shape)
17
+
18
+ if shape[0] >= size[0] and shape[1] >= size[1]:
19
+ return sample
20
+
21
+ scale = [0, 0]
22
+ scale[0] = size[0] / shape[0]
23
+ scale[1] = size[1] / shape[1]
24
+
25
+ scale = max(scale)
26
+
27
+ shape[0] = math.ceil(scale * shape[0])
28
+ shape[1] = math.ceil(scale * shape[1])
29
+
30
+ # resize
31
+ sample["image"] = cv2.resize(
32
+ sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
33
+ )
34
+
35
+ sample["disparity"] = cv2.resize(
36
+ sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
37
+ )
38
+ sample["mask"] = cv2.resize(
39
+ sample["mask"].astype(np.float32),
40
+ tuple(shape[::-1]),
41
+ interpolation=cv2.INTER_NEAREST,
42
+ )
43
+ sample["mask"] = sample["mask"].astype(bool)
44
+
45
+ return tuple(shape)
46
+
47
+
48
+ class Resize(object):
49
+ """Resize sample to given size (width, height).
50
+ """
51
+
52
+ def __init__(
53
+ self,
54
+ width,
55
+ height,
56
+ resize_target=True,
57
+ keep_aspect_ratio=False,
58
+ ensure_multiple_of=1,
59
+ resize_method="lower_bound",
60
+ image_interpolation_method=cv2.INTER_AREA,
61
+ ):
62
+ """Init.
63
+
64
+ Args:
65
+ width (int): desired output width
66
+ height (int): desired output height
67
+ resize_target (bool, optional):
68
+ True: Resize the full sample (image, mask, target).
69
+ False: Resize image only.
70
+ Defaults to True.
71
+ keep_aspect_ratio (bool, optional):
72
+ True: Keep the aspect ratio of the input sample.
73
+ Output sample might not have the given width and height, and
74
+ resize behaviour depends on the parameter 'resize_method'.
75
+ Defaults to False.
76
+ ensure_multiple_of (int, optional):
77
+ Output width and height is constrained to be multiple of this parameter.
78
+ Defaults to 1.
79
+ resize_method (str, optional):
80
+ "lower_bound": Output will be at least as large as the given size.
81
+ "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
82
+ "minimal": Scale as least as possible. (Output size might be smaller than given size.)
83
+ Defaults to "lower_bound".
84
+ """
85
+ self.__width = width
86
+ self.__height = height
87
+
88
+ self.__resize_target = resize_target
89
+ self.__keep_aspect_ratio = keep_aspect_ratio
90
+ self.__multiple_of = ensure_multiple_of
91
+ self.__resize_method = resize_method
92
+ self.__image_interpolation_method = image_interpolation_method
93
+
94
+ def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
95
+ y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
96
+
97
+ if max_val is not None and y > max_val:
98
+ y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
99
+
100
+ if y < min_val:
101
+ y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
102
+
103
+ return y
104
+
105
+ def get_size(self, width, height):
106
+ # determine new height and width
107
+ scale_height = self.__height / height
108
+ scale_width = self.__width / width
109
+
110
+ if self.__keep_aspect_ratio:
111
+ if self.__resize_method == "lower_bound":
112
+ # scale such that output size is lower bound
113
+ if scale_width > scale_height:
114
+ # fit width
115
+ scale_height = scale_width
116
+ else:
117
+ # fit height
118
+ scale_width = scale_height
119
+ elif self.__resize_method == "upper_bound":
120
+ # scale such that output size is upper bound
121
+ if scale_width < scale_height:
122
+ # fit width
123
+ scale_height = scale_width
124
+ else:
125
+ # fit height
126
+ scale_width = scale_height
127
+ elif self.__resize_method == "minimal":
128
+ # scale as least as possbile
129
+ if abs(1 - scale_width) < abs(1 - scale_height):
130
+ # fit width
131
+ scale_height = scale_width
132
+ else:
133
+ # fit height
134
+ scale_width = scale_height
135
+ else:
136
+ raise ValueError(
137
+ f"resize_method {self.__resize_method} not implemented"
138
+ )
139
+
140
+ if self.__resize_method == "lower_bound":
141
+ new_height = self.constrain_to_multiple_of(
142
+ scale_height * height, min_val=self.__height
143
+ )
144
+ new_width = self.constrain_to_multiple_of(
145
+ scale_width * width, min_val=self.__width
146
+ )
147
+ elif self.__resize_method == "upper_bound":
148
+ new_height = self.constrain_to_multiple_of(
149
+ scale_height * height, max_val=self.__height
150
+ )
151
+ new_width = self.constrain_to_multiple_of(
152
+ scale_width * width, max_val=self.__width
153
+ )
154
+ elif self.__resize_method == "minimal":
155
+ new_height = self.constrain_to_multiple_of(scale_height * height)
156
+ new_width = self.constrain_to_multiple_of(scale_width * width)
157
+ else:
158
+ raise ValueError(f"resize_method {self.__resize_method} not implemented")
159
+
160
+ return (new_width, new_height)
161
+
162
+ def __call__(self, sample):
163
+ width, height = self.get_size(
164
+ sample["image"].shape[1], sample["image"].shape[0]
165
+ )
166
+
167
+ # resize sample
168
+ sample["image"] = cv2.resize(
169
+ sample["image"],
170
+ (width, height),
171
+ interpolation=self.__image_interpolation_method,
172
+ )
173
+
174
+ if self.__resize_target:
175
+ if "disparity" in sample:
176
+ sample["disparity"] = cv2.resize(
177
+ sample["disparity"],
178
+ (width, height),
179
+ interpolation=cv2.INTER_NEAREST,
180
+ )
181
+
182
+ if "depth" in sample:
183
+ sample["depth"] = cv2.resize(
184
+ sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
185
+ )
186
+
187
+ sample["mask"] = cv2.resize(
188
+ sample["mask"].astype(np.float32),
189
+ (width, height),
190
+ interpolation=cv2.INTER_NEAREST,
191
+ )
192
+ sample["mask"] = sample["mask"].astype(bool)
193
+
194
+ return sample
195
+
196
+
197
+ class NormalizeImage(object):
198
+ """Normlize image by given mean and std.
199
+ """
200
+
201
+ def __init__(self, mean, std):
202
+ self.__mean = mean
203
+ self.__std = std
204
+
205
+ def __call__(self, sample):
206
+ sample["image"] = (sample["image"] - self.__mean) / self.__std
207
+
208
+ return sample
209
+
210
+
211
+ class PrepareForNet(object):
212
+ """Prepare sample for usage as network input.
213
+ """
214
+
215
+ def __init__(self):
216
+ pass
217
+
218
+ def __call__(self, sample):
219
+ image = np.transpose(sample["image"], (2, 0, 1))
220
+ sample["image"] = np.ascontiguousarray(image).astype(np.float32)
221
+
222
+ if "mask" in sample:
223
+ sample["mask"] = sample["mask"].astype(np.float32)
224
+ sample["mask"] = np.ascontiguousarray(sample["mask"])
225
+
226
+ if "disparity" in sample:
227
+ disparity = sample["disparity"].astype(np.float32)
228
+ sample["disparity"] = np.ascontiguousarray(disparity)
229
+
230
+ if "depth" in sample:
231
+ depth = sample["depth"].astype(np.float32)
232
+ sample["depth"] = np.ascontiguousarray(depth)
233
+
234
+ return sample
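A small end-to-end sketch of the three transforms above chained with torchvision's Compose (the input size and the [0, 1] float range are assumptions; with resize_target=None only the image key is resized):

```python
# Minimal sketch: resize to a multiple of 32, normalize, and convert to CHW float32.
import cv2
import numpy as np
from torchvision.transforms import Compose

transform = Compose([
    Resize(384, 384, resize_target=None, keep_aspect_ratio=True,
           ensure_multiple_of=32, resize_method="minimal",
           image_interpolation_method=cv2.INTER_CUBIC),
    NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    PrepareForNet(),
])

sample = {"image": np.random.rand(480, 640, 3).astype(np.float32)}
out = transform(sample)
print(out["image"].shape, out["image"].dtype)   # (3, 384, 512) float32
```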
text2tex/models/ControlNet/annotator/midas/midas/vit.py ADDED
@@ -0,0 +1,491 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import timm
4
+ import types
5
+ import math
6
+ import torch.nn.functional as F
7
+
8
+
9
+ class Slice(nn.Module):
10
+ def __init__(self, start_index=1):
11
+ super(Slice, self).__init__()
12
+ self.start_index = start_index
13
+
14
+ def forward(self, x):
15
+ return x[:, self.start_index :]
16
+
17
+
18
+ class AddReadout(nn.Module):
19
+ def __init__(self, start_index=1):
20
+ super(AddReadout, self).__init__()
21
+ self.start_index = start_index
22
+
23
+ def forward(self, x):
24
+ if self.start_index == 2:
25
+ readout = (x[:, 0] + x[:, 1]) / 2
26
+ else:
27
+ readout = x[:, 0]
28
+ return x[:, self.start_index :] + readout.unsqueeze(1)
29
+
30
+
31
+ class ProjectReadout(nn.Module):
32
+ def __init__(self, in_features, start_index=1):
33
+ super(ProjectReadout, self).__init__()
34
+ self.start_index = start_index
35
+
36
+ self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())
37
+
38
+ def forward(self, x):
39
+ readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index :])
40
+ features = torch.cat((x[:, self.start_index :], readout), -1)
41
+
42
+ return self.project(features)
43
+
44
+
45
+ class Transpose(nn.Module):
46
+ def __init__(self, dim0, dim1):
47
+ super(Transpose, self).__init__()
48
+ self.dim0 = dim0
49
+ self.dim1 = dim1
50
+
51
+ def forward(self, x):
52
+ x = x.transpose(self.dim0, self.dim1)
53
+ return x
54
+
55
+
56
+ def forward_vit(pretrained, x):
57
+ b, c, h, w = x.shape
58
+
59
+ glob = pretrained.model.forward_flex(x)
60
+
61
+ layer_1 = pretrained.activations["1"]
62
+ layer_2 = pretrained.activations["2"]
63
+ layer_3 = pretrained.activations["3"]
64
+ layer_4 = pretrained.activations["4"]
65
+
66
+ layer_1 = pretrained.act_postprocess1[0:2](layer_1)
67
+ layer_2 = pretrained.act_postprocess2[0:2](layer_2)
68
+ layer_3 = pretrained.act_postprocess3[0:2](layer_3)
69
+ layer_4 = pretrained.act_postprocess4[0:2](layer_4)
70
+
71
+ unflatten = nn.Sequential(
72
+ nn.Unflatten(
73
+ 2,
74
+ torch.Size(
75
+ [
76
+ h // pretrained.model.patch_size[1],
77
+ w // pretrained.model.patch_size[0],
78
+ ]
79
+ ),
80
+ )
81
+ )
82
+
83
+ if layer_1.ndim == 3:
84
+ layer_1 = unflatten(layer_1)
85
+ if layer_2.ndim == 3:
86
+ layer_2 = unflatten(layer_2)
87
+ if layer_3.ndim == 3:
88
+ layer_3 = unflatten(layer_3)
89
+ if layer_4.ndim == 3:
90
+ layer_4 = unflatten(layer_4)
91
+
92
+ layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
93
+ layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
94
+ layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
95
+ layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)
96
+
97
+ return layer_1, layer_2, layer_3, layer_4
98
+
99
+
100
+ def _resize_pos_embed(self, posemb, gs_h, gs_w):
101
+ posemb_tok, posemb_grid = (
102
+ posemb[:, : self.start_index],
103
+ posemb[0, self.start_index :],
104
+ )
105
+
106
+ gs_old = int(math.sqrt(len(posemb_grid)))
107
+
108
+ posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
109
+ posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
110
+ posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
111
+
112
+ posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
113
+
114
+ return posemb
115
+
116
+
117
+ def forward_flex(self, x):
118
+ b, c, h, w = x.shape
119
+
120
+ pos_embed = self._resize_pos_embed(
121
+ self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
122
+ )
123
+
124
+ B = x.shape[0]
125
+
126
+ if hasattr(self.patch_embed, "backbone"):
127
+ x = self.patch_embed.backbone(x)
128
+ if isinstance(x, (list, tuple)):
129
+ x = x[-1] # last feature if backbone outputs list/tuple of features
130
+
131
+ x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)
132
+
133
+ if getattr(self, "dist_token", None) is not None:
134
+ cls_tokens = self.cls_token.expand(
135
+ B, -1, -1
136
+ ) # stole cls_tokens impl from Phil Wang, thanks
137
+ dist_token = self.dist_token.expand(B, -1, -1)
138
+ x = torch.cat((cls_tokens, dist_token, x), dim=1)
139
+ else:
140
+ cls_tokens = self.cls_token.expand(
141
+ B, -1, -1
142
+ ) # stole cls_tokens impl from Phil Wang, thanks
143
+ x = torch.cat((cls_tokens, x), dim=1)
144
+
145
+ x = x + pos_embed
146
+ x = self.pos_drop(x)
147
+
148
+ for blk in self.blocks:
149
+ x = blk(x)
150
+
151
+ x = self.norm(x)
152
+
153
+ return x
154
+
155
+
156
+ activations = {}
157
+
158
+
159
+ def get_activation(name):
160
+ def hook(model, input, output):
161
+ activations[name] = output
162
+
163
+ return hook
164
+
165
+
166
+ def get_readout_oper(vit_features, features, use_readout, start_index=1):
167
+ if use_readout == "ignore":
168
+ readout_oper = [Slice(start_index)] * len(features)
169
+ elif use_readout == "add":
170
+ readout_oper = [AddReadout(start_index)] * len(features)
171
+ elif use_readout == "project":
172
+ readout_oper = [
173
+ ProjectReadout(vit_features, start_index) for out_feat in features
174
+ ]
175
+ else:
176
+ assert (
177
+ False
178
+ ), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
179
+
180
+ return readout_oper
181
+
182
+
183
+ def _make_vit_b16_backbone(
184
+ model,
185
+ features=[96, 192, 384, 768],
186
+ size=[384, 384],
187
+ hooks=[2, 5, 8, 11],
188
+ vit_features=768,
189
+ use_readout="ignore",
190
+ start_index=1,
191
+ ):
192
+ pretrained = nn.Module()
193
+
194
+ pretrained.model = model
195
+ pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
196
+ pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
197
+ pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
198
+ pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
199
+
200
+ pretrained.activations = activations
201
+
202
+ readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
203
+
204
+ # 32, 48, 136, 384
205
+ pretrained.act_postprocess1 = nn.Sequential(
206
+ readout_oper[0],
207
+ Transpose(1, 2),
208
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
209
+ nn.Conv2d(
210
+ in_channels=vit_features,
211
+ out_channels=features[0],
212
+ kernel_size=1,
213
+ stride=1,
214
+ padding=0,
215
+ ),
216
+ nn.ConvTranspose2d(
217
+ in_channels=features[0],
218
+ out_channels=features[0],
219
+ kernel_size=4,
220
+ stride=4,
221
+ padding=0,
222
+ bias=True,
223
+ dilation=1,
224
+ groups=1,
225
+ ),
226
+ )
227
+
228
+ pretrained.act_postprocess2 = nn.Sequential(
229
+ readout_oper[1],
230
+ Transpose(1, 2),
231
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
232
+ nn.Conv2d(
233
+ in_channels=vit_features,
234
+ out_channels=features[1],
235
+ kernel_size=1,
236
+ stride=1,
237
+ padding=0,
238
+ ),
239
+ nn.ConvTranspose2d(
240
+ in_channels=features[1],
241
+ out_channels=features[1],
242
+ kernel_size=2,
243
+ stride=2,
244
+ padding=0,
245
+ bias=True,
246
+ dilation=1,
247
+ groups=1,
248
+ ),
249
+ )
250
+
251
+ pretrained.act_postprocess3 = nn.Sequential(
252
+ readout_oper[2],
253
+ Transpose(1, 2),
254
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
255
+ nn.Conv2d(
256
+ in_channels=vit_features,
257
+ out_channels=features[2],
258
+ kernel_size=1,
259
+ stride=1,
260
+ padding=0,
261
+ ),
262
+ )
263
+
264
+ pretrained.act_postprocess4 = nn.Sequential(
265
+ readout_oper[3],
266
+ Transpose(1, 2),
267
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
268
+ nn.Conv2d(
269
+ in_channels=vit_features,
270
+ out_channels=features[3],
271
+ kernel_size=1,
272
+ stride=1,
273
+ padding=0,
274
+ ),
275
+ nn.Conv2d(
276
+ in_channels=features[3],
277
+ out_channels=features[3],
278
+ kernel_size=3,
279
+ stride=2,
280
+ padding=1,
281
+ ),
282
+ )
283
+
284
+ pretrained.model.start_index = start_index
285
+ pretrained.model.patch_size = [16, 16]
286
+
287
+ # We inject this function into the VisionTransformer instances so that
288
+ # we can use it with interpolated position embeddings without modifying the library source.
289
+ pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
290
+ pretrained.model._resize_pos_embed = types.MethodType(
291
+ _resize_pos_embed, pretrained.model
292
+ )
293
+
294
+ return pretrained
295
+
296
+
297
+ def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
298
+ model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)
299
+
300
+ hooks = [5, 11, 17, 23] if hooks == None else hooks
301
+ return _make_vit_b16_backbone(
302
+ model,
303
+ features=[256, 512, 1024, 1024],
304
+ hooks=hooks,
305
+ vit_features=1024,
306
+ use_readout=use_readout,
307
+ )
308
+
309
+
310
+ def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
311
+ model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)
312
+
313
+ hooks = [2, 5, 8, 11] if hooks == None else hooks
314
+ return _make_vit_b16_backbone(
315
+ model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
316
+ )
317
+
318
+
319
+ def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None):
320
+ model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)
321
+
322
+ hooks = [2, 5, 8, 11] if hooks == None else hooks
323
+ return _make_vit_b16_backbone(
324
+ model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
325
+ )
326
+
327
+
328
+ def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None):
329
+ model = timm.create_model(
330
+ "vit_deit_base_distilled_patch16_384", pretrained=pretrained
331
+ )
332
+
333
+ hooks = [2, 5, 8, 11] if hooks == None else hooks
334
+ return _make_vit_b16_backbone(
335
+ model,
336
+ features=[96, 192, 384, 768],
337
+ hooks=hooks,
338
+ use_readout=use_readout,
339
+ start_index=2,
340
+ )
341
+
342
+
343
+ def _make_vit_b_rn50_backbone(
344
+ model,
345
+ features=[256, 512, 768, 768],
346
+ size=[384, 384],
347
+ hooks=[0, 1, 8, 11],
348
+ vit_features=768,
349
+ use_vit_only=False,
350
+ use_readout="ignore",
351
+ start_index=1,
352
+ ):
353
+ pretrained = nn.Module()
354
+
355
+ pretrained.model = model
356
+
357
+ if use_vit_only == True:
358
+ pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
359
+ pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
360
+ else:
361
+ pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
362
+ get_activation("1")
363
+ )
364
+ pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
365
+ get_activation("2")
366
+ )
367
+
368
+ pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
369
+ pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
370
+
371
+ pretrained.activations = activations
372
+
373
+ readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
374
+
375
+ if use_vit_only == True:
376
+ pretrained.act_postprocess1 = nn.Sequential(
377
+ readout_oper[0],
378
+ Transpose(1, 2),
379
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
380
+ nn.Conv2d(
381
+ in_channels=vit_features,
382
+ out_channels=features[0],
383
+ kernel_size=1,
384
+ stride=1,
385
+ padding=0,
386
+ ),
387
+ nn.ConvTranspose2d(
388
+ in_channels=features[0],
389
+ out_channels=features[0],
390
+ kernel_size=4,
391
+ stride=4,
392
+ padding=0,
393
+ bias=True,
394
+ dilation=1,
395
+ groups=1,
396
+ ),
397
+ )
398
+
399
+ pretrained.act_postprocess2 = nn.Sequential(
400
+ readout_oper[1],
401
+ Transpose(1, 2),
402
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
403
+ nn.Conv2d(
404
+ in_channels=vit_features,
405
+ out_channels=features[1],
406
+ kernel_size=1,
407
+ stride=1,
408
+ padding=0,
409
+ ),
410
+ nn.ConvTranspose2d(
411
+ in_channels=features[1],
412
+ out_channels=features[1],
413
+ kernel_size=2,
414
+ stride=2,
415
+ padding=0,
416
+ bias=True,
417
+ dilation=1,
418
+ groups=1,
419
+ ),
420
+ )
421
+ else:
422
+ pretrained.act_postprocess1 = nn.Sequential(
423
+ nn.Identity(), nn.Identity(), nn.Identity()
424
+ )
425
+ pretrained.act_postprocess2 = nn.Sequential(
426
+ nn.Identity(), nn.Identity(), nn.Identity()
427
+ )
428
+
429
+ pretrained.act_postprocess3 = nn.Sequential(
430
+ readout_oper[2],
431
+ Transpose(1, 2),
432
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
433
+ nn.Conv2d(
434
+ in_channels=vit_features,
435
+ out_channels=features[2],
436
+ kernel_size=1,
437
+ stride=1,
438
+ padding=0,
439
+ ),
440
+ )
441
+
442
+ pretrained.act_postprocess4 = nn.Sequential(
443
+ readout_oper[3],
444
+ Transpose(1, 2),
445
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
446
+ nn.Conv2d(
447
+ in_channels=vit_features,
448
+ out_channels=features[3],
449
+ kernel_size=1,
450
+ stride=1,
451
+ padding=0,
452
+ ),
453
+ nn.Conv2d(
454
+ in_channels=features[3],
455
+ out_channels=features[3],
456
+ kernel_size=3,
457
+ stride=2,
458
+ padding=1,
459
+ ),
460
+ )
461
+
462
+ pretrained.model.start_index = start_index
463
+ pretrained.model.patch_size = [16, 16]
464
+
465
+ # We inject this function into the VisionTransformer instances so that
466
+ # we can use it with interpolated position embeddings without modifying the library source.
467
+ pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
468
+
469
+ # We inject this function into the VisionTransformer instances so that
470
+ # we can use it with interpolated position embeddings without modifying the library source.
471
+ pretrained.model._resize_pos_embed = types.MethodType(
472
+ _resize_pos_embed, pretrained.model
473
+ )
474
+
475
+ return pretrained
476
+
477
+
478
+ def _make_pretrained_vitb_rn50_384(
479
+ pretrained, use_readout="ignore", hooks=None, use_vit_only=False
480
+ ):
481
+ model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)
482
+
483
+ hooks = [0, 1, 8, 11] if hooks == None else hooks
484
+ return _make_vit_b_rn50_backbone(
485
+ model,
486
+ features=[256, 512, 768, 768],
487
+ size=[384, 384],
488
+ hooks=hooks,
489
+ use_vit_only=use_vit_only,
490
+ use_readout=use_readout,
491
+ )
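For orientation, a sketch of how the backbone factory and forward_vit above fit together (pretrained=False avoids a weight download; a timm release that still registers vit_base_resnet50_384 is assumed):

```python
# Minimal sketch: build the hybrid ViT backbone and read out the four hooked feature maps.
import torch

backbone = _make_pretrained_vitb_rn50_384(False, use_readout="project")
x = torch.randn(1, 3, 384, 384)
l1, l2, l3, l4 = forward_vit(backbone, x)
# roughly strides 4/8/16/32: 1x256x96x96, 1x512x48x48, 1x768x24x24, 1x768x12x12
```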
text2tex/models/ControlNet/annotator/midas/utils.py ADDED
@@ -0,0 +1,189 @@
1
+ """Utils for monoDepth."""
2
+ import sys
3
+ import re
4
+ import numpy as np
5
+ import cv2
6
+ import torch
7
+
8
+
9
+ def read_pfm(path):
10
+ """Read pfm file.
11
+
12
+ Args:
13
+ path (str): path to file
14
+
15
+ Returns:
16
+ tuple: (data, scale)
17
+ """
18
+ with open(path, "rb") as file:
19
+
20
+ color = None
21
+ width = None
22
+ height = None
23
+ scale = None
24
+ endian = None
25
+
26
+ header = file.readline().rstrip()
27
+ if header.decode("ascii") == "PF":
28
+ color = True
29
+ elif header.decode("ascii") == "Pf":
30
+ color = False
31
+ else:
32
+ raise Exception("Not a PFM file: " + path)
33
+
34
+ dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii"))
35
+ if dim_match:
36
+ width, height = list(map(int, dim_match.groups()))
37
+ else:
38
+ raise Exception("Malformed PFM header.")
39
+
40
+ scale = float(file.readline().decode("ascii").rstrip())
41
+ if scale < 0:
42
+ # little-endian
43
+ endian = "<"
44
+ scale = -scale
45
+ else:
46
+ # big-endian
47
+ endian = ">"
48
+
49
+ data = np.fromfile(file, endian + "f")
50
+ shape = (height, width, 3) if color else (height, width)
51
+
52
+ data = np.reshape(data, shape)
53
+ data = np.flipud(data)
54
+
55
+ return data, scale
56
+
57
+
58
+ def write_pfm(path, image, scale=1):
59
+ """Write pfm file.
60
+
61
+ Args:
62
+ path (str): pathto file
63
+ image (array): data
64
+ scale (int, optional): Scale. Defaults to 1.
65
+ """
66
+
67
+ with open(path, "wb") as file:
68
+ color = None
69
+
70
+ if image.dtype.name != "float32":
71
+ raise Exception("Image dtype must be float32.")
72
+
73
+ image = np.flipud(image)
74
+
75
+ if len(image.shape) == 3 and image.shape[2] == 3: # color image
76
+ color = True
77
+ elif (
78
+ len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
79
+ ): # greyscale
80
+ color = False
81
+ else:
82
+ raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
83
+
84
+ file.write("PF\n" if color else "Pf\n".encode())
85
+ file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
86
+
87
+ endian = image.dtype.byteorder
88
+
89
+ if endian == "<" or endian == "=" and sys.byteorder == "little":
90
+ scale = -scale
91
+
92
+ file.write("%f\n".encode() % scale)
93
+
94
+ image.tofile(file)
95
+
96
+
97
+ def read_image(path):
98
+ """Read image and output RGB image (0-1).
99
+
100
+ Args:
101
+ path (str): path to file
102
+
103
+ Returns:
104
+ array: RGB image (0-1)
105
+ """
106
+ img = cv2.imread(path)
107
+
108
+ if img.ndim == 2:
109
+ img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
110
+
111
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
112
+
113
+ return img
114
+
115
+
116
+ def resize_image(img):
117
+ """Resize image and make it fit for network.
118
+
119
+ Args:
120
+ img (array): image
121
+
122
+ Returns:
123
+ tensor: data ready for network
124
+ """
125
+ height_orig = img.shape[0]
126
+ width_orig = img.shape[1]
127
+
128
+ if width_orig > height_orig:
129
+ scale = width_orig / 384
130
+ else:
131
+ scale = height_orig / 384
132
+
133
+ height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
134
+ width = (np.ceil(width_orig / scale / 32) * 32).astype(int)
135
+
136
+ img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
137
+
138
+ img_resized = (
139
+ torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float()
140
+ )
141
+ img_resized = img_resized.unsqueeze(0)
142
+
143
+ return img_resized
144
+
145
+
146
+ def resize_depth(depth, width, height):
147
+ """Resize depth map and bring to CPU (numpy).
148
+
149
+ Args:
150
+ depth (tensor): depth
151
+ width (int): image width
152
+ height (int): image height
153
+
154
+ Returns:
155
+ array: processed depth
156
+ """
157
+ depth = torch.squeeze(depth[0, :, :, :]).to("cpu")
158
+
159
+ depth_resized = cv2.resize(
160
+ depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
161
+ )
162
+
163
+ return depth_resized
164
+
165
+ def write_depth(path, depth, bits=1):
166
+ """Write depth map to pfm and png file.
167
+
168
+ Args:
169
+ path (str): filepath without extension
170
+ depth (array): depth
171
+ """
172
+ write_pfm(path + ".pfm", depth.astype(np.float32))
173
+
174
+ depth_min = depth.min()
175
+ depth_max = depth.max()
176
+
177
+ max_val = (2**(8*bits))-1
178
+
179
+ if depth_max - depth_min > np.finfo("float").eps:
180
+ out = max_val * (depth - depth_min) / (depth_max - depth_min)
181
+ else:
182
+ out = np.zeros(depth.shape, dtype=depth.type)
183
+
184
+ if bits == 1:
185
+ cv2.imwrite(path + ".png", out.astype("uint8"))
186
+ elif bits == 2:
187
+ cv2.imwrite(path + ".png", out.astype("uint16"))
188
+
189
+ return
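A short sketch chaining the I/O helpers above (input.jpg is a placeholder path, and a random tensor stands in for a real model prediction; write_depth emits both output_depth.pfm and output_depth.png):

```python
# Minimal sketch: read an image, fake a prediction, resize it back, and save the depth map.
import torch

img = read_image("input.jpg")                               # H x W x 3 RGB in [0, 1]
inp = resize_image(img)                                     # 1 x 3 x H' x W' float tensor
prediction = torch.rand(1, 1, inp.shape[2], inp.shape[3])   # stand-in for a model output
depth = resize_depth(prediction, img.shape[1], img.shape[0])
write_depth("output_depth", depth, bits=2)                  # 16-bit PNG plus PFM
```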
text2tex/models/ControlNet/annotator/mlsd/__init__.py ADDED
@@ -0,0 +1,30 @@
+ import cv2
+ import numpy as np
+ import torch
+ import os
+
+ from einops import rearrange
+ from .models.mbv2_mlsd_tiny import MobileV2_MLSD_Tiny
+ from .models.mbv2_mlsd_large import MobileV2_MLSD_Large
+ from .utils import pred_lines
+
+
+ model_path = './annotator/ckpts/mlsd_large_512_fp32.pth'
+ model = MobileV2_MLSD_Large()
+ model.load_state_dict(torch.load(model_path), strict=True)
+ model = model.cuda().eval()
+
+
+ def apply_mlsd(input_image, thr_v, thr_d):
+     assert input_image.ndim == 3
+     img = input_image
+     img_output = np.zeros_like(img)
+     try:
+         with torch.no_grad():
+             lines = pred_lines(img, model, [img.shape[0], img.shape[1]], thr_v, thr_d)
+             for line in lines:
+                 x_start, y_start, x_end, y_end = [int(val) for val in line]
+                 cv2.line(img_output, (x_start, y_start), (x_end, y_end), [255, 255, 255], 1)
+     except Exception as e:
+         pass
+     return img_output[:, :, 0]
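A minimal call sketch for the wrapper above (a CUDA device and the mlsd_large_512_fp32.pth checkpoint are assumed; 0.1/0.1 are the thresholds the ControlNet demos use by default, and the import path is an assumption):

```python
# Minimal sketch: detect line segments and return a single-channel line map.
import numpy as np
from annotator.mlsd import apply_mlsd  # assumed package-relative import path

rgb = np.zeros((512, 512, 3), dtype=np.uint8)      # any H x W x 3 uint8 image
line_map = apply_mlsd(rgb, thr_v=0.1, thr_d=0.1)   # H x W uint8, white lines on black
```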
text2tex/models/ControlNet/annotator/mlsd/models/mbv2_mlsd_large.py ADDED
@@ -0,0 +1,292 @@
1
+ import os
2
+ import sys
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.utils.model_zoo as model_zoo
6
+ from torch.nn import functional as F
7
+
8
+
9
+ class BlockTypeA(nn.Module):
10
+ def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True):
11
+ super(BlockTypeA, self).__init__()
12
+ self.conv1 = nn.Sequential(
13
+ nn.Conv2d(in_c2, out_c2, kernel_size=1),
14
+ nn.BatchNorm2d(out_c2),
15
+ nn.ReLU(inplace=True)
16
+ )
17
+ self.conv2 = nn.Sequential(
18
+ nn.Conv2d(in_c1, out_c1, kernel_size=1),
19
+ nn.BatchNorm2d(out_c1),
20
+ nn.ReLU(inplace=True)
21
+ )
22
+ self.upscale = upscale
23
+
24
+ def forward(self, a, b):
25
+ b = self.conv1(b)
26
+ a = self.conv2(a)
27
+ if self.upscale:
28
+ b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True)
29
+ return torch.cat((a, b), dim=1)
30
+
31
+
32
+ class BlockTypeB(nn.Module):
33
+ def __init__(self, in_c, out_c):
34
+ super(BlockTypeB, self).__init__()
35
+ self.conv1 = nn.Sequential(
36
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
37
+ nn.BatchNorm2d(in_c),
38
+ nn.ReLU()
39
+ )
40
+ self.conv2 = nn.Sequential(
41
+ nn.Conv2d(in_c, out_c, kernel_size=3, padding=1),
42
+ nn.BatchNorm2d(out_c),
43
+ nn.ReLU()
44
+ )
45
+
46
+ def forward(self, x):
47
+ x = self.conv1(x) + x
48
+ x = self.conv2(x)
49
+ return x
50
+
51
+ class BlockTypeC(nn.Module):
52
+ def __init__(self, in_c, out_c):
53
+ super(BlockTypeC, self).__init__()
54
+ self.conv1 = nn.Sequential(
55
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5),
56
+ nn.BatchNorm2d(in_c),
57
+ nn.ReLU()
58
+ )
59
+ self.conv2 = nn.Sequential(
60
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
61
+ nn.BatchNorm2d(in_c),
62
+ nn.ReLU()
63
+ )
64
+ self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1)
65
+
66
+ def forward(self, x):
67
+ x = self.conv1(x)
68
+ x = self.conv2(x)
69
+ x = self.conv3(x)
70
+ return x
71
+
72
+ def _make_divisible(v, divisor, min_value=None):
73
+ """
74
+ This function is taken from the original tf repo.
75
+ It ensures that all layers have a channel number that is divisible by 8
76
+ It can be seen here:
77
+ https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
78
+ :param v:
79
+ :param divisor:
80
+ :param min_value:
81
+ :return:
82
+ """
83
+ if min_value is None:
84
+ min_value = divisor
85
+ new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
86
+ # Make sure that round down does not go down by more than 10%.
87
+ if new_v < 0.9 * v:
88
+ new_v += divisor
89
+ return new_v
90
+
91
+
92
+ class ConvBNReLU(nn.Sequential):
93
+ def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
94
+ self.channel_pad = out_planes - in_planes
95
+ self.stride = stride
96
+ #padding = (kernel_size - 1) // 2
97
+
98
+ # TFLite uses slightly different padding than PyTorch
99
+ if stride == 2:
100
+ padding = 0
101
+ else:
102
+ padding = (kernel_size - 1) // 2
103
+
104
+ super(ConvBNReLU, self).__init__(
105
+ nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
106
+ nn.BatchNorm2d(out_planes),
107
+ nn.ReLU6(inplace=True)
108
+ )
109
+ self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride)
110
+
111
+
112
+ def forward(self, x):
113
+ # TFLite uses different padding
114
+ if self.stride == 2:
115
+ x = F.pad(x, (0, 1, 0, 1), "constant", 0)
116
+ #print(x.shape)
117
+
118
+ for module in self:
119
+ if not isinstance(module, nn.MaxPool2d):
120
+ x = module(x)
121
+ return x
122
+
123
+
124
+ class InvertedResidual(nn.Module):
125
+ def __init__(self, inp, oup, stride, expand_ratio):
126
+ super(InvertedResidual, self).__init__()
127
+ self.stride = stride
128
+ assert stride in [1, 2]
129
+
130
+ hidden_dim = int(round(inp * expand_ratio))
131
+ self.use_res_connect = self.stride == 1 and inp == oup
132
+
133
+ layers = []
134
+ if expand_ratio != 1:
135
+ # pw
136
+ layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
137
+ layers.extend([
138
+ # dw
139
+ ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
140
+ # pw-linear
141
+ nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
142
+ nn.BatchNorm2d(oup),
143
+ ])
144
+ self.conv = nn.Sequential(*layers)
145
+
146
+ def forward(self, x):
147
+ if self.use_res_connect:
148
+ return x + self.conv(x)
149
+ else:
150
+ return self.conv(x)
151
+
152
+
153
+ class MobileNetV2(nn.Module):
154
+ def __init__(self, pretrained=True):
155
+ """
156
+ MobileNet V2 main class
157
+ Args:
158
+ num_classes (int): Number of classes
159
+ width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
160
+ inverted_residual_setting: Network structure
161
+ round_nearest (int): Round the number of channels in each layer to be a multiple of this number
162
+ Set to 1 to turn off rounding
163
+ block: Module specifying inverted residual building block for mobilenet
164
+ """
165
+ super(MobileNetV2, self).__init__()
166
+
167
+ block = InvertedResidual
168
+ input_channel = 32
169
+ last_channel = 1280
170
+ width_mult = 1.0
171
+ round_nearest = 8
172
+
173
+ inverted_residual_setting = [
174
+ # t, c, n, s
175
+ [1, 16, 1, 1],
176
+ [6, 24, 2, 2],
177
+ [6, 32, 3, 2],
178
+ [6, 64, 4, 2],
179
+ [6, 96, 3, 1],
180
+ #[6, 160, 3, 2],
181
+ #[6, 320, 1, 1],
182
+ ]
183
+
184
+ # only check the first element, assuming user knows t,c,n,s are required
185
+ if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
186
+ raise ValueError("inverted_residual_setting should be non-empty "
187
+ "or a 4-element list, got {}".format(inverted_residual_setting))
188
+
189
+ # building first layer
190
+ input_channel = _make_divisible(input_channel * width_mult, round_nearest)
191
+ self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
192
+ features = [ConvBNReLU(4, input_channel, stride=2)]
193
+ # building inverted residual blocks
194
+ for t, c, n, s in inverted_residual_setting:
195
+ output_channel = _make_divisible(c * width_mult, round_nearest)
196
+ for i in range(n):
197
+ stride = s if i == 0 else 1
198
+ features.append(block(input_channel, output_channel, stride, expand_ratio=t))
199
+ input_channel = output_channel
200
+
201
+ self.features = nn.Sequential(*features)
202
+ self.fpn_selected = [1, 3, 6, 10, 13]
203
+ # weight initialization
204
+ for m in self.modules():
205
+ if isinstance(m, nn.Conv2d):
206
+ nn.init.kaiming_normal_(m.weight, mode='fan_out')
207
+ if m.bias is not None:
208
+ nn.init.zeros_(m.bias)
209
+ elif isinstance(m, nn.BatchNorm2d):
210
+ nn.init.ones_(m.weight)
211
+ nn.init.zeros_(m.bias)
212
+ elif isinstance(m, nn.Linear):
213
+ nn.init.normal_(m.weight, 0, 0.01)
214
+ nn.init.zeros_(m.bias)
215
+ if pretrained:
216
+ self._load_pretrained_model()
217
+
218
+ def _forward_impl(self, x):
219
+ # This exists since TorchScript doesn't support inheritance, so the superclass method
220
+ # (this one) needs to have a name other than `forward` that can be accessed in a subclass
221
+ fpn_features = []
222
+ for i, f in enumerate(self.features):
223
+ if i > self.fpn_selected[-1]:
224
+ break
225
+ x = f(x)
226
+ if i in self.fpn_selected:
227
+ fpn_features.append(x)
228
+
229
+ c1, c2, c3, c4, c5 = fpn_features
230
+ return c1, c2, c3, c4, c5
231
+
232
+
233
+ def forward(self, x):
234
+ return self._forward_impl(x)
235
+
236
+ def _load_pretrained_model(self):
237
+ pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')
238
+ model_dict = {}
239
+ state_dict = self.state_dict()
240
+ for k, v in pretrain_dict.items():
241
+ if k in state_dict:
242
+ model_dict[k] = v
243
+ state_dict.update(model_dict)
244
+ self.load_state_dict(state_dict)
245
+
246
+
247
+ class MobileV2_MLSD_Large(nn.Module):
248
+ def __init__(self):
249
+ super(MobileV2_MLSD_Large, self).__init__()
250
+
251
+ self.backbone = MobileNetV2(pretrained=False)
252
+ ## A, B
253
+ self.block15 = BlockTypeA(in_c1= 64, in_c2= 96,
254
+ out_c1= 64, out_c2=64,
255
+ upscale=False)
256
+ self.block16 = BlockTypeB(128, 64)
257
+
258
+ ## A, B
259
+ self.block17 = BlockTypeA(in_c1 = 32, in_c2 = 64,
260
+ out_c1= 64, out_c2= 64)
261
+ self.block18 = BlockTypeB(128, 64)
262
+
263
+ ## A, B
264
+ self.block19 = BlockTypeA(in_c1=24, in_c2=64,
265
+ out_c1=64, out_c2=64)
266
+ self.block20 = BlockTypeB(128, 64)
267
+
268
+ ## A, B, C
269
+ self.block21 = BlockTypeA(in_c1=16, in_c2=64,
270
+ out_c1=64, out_c2=64)
271
+ self.block22 = BlockTypeB(128, 64)
272
+
273
+ self.block23 = BlockTypeC(64, 16)
274
+
275
+ def forward(self, x):
276
+ c1, c2, c3, c4, c5 = self.backbone(x)
277
+
278
+ x = self.block15(c4, c5)
279
+ x = self.block16(x)
280
+
281
+ x = self.block17(c3, x)
282
+ x = self.block18(x)
283
+
284
+ x = self.block19(c2, x)
285
+ x = self.block20(x)
286
+
287
+ x = self.block21(c1, x)
288
+ x = self.block22(x)
289
+ x = self.block23(x)
290
+ x = x[:, 7:, :, :]
291
+
292
+ return x
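A quick shape check for the large model, assuming this `models` directory is importable. The backbone's first convolution expects 4 input channels (RGB plus the constant ones-channel that the caller appends), and for a 512x512 input the head should return a 9-channel map (center, displacement, and auxiliary channels) at half resolution, which is why recovered coordinates are later multiplied by 2:

```python
import torch
from mbv2_mlsd_large import MobileV2_MLSD_Large  # assumes this directory is on PYTHONPATH

model = MobileV2_MLSD_Large().eval()             # backbone is built with pretrained=False, no download
with torch.no_grad():
    tp_map = model(torch.randn(1, 4, 512, 512))  # RGB + ones channel
print(tp_map.shape)                              # expected: torch.Size([1, 9, 256, 256])
```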
text2tex/models/ControlNet/annotator/mlsd/models/mbv2_mlsd_tiny.py ADDED
@@ -0,0 +1,275 @@
1
+ import os
2
+ import sys
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.utils.model_zoo as model_zoo
6
+ from torch.nn import functional as F
7
+
8
+
9
+ class BlockTypeA(nn.Module):
10
+ def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True):
11
+ super(BlockTypeA, self).__init__()
12
+ self.conv1 = nn.Sequential(
13
+ nn.Conv2d(in_c2, out_c2, kernel_size=1),
14
+ nn.BatchNorm2d(out_c2),
15
+ nn.ReLU(inplace=True)
16
+ )
17
+ self.conv2 = nn.Sequential(
18
+ nn.Conv2d(in_c1, out_c1, kernel_size=1),
19
+ nn.BatchNorm2d(out_c1),
20
+ nn.ReLU(inplace=True)
21
+ )
22
+ self.upscale = upscale
23
+
24
+ def forward(self, a, b):
25
+ b = self.conv1(b)
26
+ a = self.conv2(a)
27
+ b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True)
28
+ return torch.cat((a, b), dim=1)
29
+
30
+
31
+ class BlockTypeB(nn.Module):
32
+ def __init__(self, in_c, out_c):
33
+ super(BlockTypeB, self).__init__()
34
+ self.conv1 = nn.Sequential(
35
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
36
+ nn.BatchNorm2d(in_c),
37
+ nn.ReLU()
38
+ )
39
+ self.conv2 = nn.Sequential(
40
+ nn.Conv2d(in_c, out_c, kernel_size=3, padding=1),
41
+ nn.BatchNorm2d(out_c),
42
+ nn.ReLU()
43
+ )
44
+
45
+ def forward(self, x):
46
+ x = self.conv1(x) + x
47
+ x = self.conv2(x)
48
+ return x
49
+
50
+ class BlockTypeC(nn.Module):
51
+ def __init__(self, in_c, out_c):
52
+ super(BlockTypeC, self).__init__()
53
+ self.conv1 = nn.Sequential(
54
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5),
55
+ nn.BatchNorm2d(in_c),
56
+ nn.ReLU()
57
+ )
58
+ self.conv2 = nn.Sequential(
59
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
60
+ nn.BatchNorm2d(in_c),
61
+ nn.ReLU()
62
+ )
63
+ self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1)
64
+
65
+ def forward(self, x):
66
+ x = self.conv1(x)
67
+ x = self.conv2(x)
68
+ x = self.conv3(x)
69
+ return x
70
+
71
+ def _make_divisible(v, divisor, min_value=None):
72
+ """
73
+ This function is taken from the original tf repo.
74
+ It ensures that all layers have a channel number that is divisible by 8
75
+ It can be seen here:
76
+ https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
77
+ :param v:
78
+ :param divisor:
79
+ :param min_value:
80
+ :return:
81
+ """
82
+ if min_value is None:
83
+ min_value = divisor
84
+ new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
85
+ # Make sure that round down does not go down by more than 10%.
86
+ if new_v < 0.9 * v:
87
+ new_v += divisor
88
+ return new_v
89
+
90
+
91
+ class ConvBNReLU(nn.Sequential):
92
+ def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
93
+ self.channel_pad = out_planes - in_planes
94
+ self.stride = stride
95
+ #padding = (kernel_size - 1) // 2
96
+
97
+ # TFLite uses slightly different padding than PyTorch
98
+ if stride == 2:
99
+ padding = 0
100
+ else:
101
+ padding = (kernel_size - 1) // 2
102
+
103
+ super(ConvBNReLU, self).__init__(
104
+ nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
105
+ nn.BatchNorm2d(out_planes),
106
+ nn.ReLU6(inplace=True)
107
+ )
108
+ self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride)
109
+
110
+
111
+ def forward(self, x):
112
+ # TFLite uses different padding
113
+ if self.stride == 2:
114
+ x = F.pad(x, (0, 1, 0, 1), "constant", 0)
115
+ #print(x.shape)
116
+
117
+ for module in self:
118
+ if not isinstance(module, nn.MaxPool2d):
119
+ x = module(x)
120
+ return x
121
+
122
+
123
+ class InvertedResidual(nn.Module):
124
+ def __init__(self, inp, oup, stride, expand_ratio):
125
+ super(InvertedResidual, self).__init__()
126
+ self.stride = stride
127
+ assert stride in [1, 2]
128
+
129
+ hidden_dim = int(round(inp * expand_ratio))
130
+ self.use_res_connect = self.stride == 1 and inp == oup
131
+
132
+ layers = []
133
+ if expand_ratio != 1:
134
+ # pw
135
+ layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
136
+ layers.extend([
137
+ # dw
138
+ ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
139
+ # pw-linear
140
+ nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
141
+ nn.BatchNorm2d(oup),
142
+ ])
143
+ self.conv = nn.Sequential(*layers)
144
+
145
+ def forward(self, x):
146
+ if self.use_res_connect:
147
+ return x + self.conv(x)
148
+ else:
149
+ return self.conv(x)
150
+
151
+
152
+ class MobileNetV2(nn.Module):
153
+ def __init__(self, pretrained=True):
154
+ """
155
+ MobileNet V2 main class
156
+ Args:
157
+ num_classes (int): Number of classes
158
+ width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
159
+ inverted_residual_setting: Network structure
160
+ round_nearest (int): Round the number of channels in each layer to be a multiple of this number
161
+ Set to 1 to turn off rounding
162
+ block: Module specifying inverted residual building block for mobilenet
163
+ """
164
+ super(MobileNetV2, self).__init__()
165
+
166
+ block = InvertedResidual
167
+ input_channel = 32
168
+ last_channel = 1280
169
+ width_mult = 1.0
170
+ round_nearest = 8
171
+
172
+ inverted_residual_setting = [
173
+ # t, c, n, s
174
+ [1, 16, 1, 1],
175
+ [6, 24, 2, 2],
176
+ [6, 32, 3, 2],
177
+ [6, 64, 4, 2],
178
+ #[6, 96, 3, 1],
179
+ #[6, 160, 3, 2],
180
+ #[6, 320, 1, 1],
181
+ ]
182
+
183
+ # only check the first element, assuming user knows t,c,n,s are required
184
+ if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
185
+ raise ValueError("inverted_residual_setting should be non-empty "
186
+ "or a 4-element list, got {}".format(inverted_residual_setting))
187
+
188
+ # building first layer
189
+ input_channel = _make_divisible(input_channel * width_mult, round_nearest)
190
+ self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
191
+ features = [ConvBNReLU(4, input_channel, stride=2)]
192
+ # building inverted residual blocks
193
+ for t, c, n, s in inverted_residual_setting:
194
+ output_channel = _make_divisible(c * width_mult, round_nearest)
195
+ for i in range(n):
196
+ stride = s if i == 0 else 1
197
+ features.append(block(input_channel, output_channel, stride, expand_ratio=t))
198
+ input_channel = output_channel
199
+ self.features = nn.Sequential(*features)
200
+
201
+ self.fpn_selected = [3, 6, 10]
202
+ # weight initialization
203
+ for m in self.modules():
204
+ if isinstance(m, nn.Conv2d):
205
+ nn.init.kaiming_normal_(m.weight, mode='fan_out')
206
+ if m.bias is not None:
207
+ nn.init.zeros_(m.bias)
208
+ elif isinstance(m, nn.BatchNorm2d):
209
+ nn.init.ones_(m.weight)
210
+ nn.init.zeros_(m.bias)
211
+ elif isinstance(m, nn.Linear):
212
+ nn.init.normal_(m.weight, 0, 0.01)
213
+ nn.init.zeros_(m.bias)
214
+
215
+ #if pretrained:
216
+ # self._load_pretrained_model()
217
+
218
+ def _forward_impl(self, x):
219
+ # This exists since TorchScript doesn't support inheritance, so the superclass method
220
+ # (this one) needs to have a name other than `forward` that can be accessed in a subclass
221
+ fpn_features = []
222
+ for i, f in enumerate(self.features):
223
+ if i > self.fpn_selected[-1]:
224
+ break
225
+ x = f(x)
226
+ if i in self.fpn_selected:
227
+ fpn_features.append(x)
228
+
229
+ c2, c3, c4 = fpn_features
230
+ return c2, c3, c4
231
+
232
+
233
+ def forward(self, x):
234
+ return self._forward_impl(x)
235
+
236
+ def _load_pretrained_model(self):
237
+ pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')
238
+ model_dict = {}
239
+ state_dict = self.state_dict()
240
+ for k, v in pretrain_dict.items():
241
+ if k in state_dict:
242
+ model_dict[k] = v
243
+ state_dict.update(model_dict)
244
+ self.load_state_dict(state_dict)
245
+
246
+
247
+ class MobileV2_MLSD_Tiny(nn.Module):
248
+ def __init__(self):
249
+ super(MobileV2_MLSD_Tiny, self).__init__()
250
+
251
+ self.backbone = MobileNetV2(pretrained=True)
252
+
253
+ self.block12 = BlockTypeA(in_c1= 32, in_c2= 64,
254
+ out_c1= 64, out_c2=64)
255
+ self.block13 = BlockTypeB(128, 64)
256
+
257
+ self.block14 = BlockTypeA(in_c1 = 24, in_c2 = 64,
258
+ out_c1= 32, out_c2= 32)
259
+ self.block15 = BlockTypeB(64, 64)
260
+
261
+ self.block16 = BlockTypeC(64, 16)
262
+
263
+ def forward(self, x):
264
+ c2, c3, c4 = self.backbone(x)
265
+
266
+ x = self.block12(c3, c4)
267
+ x = self.block13(x)
268
+ x = self.block14(c2, x)
269
+ x = self.block15(x)
270
+ x = self.block16(x)
271
+ x = x[:, 7:, :, :]
272
+ #print(x.shape)
273
+ x = F.interpolate(x, scale_factor=2.0, mode='bilinear', align_corners=True)
274
+
275
+ return x
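Both MLSD backbones round channel counts with `_make_divisible`, whose docstring above states the rule: snap to the nearest multiple of `divisor`, but never drop more than 10% below the requested width. A small worked example with arbitrarily chosen values (importing the helper from the tiny model file, assuming it is on PYTHONPATH):

```python
from mbv2_mlsd_tiny import _make_divisible  # the same helper exists in both model files

print(_make_divisible(32, 8))   # 32 -> already a multiple of 8
print(_make_divisible(37, 8))   # 40 -> rounds up to the nearest multiple of 8
print(_make_divisible(17, 8))   # 16 -> rounds down; 16 >= 0.9 * 17, so no 10% bump needed
```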
text2tex/models/ControlNet/annotator/mlsd/utils.py ADDED
@@ -0,0 +1,580 @@
1
+ '''
2
+ modified by lihaoweicv
3
+ pytorch version
4
+ '''
5
+
6
+ '''
7
+ M-LSD
8
+ Copyright 2021-present NAVER Corp.
9
+ Apache License v2.0
10
+ '''
11
+
12
+ import os
13
+ import numpy as np
14
+ import cv2
15
+ import torch
16
+ from torch.nn import functional as F
17
+
18
+
19
+ def deccode_output_score_and_ptss(tpMap, topk_n = 200, ksize = 5):
20
+ '''
21
+ tpMap:
22
+ center: tpMap[1, 0, :, :]
23
+ displacement: tpMap[1, 1:5, :, :]
24
+ '''
25
+ b, c, h, w = tpMap.shape
26
+ assert b==1, 'only support bsize==1'
27
+ displacement = tpMap[:, 1:5, :, :][0]
28
+ center = tpMap[:, 0, :, :]
29
+ heat = torch.sigmoid(center)
30
+ hmax = F.max_pool2d( heat, (ksize, ksize), stride=1, padding=(ksize-1)//2)
31
+ keep = (hmax == heat).float()
32
+ heat = heat * keep
33
+ heat = heat.reshape(-1, )
34
+
35
+ scores, indices = torch.topk(heat, topk_n, dim=-1, largest=True)
36
+ yy = torch.floor_divide(indices, w).unsqueeze(-1)
37
+ xx = torch.fmod(indices, w).unsqueeze(-1)
38
+ ptss = torch.cat((yy, xx),dim=-1)
39
+
40
+ ptss = ptss.detach().cpu().numpy()
41
+ scores = scores.detach().cpu().numpy()
42
+ displacement = displacement.detach().cpu().numpy()
43
+ displacement = displacement.transpose((1,2,0))
44
+ return ptss, scores, displacement
45
+
46
+
47
+ def pred_lines(image, model,
48
+ input_shape=[512, 512],
49
+ score_thr=0.10,
50
+ dist_thr=20.0):
51
+ h, w, _ = image.shape
52
+ h_ratio, w_ratio = [h / input_shape[0], w / input_shape[1]]
53
+
54
+ resized_image = np.concatenate([cv2.resize(image, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA),
55
+ np.ones([input_shape[0], input_shape[1], 1])], axis=-1)
56
+
57
+ resized_image = resized_image.transpose((2,0,1))
58
+ batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
59
+ batch_image = (batch_image / 127.5) - 1.0
60
+
61
+ batch_image = torch.from_numpy(batch_image).float().cuda()
62
+ outputs = model(batch_image)
63
+ pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
64
+ start = vmap[:, :, :2]
65
+ end = vmap[:, :, 2:]
66
+ dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))
67
+
68
+ segments_list = []
69
+ for center, score in zip(pts, pts_score):
70
+ y, x = center
71
+ distance = dist_map[y, x]
72
+ if score > score_thr and distance > dist_thr:
73
+ disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
74
+ x_start = x + disp_x_start
75
+ y_start = y + disp_y_start
76
+ x_end = x + disp_x_end
77
+ y_end = y + disp_y_end
78
+ segments_list.append([x_start, y_start, x_end, y_end])
79
+
80
+ lines = 2 * np.array(segments_list) # 256 > 512
81
+ lines[:, 0] = lines[:, 0] * w_ratio
82
+ lines[:, 1] = lines[:, 1] * h_ratio
83
+ lines[:, 2] = lines[:, 2] * w_ratio
84
+ lines[:, 3] = lines[:, 3] * h_ratio
85
+
86
+ return lines
87
+
88
+
89
+ def pred_squares(image,
90
+ model,
91
+ input_shape=[512, 512],
92
+ params={'score': 0.06,
93
+ 'outside_ratio': 0.28,
94
+ 'inside_ratio': 0.45,
95
+ 'w_overlap': 0.0,
96
+ 'w_degree': 1.95,
97
+ 'w_length': 0.0,
98
+ 'w_area': 1.86,
99
+ 'w_center': 0.14}):
100
+ '''
101
+ shape = [height, width]
102
+ '''
103
+ h, w, _ = image.shape
104
+ original_shape = [h, w]
105
+
106
+ resized_image = np.concatenate([cv2.resize(image, (input_shape[0], input_shape[1]), interpolation=cv2.INTER_AREA),
107
+ np.ones([input_shape[0], input_shape[1], 1])], axis=-1)
108
+ resized_image = resized_image.transpose((2, 0, 1))
109
+ batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
110
+ batch_image = (batch_image / 127.5) - 1.0
111
+
112
+ batch_image = torch.from_numpy(batch_image).float().cuda()
113
+ outputs = model(batch_image)
114
+
115
+ pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
116
+ start = vmap[:, :, :2] # (x, y)
117
+ end = vmap[:, :, 2:] # (x, y)
118
+ dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))
119
+
120
+ junc_list = []
121
+ segments_list = []
122
+ for junc, score in zip(pts, pts_score):
123
+ y, x = junc
124
+ distance = dist_map[y, x]
125
+ if score > params['score'] and distance > 20.0:
126
+ junc_list.append([x, y])
127
+ disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
128
+ d_arrow = 1.0
129
+ x_start = x + d_arrow * disp_x_start
130
+ y_start = y + d_arrow * disp_y_start
131
+ x_end = x + d_arrow * disp_x_end
132
+ y_end = y + d_arrow * disp_y_end
133
+ segments_list.append([x_start, y_start, x_end, y_end])
134
+
135
+ segments = np.array(segments_list)
136
+
137
+ ####### post processing for squares
138
+ # 1. get unique lines
139
+ point = np.array([[0, 0]])
140
+ point = point[0]
141
+ start = segments[:, :2]
142
+ end = segments[:, 2:]
143
+ diff = start - end
144
+ a = diff[:, 1]
145
+ b = -diff[:, 0]
146
+ c = a * start[:, 0] + b * start[:, 1]
147
+
148
+ d = np.abs(a * point[0] + b * point[1] - c) / np.sqrt(a ** 2 + b ** 2 + 1e-10)
149
+ theta = np.arctan2(diff[:, 0], diff[:, 1]) * 180 / np.pi
150
+ theta[theta < 0.0] += 180
151
+ hough = np.concatenate([d[:, None], theta[:, None]], axis=-1)
152
+
153
+ d_quant = 1
154
+ theta_quant = 2
155
+ hough[:, 0] //= d_quant
156
+ hough[:, 1] //= theta_quant
157
+ _, indices, counts = np.unique(hough, axis=0, return_index=True, return_counts=True)
158
+
159
+ acc_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='float32')
160
+ idx_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='int32') - 1
161
+ yx_indices = hough[indices, :].astype('int32')
162
+ acc_map[yx_indices[:, 0], yx_indices[:, 1]] = counts
163
+ idx_map[yx_indices[:, 0], yx_indices[:, 1]] = indices
164
+
165
+ acc_map_np = acc_map
166
+ # acc_map = acc_map[None, :, :, None]
167
+ #
168
+ # ### fast suppression using tensorflow op
169
+ # acc_map = tf.constant(acc_map, dtype=tf.float32)
170
+ # max_acc_map = tf.keras.layers.MaxPool2D(pool_size=(5, 5), strides=1, padding='same')(acc_map)
171
+ # acc_map = acc_map * tf.cast(tf.math.equal(acc_map, max_acc_map), tf.float32)
172
+ # flatten_acc_map = tf.reshape(acc_map, [1, -1])
173
+ # topk_values, topk_indices = tf.math.top_k(flatten_acc_map, k=len(pts))
174
+ # _, h, w, _ = acc_map.shape
175
+ # y = tf.expand_dims(topk_indices // w, axis=-1)
176
+ # x = tf.expand_dims(topk_indices % w, axis=-1)
177
+ # yx = tf.concat([y, x], axis=-1)
178
+
179
+ ### fast suppression using pytorch op
180
+ acc_map = torch.from_numpy(acc_map_np).unsqueeze(0).unsqueeze(0)
181
+ _,_, h, w = acc_map.shape
182
+ max_acc_map = F.max_pool2d(acc_map,kernel_size=5, stride=1, padding=2)
183
+ acc_map = acc_map * ( (acc_map == max_acc_map).float() )
184
+ flatten_acc_map = acc_map.reshape([-1, ])
185
+
186
+ scores, indices = torch.topk(flatten_acc_map, len(pts), dim=-1, largest=True)
187
+ yy = torch.div(indices, w, rounding_mode='floor').unsqueeze(-1)
188
+ xx = torch.fmod(indices, w).unsqueeze(-1)
189
+ yx = torch.cat((yy, xx), dim=-1)
190
+
191
+ yx = yx.detach().cpu().numpy()
192
+
193
+ topk_values = scores.detach().cpu().numpy()
194
+ indices = idx_map[yx[:, 0], yx[:, 1]]
195
+ basis = 5 // 2
196
+
197
+ merged_segments = []
198
+ for yx_pt, max_indice, value in zip(yx, indices, topk_values):
199
+ y, x = yx_pt
200
+ if max_indice == -1 or value == 0:
201
+ continue
202
+ segment_list = []
203
+ for y_offset in range(-basis, basis + 1):
204
+ for x_offset in range(-basis, basis + 1):
205
+ indice = idx_map[y + y_offset, x + x_offset]
206
+ cnt = int(acc_map_np[y + y_offset, x + x_offset])
207
+ if indice != -1:
208
+ segment_list.append(segments[indice])
209
+ if cnt > 1:
210
+ check_cnt = 1
211
+ current_hough = hough[indice]
212
+ for new_indice, new_hough in enumerate(hough):
213
+ if (current_hough == new_hough).all() and indice != new_indice:
214
+ segment_list.append(segments[new_indice])
215
+ check_cnt += 1
216
+ if check_cnt == cnt:
217
+ break
218
+ group_segments = np.array(segment_list).reshape([-1, 2])
219
+ sorted_group_segments = np.sort(group_segments, axis=0)
220
+ x_min, y_min = sorted_group_segments[0, :]
221
+ x_max, y_max = sorted_group_segments[-1, :]
222
+
223
+ deg = theta[max_indice]
224
+ if deg >= 90:
225
+ merged_segments.append([x_min, y_max, x_max, y_min])
226
+ else:
227
+ merged_segments.append([x_min, y_min, x_max, y_max])
228
+
229
+ # 2. get intersections
230
+ new_segments = np.array(merged_segments) # (x1, y1, x2, y2)
231
+ start = new_segments[:, :2] # (x1, y1)
232
+ end = new_segments[:, 2:] # (x2, y2)
233
+ new_centers = (start + end) / 2.0
234
+ diff = start - end
235
+ dist_segments = np.sqrt(np.sum(diff ** 2, axis=-1))
236
+
237
+ # ax + by = c
238
+ a = diff[:, 1]
239
+ b = -diff[:, 0]
240
+ c = a * start[:, 0] + b * start[:, 1]
241
+ pre_det = a[:, None] * b[None, :]
242
+ det = pre_det - np.transpose(pre_det)
243
+
244
+ pre_inter_y = a[:, None] * c[None, :]
245
+ inter_y = (pre_inter_y - np.transpose(pre_inter_y)) / (det + 1e-10)
246
+ pre_inter_x = c[:, None] * b[None, :]
247
+ inter_x = (pre_inter_x - np.transpose(pre_inter_x)) / (det + 1e-10)
248
+ inter_pts = np.concatenate([inter_x[:, :, None], inter_y[:, :, None]], axis=-1).astype('int32')
249
+
250
+ # 3. get corner information
251
+ # 3.1 get distance
252
+ '''
253
+ dist_segments:
254
+ | dist(0), dist(1), dist(2), ...|
255
+ dist_inter_to_segment1:
256
+ | dist(inter,0), dist(inter,0), dist(inter,0), ... |
257
+ | dist(inter,1), dist(inter,1), dist(inter,1), ... |
258
+ ...
259
+ dist_inter_to_semgnet2:
260
+ | dist(inter,0), dist(inter,1), dist(inter,2), ... |
261
+ | dist(inter,0), dist(inter,1), dist(inter,2), ... |
262
+ ...
263
+ '''
264
+
265
+ dist_inter_to_segment1_start = np.sqrt(
266
+ np.sum(((inter_pts - start[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
267
+ dist_inter_to_segment1_end = np.sqrt(
268
+ np.sum(((inter_pts - end[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
269
+ dist_inter_to_segment2_start = np.sqrt(
270
+ np.sum(((inter_pts - start[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
271
+ dist_inter_to_segment2_end = np.sqrt(
272
+ np.sum(((inter_pts - end[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
273
+
274
+ # sort ascending
275
+ dist_inter_to_segment1 = np.sort(
276
+ np.concatenate([dist_inter_to_segment1_start, dist_inter_to_segment1_end], axis=-1),
277
+ axis=-1) # [n_batch, n_batch, 2]
278
+ dist_inter_to_segment2 = np.sort(
279
+ np.concatenate([dist_inter_to_segment2_start, dist_inter_to_segment2_end], axis=-1),
280
+ axis=-1) # [n_batch, n_batch, 2]
281
+
282
+ # 3.2 get degree
283
+ inter_to_start = new_centers[:, None, :] - inter_pts
284
+ deg_inter_to_start = np.arctan2(inter_to_start[:, :, 1], inter_to_start[:, :, 0]) * 180 / np.pi
285
+ deg_inter_to_start[deg_inter_to_start < 0.0] += 360
286
+ inter_to_end = new_centers[None, :, :] - inter_pts
287
+ deg_inter_to_end = np.arctan2(inter_to_end[:, :, 1], inter_to_end[:, :, 0]) * 180 / np.pi
288
+ deg_inter_to_end[deg_inter_to_end < 0.0] += 360
289
+
290
+ '''
291
+ B -- G
292
+ | |
293
+ C -- R
294
+ B : blue / G: green / C: cyan / R: red
295
+
296
+ 0 -- 1
297
+ | |
298
+ 3 -- 2
299
+ '''
300
+ # rename variables
301
+ deg1_map, deg2_map = deg_inter_to_start, deg_inter_to_end
302
+ # sort deg ascending
303
+ deg_sort = np.sort(np.concatenate([deg1_map[:, :, None], deg2_map[:, :, None]], axis=-1), axis=-1)
304
+
305
+ deg_diff_map = np.abs(deg1_map - deg2_map)
306
+ # we only consider the smallest degree of intersect
307
+ deg_diff_map[deg_diff_map > 180] = 360 - deg_diff_map[deg_diff_map > 180]
308
+
309
+ # define available degree range
310
+ deg_range = [60, 120]
311
+
312
+ corner_dict = {corner_info: [] for corner_info in range(4)}
313
+ inter_points = []
314
+ for i in range(inter_pts.shape[0]):
315
+ for j in range(i + 1, inter_pts.shape[1]):
316
+ # i, j > line index, always i < j
317
+ x, y = inter_pts[i, j, :]
318
+ deg1, deg2 = deg_sort[i, j, :]
319
+ deg_diff = deg_diff_map[i, j]
320
+
321
+ check_degree = deg_diff > deg_range[0] and deg_diff < deg_range[1]
322
+
323
+ outside_ratio = params['outside_ratio'] # over ratio >>> drop it!
324
+ inside_ratio = params['inside_ratio'] # over ratio >>> drop it!
325
+ check_distance = ((dist_inter_to_segment1[i, j, 1] >= dist_segments[i] and \
326
+ dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * outside_ratio) or \
327
+ (dist_inter_to_segment1[i, j, 1] <= dist_segments[i] and \
328
+ dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * inside_ratio)) and \
329
+ ((dist_inter_to_segment2[i, j, 1] >= dist_segments[j] and \
330
+ dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * outside_ratio) or \
331
+ (dist_inter_to_segment2[i, j, 1] <= dist_segments[j] and \
332
+ dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * inside_ratio))
333
+
334
+ if check_degree and check_distance:
335
+ corner_info = None
336
+
337
+ if (deg1 >= 0 and deg1 <= 45 and deg2 >= 45 and deg2 <= 120) or \
338
+ (deg2 >= 315 and deg1 >= 45 and deg1 <= 120):
339
+ corner_info, color_info = 0, 'blue'
340
+ elif (deg1 >= 45 and deg1 <= 125 and deg2 >= 125 and deg2 <= 225):
341
+ corner_info, color_info = 1, 'green'
342
+ elif (deg1 >= 125 and deg1 <= 225 and deg2 >= 225 and deg2 <= 315):
343
+ corner_info, color_info = 2, 'black'
344
+ elif (deg1 >= 0 and deg1 <= 45 and deg2 >= 225 and deg2 <= 315) or \
345
+ (deg2 >= 315 and deg1 >= 225 and deg1 <= 315):
346
+ corner_info, color_info = 3, 'cyan'
347
+ else:
348
+ corner_info, color_info = 4, 'red' # we don't use it
349
+ continue
350
+
351
+ corner_dict[corner_info].append([x, y, i, j])
352
+ inter_points.append([x, y])
353
+
354
+ square_list = []
355
+ connect_list = []
356
+ segments_list = []
357
+ for corner0 in corner_dict[0]:
358
+ for corner1 in corner_dict[1]:
359
+ connect01 = False
360
+ for corner0_line in corner0[2:]:
361
+ if corner0_line in corner1[2:]:
362
+ connect01 = True
363
+ break
364
+ if connect01:
365
+ for corner2 in corner_dict[2]:
366
+ connect12 = False
367
+ for corner1_line in corner1[2:]:
368
+ if corner1_line in corner2[2:]:
369
+ connect12 = True
370
+ break
371
+ if connect12:
372
+ for corner3 in corner_dict[3]:
373
+ connect23 = False
374
+ for corner2_line in corner2[2:]:
375
+ if corner2_line in corner3[2:]:
376
+ connect23 = True
377
+ break
378
+ if connect23:
379
+ for corner3_line in corner3[2:]:
380
+ if corner3_line in corner0[2:]:
381
+ # SQUARE!!!
382
+ '''
383
+ 0 -- 1
384
+ | |
385
+ 3 -- 2
386
+ square_list:
387
+ order: 0 > 1 > 2 > 3
388
+ | x0, y0, x1, y1, x2, y2, x3, y3 |
389
+ | x0, y0, x1, y1, x2, y2, x3, y3 |
390
+ ...
391
+ connect_list:
392
+ order: 01 > 12 > 23 > 30
393
+ | line_idx01, line_idx12, line_idx23, line_idx30 |
394
+ | line_idx01, line_idx12, line_idx23, line_idx30 |
395
+ ...
396
+ segments_list:
397
+ order: 0 > 1 > 2 > 3
398
+ | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
399
+ | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
400
+ ...
401
+ '''
402
+ square_list.append(corner0[:2] + corner1[:2] + corner2[:2] + corner3[:2])
403
+ connect_list.append([corner0_line, corner1_line, corner2_line, corner3_line])
404
+ segments_list.append(corner0[2:] + corner1[2:] + corner2[2:] + corner3[2:])
405
+
406
+ def check_outside_inside(segments_info, connect_idx):
407
+ # return 'outside or inside', min distance, cover_param, peri_param
408
+ if connect_idx == segments_info[0]:
409
+ check_dist_mat = dist_inter_to_segment1
410
+ else:
411
+ check_dist_mat = dist_inter_to_segment2
412
+
413
+ i, j = segments_info
414
+ min_dist, max_dist = check_dist_mat[i, j, :]
415
+ connect_dist = dist_segments[connect_idx]
416
+ if max_dist > connect_dist:
417
+ return 'outside', min_dist, 0, 1
418
+ else:
419
+ return 'inside', min_dist, -1, -1
420
+
421
+ top_square = None
422
+
423
+ try:
424
+ map_size = input_shape[0] / 2
425
+ squares = np.array(square_list).reshape([-1, 4, 2])
426
+ score_array = []
427
+ connect_array = np.array(connect_list)
428
+ segments_array = np.array(segments_list).reshape([-1, 4, 2])
429
+
430
+ # get degree of corners:
431
+ squares_rollup = np.roll(squares, 1, axis=1)
432
+ squares_rolldown = np.roll(squares, -1, axis=1)
433
+ vec1 = squares_rollup - squares
434
+ normalized_vec1 = vec1 / (np.linalg.norm(vec1, axis=-1, keepdims=True) + 1e-10)
435
+ vec2 = squares_rolldown - squares
436
+ normalized_vec2 = vec2 / (np.linalg.norm(vec2, axis=-1, keepdims=True) + 1e-10)
437
+ inner_products = np.sum(normalized_vec1 * normalized_vec2, axis=-1) # [n_squares, 4]
438
+ squares_degree = np.arccos(inner_products) * 180 / np.pi # [n_squares, 4]
439
+
440
+ # get square score
441
+ overlap_scores = []
442
+ degree_scores = []
443
+ length_scores = []
444
+
445
+ for connects, segments, square, degree in zip(connect_array, segments_array, squares, squares_degree):
446
+ '''
447
+ 0 -- 1
448
+ | |
449
+ 3 -- 2
450
+
451
+ # segments: [4, 2]
452
+ # connects: [4]
453
+ '''
454
+
455
+ ###################################### OVERLAP SCORES
456
+ cover = 0
457
+ perimeter = 0
458
+ # check 0 > 1 > 2 > 3
459
+ square_length = []
460
+
461
+ for start_idx in range(4):
462
+ end_idx = (start_idx + 1) % 4
463
+
464
+ connect_idx = connects[start_idx] # segment idx of segment01
465
+ start_segments = segments[start_idx]
466
+ end_segments = segments[end_idx]
467
+
468
+ start_point = square[start_idx]
469
+ end_point = square[end_idx]
470
+
471
+ # check whether outside or inside
472
+ start_position, start_min, start_cover_param, start_peri_param = check_outside_inside(start_segments,
473
+ connect_idx)
474
+ end_position, end_min, end_cover_param, end_peri_param = check_outside_inside(end_segments, connect_idx)
475
+
476
+ cover += dist_segments[connect_idx] + start_cover_param * start_min + end_cover_param * end_min
477
+ perimeter += dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min
478
+
479
+ square_length.append(
480
+ dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min)
481
+
482
+ overlap_scores.append(cover / perimeter)
483
+ ######################################
484
+ ###################################### DEGREE SCORES
485
+ '''
486
+ deg0 vs deg2
487
+ deg1 vs deg3
488
+ '''
489
+ deg0, deg1, deg2, deg3 = degree
490
+ deg_ratio1 = deg0 / deg2
491
+ if deg_ratio1 > 1.0:
492
+ deg_ratio1 = 1 / deg_ratio1
493
+ deg_ratio2 = deg1 / deg3
494
+ if deg_ratio2 > 1.0:
495
+ deg_ratio2 = 1 / deg_ratio2
496
+ degree_scores.append((deg_ratio1 + deg_ratio2) / 2)
497
+ ######################################
498
+ ###################################### LENGTH SCORES
499
+ '''
500
+ len0 vs len2
501
+ len1 vs len3
502
+ '''
503
+ len0, len1, len2, len3 = square_length
504
+ len_ratio1 = len0 / len2 if len2 > len0 else len2 / len0
505
+ len_ratio2 = len1 / len3 if len3 > len1 else len3 / len1
506
+ length_scores.append((len_ratio1 + len_ratio2) / 2)
507
+
508
+ ######################################
509
+
510
+ overlap_scores = np.array(overlap_scores)
511
+ overlap_scores /= np.max(overlap_scores)
512
+
513
+ degree_scores = np.array(degree_scores)
514
+ # degree_scores /= np.max(degree_scores)
515
+
516
+ length_scores = np.array(length_scores)
517
+
518
+ ###################################### AREA SCORES
519
+ area_scores = np.reshape(squares, [-1, 4, 2])
520
+ area_x = area_scores[:, :, 0]
521
+ area_y = area_scores[:, :, 1]
522
+ correction = area_x[:, -1] * area_y[:, 0] - area_y[:, -1] * area_x[:, 0]
523
+ area_scores = np.sum(area_x[:, :-1] * area_y[:, 1:], axis=-1) - np.sum(area_y[:, :-1] * area_x[:, 1:], axis=-1)
524
+ area_scores = 0.5 * np.abs(area_scores + correction)
525
+ area_scores /= (map_size * map_size) # np.max(area_scores)
526
+ ######################################
527
+
528
+ ###################################### CENTER SCORES
529
+ centers = np.array([[256 // 2, 256 // 2]], dtype='float32') # [1, 2]
530
+ # squares: [n, 4, 2]
531
+ square_centers = np.mean(squares, axis=1) # [n, 2]
532
+ center2center = np.sqrt(np.sum((centers - square_centers) ** 2))
533
+ center_scores = center2center / (map_size / np.sqrt(2.0))
534
+
535
+ '''
536
+ score_w = [overlap, degree, area, center, length]
537
+ '''
538
+ score_w = [0.0, 1.0, 10.0, 0.5, 1.0]
539
+ score_array = params['w_overlap'] * overlap_scores \
540
+ + params['w_degree'] * degree_scores \
541
+ + params['w_area'] * area_scores \
542
+ - params['w_center'] * center_scores \
543
+ + params['w_length'] * length_scores
544
+
545
+ best_square = []
546
+
547
+ sorted_idx = np.argsort(score_array)[::-1]
548
+ score_array = score_array[sorted_idx]
549
+ squares = squares[sorted_idx]
550
+
551
+ except Exception as e:
552
+ pass
553
+
554
+ '''return list
555
+ merged_lines, squares, scores
556
+ '''
557
+
558
+ try:
559
+ new_segments[:, 0] = new_segments[:, 0] * 2 / input_shape[1] * original_shape[1]
560
+ new_segments[:, 1] = new_segments[:, 1] * 2 / input_shape[0] * original_shape[0]
561
+ new_segments[:, 2] = new_segments[:, 2] * 2 / input_shape[1] * original_shape[1]
562
+ new_segments[:, 3] = new_segments[:, 3] * 2 / input_shape[0] * original_shape[0]
563
+ except:
564
+ new_segments = []
565
+
566
+ try:
567
+ squares[:, :, 0] = squares[:, :, 0] * 2 / input_shape[1] * original_shape[1]
568
+ squares[:, :, 1] = squares[:, :, 1] * 2 / input_shape[0] * original_shape[0]
569
+ except:
570
+ squares = []
571
+ score_array = []
572
+
573
+ try:
574
+ inter_points = np.array(inter_points)
575
+ inter_points[:, 0] = inter_points[:, 0] * 2 / input_shape[1] * original_shape[1]
576
+ inter_points[:, 1] = inter_points[:, 1] * 2 / input_shape[0] * original_shape[0]
577
+ except:
578
+ inter_points = []
579
+
580
+ return new_segments, squares, score_array, inter_points
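The square-detection path above first collapses near-duplicate segments by mapping each one to a quantized (distance-to-origin, angle) pair, a coarse Hough-style parameterization, before intersecting the survivors. A minimal numpy sketch of just that mapping, on two hand-picked segments:

```python
import numpy as np

# two hypothetical segments in (x1, y1, x2, y2) form: one horizontal, one vertical
segments = np.array([[10.0, 20.0, 110.0, 20.0],
                     [30.0, 40.0, 30.0, 140.0]])
start, end = segments[:, :2], segments[:, 2:]
diff = start - end
a, b = diff[:, 1], -diff[:, 0]                    # line a*x + b*y = c through both endpoints
c = a * start[:, 0] + b * start[:, 1]
d = np.abs(c) / np.sqrt(a ** 2 + b ** 2 + 1e-10)  # distance from the origin to the line
theta = np.arctan2(diff[:, 0], diff[:, 1]) * 180 / np.pi
theta[theta < 0.0] += 180                         # angle bin, same convention as above
print(np.stack([d // 1, theta // 2], axis=-1))    # quantized bins -> [[20., 45.], [30., 90.]]
```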
text2tex/models/ControlNet/annotator/openpose/__init__.py ADDED
@@ -0,0 +1,29 @@
1
+ import os
2
+ os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
3
+
4
+ import torch
5
+ import numpy as np
6
+ from . import util
7
+ from .body import Body
8
+ from .hand import Hand
9
+
10
+ body_estimation = Body('./annotator/ckpts/body_pose_model.pth')
11
+ hand_estimation = Hand('./annotator/ckpts/hand_pose_model.pth')
12
+
13
+
14
+ def apply_openpose(oriImg, hand=False):
15
+ oriImg = oriImg[:, :, ::-1].copy()
16
+ with torch.no_grad():
17
+ candidate, subset = body_estimation(oriImg)
18
+ canvas = np.zeros_like(oriImg)
19
+ canvas = util.draw_bodypose(canvas, candidate, subset)
20
+ if hand:
21
+ hands_list = util.handDetect(candidate, subset, oriImg)
22
+ all_hand_peaks = []
23
+ for x, y, w, is_left in hands_list:
24
+ peaks = hand_estimation(oriImg[y:y+w, x:x+w, :])
25
+ peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
26
+ peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
27
+ all_hand_peaks.append(peaks)
28
+ canvas = util.draw_handpose(canvas, all_hand_peaks)
29
+ return canvas, dict(candidate=candidate.tolist(), subset=subset.tolist())
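A hedged usage sketch for `apply_openpose`, assuming the body (and, if `hand=True`, hand) checkpoints are already in `annotator/ckpts/` since both estimators are instantiated at import time, and using a placeholder image path:

```python
import cv2
from annotator.openpose import apply_openpose  # loads body_pose_model.pth / hand_pose_model.pth

img = cv2.imread("person.jpg")                  # hypothetical input, read as BGR by OpenCV
canvas, pose = apply_openpose(img, hand=False)  # skeleton rendering + keypoints as plain lists
cv2.imwrite("pose_vis.png", canvas)
```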
text2tex/models/ControlNet/annotator/openpose/body.py ADDED
@@ -0,0 +1,219 @@
1
+ import cv2
2
+ import numpy as np
3
+ import math
4
+ import time
5
+ from scipy.ndimage.filters import gaussian_filter
6
+ import matplotlib.pyplot as plt
7
+ import matplotlib
8
+ import torch
9
+ from torchvision import transforms
10
+
11
+ from . import util
12
+ from .model import bodypose_model
13
+
14
+ class Body(object):
15
+ def __init__(self, model_path):
16
+ self.model = bodypose_model()
17
+ if torch.cuda.is_available():
18
+ self.model = self.model.cuda()
19
+ print('cuda')
20
+ model_dict = util.transfer(self.model, torch.load(model_path))
21
+ self.model.load_state_dict(model_dict)
22
+ self.model.eval()
23
+
24
+ def __call__(self, oriImg):
25
+ # scale_search = [0.5, 1.0, 1.5, 2.0]
26
+ scale_search = [0.5]
27
+ boxsize = 368
28
+ stride = 8
29
+ padValue = 128
30
+ thre1 = 0.1
31
+ thre2 = 0.05
32
+ multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
33
+ heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
34
+ paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
35
+
36
+ for m in range(len(multiplier)):
37
+ scale = multiplier[m]
38
+ imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
39
+ imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
40
+ im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
41
+ im = np.ascontiguousarray(im)
42
+
43
+ data = torch.from_numpy(im).float()
44
+ if torch.cuda.is_available():
45
+ data = data.cuda()
46
+ # data = data.permute([2, 0, 1]).unsqueeze(0).float()
47
+ with torch.no_grad():
48
+ Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
49
+ Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
50
+ Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
51
+
52
+ # extract outputs, resize, and remove padding
53
+ # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps
54
+ heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0)) # output 1 is heatmaps
55
+ heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
56
+ heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
57
+ heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
58
+
59
+ # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs
60
+ paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0)) # output 0 is PAFs
61
+ paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
62
+ paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
63
+ paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
64
+
65
+ heatmap_avg += heatmap_avg + heatmap / len(multiplier)
66
+ paf_avg += + paf / len(multiplier)
67
+
68
+ all_peaks = []
69
+ peak_counter = 0
70
+
71
+ for part in range(18):
72
+ map_ori = heatmap_avg[:, :, part]
73
+ one_heatmap = gaussian_filter(map_ori, sigma=3)
74
+
75
+ map_left = np.zeros(one_heatmap.shape)
76
+ map_left[1:, :] = one_heatmap[:-1, :]
77
+ map_right = np.zeros(one_heatmap.shape)
78
+ map_right[:-1, :] = one_heatmap[1:, :]
79
+ map_up = np.zeros(one_heatmap.shape)
80
+ map_up[:, 1:] = one_heatmap[:, :-1]
81
+ map_down = np.zeros(one_heatmap.shape)
82
+ map_down[:, :-1] = one_heatmap[:, 1:]
83
+
84
+ peaks_binary = np.logical_and.reduce(
85
+ (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1))
86
+ peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
87
+ peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
88
+ peak_id = range(peak_counter, peak_counter + len(peaks))
89
+ peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))]
90
+
91
+ all_peaks.append(peaks_with_score_and_id)
92
+ peak_counter += len(peaks)
93
+
94
+ # find connection in the specified sequence, center 29 is in the position 15
95
+ limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
96
+ [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
97
+ [1, 16], [16, 18], [3, 17], [6, 18]]
98
+ # the middle joints heatmap correpondence
99
+ mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \
100
+ [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \
101
+ [55, 56], [37, 38], [45, 46]]
102
+
103
+ connection_all = []
104
+ special_k = []
105
+ mid_num = 10
106
+
107
+ for k in range(len(mapIdx)):
108
+ score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
109
+ candA = all_peaks[limbSeq[k][0] - 1]
110
+ candB = all_peaks[limbSeq[k][1] - 1]
111
+ nA = len(candA)
112
+ nB = len(candB)
113
+ indexA, indexB = limbSeq[k]
114
+ if (nA != 0 and nB != 0):
115
+ connection_candidate = []
116
+ for i in range(nA):
117
+ for j in range(nB):
118
+ vec = np.subtract(candB[j][:2], candA[i][:2])
119
+ norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
120
+ norm = max(0.001, norm)
121
+ vec = np.divide(vec, norm)
122
+
123
+ startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
124
+ np.linspace(candA[i][1], candB[j][1], num=mid_num)))
125
+
126
+ vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
127
+ for I in range(len(startend))])
128
+ vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
129
+ for I in range(len(startend))])
130
+
131
+ score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
132
+ score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
133
+ 0.5 * oriImg.shape[0] / norm - 1, 0)
134
+ criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts)
135
+ criterion2 = score_with_dist_prior > 0
136
+ if criterion1 and criterion2:
137
+ connection_candidate.append(
138
+ [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])
139
+
140
+ connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
141
+ connection = np.zeros((0, 5))
142
+ for c in range(len(connection_candidate)):
143
+ i, j, s = connection_candidate[c][0:3]
144
+ if (i not in connection[:, 3] and j not in connection[:, 4]):
145
+ connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
146
+ if (len(connection) >= min(nA, nB)):
147
+ break
148
+
149
+ connection_all.append(connection)
150
+ else:
151
+ special_k.append(k)
152
+ connection_all.append([])
153
+
154
+ # last number in each row is the total parts number of that person
155
+ # the second last number in each row is the score of the overall configuration
156
+ subset = -1 * np.ones((0, 20))
157
+ candidate = np.array([item for sublist in all_peaks for item in sublist])
158
+
159
+ for k in range(len(mapIdx)):
160
+ if k not in special_k:
161
+ partAs = connection_all[k][:, 0]
162
+ partBs = connection_all[k][:, 1]
163
+ indexA, indexB = np.array(limbSeq[k]) - 1
164
+
165
+ for i in range(len(connection_all[k])): # = 1:size(temp,1)
166
+ found = 0
167
+ subset_idx = [-1, -1]
168
+ for j in range(len(subset)): # 1:size(subset,1):
169
+ if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
170
+ subset_idx[found] = j
171
+ found += 1
172
+
173
+ if found == 1:
174
+ j = subset_idx[0]
175
+ if subset[j][indexB] != partBs[i]:
176
+ subset[j][indexB] = partBs[i]
177
+ subset[j][-1] += 1
178
+ subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
179
+ elif found == 2: # if found 2 and disjoint, merge them
180
+ j1, j2 = subset_idx
181
+ membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
182
+ if len(np.nonzero(membership == 2)[0]) == 0: # merge
183
+ subset[j1][:-2] += (subset[j2][:-2] + 1)
184
+ subset[j1][-2:] += subset[j2][-2:]
185
+ subset[j1][-2] += connection_all[k][i][2]
186
+ subset = np.delete(subset, j2, 0)
187
+ else: # as like found == 1
188
+ subset[j1][indexB] = partBs[i]
189
+ subset[j1][-1] += 1
190
+ subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
191
+
192
+ # if find no partA in the subset, create a new subset
193
+ elif not found and k < 17:
194
+ row = -1 * np.ones(20)
195
+ row[indexA] = partAs[i]
196
+ row[indexB] = partBs[i]
197
+ row[-1] = 2
198
+ row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
199
+ subset = np.vstack([subset, row])
200
+ # delete some rows of subset which has few parts occur
201
+ deleteIdx = []
202
+ for i in range(len(subset)):
203
+ if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
204
+ deleteIdx.append(i)
205
+ subset = np.delete(subset, deleteIdx, axis=0)
206
+
207
+ # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
208
+ # candidate: x, y, score, id
209
+ return candidate, subset
210
+
211
+ if __name__ == "__main__":
212
+ body_estimation = Body('../model/body_pose_model.pth')
213
+
214
+ test_image = '../images/ski.jpg'
215
+ oriImg = cv2.imread(test_image) # B,G,R order
216
+ candidate, subset = body_estimation(oriImg)
217
+ canvas = util.draw_bodypose(oriImg, candidate, subset)
218
+ plt.imshow(canvas[:, :, [2, 1, 0]])
219
+ plt.show()
text2tex/models/ControlNet/annotator/openpose/hand.py ADDED
@@ -0,0 +1,86 @@
1
+ import cv2
2
+ import json
3
+ import numpy as np
4
+ import math
5
+ import time
6
+ from scipy.ndimage.filters import gaussian_filter
7
+ import matplotlib.pyplot as plt
8
+ import matplotlib
9
+ import torch
10
+ from skimage.measure import label
11
+
12
+ from .model import handpose_model
13
+ from . import util
14
+
15
+ class Hand(object):
16
+ def __init__(self, model_path):
17
+ self.model = handpose_model()
18
+ if torch.cuda.is_available():
19
+ self.model = self.model.cuda()
20
+ print('cuda')
21
+ model_dict = util.transfer(self.model, torch.load(model_path))
22
+ self.model.load_state_dict(model_dict)
23
+ self.model.eval()
24
+
25
+ def __call__(self, oriImg):
26
+ scale_search = [0.5, 1.0, 1.5, 2.0]
27
+ # scale_search = [0.5]
28
+ boxsize = 368
29
+ stride = 8
30
+ padValue = 128
31
+ thre = 0.05
32
+ multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
33
+ heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 22))
34
+ # paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
35
+
36
+ for m in range(len(multiplier)):
37
+ scale = multiplier[m]
38
+ imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
39
+ imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
40
+ im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
41
+ im = np.ascontiguousarray(im)
42
+
43
+ data = torch.from_numpy(im).float()
44
+ if torch.cuda.is_available():
45
+ data = data.cuda()
46
+ # data = data.permute([2, 0, 1]).unsqueeze(0).float()
47
+ with torch.no_grad():
48
+ output = self.model(data).cpu().numpy()
49
+ # output = self.model(data).numpy()q
50
+
51
+ # extract outputs, resize, and remove padding
52
+ heatmap = np.transpose(np.squeeze(output), (1, 2, 0)) # output 1 is heatmaps
53
+ heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
54
+ heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
55
+ heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
56
+
57
+ heatmap_avg += heatmap / len(multiplier)
58
+
59
+ all_peaks = []
60
+ for part in range(21):
61
+ map_ori = heatmap_avg[:, :, part]
62
+ one_heatmap = gaussian_filter(map_ori, sigma=3)
63
+ binary = np.ascontiguousarray(one_heatmap > thre, dtype=np.uint8)
64
+ # 全部小于阈值
65
+ if np.sum(binary) == 0:
66
+ all_peaks.append([0, 0])
67
+ continue
68
+ label_img, label_numbers = label(binary, return_num=True, connectivity=binary.ndim)
69
+ max_index = np.argmax([np.sum(map_ori[label_img == i]) for i in range(1, label_numbers + 1)]) + 1
70
+ label_img[label_img != max_index] = 0
71
+ map_ori[label_img == 0] = 0
72
+
73
+ y, x = util.npmax(map_ori)
74
+ all_peaks.append([x, y])
75
+ return np.array(all_peaks)
76
+
77
+ if __name__ == "__main__":
78
+ hand_estimation = Hand('../model/hand_pose_model.pth')
79
+
80
+ # test_image = '../images/hand.jpg'
81
+ test_image = '../images/hand.jpg'
82
+ oriImg = cv2.imread(test_image) # B,G,R order
83
+ peaks = hand_estimation(oriImg)
84
+ canvas = util.draw_handpose(oriImg, peaks, True)
85
+ cv2.imshow('', canvas)
86
+ cv2.waitKey(0)
text2tex/models/ControlNet/annotator/openpose/model.py ADDED
@@ -0,0 +1,219 @@
+ import torch
+ from collections import OrderedDict
+
+ import torch
+ import torch.nn as nn
+
+ def make_layers(block, no_relu_layers):
+     layers = []
+     for layer_name, v in block.items():
+         if 'pool' in layer_name:
+             layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1],
+                                  padding=v[2])
+             layers.append((layer_name, layer))
+         else:
+             conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
+                                kernel_size=v[2], stride=v[3],
+                                padding=v[4])
+             layers.append((layer_name, conv2d))
+             if layer_name not in no_relu_layers:
+                 layers.append(('relu_'+layer_name, nn.ReLU(inplace=True)))
+
+     return nn.Sequential(OrderedDict(layers))
+
+ class bodypose_model(nn.Module):
+     def __init__(self):
+         super(bodypose_model, self).__init__()
+
+         # these layers have no relu layer
+         no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',\
+                           'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',\
+                           'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',\
+                           'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L1']
+         blocks = {}
+         block0 = OrderedDict([
+             ('conv1_1', [3, 64, 3, 1, 1]),
+             ('conv1_2', [64, 64, 3, 1, 1]),
+             ('pool1_stage1', [2, 2, 0]),
+             ('conv2_1', [64, 128, 3, 1, 1]),
+             ('conv2_2', [128, 128, 3, 1, 1]),
+             ('pool2_stage1', [2, 2, 0]),
+             ('conv3_1', [128, 256, 3, 1, 1]),
+             ('conv3_2', [256, 256, 3, 1, 1]),
+             ('conv3_3', [256, 256, 3, 1, 1]),
+             ('conv3_4', [256, 256, 3, 1, 1]),
+             ('pool3_stage1', [2, 2, 0]),
+             ('conv4_1', [256, 512, 3, 1, 1]),
+             ('conv4_2', [512, 512, 3, 1, 1]),
+             ('conv4_3_CPM', [512, 256, 3, 1, 1]),
+             ('conv4_4_CPM', [256, 128, 3, 1, 1])
+         ])
+
+
+         # Stage 1
+         block1_1 = OrderedDict([
+             ('conv5_1_CPM_L1', [128, 128, 3, 1, 1]),
+             ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]),
+             ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]),
+             ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]),
+             ('conv5_5_CPM_L1', [512, 38, 1, 1, 0])
+         ])
+
+         block1_2 = OrderedDict([
+             ('conv5_1_CPM_L2', [128, 128, 3, 1, 1]),
+             ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]),
+             ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]),
+             ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]),
+             ('conv5_5_CPM_L2', [512, 19, 1, 1, 0])
+         ])
+         blocks['block1_1'] = block1_1
+         blocks['block1_2'] = block1_2
+
+         self.model0 = make_layers(block0, no_relu_layers)
+
+         # Stages 2 - 6
+         for i in range(2, 7):
+             blocks['block%d_1' % i] = OrderedDict([
+                 ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]),
+                 ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]),
+                 ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
+                 ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]),
+                 ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]),
+                 ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]),
+                 ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])
+             ])
+
+             blocks['block%d_2' % i] = OrderedDict([
+                 ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]),
+                 ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]),
+                 ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
+                 ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]),
+                 ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]),
+                 ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]),
+                 ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])
+             ])
+
+         for k in blocks.keys():
+             blocks[k] = make_layers(blocks[k], no_relu_layers)
+
+         self.model1_1 = blocks['block1_1']
+         self.model2_1 = blocks['block2_1']
+         self.model3_1 = blocks['block3_1']
+         self.model4_1 = blocks['block4_1']
+         self.model5_1 = blocks['block5_1']
+         self.model6_1 = blocks['block6_1']
+
+         self.model1_2 = blocks['block1_2']
+         self.model2_2 = blocks['block2_2']
+         self.model3_2 = blocks['block3_2']
+         self.model4_2 = blocks['block4_2']
+         self.model5_2 = blocks['block5_2']
+         self.model6_2 = blocks['block6_2']
+
+
+     def forward(self, x):
+
+         out1 = self.model0(x)
+
+         out1_1 = self.model1_1(out1)
+         out1_2 = self.model1_2(out1)
+         out2 = torch.cat([out1_1, out1_2, out1], 1)
+
+         out2_1 = self.model2_1(out2)
+         out2_2 = self.model2_2(out2)
+         out3 = torch.cat([out2_1, out2_2, out1], 1)
+
+         out3_1 = self.model3_1(out3)
+         out3_2 = self.model3_2(out3)
+         out4 = torch.cat([out3_1, out3_2, out1], 1)
+
+         out4_1 = self.model4_1(out4)
+         out4_2 = self.model4_2(out4)
+         out5 = torch.cat([out4_1, out4_2, out1], 1)
+
+         out5_1 = self.model5_1(out5)
+         out5_2 = self.model5_2(out5)
+         out6 = torch.cat([out5_1, out5_2, out1], 1)
+
+         out6_1 = self.model6_1(out6)
+         out6_2 = self.model6_2(out6)
+
+         return out6_1, out6_2
+
+ class handpose_model(nn.Module):
+     def __init__(self):
+         super(handpose_model, self).__init__()
+
+         # these layers have no relu layer
+         no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',\
+                           'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']
+         # stage 1
+         block1_0 = OrderedDict([
+             ('conv1_1', [3, 64, 3, 1, 1]),
+             ('conv1_2', [64, 64, 3, 1, 1]),
+             ('pool1_stage1', [2, 2, 0]),
+             ('conv2_1', [64, 128, 3, 1, 1]),
+             ('conv2_2', [128, 128, 3, 1, 1]),
+             ('pool2_stage1', [2, 2, 0]),
+             ('conv3_1', [128, 256, 3, 1, 1]),
+             ('conv3_2', [256, 256, 3, 1, 1]),
+             ('conv3_3', [256, 256, 3, 1, 1]),
+             ('conv3_4', [256, 256, 3, 1, 1]),
+             ('pool3_stage1', [2, 2, 0]),
+             ('conv4_1', [256, 512, 3, 1, 1]),
+             ('conv4_2', [512, 512, 3, 1, 1]),
+             ('conv4_3', [512, 512, 3, 1, 1]),
+             ('conv4_4', [512, 512, 3, 1, 1]),
+             ('conv5_1', [512, 512, 3, 1, 1]),
+             ('conv5_2', [512, 512, 3, 1, 1]),
+             ('conv5_3_CPM', [512, 128, 3, 1, 1])
+         ])
+
+         block1_1 = OrderedDict([
+             ('conv6_1_CPM', [128, 512, 1, 1, 0]),
+             ('conv6_2_CPM', [512, 22, 1, 1, 0])
+         ])
+
+         blocks = {}
+         blocks['block1_0'] = block1_0
+         blocks['block1_1'] = block1_1
+
+         # stage 2-6
+         for i in range(2, 7):
+             blocks['block%d' % i] = OrderedDict([
+                 ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]),
+                 ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]),
+                 ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]),
+                 ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]),
+                 ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]),
+                 ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]),
+                 ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0])
+             ])
+
+         for k in blocks.keys():
+             blocks[k] = make_layers(blocks[k], no_relu_layers)
+
+         self.model1_0 = blocks['block1_0']
+         self.model1_1 = blocks['block1_1']
+         self.model2 = blocks['block2']
+         self.model3 = blocks['block3']
+         self.model4 = blocks['block4']
+         self.model5 = blocks['block5']
+         self.model6 = blocks['block6']
+
+     def forward(self, x):
+         out1_0 = self.model1_0(x)
+         out1_1 = self.model1_1(out1_0)
+         concat_stage2 = torch.cat([out1_1, out1_0], 1)
+         out_stage2 = self.model2(concat_stage2)
+         concat_stage3 = torch.cat([out_stage2, out1_0], 1)
+         out_stage3 = self.model3(concat_stage3)
+         concat_stage4 = torch.cat([out_stage3, out1_0], 1)
+         out_stage4 = self.model4(concat_stage4)
+         concat_stage5 = torch.cat([out_stage4, out1_0], 1)
+         out_stage5 = self.model5(concat_stage5)
+         concat_stage6 = torch.cat([out_stage5, out1_0], 1)
+         out_stage6 = self.model6(concat_stage6)
+         return out_stage6
+
+
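bodypose_model and handpose_model only define the architecture; the accompanying body.py/hand.py wrappers load the converted Caffe weights through util.transfer, which looks up each state_dict entry in the checkpoint by its bare layer name (the leading modelX. prefix stripped). A minimal loading sketch, assuming this file is importable as `model`, that util.py is on the path, and that a body_pose_model.pth checkpoint has been downloaded locally (the real checkpoints live under annotator/ckpts):

import torch
from model import bodypose_model   # assumption: module importable by this name
import util

net = bodypose_model()
state = torch.load('body_pose_model.pth', map_location='cpu')  # hypothetical local path
net.load_state_dict(util.transfer(net, state))
net.eval()
with torch.no_grad():
    paf, heatmap = net(torch.zeros(1, 3, 368, 368))  # 38 PAF channels, 19 heatmap channels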
text2tex/models/ControlNet/annotator/openpose/util.py ADDED
@@ -0,0 +1,164 @@
+ import math
+ import numpy as np
+ import matplotlib
+ import cv2
+
+
+ def padRightDownCorner(img, stride, padValue):
+     h = img.shape[0]
+     w = img.shape[1]
+
+     pad = 4 * [None]
+     pad[0] = 0 # up
+     pad[1] = 0 # left
+     pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down
+     pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right
+
+     img_padded = img
+     pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1))
+     img_padded = np.concatenate((pad_up, img_padded), axis=0)
+     pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1))
+     img_padded = np.concatenate((pad_left, img_padded), axis=1)
+     pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1))
+     img_padded = np.concatenate((img_padded, pad_down), axis=0)
+     pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1))
+     img_padded = np.concatenate((img_padded, pad_right), axis=1)
+
+     return img_padded, pad
+
+ # transfer the Caffe model weights to PyTorch, matching entries by layer name
+ def transfer(model, model_weights):
+     transfered_model_weights = {}
+     for weights_name in model.state_dict().keys():
+         transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])]
+     return transfered_model_weights
+
+ # draw the body keypoints and limbs
+ def draw_bodypose(canvas, candidate, subset):
+     stickwidth = 4
+     limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
+                [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
+                [1, 16], [16, 18], [3, 17], [6, 18]]
+
+     colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
+               [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
+               [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
+     for i in range(18):
+         for n in range(len(subset)):
+             index = int(subset[n][i])
+             if index == -1:
+                 continue
+             x, y = candidate[index][0:2]
+             cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
+     for i in range(17):
+         for n in range(len(subset)):
+             index = subset[n][np.array(limbSeq[i]) - 1]
+             if -1 in index:
+                 continue
+             cur_canvas = canvas.copy()
+             Y = candidate[index.astype(int), 0]
+             X = candidate[index.astype(int), 1]
+             mX = np.mean(X)
+             mY = np.mean(Y)
+             length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
+             angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
+             polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
+             cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
+             canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
+     # plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]])
+     # plt.imshow(canvas[:, :, [2, 1, 0]])
+     return canvas
+
+
+ # images drawn directly with OpenCV look rough
+ def draw_handpose(canvas, all_hand_peaks, show_number=False):
+     edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \
+              [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
+
+     for peaks in all_hand_peaks:
+         for ie, e in enumerate(edges):
+             if np.sum(np.all(peaks[e], axis=1)==0)==0:
+                 x1, y1 = peaks[e[0]]
+                 x2, y2 = peaks[e[1]]
+                 cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie/float(len(edges)), 1.0, 1.0])*255, thickness=2)
+
+         for i, keypoint in enumerate(peaks):
+             x, y = keypoint
+             cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
+             if show_number:
+                 cv2.putText(canvas, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA)
+     return canvas
+
+ # detect hands according to the body pose keypoints
+ # please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp
+ def handDetect(candidate, subset, oriImg):
+     # right hand: wrist 4, elbow 3, shoulder 2
+     # left hand: wrist 7, elbow 6, shoulder 5
+     ratioWristElbow = 0.33
+     detect_result = []
+     image_height, image_width = oriImg.shape[0:2]
+     for person in subset.astype(int):
+         # a side is usable only if all three of its joints were detected
+         has_left = np.sum(person[[5, 6, 7]] == -1) == 0
+         has_right = np.sum(person[[2, 3, 4]] == -1) == 0
+         if not (has_left or has_right):
+             continue
+         hands = []
+         # left hand
+         if has_left:
+             left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]]
+             x1, y1 = candidate[left_shoulder_index][:2]
+             x2, y2 = candidate[left_elbow_index][:2]
+             x3, y3 = candidate[left_wrist_index][:2]
+             hands.append([x1, y1, x2, y2, x3, y3, True])
+         # right hand
+         if has_right:
+             right_shoulder_index, right_elbow_index, right_wrist_index = person[[2, 3, 4]]
+             x1, y1 = candidate[right_shoulder_index][:2]
+             x2, y2 = candidate[right_elbow_index][:2]
+             x3, y3 = candidate[right_wrist_index][:2]
+             hands.append([x1, y1, x2, y2, x3, y3, False])
+
+         for x1, y1, x2, y2, x3, y3, is_left in hands:
+             # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbow) = (1 + ratio) * pos_wrist - ratio * pos_elbow
+             # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]);
+             # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]);
+             # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow);
+             # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder);
+             # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder);
+             x = x3 + ratioWristElbow * (x3 - x2)
+             y = y3 + ratioWristElbow * (y3 - y2)
+             distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2)
+             distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
+             width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)
+             # x-y refers to the center --> offset to the top-left point
+             # handRectangle.x -= handRectangle.width / 2.f;
+             # handRectangle.y -= handRectangle.height / 2.f;
+             x -= width / 2
+             y -= width / 2 # width = height
+             # clip the box where it overflows the image
+             if x < 0: x = 0
+             if y < 0: y = 0
+             width1 = width
+             width2 = width
+             if x + width > image_width: width1 = image_width - x
+             if y + width > image_height: width2 = image_height - y
+             width = min(width1, width2)
+             # discard hand boxes smaller than 20 pixels
+             if width >= 20:
+                 detect_result.append([int(x), int(y), int(width), is_left])
+
+     '''
+     return value: [[x, y, w, True if left hand else False]].
+     width = height since the network requires a square input.
+     x, y are the coordinates of the top-left corner.
+     '''
+     return detect_result
+
+ # get the (row, col) index of the maximum of a 2d array
+ def npmax(array):
+     arrayindex = array.argmax(1)
+     arrayvalue = array.max(1)
+     i = arrayvalue.argmax()
+     j = arrayindex[i]
+     return i, j
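Two of these helpers are easy to sanity-check in isolation: padRightDownCorner only ever pads the bottom/right edges up to the next multiple of stride, and npmax returns the (row, column) of the global maximum. A quick check with arbitrary toy shapes, assuming this util module is importable:

import numpy as np

img = np.zeros((5, 7, 3), dtype=np.uint8)          # toy 5x7 RGB image
padded, pad = padRightDownCorner(img, stride=4, padValue=128)
print(padded.shape)  # (8, 8, 3): 5 -> 8 rows, 7 -> 8 cols
print(pad)           # [0, 0, 3, 1]: up/left are never padded

heat = np.zeros((4, 4))
heat[2, 1] = 1.0
print(npmax(heat))   # (2, 1)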
text2tex/models/ControlNet/annotator/uniformer/__init__.py ADDED
@@ -0,0 +1,13 @@
+ from annotator.uniformer.mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot
+ from annotator.uniformer.mmseg.core.evaluation import get_palette
+
+
+ checkpoint_file = "annotator/ckpts/upernet_global_small.pth"
+ config_file = 'annotator/uniformer/exp/upernet_global_small/config.py'
+ model = init_segmentor(config_file, checkpoint_file).cuda()
+
+
+ def apply_uniformer(img):
+     result = inference_segmentor(model, img)
+     res_img = show_result_pyplot(model, img, result, get_palette('ade'), opacity=1)
+     return res_img
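Note that the segmentor is built and moved to CUDA at import time, so importing this module already requires a GPU and the upernet_global_small.pth checkpoint under annotator/ckpts. A minimal usage sketch, assuming the ControlNet root is on sys.path; the image file names are placeholders:

import cv2
from annotator.uniformer import apply_uniformer  # the model is constructed on import

img = cv2.imread('input.jpg')    # hypothetical BGR input
seg = apply_uniformer(img)       # colorized ADE20K segmentation at the input resolution
cv2.imwrite('segmentation.png', seg)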
text2tex/models/ControlNet/annotator/uniformer/configs/_base_/datasets/ade20k.py ADDED
@@ -0,0 +1,54 @@
+ # dataset settings
+ dataset_type = 'ADE20KDataset'
+ data_root = 'data/ade/ADEChallengeData2016'
+ img_norm_cfg = dict(
+     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+ crop_size = (512, 512)
+ train_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(type='LoadAnnotations', reduce_zero_label=True),
+     dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
+     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+     dict(type='RandomFlip', prob=0.5),
+     dict(type='PhotoMetricDistortion'),
+     dict(type='Normalize', **img_norm_cfg),
+     dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+     dict(type='DefaultFormatBundle'),
+     dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+ ]
+ test_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(
+         type='MultiScaleFlipAug',
+         img_scale=(2048, 512),
+         # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+         flip=False,
+         transforms=[
+             dict(type='Resize', keep_ratio=True),
+             dict(type='RandomFlip'),
+             dict(type='Normalize', **img_norm_cfg),
+             dict(type='ImageToTensor', keys=['img']),
+             dict(type='Collect', keys=['img']),
+         ])
+ ]
+ data = dict(
+     samples_per_gpu=4,
+     workers_per_gpu=4,
+     train=dict(
+         type=dataset_type,
+         data_root=data_root,
+         img_dir='images/training',
+         ann_dir='annotations/training',
+         pipeline=train_pipeline),
+     val=dict(
+         type=dataset_type,
+         data_root=data_root,
+         img_dir='images/validation',
+         ann_dir='annotations/validation',
+         pipeline=test_pipeline),
+     test=dict(
+         type=dataset_type,
+         data_root=data_root,
+         img_dir='images/validation',
+         ann_dir='annotations/validation',
+         pipeline=test_pipeline))
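These _base_ dataset files are plain Python configs meant to be loaded (and usually overridden) through mmcv's Config machinery rather than imported directly. A sketch of how such a file is typically consumed, assuming the vendored mmcv copy exposes Config as upstream mmcv does and that the config path below is correct for a given checkout:

from annotator.uniformer.mmcv import Config  # vendored mmcv; plain `import mmcv` also works upstream

cfg = Config.fromfile('configs/_base_/datasets/ade20k.py')   # path is an assumption
cfg.data.train.data_root = '/data/ADEChallengeData2016'      # hypothetical local dataset root
print(cfg.data.samples_per_gpu, cfg.crop_size)               # 4 (512, 512)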
text2tex/models/ControlNet/annotator/uniformer/configs/_base_/datasets/chase_db1.py ADDED
@@ -0,0 +1,59 @@
+ # dataset settings
+ dataset_type = 'ChaseDB1Dataset'
+ data_root = 'data/CHASE_DB1'
+ img_norm_cfg = dict(
+     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+ img_scale = (960, 999)
+ crop_size = (128, 128)
+ train_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(type='LoadAnnotations'),
+     dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
+     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+     dict(type='RandomFlip', prob=0.5),
+     dict(type='PhotoMetricDistortion'),
+     dict(type='Normalize', **img_norm_cfg),
+     dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+     dict(type='DefaultFormatBundle'),
+     dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+ ]
+ test_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(
+         type='MultiScaleFlipAug',
+         img_scale=img_scale,
+         # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
+         flip=False,
+         transforms=[
+             dict(type='Resize', keep_ratio=True),
+             dict(type='RandomFlip'),
+             dict(type='Normalize', **img_norm_cfg),
+             dict(type='ImageToTensor', keys=['img']),
+             dict(type='Collect', keys=['img'])
+         ])
+ ]
+
+ data = dict(
+     samples_per_gpu=4,
+     workers_per_gpu=4,
+     train=dict(
+         type='RepeatDataset',
+         times=40000,
+         dataset=dict(
+             type=dataset_type,
+             data_root=data_root,
+             img_dir='images/training',
+             ann_dir='annotations/training',
+             pipeline=train_pipeline)),
+     val=dict(
+         type=dataset_type,
+         data_root=data_root,
+         img_dir='images/validation',
+         ann_dir='annotations/validation',
+         pipeline=test_pipeline),
+     test=dict(
+         type=dataset_type,
+         data_root=data_root,
+         img_dir='images/validation',
+         ann_dir='annotations/validation',
+         pipeline=test_pipeline))
text2tex/models/ControlNet/annotator/uniformer/configs/_base_/datasets/cityscapes.py ADDED
@@ -0,0 +1,54 @@
+ # dataset settings
+ dataset_type = 'CityscapesDataset'
+ data_root = 'data/cityscapes/'
+ img_norm_cfg = dict(
+     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+ crop_size = (512, 1024)
+ train_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(type='LoadAnnotations'),
+     dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+     dict(type='RandomFlip', prob=0.5),
+     dict(type='PhotoMetricDistortion'),
+     dict(type='Normalize', **img_norm_cfg),
+     dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+     dict(type='DefaultFormatBundle'),
+     dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+ ]
+ test_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(
+         type='MultiScaleFlipAug',
+         img_scale=(2048, 1024),
+         # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+         flip=False,
+         transforms=[
+             dict(type='Resize', keep_ratio=True),
+             dict(type='RandomFlip'),
+             dict(type='Normalize', **img_norm_cfg),
+             dict(type='ImageToTensor', keys=['img']),
+             dict(type='Collect', keys=['img']),
+         ])
+ ]
+ data = dict(
+     samples_per_gpu=2,
+     workers_per_gpu=2,
+     train=dict(
+         type=dataset_type,
+         data_root=data_root,
+         img_dir='leftImg8bit/train',
+         ann_dir='gtFine/train',
+         pipeline=train_pipeline),
+     val=dict(
+         type=dataset_type,
+         data_root=data_root,
+         img_dir='leftImg8bit/val',
+         ann_dir='gtFine/val',
+         pipeline=test_pipeline),
+     test=dict(
+         type=dataset_type,
+         data_root=data_root,
+         img_dir='leftImg8bit/val',
+         ann_dir='gtFine/val',
+         pipeline=test_pipeline))
text2tex/models/ControlNet/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py ADDED
@@ -0,0 +1,35 @@
+ _base_ = './cityscapes.py'
+ img_norm_cfg = dict(
+     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+ crop_size = (769, 769)
+ train_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(type='LoadAnnotations'),
+     dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
+     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+     dict(type='RandomFlip', prob=0.5),
+     dict(type='PhotoMetricDistortion'),
+     dict(type='Normalize', **img_norm_cfg),
+     dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+     dict(type='DefaultFormatBundle'),
+     dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+ ]
+ test_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(
+         type='MultiScaleFlipAug',
+         img_scale=(2049, 1025),
+         # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+         flip=False,
+         transforms=[
+             dict(type='Resize', keep_ratio=True),
+             dict(type='RandomFlip'),
+             dict(type='Normalize', **img_norm_cfg),
+             dict(type='ImageToTensor', keys=['img']),
+             dict(type='Collect', keys=['img']),
+         ])
+ ]
+ data = dict(
+     train=dict(pipeline=train_pipeline),
+     val=dict(pipeline=test_pipeline),
+     test=dict(pipeline=test_pipeline))
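This variant only redefines the normalization, crop size, and pipelines; everything else (data_root, image/annotation directories, batch settings) is pulled in from ./cityscapes.py via the _base_ mechanism when the file is loaded, with child keys merged over the parent's. A quick way to inspect the merged result, under the same vendored-mmcv assumption as above:

from annotator.uniformer.mmcv import Config

cfg = Config.fromfile('configs/_base_/datasets/cityscapes_769x769.py')  # path is an assumption
print(cfg.data.train.data_root)             # 'data/cityscapes/' -- inherited from the base file
print(cfg.train_pipeline[3]['crop_size'])   # (769, 769) -- overridden here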
text2tex/models/ControlNet/annotator/uniformer/configs/_base_/datasets/drive.py ADDED
@@ -0,0 +1,59 @@
+ # dataset settings
+ dataset_type = 'DRIVEDataset'
+ data_root = 'data/DRIVE'
+ img_norm_cfg = dict(
+     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+ img_scale = (584, 565)
+ crop_size = (64, 64)
+ train_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(type='LoadAnnotations'),
+     dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
+     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+     dict(type='RandomFlip', prob=0.5),
+     dict(type='PhotoMetricDistortion'),
+     dict(type='Normalize', **img_norm_cfg),
+     dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+     dict(type='DefaultFormatBundle'),
+     dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+ ]
+ test_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(
+         type='MultiScaleFlipAug',
+         img_scale=img_scale,
+         # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
+         flip=False,
+         transforms=[
+             dict(type='Resize', keep_ratio=True),
+             dict(type='RandomFlip'),
+             dict(type='Normalize', **img_norm_cfg),
+             dict(type='ImageToTensor', keys=['img']),
+             dict(type='Collect', keys=['img'])
+         ])
+ ]
+
+ data = dict(
+     samples_per_gpu=4,
+     workers_per_gpu=4,
+     train=dict(
+         type='RepeatDataset',
+         times=40000,
+         dataset=dict(
+             type=dataset_type,
+             data_root=data_root,
+             img_dir='images/training',
+             ann_dir='annotations/training',
+             pipeline=train_pipeline)),
+     val=dict(
+         type=dataset_type,
+         data_root=data_root,
+         img_dir='images/validation',
+         ann_dir='annotations/validation',
+         pipeline=test_pipeline),
+     test=dict(
+         type=dataset_type,
+         data_root=data_root,
+         img_dir='images/validation',
+         ann_dir='annotations/validation',
+         pipeline=test_pipeline))
text2tex/models/ControlNet/annotator/uniformer/configs/_base_/datasets/hrf.py ADDED
@@ -0,0 +1,59 @@
+ # dataset settings
+ dataset_type = 'HRFDataset'
+ data_root = 'data/HRF'
+ img_norm_cfg = dict(
+     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+ img_scale = (2336, 3504)
+ crop_size = (256, 256)
+ train_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(type='LoadAnnotations'),
+     dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
+     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+     dict(type='RandomFlip', prob=0.5),
+     dict(type='PhotoMetricDistortion'),
+     dict(type='Normalize', **img_norm_cfg),
+     dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+     dict(type='DefaultFormatBundle'),
+     dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+ ]
+ test_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(
+         type='MultiScaleFlipAug',
+         img_scale=img_scale,
+         # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
+         flip=False,
+         transforms=[
+             dict(type='Resize', keep_ratio=True),
+             dict(type='RandomFlip'),
+             dict(type='Normalize', **img_norm_cfg),
+             dict(type='ImageToTensor', keys=['img']),
+             dict(type='Collect', keys=['img'])
+         ])
+ ]
+
+ data = dict(
+     samples_per_gpu=4,
+     workers_per_gpu=4,
+     train=dict(
+         type='RepeatDataset',
+         times=40000,
+         dataset=dict(
+             type=dataset_type,
+             data_root=data_root,
+             img_dir='images/training',
+             ann_dir='annotations/training',
+             pipeline=train_pipeline)),
+     val=dict(
+         type=dataset_type,
+         data_root=data_root,
+         img_dir='images/validation',
+         ann_dir='annotations/validation',
+         pipeline=test_pipeline),
+     test=dict(
+         type=dataset_type,
+         data_root=data_root,
+         img_dir='images/validation',
+         ann_dir='annotations/validation',
+         pipeline=test_pipeline))
text2tex/models/ControlNet/annotator/uniformer/configs/_base_/datasets/pascal_context.py ADDED
@@ -0,0 +1,60 @@
+ # dataset settings
+ dataset_type = 'PascalContextDataset'
+ data_root = 'data/VOCdevkit/VOC2010/'
+ img_norm_cfg = dict(
+     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+ img_scale = (520, 520)
+ crop_size = (480, 480)
+
+ train_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(type='LoadAnnotations'),
+     dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
+     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+     dict(type='RandomFlip', prob=0.5),
+     dict(type='PhotoMetricDistortion'),
+     dict(type='Normalize', **img_norm_cfg),
+     dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+     dict(type='DefaultFormatBundle'),
+     dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+ ]
+ test_pipeline = [
+     dict(type='LoadImageFromFile'),
+     dict(
+         type='MultiScaleFlipAug',
+         img_scale=img_scale,
+         # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+         flip=False,
+         transforms=[
+             dict(type='Resize', keep_ratio=True),
+             dict(type='RandomFlip'),
+             dict(type='Normalize', **img_norm_cfg),
+             dict(type='ImageToTensor', keys=['img']),
+             dict(type='Collect', keys=['img']),
+         ])
+ ]
+ data = dict(
+     samples_per_gpu=4,
+     workers_per_gpu=4,
+     train=dict(
+         type=dataset_type,
+         data_root=data_root,
+         img_dir='JPEGImages',
+         ann_dir='SegmentationClassContext',
+         split='ImageSets/SegmentationContext/train.txt',
+         pipeline=train_pipeline),
+     val=dict(
+         type=dataset_type,
+         data_root=data_root,
+         img_dir='JPEGImages',
+         ann_dir='SegmentationClassContext',
+         split='ImageSets/SegmentationContext/val.txt',
+         pipeline=test_pipeline),
+     test=dict(
+         type=dataset_type,
+         data_root=data_root,
+         img_dir='JPEGImages',
+         ann_dir='SegmentationClassContext',
+         split='ImageSets/SegmentationContext/val.txt',
+         pipeline=test_pipeline))