"""Patch-based point cloud denoising.

Splits an input point cloud into overlapping patches, denoises each patch
with a pretrained model, and merges the results back together.
"""
import argparse
import math
import os

import numpy as np
import torch
from sklearn.cluster import KMeans
from sklearn.neighbors import kneighbors_graph
from tqdm import tqdm

from models.denoise import PointCloudDenoising
from models.utils import *


def normalize_pointcloud(v):
    """Center a point cloud at the origin and scale it into the unit cube.

    Returns the normalized points along with the center and scale needed to
    invert the transform.
    """
    center = v.mean(axis=0, keepdims=True)
    v = v - center
    # Scale so the largest absolute coordinate is just under 1.
    scale = (1 / np.abs(v).max()) * 0.999999
    v = v * scale
    return v, center, scale
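
# A minimal sketch (not part of the original pipeline): dividing by `scale`
# and adding back `center` inverts the normalization, which is exactly how
# run_denoise() maps its outputs back to the input coordinate frame.
#   v, center, scale = normalize_pointcloud(pc)
#   restored = (v / scale) + center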


def run_denoise(pc, patch_size, ckpt, device, random_state=0, expand_knn=16):
    """Denoise a point cloud by clustering it into patches of roughly
    patch_size points, expanding each patch with its k-nearest neighbors,
    and running the pretrained model on each patch.
    """
    pc, center, scale = normalize_pointcloud(pc)
    print('[INFO] Center: %s | Scale: %.6f' % (repr(center), scale))

    # Partition the cloud into about (N / patch_size) clusters.
    n_clusters = math.ceil(pc.shape[0] / patch_size)
    kmeans = KMeans(n_clusters=n_clusters, random_state=random_state).fit(pc)

    # Expand each cluster with its k-nearest neighbors so neighboring
    # patches overlap slightly at their boundaries.
    knn_graph = kneighbors_graph(pc, n_neighbors=expand_knn, mode='distance', include_self=False, n_jobs=8)
    knn_idx = np.array(knn_graph.tolil().rows.tolist())

    patches = []
    for i in range(n_clusters):
        pts_idx = kmeans.labels_ == i
        expand_idx = np.unique(knn_idx[pts_idx].flatten())
        patches.append(pc[expand_idx])

    model = PointCloudDenoising.load_from_checkpoint(ckpt).to(device=device)

    denoised_patches = []
    downsampled_patches = []
    for patch in tqdm(patches):
        patch = torch.FloatTensor(patch).unsqueeze(0).to(device=device)
        with torch.no_grad():
            pred = model(patch)
        pred = pred.detach().cpu().reshape(-1, 3).numpy()
        denoised_patches.append(pred)
        # The model also exposes its intermediate downsampled points.
        downsampled_patches.append(model.model.adjusted.detach().cpu().reshape(-1, 3).numpy())

    denoised = np.concatenate(denoised_patches, axis=0)
    downsampled = np.concatenate(downsampled_patches, axis=0)

    # Map the results back to the original coordinate frame.
    denoised = (denoised / scale) + center
    downsampled = (downsampled / scale) + center
    return denoised, downsampled
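
# Illustrative usage (the input file name is hypothetical; the checkpoint
# path is the script's default):
#   pc = np.loadtxt('noisy.xyz').astype(np.float32)
#   denoised, downsampled = run_denoise(
#       pc, patch_size=1000,
#       ckpt='./pretrained/supervised/epoch=153.ckpt', device='cuda')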


def run_denoise_middle_pointcloud(pc, num_splits, patch_size, ckpt, device, random_state=0, expand_knn=16):
    """Denoise a middle-sized point cloud by shuffling it, cutting it into
    num_splits random subsets, and denoising each subset independently.

    Note that np.random.shuffle modifies the input array in place.
    """
    np.random.shuffle(pc)
    split_size = math.floor(pc.shape[0] / num_splits)
    splits = []
    for i in range(num_splits):
        if i < num_splits - 1:
            splits.append(pc[i*split_size:(i+1)*split_size])
        else:
            # The last split absorbs any remainder points.
            splits.append(pc[i*split_size:])

    denoised = []
    downsampled = []
    for splpc in tqdm(splits):
        den, dow = run_denoise(splpc, patch_size, ckpt, device, random_state, expand_knn)
        denoised.append(den)
        downsampled.append(dow)
    return np.vstack(denoised), np.vstack(downsampled)


def run_denoise_large_pointcloud(pc, cluster_size, patch_size, ckpt, device, random_state=0, expand_knn=16):
    """Denoise a large point cloud by clustering it into spatially coherent
    chunks of roughly cluster_size points, then denoising each chunk
    (recentered at its mean) with run_denoise.
    """
    n_clusters = math.ceil(pc.shape[0] / cluster_size)
    kmeans = KMeans(n_clusters=n_clusters, random_state=random_state).fit(pc)
    knn_graph = kneighbors_graph(pc, n_neighbors=expand_knn, mode='distance', include_self=False, n_jobs=8)
    knn_idx = np.array(knn_graph.tolil().rows.tolist())

    centers = []
    patches = []
    for i in range(n_clusters):
        pts_idx = kmeans.labels_ == i
        raw_pc = pc[pts_idx]
        centers.append(raw_pc.mean(axis=0, keepdims=True))
        # Expand the cluster with its k-nearest neighbors for overlap.
        expand_idx = np.unique(knn_idx[pts_idx].flatten())
        patches.append(pc[expand_idx])
        print('[INFO] Cluster Size:', patches[-1].shape[0])

    denoised = []
    downsampled = []
    for i, patch in enumerate(tqdm(patches)):
        # Recenter each chunk before denoising, then shift the result back.
        den, dow = run_denoise(patch - centers[i], patch_size, ckpt, device, random_state, expand_knn)
        den += centers[i]
        dow += centers[i]
        denoised.append(den)
        downsampled.append(dow)
    return np.vstack(denoised), np.vstack(downsampled)
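
# Illustrative usage for a large cloud (the input file name is hypothetical;
# cluster_size matches the script's default):
#   pc = np.loadtxt('large_scene.xyz').astype(np.float32)
#   denoised, downsampled = run_denoise_large_pointcloud(
#       pc, cluster_size=30000, patch_size=1000,
#       ckpt='./pretrained/supervised/epoch=153.ckpt', device='cuda')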


def run_test(input_fn, output_fn, patch_size, ckpt, device, random_state=0, expand_knn=16, ds_output_fn=None, large=False, cluster_size=30000):
    """Load a point cloud from input_fn, denoise it, and save the result to
    output_fn (and optionally the downsampled points to ds_output_fn).
    """
    pc = np.loadtxt(input_fn).astype(np.float32)
    out_dir = os.path.dirname(output_fn)
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)
    if large:
        denoised, downsampled = run_denoise_large_pointcloud(pc, cluster_size, patch_size, ckpt, device, random_state=random_state, expand_knn=expand_knn)
    else:
        denoised, downsampled = run_denoise(pc, patch_size, ckpt, device, random_state=random_state, expand_knn=expand_knn)
    np.savetxt(output_fn, denoised)
    if ds_output_fn is not None:
        np.savetxt(ds_output_fn, downsampled)
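
# Illustrative call (paths are hypothetical):
#   run_test('./data/noisy.xyz', './results/denoised.xyz', patch_size=1000,
#            ckpt='./pretrained/supervised/epoch=153.ckpt', device='cuda',
#            large=True, cluster_size=30000)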


def auto_denoise(args):
    """Dispatch to the appropriate pipeline based on the input size:
    large (>= 120K points), middle-sized (>= 60K), or regular (>= 10K).
    """
    print('[INFO] Loading: %s' % args.input)
    pc = np.loadtxt(args.input).astype(np.float32)
    out_dir = os.path.dirname(args.output)
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)

    num_points = pc.shape[0]
    if num_points >= 120000:
        print('[INFO] Denoising large point cloud.')
        denoised, downsampled = run_denoise_large_pointcloud(
            pc=pc,
            cluster_size=args.cluster_size,
            patch_size=args.patch_size,
            ckpt=args.ckpt,
            device=args.device,
            random_state=args.seed,
            expand_knn=args.expand_knn
        )
    elif num_points >= 60000:
        print('[INFO] Denoising middle-sized point cloud.')
        denoised, downsampled = run_denoise_middle_pointcloud(
            pc=pc,
            num_splits=args.num_splits,
            patch_size=args.patch_size,
            ckpt=args.ckpt,
            device=args.device,
            random_state=args.seed,
            expand_knn=args.expand_knn
        )
    elif num_points >= 10000:
        print('[INFO] Denoising regular-sized point cloud.')
        denoised, downsampled = run_denoise(
            pc=pc,
            patch_size=args.patch_size,
            ckpt=args.ckpt,
            device=args.device,
            random_state=args.seed,
            expand_knn=args.expand_knn
        )
    else:
        raise ValueError('Our pretrained model does not support point clouds with fewer than 10K points.')

    np.savetxt(args.output, denoised)
    print('[INFO] Saved to: %s' % args.output)
    if args.downsample_output is not None:
        np.savetxt(args.downsample_output, downsampled)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default='./data/input_full_test_50k_0.010/airplane_0016.obj.xyz')
    parser.add_argument('--output', type=str, default='./airplane_0016.denoised.xyz')
    parser.add_argument('--ckpt', type=str, default='./pretrained/supervised/epoch=153.ckpt')
    parser.add_argument('--downsample_output', type=str, default=None,
                        help='Optional path for saving the downsampled point cloud.')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--expand_knn', type=int, default=16)
    parser.add_argument('--patch_size', type=int, default=1000)
    parser.add_argument('--cluster_size', type=int, default=30000,
                        help='Target number of points per cluster for large point clouds.')
    parser.add_argument('--num_splits', type=int, default=2,
                        help='Number of splits for middle-sized point clouds.')
    parser.add_argument('--device', type=str, default='cuda')
    args = parser.parse_args()
    auto_denoise(args)
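
# Example invocation (input/output paths are illustrative; the checkpoint
# path is the script's default):
#   python denoise.py --input ./data/noisy.xyz --output ./results/denoised.xyz \
#       --ckpt ./pretrained/supervised/epoch=153.ckpt --device cuda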