-
Notifications
You must be signed in to change notification settings - Fork 125
/
visualize.py
238 lines (203 loc) · 9.23 KB
/
visualize.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
import torch
import numpy as np
import open3d as o3d
import time
from diff_gaussian_rasterization import GaussianRasterizer as Renderer
from helpers import setup_camera, quat_mult
from external import build_rotation
from colormap import colormap
from copy import deepcopy
# ---- Visualization settings (uncomment the alternative lines to switch) ----
# RENDER_MODE selects what is displayed each frame:
#   'color'   - rendered RGB image back-projected into a point cloud
#   'depth'   - same geometry, colored by normalized depth instead of RGB
#   'centers' - raw Gaussian centers drawn directly, no rasterization
RENDER_MODE = 'color' # 'color', 'depth' or 'centers'
# RENDER_MODE = 'depth' # 'color', 'depth' or 'centers'
# RENDER_MODE = 'centers' # 'color', 'depth' or 'centers'
# Optional line overlays: per-point motion trails or per-point rotation vectors.
ADDITIONAL_LINES = None # None, 'trajectories' or 'rotations'
# ADDITIONAL_LINES = 'trajectories' # None, 'trajectories' or 'rotations'
# ADDITIONAL_LINES = 'rotations' # None, 'trajectories' or 'rotations'
# When True, Gaussians whose seg_colors channel 0 <= 0.5 are filtered out.
REMOVE_BACKGROUND = False # False or True
# REMOVE_BACKGROUND = True # False or True
# When True, the camera orbits the scene automatically instead of being
# driven interactively by the mouse.
FORCE_LOOP = False # False or True
# FORCE_LOOP = True # False or True
# Render resolution in pixels; the on-screen window is view_scale times larger.
w, h = 640, 360
# Near/far clip planes passed to the rasterizer camera.
near, far = 0.01, 100.0
view_scale = 3.9
# Playback rate: timestep index advances at this many frames per second.
fps = 20
# Trajectory overlay subsampling: keep every traj_frac-th foreground point,
# each trail spanning traj_length timesteps.
traj_frac = 25 # 4% of points
traj_length = 15
# Homogeneous pixel-center coordinates (x+0.5, y+0.5, 1) for every pixel of the
# w*h render; used by rgbd2pcd to back-project depth maps.
def_pix = torch.tensor(
    np.stack(np.meshgrid(np.arange(w) + 0.5, np.arange(h) + 0.5, 1), -1).reshape(-1, 3)).cuda().float()
# Column of ones used to homogenize camera-space points in rgbd2pcd.
pix_ones = torch.ones(h * w, 1).cuda().float()
def init_camera(y_angle=0., center_dist=2.4, cam_height=1.3, f_ratio=0.82):
    """Build a world-to-camera extrinsic and pinhole intrinsic for a viewpoint
    rotated y_angle degrees about the world y axis.

    The camera sits cam_height above the origin and center_dist in front of it;
    the focal length is f_ratio * image width (square pixels, centered principal
    point). Returns (w2c, k) as 4x4 and 3x3 numpy arrays.
    """
    theta = np.deg2rad(y_angle)
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    w2c = np.array([[cos_t, 0., -sin_t, 0.],
                    [0., 1., 0., cam_height],
                    [sin_t, 0., cos_t, center_dist],
                    [0., 0., 0., 1.]])
    focal = f_ratio * w
    k = np.array([[focal, 0, w / 2], [0, focal, h / 2], [0, 0, 1]])
    return w2c, k
def load_scene_data(seq, exp, seg_as_col=False):
    """Load trained Gaussian parameters for one sequence and expand them into
    per-timestep rasterizer inputs.

    Reads ./output/{exp}/{seq}/params.npz, moves everything to CUDA floats, and
    builds one rendervar dict per timestep. With seg_as_col=True the (static)
    segmentation colors replace the per-frame RGB colors. When the module-level
    REMOVE_BACKGROUND flag is set, background Gaussians are filtered out.

    Returns (scene_data, is_fg): the list of per-timestep dicts and the
    foreground mask (restricted to itself when the background was removed).
    """
    raw = dict(np.load(f"./output/{exp}/{seq}/params.npz"))
    params = {name: torch.tensor(arr).cuda().float() for name, arr in raw.items()}
    # Foreground = first segmentation channel above 0.5.
    is_fg = params['seg_colors'][:, 0] > 0.5
    scene_data = []
    num_timesteps = len(params['means3D'])
    for t in range(num_timesteps):
        rendervar = {
            'means3D': params['means3D'][t],
            'colors_precomp': params['seg_colors'] if seg_as_col else params['rgb_colors'][t],
            'rotations': torch.nn.functional.normalize(params['unnorm_rotations'][t]),
            'opacities': torch.sigmoid(params['logit_opacities']),
            'scales': torch.exp(params['log_scales']),
            'means2D': torch.zeros_like(params['means3D'][0], device="cuda"),
        }
        if REMOVE_BACKGROUND:
            rendervar = {name: tensor[is_fg] for name, tensor in rendervar.items()}
        scene_data.append(rendervar)
    if REMOVE_BACKGROUND:
        is_fg = is_fg[is_fg]
    return scene_data, is_fg
def make_lineset(all_pts, cols, num_lines):
    """Convert stacked per-frame point arrays into Open3D LineSets.

    Each element of all_pts holds several "layers" of num_lines points stacked
    along axis 0; every point i (for i >= num_lines) is connected to the point
    num_lines slots earlier, i.e. the matching point in the previous layer.
    The same cols array (one color per line segment) is reused for every frame.
    """
    linesets = []
    for pts in all_pts:
        ls = o3d.geometry.LineSet()
        ls.points = o3d.utility.Vector3dVector(np.ascontiguousarray(pts, np.float64))
        ls.colors = o3d.utility.Vector3dVector(np.ascontiguousarray(cols, np.float64))
        indices = np.arange(len(ls.points))
        # Pair each point with its counterpart one layer back; the first layer
        # has no predecessor, hence the [num_lines:] slice.
        segments = np.stack((indices, indices - num_lines), -1)[num_lines:]
        ls.lines = o3d.utility.Vector2iVector(np.ascontiguousarray(segments, np.int32))
        linesets.append(ls)
    return linesets
def calculate_trajectories(scene_data, is_fg):
    """Build one LineSet per timestep showing motion trails of subsampled
    foreground Gaussians over the last traj_length frames.

    Only timesteps >= traj_length get a lineset (earlier ones lack a full
    trail), so the result has len(scene_data) - traj_length entries.
    """
    fg_pts = [frame['means3D'][is_fg][::traj_frac].contiguous().float().cpu().numpy()
              for frame in scene_data]
    num_lines = len(fg_pts[0])
    # One fixed color per tracked point, repeated once per trail segment.
    cols = np.repeat(colormap[np.arange(num_lines) % len(colormap)][None], traj_length, 0).reshape(-1, 3)
    out_pts = []
    for t in range(traj_length, len(fg_pts)):
        # Stack the trailing window of positions; make_lineset links consecutive frames.
        out_pts.append(np.array(fg_pts[t - traj_length:t + 1]).reshape(-1, 3))
    return make_lineset(out_pts, cols, num_lines)
def calculate_rot_vec(scene_data, is_fg):
    """Build one LineSet per timestep visualizing each subsampled foreground
    Gaussian's rotation relative to frame 0 as a short line from its center.

    Returns a list of Open3D LineSets, one per timestep, via make_lineset.
    """
    # Subsample foreground centers (numpy) and rotations (kept as torch
    # tensors for quat_mult / build_rotation).
    in_pts = [data['means3D'][is_fg][::traj_frac].contiguous().float().cpu().numpy() for data in scene_data]
    in_rotation = [data['rotations'][is_fg][::traj_frac] for data in scene_data]
    num_lines = len(in_pts[0])
    cols = colormap[np.arange(num_lines) % len(colormap)]
    # Quaternion inverse of the frame-0 rotation: conjugate (negate the vector
    # part, assuming w-first layout — TODO confirm against quat_mult) divided
    # by the squared norm.
    inv_init_q = deepcopy(in_rotation[0])
    inv_init_q[:, 1:] = -1 * inv_init_q[:, 1:]
    inv_init_q = inv_init_q / (inv_init_q ** 2).sum(-1)[:, None]
    # Reference direction drawn from each center; length 0.1 sets the
    # on-screen size of the rotation indicator.
    init_vec = np.array([-0.1, 0, 0])
    out_pts = []
    for t in range(len(in_pts)):
        # Rotation relative to frame 0, then converted to 3x3 matrices.
        cam_rel_qs = quat_mult(in_rotation[t], inv_init_q)
        rot = build_rotation(cam_rel_qs).cpu().numpy()
        vec = (rot @ init_vec[None, :, None]).squeeze()
        # First num_lines points are the rotated endpoints, next num_lines are
        # the centers; make_lineset connects point i to point i - num_lines.
        out_pts.append(np.concatenate((in_pts[t] + vec, in_pts[t]), 0))
    return make_lineset(out_pts, cols, num_lines)
def render(w2c, k, timestep_data):
    """Rasterize one timestep's Gaussians from the given camera.

    Builds rasterizer settings from the module-level resolution/clip planes and
    the supplied extrinsic/intrinsic, then returns (image, depth); the
    rasterizer's middle output is discarded.
    """
    with torch.no_grad():
        cam = setup_camera(w, h, k, w2c, near, far)
        outputs = Renderer(raster_settings=cam)(**timestep_data)
        im, _, depth = outputs
        return im, depth
def rgbd2pcd(im, depth, w2c, k, show_depth=False, project_to_cam_w_scale=None):
    """Back-project a rendered RGB-D frame into a world-space Open3D point cloud.

    Treats depth as radial distance along each pixel's ray. With show_depth the
    colors encode depth normalized between d_near and d_far instead of RGB.
    When project_to_cam_w_scale is given, camera-space points are rescaled so
    their z equals that value (a fixed-depth projection).
    Returns (points, colors) as Open3D Vector3dVectors.
    """
    d_near, d_far = 1.5, 6
    k_inv = torch.inverse(torch.tensor(k).cuda().float())
    c2w = torch.inverse(torch.tensor(w2c).cuda().float())
    radial_depth = depth[0].reshape(-1)
    # Rays through every pixel center, normalized to unit length so that
    # multiplying by the radial depth lands on the surface.
    rays = (k_inv @ def_pix.T).T
    unit_rays = rays / torch.linalg.norm(rays, ord=2, dim=-1)[:, None]
    pts_cam = unit_rays * radial_depth[:, None]
    z_depth = pts_cam[:, 2]
    if project_to_cam_w_scale is not None:
        pts_cam = project_to_cam_w_scale * pts_cam / z_depth[:, None]
    # Homogenize and transform into world coordinates.
    homogeneous = torch.concat((pts_cam, pix_ones), 1)
    pts_world = (c2w @ homogeneous.T).T[:, :3]
    if show_depth:
        cols = ((z_depth - d_near) / (d_far - d_near))[:, None].repeat(1, 3)
    else:
        cols = torch.permute(im, (1, 2, 0)).reshape(-1, 3)
    pts = o3d.utility.Vector3dVector(pts_world.contiguous().double().cpu().numpy())
    cols = o3d.utility.Vector3dVector(cols.contiguous().double().cpu().numpy())
    return pts, cols
def visualize(seq, exp):
    """Open an Open3D window and play back a trained dynamic-Gaussian sequence.

    Loads ./output/{exp}/{seq}/params.npz, renders each timestep according to
    the module-level RENDER_MODE / ADDITIONAL_LINES / FORCE_LOOP flags, and
    loops until the window is closed. Playback speed follows wall-clock time
    at the module-level fps.
    """
    scene_data, is_fg = load_scene_data(seq, exp)
    vis = o3d.visualization.Visualizer()
    vis.create_window(width=int(w * view_scale), height=int(h * view_scale), visible=True)
    # Seed the point cloud from the first timestep so add_geometry has data.
    w2c, k = init_camera()
    im, depth = render(w2c, k, scene_data[0])
    init_pts, init_cols = rgbd2pcd(im, depth, w2c, k, show_depth=(RENDER_MODE == 'depth'))
    pcd = o3d.geometry.PointCloud()
    pcd.points = init_pts
    pcd.colors = init_cols
    vis.add_geometry(pcd)
    linesets = None
    lines = None
    if ADDITIONAL_LINES is not None:
        # Precompute all overlay linesets; per-frame updates just swap buffers.
        if ADDITIONAL_LINES == 'trajectories':
            linesets = calculate_trajectories(scene_data, is_fg)
        elif ADDITIONAL_LINES == 'rotations':
            linesets = calculate_rot_vec(scene_data, is_fg)
        lines = o3d.geometry.LineSet()
        lines.points = linesets[0].points
        lines.colors = linesets[0].colors
        lines.lines = linesets[0].lines
        vis.add_geometry(lines)
    # Window intrinsics are the render intrinsics scaled up by view_scale
    # (with the homogeneous entry restored to 1).
    view_k = k * view_scale
    view_k[2, 2] = 1
    view_control = vis.get_view_control()
    cparams = o3d.camera.PinholeCameraParameters()
    cparams.extrinsic = w2c
    cparams.intrinsic.intrinsic_matrix = view_k
    cparams.intrinsic.height = int(h * view_scale)
    cparams.intrinsic.width = int(w * view_scale)
    view_control.convert_from_pinhole_camera_parameters(cparams, allow_arbitrary=True)
    render_options = vis.get_render_option()
    render_options.point_size = view_scale
    render_options.light_on = False
    start_time = time.time()
    num_timesteps = len(scene_data)
    while True:
        # Map elapsed wall-clock time to a (wrapping) timestep index.
        passed_time = time.time() - start_time
        passed_frames = passed_time * fps
        if ADDITIONAL_LINES == 'trajectories':
            t = int(passed_frames % (num_timesteps - traj_length)) + traj_length  # Skip t that don't have full traj.
        else:
            t = int(passed_frames % num_timesteps)
        if FORCE_LOOP:
            # Orbit the camera: num_loops full turns over one playback cycle.
            num_loops = 1.4
            y_angle = 360*t*num_loops / num_timesteps
            w2c, k = init_camera(y_angle)
            cam_params = view_control.convert_to_pinhole_camera_parameters()
            cam_params.extrinsic = w2c
            view_control.convert_from_pinhole_camera_parameters(cam_params, allow_arbitrary=True)
        else:  # Interactive control
            # Read the user-driven camera back out and undo the view_scale so
            # the rasterizer renders at the base w x h resolution.
            cam_params = view_control.convert_to_pinhole_camera_parameters()
            view_k = cam_params.intrinsic.intrinsic_matrix
            k = view_k / view_scale
            k[2, 2] = 1
            w2c = cam_params.extrinsic
        if RENDER_MODE == 'centers':
            # Show raw Gaussian centers/colors directly, no rasterization.
            pts = o3d.utility.Vector3dVector(scene_data[t]['means3D'].contiguous().double().cpu().numpy())
            cols = o3d.utility.Vector3dVector(scene_data[t]['colors_precomp'].contiguous().double().cpu().numpy())
        else:
            im, depth = render(w2c, k, scene_data[t])
            pts, cols = rgbd2pcd(im, depth, w2c, k, show_depth=(RENDER_MODE == 'depth'))
        pcd.points = pts
        pcd.colors = cols
        vis.update_geometry(pcd)
        if ADDITIONAL_LINES is not None:
            if ADDITIONAL_LINES == 'trajectories':
                # Trajectory linesets start at frame traj_length, so offset the index.
                lt = t - traj_length
            else:
                lt = t
            lines.points = linesets[lt].points
            lines.colors = linesets[lt].colors
            lines.lines = linesets[lt].lines
            vis.update_geometry(lines)
        if not vis.poll_events():
            # Window was closed by the user.
            break
        vis.update_renderer()
    vis.destroy_window()
    del view_control
    del vis
    del render_options
if __name__ == "__main__":
exp_name = "pretrained"
for sequence in ["basketball", "boxes", "football", "juggle", "softball", "tennis"]:
visualize(sequence, exp_name)