# invert.py
import torch.nn as nn
import torch
from tqdm import tqdm
import os
from transformers import logging
from utils import load_config, save_config
from utils import get_controlnet_kwargs, get_latents_dir, init_model, seed_everything
from utils import load_video, prepare_depth, save_frames, control_preprocess
# suppress partial model loading warning
logging.set_verbosity_error()
class Inverter(nn.Module):
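    """DDIM inversion of video frames into Stable Diffusion noise latents.

    Wraps a diffusers Stable Diffusion pipeline: each frame is encoded with the
    VAE and stepped backwards along the DDIM trajectory, optionally conditioned
    on depth maps (sd-depth) or ControlNet inputs. The resulting noisy latents
    are written to disk for later use.
    """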
def __init__(self, pipe, scheduler, config):
super().__init__()
self.device = config.device
self.use_depth = config.sd_version == "depth"
self.model_key = config.model_key
self.config = config
inv_config = config.inversion
float_precision = inv_config.float_precision if "float_precision" in inv_config else config.float_precision
        if float_precision == "fp16":
            self.dtype = torch.float16
            print("[INFO] float precision fp16. Using torch.float16.")
        else:
            self.dtype = torch.float32
            print("[INFO] float precision fp32. Using torch.float32.")
self.pipe = pipe
self.vae = pipe.vae
self.tokenizer = pipe.tokenizer
self.unet = pipe.unet
self.text_encoder = pipe.text_encoder
if config.enable_xformers_memory_efficient_attention:
try:
pipe.enable_xformers_memory_efficient_attention()
except ModuleNotFoundError:
print("[WARNING] xformers not found. Disable xformers attention.")
self.control = inv_config.control
if self.control != "none":
self.controlnet = pipe.controlnet
self.controlnet_scale = inv_config.control_scale
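        # set_timesteps is called twice on purpose: the first call (save_steps)
        # only records which timesteps to checkpoint; the second resets the
        # scheduler to the actual number of inversion steps.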
scheduler.set_timesteps(inv_config.save_steps)
self.timesteps_to_save = scheduler.timesteps
scheduler.set_timesteps(inv_config.steps)
self.scheduler = scheduler
        self.prompt = inv_config.prompt
        self.recon = inv_config.recon
        self.save_latents = inv_config.save_intermediate
        self.use_blip = inv_config.use_blip
        self.steps = inv_config.steps
self.batch_size = inv_config.batch_size
self.force = inv_config.force
self.n_frames = inv_config.n_frames
self.frame_height, self.frame_width = config.height, config.width
self.work_dir = config.work_dir
@torch.no_grad()
def get_text_embeds(self, prompt, negative_prompt=None, device="cuda"):
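        """Encode prompt(s) into CLIP text embeddings.

        If a negative prompt is given, the unconditional and conditional
        embeddings are stacked as [uncond, cond] for classifier-free guidance.
        """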
text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length,
truncation=True, return_tensors='pt')
text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0]
if negative_prompt is not None:
            uncond_input = self.tokenizer(negative_prompt, padding='max_length', max_length=self.tokenizer.model_max_length,
                                          truncation=True, return_tensors='pt')
uncond_embeddings = self.text_encoder(
uncond_input.input_ids.to(device))[0]
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
return text_embeddings
@torch.no_grad()
def decode_latents(self, latents):
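        """Decode latents to images in [0, 1]; 0.18215 is the SD VAE scaling factor."""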
with torch.autocast(device_type=self.device, dtype=self.dtype):
latents = 1 / 0.18215 * latents
imgs = self.vae.decode(latents).sample
imgs = (imgs / 2 + 0.5).clamp(0, 1)
return imgs
@torch.no_grad()
def decode_latents_batch(self, latents):
imgs = []
        batch_latents = latents.split(self.batch_size, dim=0)
for latent in batch_latents:
imgs += [self.decode_latents(latent)]
imgs = torch.cat(imgs)
return imgs
@torch.no_grad()
def encode_imgs(self, imgs):
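        """Encode images in [0, 1] into scaled VAE latents.

        The posterior mean is used instead of a sample, keeping the encoding
        deterministic and the inversion reproducible.
        """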
with torch.autocast(device_type=self.device, dtype=self.dtype):
imgs = 2 * imgs - 1
posterior = self.vae.encode(imgs).latent_dist
latents = posterior.mean * 0.18215
return latents
@torch.no_grad()
def encode_imgs_batch(self, imgs):
latents = []
        batch_imgs = imgs.split(self.batch_size, dim=0)
for img in batch_imgs:
latents += [self.encode_imgs(img)]
latents = torch.cat(latents)
return latents
@torch.no_grad()
def ddim_inversion(self, x, conds, save_path):
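        """Step clean latents x toward noise along the reversed DDIM trajectory.

        Noise predictions are computed in frame batches to bound memory;
        intermediate latents are optionally saved at selected timesteps, and the
        final inverted latents are always saved.
        """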
print("[INFO] start DDIM Inversion!")
timesteps = reversed(self.scheduler.timesteps)
with torch.autocast(device_type=self.device, dtype=self.dtype):
for i, t in enumerate(tqdm(timesteps)):
noises = []
x_index = torch.arange(len(x))
                batches = x_index.split(self.batch_size, dim=0)
for batch in batches:
                    noise = self.pred_noise(
                        x[batch], conds[batch], t, batch_idx=batch)
noises += [noise]
noises = torch.cat(noises)
x = self.pred_next_x(x, noises, t, i, inversion=True)
if self.save_latents and t in self.timesteps_to_save:
torch.save(x, os.path.join(
save_path, f'noisy_latents_{t}.pt'))
# Save inverted noise latents
pth = os.path.join(save_path, f'noisy_latents_{t}.pt')
torch.save(x, pth)
print(f"[INFO] inverted latent saved to: {pth}")
return x
@torch.no_grad()
def ddim_sample(self, x, conds):
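        """Plain DDIM sampling from inverted latents, used to sanity-check the
        inversion by reconstructing the input frames."""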
print("[INFO] reconstructing frames...")
timesteps = self.scheduler.timesteps
with torch.autocast(device_type=self.device, dtype=self.dtype):
for i, t in enumerate(tqdm(timesteps)):
noises = []
x_index = torch.arange(len(x))
                batches = x_index.split(self.batch_size, dim=0)
for batch in batches:
noise = self.pred_noise(
x[batch], conds[batch], t, batch_idx=batch)
noises += [noise]
noises = torch.cat(noises)
x = self.pred_next_x(x, noises, t, i, inversion=False)
return x
@torch.no_grad()
def pred_noise(self, x, cond, t, batch_idx=None):
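        """Predict the noise eps with the UNet.

        For the sd-depth model, the depth map is concatenated to the latents as
        an extra channel; if ControlNet is enabled, its residuals are forwarded
        to the UNet as keyword arguments.
        """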
# For sd-depth model
if self.use_depth:
depth = self.depths
if batch_idx is not None:
depth = depth[batch_idx]
x = torch.cat([x, depth.to(x)], dim=1)
kwargs = dict()
# Compute controlnet outputs
if self.control != "none":
if batch_idx is None:
controlnet_cond = self.controlnet_images
else:
controlnet_cond = self.controlnet_images[batch_idx]
controlnet_kwargs = get_controlnet_kwargs(self.controlnet, x, cond, t, controlnet_cond, self.controlnet_scale)
kwargs.update(controlnet_kwargs)
eps = self.unet(x, t, encoder_hidden_states=cond, **kwargs).sample
return eps
@torch.no_grad()
def pred_next_x(self, x, eps, t, i, inversion=False):
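        """Take one DDIM step between adjacent timesteps.

        DDIM first recovers the clean-latent estimate, then re-noises it:
            pred_x0 = (x - sqrt(1 - alpha) * eps) / sqrt(alpha)
            x_next  = sqrt(alpha') * pred_x0 + sqrt(1 - alpha') * eps
        Sampling uses (alpha, alpha') = (alpha_t, alpha_t_prev); inversion swaps
        the two so the same update moves latents toward noise instead.
        """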
if inversion:
timesteps = reversed(self.scheduler.timesteps)
else:
timesteps = self.scheduler.timesteps
alpha_prod_t = self.scheduler.alphas_cumprod[t]
if inversion:
alpha_prod_t_prev = (
self.scheduler.alphas_cumprod[timesteps[i - 1]]
if i > 0 else self.scheduler.final_alpha_cumprod
)
else:
alpha_prod_t_prev = (
self.scheduler.alphas_cumprod[timesteps[i + 1]]
if i < len(timesteps) - 1
else self.scheduler.final_alpha_cumprod
)
mu = alpha_prod_t ** 0.5
sigma = (1 - alpha_prod_t) ** 0.5
mu_prev = alpha_prod_t_prev ** 0.5
sigma_prev = (1 - alpha_prod_t_prev) ** 0.5
if inversion:
pred_x0 = (x - sigma_prev * eps) / mu_prev
x = mu * pred_x0 + sigma * eps
else:
pred_x0 = (x - sigma * eps) / mu
x = mu_prev * pred_x0 + sigma_prev * eps
return x
@torch.no_grad()
def prepare_cond(self, prompts, n_frames):
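        """Build per-frame text embeddings: a single prompt is broadcast to all
        frames; a list of prompts is encoded one by one."""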
if isinstance(prompts, str):
prompts = [prompts] * n_frames
cond = self.get_text_embeds(prompts[0])
conds = torch.cat([cond] * n_frames)
elif isinstance(prompts, list):
cond_ls = []
for prompt in prompts:
cond = self.get_text_embeds(prompt)
cond_ls += [cond]
conds = torch.cat(cond_ls)
return conds, prompts
def check_latent_exists(self, save_path):
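        """Return True if the final (and, if save_latents is set, intermediate)
        inverted latents already exist under save_path."""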
save_timesteps = [self.scheduler.timesteps[0]]
if self.save_latents:
save_timesteps += self.timesteps_to_save
for ts in save_timesteps:
latent_path = os.path.join(
save_path, f'noisy_latents_{ts}.pt')
if not os.path.exists(latent_path):
return False
return True
@torch.no_grad()
def __call__(self, data_path, save_path):
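        """Invert the video at data_path and write latents under save_path.

        Pipeline: load frames, optionally prepare depth / ControlNet inputs,
        encode frames to VAE latents, run DDIM inversion, and optionally decode
        the re-sampled latents as a reconstruction check.
        """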
self.scheduler.set_timesteps(self.steps)
save_path = get_latents_dir(save_path, self.model_key)
        os.makedirs(save_path, exist_ok=True)
if self.check_latent_exists(save_path) and not self.force:
print(f"[INFO] inverted latents exist at: {save_path}. Skip inversion! Set 'inversion.force: True' to invert again.")
return
        frames = load_video(data_path, self.frame_height, self.frame_width, device=self.device)
frame_ids = list(range(len(frames)))
if self.n_frames is not None:
frame_ids = frame_ids[:self.n_frames]
frames = frames[frame_ids]
if self.use_depth:
self.depths = prepare_depth(self.pipe, frames, frame_ids, self.work_dir)
conds, prompts = self.prepare_cond(self.prompt, len(frames))
with open(os.path.join(save_path, 'inversion_prompts.txt'), 'w') as f:
f.write('\n'.join(prompts))
if self.control != "none":
images = control_preprocess(
frames, self.control)
self.controlnet_images = images.to(self.device)
latents = self.encode_imgs_batch(frames)
torch.cuda.empty_cache()
print(f"[INFO] clean latents shape: {latents.shape}")
inverted_x = self.ddim_inversion(latents, conds, save_path)
        save_config(self.config, save_path, inv=True)
if self.recon:
latent_reconstruction = self.ddim_sample(inverted_x, conds)
torch.cuda.empty_cache()
recon_frames = self.decode_latents_batch(
latent_reconstruction)
recon_save_path = os.path.join(save_path, 'recon_frames')
            save_frames(recon_frames, recon_save_path, frame_ids=frame_ids)
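
# Entry point. The helpers used below come from this repository's utils module;
# load_config is assumed to read the run configuration (device, sd_version,
# model_key, inversion settings, and input/output paths).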
if __name__ == "__main__":
config = load_config()
pipe, scheduler, model_key = init_model(
config.device, config.sd_version, config.model_key, config.inversion.control, config.float_precision)
config.model_key = model_key
seed_everything(config.seed)
inversion = Inverter(pipe, scheduler, config)
inversion(config.input_path, config.inversion.save_path)