@@ -2,6 +2,7 @@ import math
 from tqdm import tqdm
 from inspect import isfunction
 from functools import partial
+from contextlib import contextmanager
 
 import torch
 import torch.nn.functional as F
@@ -105,8 +106,8 @@ def cosine_beta_schedule(timesteps, s = 0.008):
     as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
     """
     steps = timesteps + 1
-    x = torch.linspace(0, steps, steps)
-    alphas_cumprod = torch.cos(((x / steps) + s) / (1 + s) * torch.pi * 0.5) ** 2
+    x = torch.linspace(0, timesteps, steps)
+    alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
     alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
     betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
     return torch.clip(betas, 0, 0.999)
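
Review note on the hunk above: both the old and the new grid evaluate the cosine at k/timesteps for k = 0..timesteps, so this mostly aligns the code with the reference improved-DDPM formulation rather than changing values; either way the function must return exactly `timesteps` betas. A standalone sanity check of the patched version:

    import torch

    def cosine_beta_schedule(timesteps, s = 0.008):
        # patched version, as in the hunk above
        steps = timesteps + 1
        x = torch.linspace(0, timesteps, steps)
        alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
        alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
        betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
        return torch.clip(betas, 0, 0.999)

    betas = cosine_beta_schedule(1000)
    assert betas.shape == (1000,)                        # one beta per timestep
    assert (betas > 0).all() and (betas <= 0.999).all()  # valid variance schedule
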
@@ -463,11 +464,11 @@ class DiffusionPrior(nn.Module):
         net,
         *,
         clip,
-        timesteps=1000,
-        cond_drop_prob=0.2,
-        loss_type="l1",
-        predict_x0=True,
-        beta_schedule="cosine",
+        timesteps = 1000,
+        cond_drop_prob = 0.2,
+        loss_type = "l1",
+        predict_x0 = True,
+        beta_schedule = "cosine",
     ):
         super().__init__()
         assert isinstance(clip, CLIP)
@@ -820,9 +821,12 @@ class Unet(nn.Module):
         image_embed_dim,
         cond_dim = None,
         num_image_tokens = 4,
+        num_time_tokens = 2,
         out_dim = None,
         dim_mults=(1, 2, 4, 8),
         channels = 3,
+        attn_dim_head = 32,
+        attn_heads = 8,
         lowres_cond = False, # for cascading diffusion - https://cascaded-diffusion.github.io/
         lowres_cond_upsample_mode = 'bilinear',
         blur_sigma = 0.1,
@@ -830,6 +834,8 @@ class Unet(nn.Module):
         sparse_attn = False,
         sparse_attn_window = 8, # window size for sparse attention
         attend_at_middle = True, # whether to have a layer of attention at the bottleneck (can turn off for higher resolution in cascading DDPM, before bringing in efficient attention)
+        cond_on_text_encodings = False,
+        cond_on_image_embeds = False,
     ):
         super().__init__()
         # save locals to take care of some hyperparameters for cascading DDPM
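
The two new flags let each unet in a cascade opt out of a conditioning pathway. A hedged construction sketch (argument values are illustrative and only parameters visible in this diff are used) of a later-stage unet that does nothing but super-resolve:

    # hypothetical super-resolution unet for a later stage of the cascading DDPM
    superres_unet = Unet(
        dim = 128,
        image_embed_dim = 512,
        dim_mults = (1, 2, 4, 8),
        attn_dim_head = 32,
        attn_heads = 8,
        lowres_cond = True,              # receives the previous stage's upsampled output
        cond_on_image_embeds = False,    # skip image-embed conditioning tokens
        cond_on_text_encodings = False   # skip text cross-attention entirely
    )
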
@@ -862,8 +868,8 @@ class Unet(nn.Module):
             SinusoidalPosEmb(dim),
             nn.Linear(dim, dim * 4),
             nn.GELU(),
-            nn.Linear(dim * 4, cond_dim),
-            Rearrange('b d -> b 1 d')
+            nn.Linear(dim * 4, cond_dim * num_time_tokens),
+            Rearrange('b (r d) -> b r d', r = num_time_tokens)
         )
 
         self.image_to_cond = nn.Sequential(
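
With `num_time_tokens`, the time MLP now emits several conditioning tokens instead of one; the `Rearrange` simply splits the widened linear output into that many `cond_dim`-sized tokens. A shape-only sketch (sizes hypothetical):

    import torch
    import torch.nn as nn
    from einops.layers.torch import Rearrange

    dim, cond_dim, num_time_tokens = 64, 128, 2

    to_time_tokens = nn.Sequential(
        nn.Linear(dim, cond_dim * num_time_tokens),         # widen by the token count
        Rearrange('b (r d) -> b r d', r = num_time_tokens)  # split into r tokens of size d
    )

    time_hiddens = torch.randn(4, dim)  # (batch, dim) hidden from the time embedding MLP
    assert to_time_tokens(time_hiddens).shape == (4, num_time_tokens, cond_dim)
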
@@ -873,11 +879,21 @@ class Unet(nn.Module):
 
         self.text_to_cond = nn.LazyLinear(cond_dim)
 
+        # finer control over whether to condition on image embeddings and text encodings
+        # so one can have the latter unets in the cascading DDPMs only focus on super-resoluting
+
+        self.cond_on_text_encodings = cond_on_text_encodings
+        self.cond_on_image_embeds = cond_on_image_embeds
+
         # for classifier free guidance
 
         self.null_image_embed = nn.Parameter(torch.randn(1, num_image_tokens, cond_dim))
         self.null_text_embed = nn.Parameter(torch.randn(1, 1, cond_dim))
 
+        # attention related params
+
+        attn_kwargs = dict(heads = attn_heads, dim_head = attn_dim_head)
+
         # layers
 
         self.downs = nn.ModuleList([])
@@ -891,7 +907,7 @@ class Unet(nn.Module):
             self.downs.append(nn.ModuleList([
                 ConvNextBlock(dim_in, dim_out, norm = ind != 0),
-                Residual(GridAttention(dim_out, window_size = sparse_attn_window)) if sparse_attn else nn.Identity(),
+                Residual(GridAttention(dim_out, window_size = sparse_attn_window, **attn_kwargs)) if sparse_attn else nn.Identity(),
                 ConvNextBlock(dim_out, dim_out, cond_dim = layer_cond_dim),
                 Downsample(dim_out) if not is_last else nn.Identity()
             ]))
@@ -899,7 +915,7 @@ class Unet(nn.Module):
         mid_dim = dims[-1]
 
         self.mid_block1 = ConvNextBlock(mid_dim, mid_dim, cond_dim = cond_dim)
-        self.mid_attn = EinopsToAndFrom('b c h w', 'b (h w) c', Residual(Attention(mid_dim))) if attend_at_middle else None
+        self.mid_attn = EinopsToAndFrom('b c h w', 'b (h w) c', Residual(Attention(mid_dim, **attn_kwargs))) if attend_at_middle else None
         self.mid_block2 = ConvNextBlock(mid_dim, mid_dim, cond_dim = cond_dim)
 
         for ind, (dim_in, dim_out) in enumerate(reversed(in_out[1:])):
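
For readers unfamiliar with the `EinopsToAndFrom` wrapper used at the bottleneck: it rearranges the feature map into a token sequence, applies the wrapped module, and rearranges back, so full self-attention runs only where `h * w` is smallest. A hand-rolled equivalent of what the wrapper (minus the residual) does here:

    from einops import rearrange

    def attend_at_bottleneck(fmap, attn):
        # fmap: (b, c, h, w) feature map at the unet's lowest resolution
        h, w = fmap.shape[-2:]
        tokens = rearrange(fmap, 'b c h w -> b (h w) c')  # spatial positions become tokens
        tokens = attn(tokens)                             # quadratic cost, but only over h * w tokens
        return rearrange(tokens, 'b (h w) c -> b c h w', h = h, w = w)
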
@@ -908,7 +924,7 @@ class Unet(nn.Module):
             self.ups.append(nn.ModuleList([
                 ConvNextBlock(dim_out * 2, dim_in, cond_dim = layer_cond_dim),
-                Residual(GridAttention(dim_in, window_size = sparse_attn_window)) if sparse_attn else nn.Identity(),
+                Residual(GridAttention(dim_in, window_size = sparse_attn_window, **attn_kwargs)) if sparse_attn else nn.Identity(),
                 ConvNextBlock(dim_in, dim_in, cond_dim = layer_cond_dim),
                 Upsample(dim_in)
             ]))
@@ -982,17 +998,22 @@ class Unet(nn.Module):
         # mask out image embedding depending on condition dropout
         # for classifier free guidance
 
-        image_tokens = self.image_to_cond(image_embed)
+        image_tokens = None
 
-        image_tokens = torch.where(
-            cond_prob_mask,
-            image_tokens,
-            self.null_image_embed
-        )
+        if self.cond_on_image_embeds:
+            image_tokens = self.image_to_cond(image_embed)
+
+            image_tokens = torch.where(
+                cond_prob_mask,
+                image_tokens,
+                self.null_image_embed
+            )
 
         # take care of text encodings (optional)
 
-        if exists(text_encodings):
+        text_tokens = None
+
+        if exists(text_encodings) and self.cond_on_text_encodings:
             text_tokens = self.text_to_cond(text_encodings)
             text_tokens = torch.where(
                 cond_prob_mask,
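
The reshuffled block above makes both conditioning paths optional while keeping classifier-free-guidance dropout: wherever the per-sample keep-mask is False, real tokens are swapped for the learned null embeddings. A hedged sketch of the broadcasting involved (the mask construction here is illustrative, not this file's exact helper):

    import torch

    b, num_image_tokens, cond_dim = 4, 4, 128
    cond_drop_prob = 0.2

    image_tokens = torch.randn(b, num_image_tokens, cond_dim)
    null_image_embed = torch.randn(1, num_image_tokens, cond_dim)  # learned nn.Parameter in the Unet

    # keep-mask per sample, shaped (b, 1, 1) so it broadcasts over tokens and channels
    cond_prob_mask = torch.rand(b, 1, 1) >= cond_drop_prob

    image_tokens = torch.where(cond_prob_mask, image_tokens, null_image_embed)
    assert image_tokens.shape == (b, num_image_tokens, cond_dim)
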
@@ -1002,12 +1023,15 @@ class Unet(nn.Module):
 
         # main conditioning tokens (c)
 
-        c = torch.cat((time_tokens, image_tokens), dim = -2)
+        c = time_tokens
+
+        if exists(image_tokens):
+            c = torch.cat((c, image_tokens), dim = -2)
 
         # text and image conditioning tokens (mid_c)
         # to save on compute, only do cross attention based conditioning on the inner most layers of the Unet
 
-        mid_c = c if not exists(text_encodings) else torch.cat((c, text_tokens), dim = -2)
+        mid_c = c if not exists(text_tokens) else torch.cat((c, text_tokens), dim = -2)
 
         # go through the layers of the unet, down and up
 
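
Since image tokens can now be absent, the main conditioning sequence `c` is built incrementally: always the time tokens, plus image tokens when enabled; text tokens are appended only for `mid_c`, the sequence cross-attended at the bottleneck. Shape-wise (sizes hypothetical):

    import torch

    b, cond_dim = 4, 128
    time_tokens  = torch.randn(b, 2, cond_dim)   # num_time_tokens = 2
    image_tokens = torch.randn(b, 4, cond_dim)   # num_image_tokens = 4, or None if disabled

    c = time_tokens
    if image_tokens is not None:
        c = torch.cat((c, image_tokens), dim = -2)   # (b, 2 + 4, cond_dim)

    text_tokens = torch.randn(b, 77, cond_dim)       # or None when text conditioning is off
    mid_c = c if text_tokens is None else torch.cat((c, text_tokens), dim = -2)
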
@@ -1124,6 +1148,25 @@ class Decoder(nn.Module):
         self.register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
         self.register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
 
+    def get_unet(self, unet_number):
+        assert 0 < unet_number <= len(self.unets)
+        index = unet_number - 1
+        return self.unets[index]
+
+    @contextmanager
+    def one_unet_in_gpu(self, unet_number = None, unet = None):
+        assert exists(unet_number) ^ exists(unet)
+
+        if exists(unet_number):
+            unet = self.get_unet(unet_number)
+
+        self.cuda()
+        self.unets.cpu()
+
+        unet.cuda()
+        yield
+        unet.cpu()
+
     def get_text_encodings(self, text):
         text_encodings = self.clip.text_transformer(text)
         return text_encodings[:, 1:]
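
Usage-wise, the new context manager pins exactly one unet on the GPU while the rest of the cascade sits on CPU. One nit: the `yield` is not wrapped in `try/finally`, so an exception raised inside the block will leave that unet on the GPU. A usage sketch:

    # assuming `decoder` is a Decoder holding several unets
    with decoder.one_unet_in_gpu(unet_number = 1):
        # only decoder.unets[0] is resident on the GPU here
        ...
    # on a clean exit, that unet is moved back to the CPU
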
@@ -1228,20 +1271,21 @@ class Decoder(nn.Module):
         text_encodings = self.get_text_encodings(text) if exists(text) else None
 
         img = None
 
         for unet, image_size in tqdm(zip(self.unets, self.image_sizes)):
-            shape = (batch_size, channels, image_size, image_size)
-            img = self.p_sample_loop(unet, shape, image_embed = image_embed, text_encodings = text_encodings, cond_scale = cond_scale, lowres_cond_img = img)
+            with self.one_unet_in_gpu(unet = unet):
+                shape = (batch_size, channels, image_size, image_size)
+                img = self.p_sample_loop(unet, shape, image_embed = image_embed, text_encodings = text_encodings, cond_scale = cond_scale, lowres_cond_img = img)
 
         return img
 
     def forward(self, image, text = None, image_embed = None, text_encodings = None, unet_number = None):
         assert not (len(self.unets) > 1 and not exists(unet_number)), f'you must specify which unet you want trained, from a range of 1 to {len(self.unets)}, if you are training cascading DDPM (multiple unets)'
         unet_number = default(unet_number, 1)
         assert 1 <= unet_number <= len(self.unets)
 
-        index = unet_number - 1
-        unet = self.unets[index]
-        target_image_size = self.image_sizes[index]
+        unet = self.get_unet(unet_number)
+
+        target_image_size = self.image_sizes[unet_number - 1]
 
         b, c, h, w, device, = *image.shape, image.device
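
In the patched sampling loop each stage now runs inside `one_unet_in_gpu`, and its output is threaded into the next stage as `lowres_cond_img`, so with `image_sizes = (64, 256)` the base unet's 64x64 samples become the super-resolution unet's low-resolution conditioning. A hedged call sketch (the exact signature of `sample` is outside this diff; names are inferred from its body above):

    # hypothetical: image_embed comes from the diffusion prior, shape (batch, image_embed_dim)
    images = decoder.sample(image_embed = image_embed, text = text, cond_scale = 2.)
    # images come out at the resolution of the last unet in the cascade
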
@@ -1255,7 +1299,7 @@ class Decoder(nn.Module):
 
         text_encodings = self.get_text_encodings(text) if exists(text) and not exists(text_encodings) else None
 
-        lowres_cond_img = image if index > 0 else None
+        lowres_cond_img = image if unet_number > 1 else None
         ddpm_image = resize_image_to(image, target_image_size)
         return self.p_losses(unet, ddpm_image, times, image_embed = image_embed, text_encodings = text_encodings, lowres_cond_img = lowres_cond_img)
 
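
On the training side, `forward` now routes everything through `get_unet`, and stages past the first automatically receive the input image as their low-resolution conditioning (`unet_number > 1`, replacing the removed local `index`). Training a cascade therefore means one call per stage, each returning that stage's denoising loss. A sketch grounded in the `forward` signature above:

    # train the base unet and the super-resolution unet on the same batch
    loss = decoder(images, text = text, unet_number = 1)
    loss.backward()

    loss = decoder(images, text = text, unet_number = 2)  # lowres_cond_img is handled internally
    loss.backward()
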