@@ -77,6 +77,11 @@ def cast_tuple(val, length = None):
|
|
|
|
|
def module_device(module):
|
|
|
|
|
return next(module.parameters()).device
|
|
|
|
|
|
|
|
|
|
def zero_init_(m):
|
|
|
|
|
nn.init.zeros_(m.weight)
|
|
|
|
|
if exists(m.bias):
|
|
|
|
|
nn.init.zeros_(m.bias)
|
|
|
|
|
|
|
|
|
|
@contextmanager
|
|
|
|
|
def null_context(*args, **kwargs):
|
|
|
|
|
yield
|
|
|
|
|
@@ -141,7 +146,7 @@ def resize_image_to(
|
|
|
|
|
scale_factors = target_image_size / orig_image_size
|
|
|
|
|
out = resize(image, scale_factors = scale_factors, **kwargs)
|
|
|
|
|
else:
|
|
|
|
|
out = F.interpolate(image, target_image_size, mode = 'nearest', align_corners = False)
out = F.interpolate(image, target_image_size, mode = 'nearest')
|
|
|
|
|
|
|
|
|
|
if exists(clamp_range):
|
|
|
|
|
out = out.clamp(*clamp_range)
|
|
|
|
|
@@ -160,7 +165,7 @@ def unnormalize_zero_to_one(normed_img):
|
|
|
|
|
|
|
|
|
|
# clip related adapters
|
|
|
|
|
|
|
|
|
|
EmbeddedText = namedtuple('EmbedTextReturn', ['text_embed', 'text_encodings', 'text_mask'])
EmbeddedText = namedtuple('EmbedTextReturn', ['text_embed', 'text_encodings'])
|
|
|
|
|
EmbeddedImage = namedtuple('EmbedImageReturn', ['image_embed', 'image_encodings'])
|
|
|
|
|
|
|
|
|
|
class BaseClipAdapter(nn.Module):
|
|
|
|
|
@@ -220,7 +225,8 @@ class XClipAdapter(BaseClipAdapter):
|
|
|
|
|
encoder_output = self.clip.text_transformer(text)
|
|
|
|
|
text_cls, text_encodings = encoder_output[:, 0], encoder_output[:, 1:]
|
|
|
|
|
text_embed = self.clip.to_text_latent(text_cls)
|
|
|
|
|
return EmbeddedText(l2norm(text_embed), text_encodings, text_mask)
text_encodings = text_encodings.masked_fill(~text_mask[..., None], 0.)
return EmbeddedText(l2norm(text_embed), text_encodings)
|
|
|
|
|
|
|
|
|
|
@torch.no_grad()
|
|
|
|
|
def embed_image(self, image):
|
|
|
|
|
@@ -255,7 +261,8 @@ class CoCaAdapter(BaseClipAdapter):
|
|
|
|
|
text = text[..., :self.max_text_len]
|
|
|
|
|
text_mask = text != 0
|
|
|
|
|
text_embed, text_encodings = self.clip.embed_text(text)
|
|
|
|
|
return EmbeddedText(text_embed, text_encodings, text_mask)
text_encodings = text_encodings.masked_fill(~text_mask[..., None], 0.)
return EmbeddedText(text_embed, text_encodings)
|
|
|
|
|
|
|
|
|
|
@torch.no_grad()
|
|
|
|
|
def embed_image(self, image):
|
|
|
|
|
@@ -271,6 +278,7 @@ class OpenAIClipAdapter(BaseClipAdapter):
|
|
|
|
|
import clip
|
|
|
|
|
openai_clip, preprocess = clip.load(name)
|
|
|
|
|
super().__init__(openai_clip)
|
|
|
|
|
self.eos_id = 49407 # for handling 0 being also '!'
|
|
|
|
|
|
|
|
|
|
text_attention_final = self.find_layer('ln_final')
|
|
|
|
|
self.handle = text_attention_final.register_forward_hook(self._hook)
|
|
|
|
|
@@ -309,13 +317,17 @@ class OpenAIClipAdapter(BaseClipAdapter):
|
|
|
|
|
@torch.no_grad()
|
|
|
|
|
def embed_text(self, text):
|
|
|
|
|
text = text[..., :self.max_text_len]
|
|
|
|
|
text_mask = text != 0
|
|
|
|
|
|
|
|
|
|
is_eos_id = (text == self.eos_id)
|
|
|
|
|
text_mask_excluding_eos = is_eos_id.cumsum(dim = -1) == 0
|
|
|
|
|
text_mask = F.pad(text_mask_excluding_eos, (1, -1), value = True)
|
|
|
|
|
assert not self.cleared
|
|
|
|
|
|
|
|
|
|
text_embed = self.clip.encode_text(text)
|
|
|
|
|
text_encodings = self.text_encodings
|
|
|
|
|
text_encodings = text_encodings.masked_fill(~text_mask[..., None], 0.)
|
|
|
|
|
del self.text_encodings
|
|
|
|
|
return EmbeddedText(l2norm(text_embed.float()), text_encodings.float(), text_mask)
return EmbeddedText(l2norm(text_embed.float()), text_encodings.float())
|
|
|
|
|
|
|
|
|
|
@torch.no_grad()
|
|
|
|
|
def embed_image(self, image):
|
|
|
|
|
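# a minimal sketch (illustrative, not part of the diff) of the convention the adapters above now follow:
# padded positions of the text encodings are zeroed out, and any consumer can recover the boolean mask
# with torch.any(... != 0) instead of carrying a separate text_mask around

import torch

text_encodings = torch.randn(2, 5, 8)          # (batch, seq, dim), assumed toy shapes
text_mask = torch.tensor([[True, True, True, False, False],
                          [True, True, False, False, False]])

text_encodings = text_encodings.masked_fill(~text_mask[..., None], 0.)   # what embed_text now does
recovered_mask = torch.any(text_encodings != 0., dim = -1)               # what downstream modules now do

assert torch.equal(recovered_mask, text_mask)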
@@ -505,6 +517,12 @@ class NoiseScheduler(nn.Module):
|
|
|
|
|
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
def predict_noise_from_start(self, x_t, t, x0):
|
|
|
|
|
return (
|
|
|
|
|
(x0 - extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t) / \
|
|
|
|
|
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
def p2_reweigh_loss(self, loss, times):
|
|
|
|
|
if not self.has_p2_loss_reweighting:
|
|
|
|
|
return loss
|
|
|
|
|
@@ -513,25 +531,31 @@ class NoiseScheduler(nn.Module):
|
|
|
|
|
# diffusion prior
|
|
|
|
|
|
|
|
|
|
class LayerNorm(nn.Module):
|
|
|
|
|
def __init__(self, dim, eps = 1e-5):
def __init__(self, dim, eps = 1e-5, stable = False):
|
|
|
|
|
super().__init__()
|
|
|
|
|
self.eps = eps
|
|
|
|
|
self.stable = stable
|
|
|
|
|
self.g = nn.Parameter(torch.ones(dim))
|
|
|
|
|
|
|
|
|
|
def forward(self, x):
|
|
|
|
|
x = x / x.amax(dim = -1, keepdim = True).detach()
if self.stable:
x = x / x.amax(dim = -1, keepdim = True).detach()
|
|
|
|
|
|
|
|
|
|
var = torch.var(x, dim = -1, unbiased = False, keepdim = True)
|
|
|
|
|
mean = torch.mean(x, dim = -1, keepdim = True)
|
|
|
|
|
return (x - mean) * (var + self.eps).rsqrt() * self.g
|
|
|
|
|
|
|
|
|
|
class ChanLayerNorm(nn.Module):
|
|
|
|
|
def __init__(self, dim, eps = 1e-5):
def __init__(self, dim, eps = 1e-5, stable = False):
|
|
|
|
|
super().__init__()
|
|
|
|
|
self.eps = eps
|
|
|
|
|
self.stable = stable
|
|
|
|
|
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
|
|
|
|
|
|
|
|
|
|
def forward(self, x):
|
|
|
|
|
x = x / x.amax(dim = 1, keepdim = True).detach()
if self.stable:
x = x / x.amax(dim = 1, keepdim = True).detach()
|
|
|
|
|
|
|
|
|
|
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
|
|
|
|
|
mean = torch.mean(x, dim = 1, keepdim = True)
|
|
|
|
|
return (x - mean) * (var + self.eps).rsqrt() * self.g
|
|
|
|
|
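# a small numerical sketch (illustrative, assumed values) of why the `stable` flag above is safe:
# dividing by the per-row amax before taking mean / variance rescales the whole row uniformly, so the
# normalized output is essentially unchanged, while keeping intermediates small enough for fp16

import torch

x = torch.randn(4, 16) * 1000.

def norm(x):
    var = torch.var(x, dim = -1, unbiased = False, keepdim = True)
    mean = torch.mean(x, dim = -1, keepdim = True)
    return (x - mean) * (var + 1e-5).rsqrt()

stable_x = x / x.amax(dim = -1, keepdim = True).detach()
print(torch.allclose(norm(x), norm(stable_x), atol = 1e-3))  # True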
@@ -655,7 +679,7 @@ class Attention(nn.Module):
|
|
|
|
|
dropout = 0.,
|
|
|
|
|
causal = False,
|
|
|
|
|
rotary_emb = None,
|
|
|
|
|
pb_relax_alpha = 32 ** 2
pb_relax_alpha = 128
|
|
|
|
|
):
|
|
|
|
|
super().__init__()
|
|
|
|
|
self.pb_relax_alpha = pb_relax_alpha
|
|
|
|
|
@@ -746,6 +770,7 @@ class CausalTransformer(nn.Module):
|
|
|
|
|
dim_head = 64,
|
|
|
|
|
heads = 8,
|
|
|
|
|
ff_mult = 4,
|
|
|
|
|
norm_in = False,
|
|
|
|
|
norm_out = True,
|
|
|
|
|
attn_dropout = 0.,
|
|
|
|
|
ff_dropout = 0.,
|
|
|
|
|
@@ -754,6 +779,8 @@ class CausalTransformer(nn.Module):
|
|
|
|
|
rotary_emb = True
|
|
|
|
|
):
|
|
|
|
|
super().__init__()
|
|
|
|
|
self.init_norm = LayerNorm(dim) if norm_in else nn.Identity() # from latest BLOOM model and Yandex's YaLM
|
|
|
|
|
|
|
|
|
|
self.rel_pos_bias = RelPosBias(heads = heads)
|
|
|
|
|
|
|
|
|
|
rotary_emb = RotaryEmbedding(dim = min(32, dim_head)) if rotary_emb else None
|
|
|
|
|
@@ -765,20 +792,18 @@ class CausalTransformer(nn.Module):
|
|
|
|
|
FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout, post_activation_norm = normformer)
|
|
|
|
|
]))
|
|
|
|
|
|
|
|
|
|
self.norm = LayerNorm(dim) if norm_out else nn.Identity() # unclear in paper whether they projected after the classic layer norm for the final denoised image embedding, or just had the transformer output it directly: plan on offering both options
self.norm = LayerNorm(dim, stable = True) if norm_out else nn.Identity() # unclear in paper whether they projected after the classic layer norm for the final denoised image embedding, or just had the transformer output it directly: plan on offering both options
|
|
|
|
|
self.project_out = nn.Linear(dim, dim, bias = False) if final_proj else nn.Identity()
|
|
|
|
|
|
|
|
|
|
def forward(
|
|
|
|
|
self,
|
|
|
|
|
x,
|
|
|
|
|
mask = None # we will need a mask here, due to variable length of the text encodings - also offer dalle1 strategy with padding token embeddings
|
|
|
|
|
):
def forward(self, x):
|
|
|
|
|
n, device = x.shape[1], x.device
|
|
|
|
|
|
|
|
|
|
x = self.init_norm(x)
|
|
|
|
|
|
|
|
|
|
attn_bias = self.rel_pos_bias(n, n + 1, device = device)
|
|
|
|
|
|
|
|
|
|
for attn, ff in self.layers:
|
|
|
|
|
x = attn(x, mask = mask, attn_bias = attn_bias) + x
x = attn(x, attn_bias = attn_bias) + x
|
|
|
|
|
x = ff(x) + x
|
|
|
|
|
|
|
|
|
|
out = self.norm(x)
|
|
|
|
|
@@ -792,6 +817,7 @@ class DiffusionPriorNetwork(nn.Module):
|
|
|
|
|
num_time_embeds = 1,
|
|
|
|
|
num_image_embeds = 1,
|
|
|
|
|
num_text_embeds = 1,
|
|
|
|
|
max_text_len = 256,
|
|
|
|
|
**kwargs
|
|
|
|
|
):
|
|
|
|
|
super().__init__()
|
|
|
|
|
@@ -817,6 +843,11 @@ class DiffusionPriorNetwork(nn.Module):
|
|
|
|
|
self.learned_query = nn.Parameter(torch.randn(dim))
|
|
|
|
|
self.causal_transformer = CausalTransformer(dim = dim, **kwargs)
|
|
|
|
|
|
|
|
|
|
# dalle1 learned padding strategy
|
|
|
|
|
|
|
|
|
|
self.max_text_len = max_text_len
|
|
|
|
|
self.null_text_embed = nn.Parameter(torch.randn(1, max_text_len, dim))
|
|
|
|
|
|
|
|
|
|
def forward_with_cond_scale(
|
|
|
|
|
self,
|
|
|
|
|
*args,
|
|
|
|
|
@@ -838,7 +869,6 @@ class DiffusionPriorNetwork(nn.Module):
|
|
|
|
|
*,
|
|
|
|
|
text_embed,
|
|
|
|
|
text_encodings = None,
|
|
|
|
|
mask = None,
|
|
|
|
|
cond_drop_prob = 0.
|
|
|
|
|
):
|
|
|
|
|
batch, dim, device, dtype = *image_embed.shape, image_embed.device, image_embed.dtype
|
|
|
|
|
@@ -856,9 +886,28 @@ class DiffusionPriorNetwork(nn.Module):
|
|
|
|
|
|
|
|
|
|
if not exists(text_encodings):
|
|
|
|
|
text_encodings = torch.empty((batch, 0, dim), device = device, dtype = dtype)
|
|
|
|
|
|
|
|
|
|
mask = torch.any(text_encodings != 0., dim = -1)
|
|
|
|
|
|
|
|
|
|
if not exists(mask):
|
|
|
|
|
mask = torch.ones((batch, text_encodings.shape[-2]), device = device, dtype = torch.bool)
|
|
|
|
|
# replace any padding in the text encodings with learned padding tokens unique across position
|
|
|
|
|
|
|
|
|
|
text_encodings = text_encodings[:, :self.max_text_len]
|
|
|
|
|
mask = mask[:, :self.max_text_len]
|
|
|
|
|
|
|
|
|
|
text_len = text_encodings.shape[-2]
|
|
|
|
|
remainder = self.max_text_len - text_len
|
|
|
|
|
|
|
|
|
|
if remainder > 0:
|
|
|
|
|
text_encodings = F.pad(text_encodings, (0, 0, 0, remainder), value = 0.)
|
|
|
|
|
mask = F.pad(mask, (0, remainder), value = False)
|
|
|
|
|
|
|
|
|
|
null_text_embeds = self.null_text_embed.to(text_encodings.dtype)
|
|
|
|
|
|
|
|
|
|
text_encodings = torch.where(
|
|
|
|
|
rearrange(mask, 'b n -> b n 1').clone(),
|
|
|
|
|
text_encodings,
|
|
|
|
|
null_text_embeds
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
# classifier free guidance
|
|
|
|
|
|
|
|
|
|
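# a minimal sketch (illustrative only, toy sizes assumed) of the dalle1-style learned padding used above:
# encodings are right-padded to a fixed length and every padded position is replaced by a learned,
# position-specific null embedding, so no attention mask needs to be threaded through the transformer

import torch
import torch.nn.functional as F
from einops import rearrange

batch, text_len, max_text_len, dim = 2, 3, 6, 8
text_encodings = torch.randn(batch, text_len, dim)
mask = torch.ones(batch, text_len, dtype = torch.bool)
null_text_embed = torch.randn(1, max_text_len, dim)   # stands in for the nn.Parameter above

remainder = max_text_len - text_len
text_encodings = F.pad(text_encodings, (0, 0, 0, remainder), value = 0.)
mask = F.pad(mask, (0, remainder), value = False)

text_encodings = torch.where(rearrange(mask, 'b n -> b n 1'), text_encodings, null_text_embed)
print(text_encodings.shape)  # (2, 6, 8) - positions 3..5 now hold the learned null embeddings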
@@ -875,9 +924,8 @@ class DiffusionPriorNetwork(nn.Module):
|
|
|
|
|
# whether text embedding is used for conditioning depends on whether text encodings are available for attention (for classifier free guidance, even though it seems from the paper it was not used in the prior ddpm, as the objective is different)
|
|
|
|
|
# but let's just do it right
|
|
|
|
|
|
|
|
|
|
if exists(mask):
|
|
|
|
|
attend_padding = 1 + num_time_embeds + num_image_embeds # 1 for learned queries + number of image embeds + time embeds
|
|
|
|
|
mask = F.pad(mask, (0, attend_padding), value = True) # extend mask for text embedding, noised image embedding, time step embedding, and learned query
attend_padding = 1 + num_time_embeds + num_image_embeds # 1 for learned queries + number of image embeds + time embeds
|
|
|
|
|
mask = F.pad(mask, (0, attend_padding), value = True) # extend mask for text embedding, noised image embedding, time step embedding, and learned query
|
|
|
|
|
|
|
|
|
|
time_embed = self.to_time_embeds(diffusion_timesteps)
|
|
|
|
|
|
|
|
|
|
@@ -893,7 +941,7 @@ class DiffusionPriorNetwork(nn.Module):
|
|
|
|
|
|
|
|
|
|
# attend
|
|
|
|
|
|
|
|
|
|
tokens = self.causal_transformer(tokens, mask = mask)
tokens = self.causal_transformer(tokens)
|
|
|
|
|
|
|
|
|
|
# get learned query, which should predict the image embedding (per DDPM timestep)
|
|
|
|
|
|
|
|
|
|
@@ -911,19 +959,23 @@ class DiffusionPrior(nn.Module):
|
|
|
|
|
image_size = None,
|
|
|
|
|
image_channels = 3,
|
|
|
|
|
timesteps = 1000,
|
|
|
|
|
sample_timesteps = None,
|
|
|
|
|
cond_drop_prob = 0.,
|
|
|
|
|
loss_type = "l2",
|
|
|
|
|
predict_x_start = True,
|
|
|
|
|
beta_schedule = "cosine",
|
|
|
|
|
condition_on_text_encodings = True, # the paper suggests this is needed, but you can turn it off for your CLIP preprocessed text embed -> image embed training
|
|
|
|
|
sampling_clamp_l2norm = False,
condition_on_text_encodings = True, # the paper suggests this is needed, but you can turn it off for your CLIP preprocessed text embed -> image embed training
|
|
|
|
|
sampling_clamp_l2norm = False, # whether to l2norm clamp the image embed at each denoising iteration (analogous to -1 to 1 clipping for usual DDPMs)
|
|
|
|
|
sampling_final_clamp_l2norm = False, # whether to l2norm the final image embedding output (this is also done for images in ddpm)
|
|
|
|
|
training_clamp_l2norm = False,
|
|
|
|
|
init_image_embed_l2norm = False,
|
|
|
|
|
image_embed_scale = None, # this is for scaling the l2-normed image embedding, so it is more suitable for gaussian diffusion, as outlined by Katherine (@crowsonkb) https://github.com/lucidrains/DALLE2-pytorch/issues/60#issue-1226116132
|
|
|
|
|
image_embed_scale = None, # this is for scaling the l2-normed image embedding, so it is more suitable for gaussian diffusion, as outlined by Katherine (@crowsonkb) https://github.com/lucidrains/DALLE2-pytorch/issues/60#issue-1226116132
|
|
|
|
|
clip_adapter_overrides = dict()
|
|
|
|
|
):
|
|
|
|
|
super().__init__()
|
|
|
|
|
|
|
|
|
|
self.sample_timesteps = sample_timesteps
|
|
|
|
|
|
|
|
|
|
self.noise_scheduler = NoiseScheduler(
|
|
|
|
|
beta_schedule = beta_schedule,
|
|
|
|
|
timesteps = timesteps,
|
|
|
|
|
@@ -954,23 +1006,32 @@ class DiffusionPrior(nn.Module):
|
|
|
|
|
self.condition_on_text_encodings = condition_on_text_encodings
|
|
|
|
|
|
|
|
|
|
# in paper, they do not predict the noise, but predict x0 directly for image embedding, claiming empirically better results. I'll just offer both.
|
|
|
|
|
|
|
|
|
|
self.predict_x_start = predict_x_start
|
|
|
|
|
|
|
|
|
|
# @crowsonkb 's suggestion - https://github.com/lucidrains/DALLE2-pytorch/issues/60#issue-1226116132
|
|
|
|
|
|
|
|
|
|
self.image_embed_scale = default(image_embed_scale, self.image_embed_dim ** 0.5)
|
|
|
|
|
|
|
|
|
|
# whether to force an l2norm, similar to clipping denoised, when sampling
|
|
|
|
|
|
|
|
|
|
self.sampling_clamp_l2norm = sampling_clamp_l2norm
|
|
|
|
|
self.sampling_final_clamp_l2norm = sampling_final_clamp_l2norm
|
|
|
|
|
|
|
|
|
|
self.training_clamp_l2norm = training_clamp_l2norm
|
|
|
|
|
self.init_image_embed_l2norm = init_image_embed_l2norm
|
|
|
|
|
|
|
|
|
|
# device tracker
|
|
|
|
|
|
|
|
|
|
self.register_buffer('_dummy', torch.tensor([True]), persistent = False)
|
|
|
|
|
|
|
|
|
|
@property
|
|
|
|
|
def device(self):
|
|
|
|
|
return self._dummy.device
|
|
|
|
|
|
|
|
|
|
def l2norm_clamp_embed(self, image_embed):
|
|
|
|
|
return l2norm(image_embed) * self.image_embed_scale
|
|
|
|
|
|
|
|
|
|
def p_mean_variance(self, x, t, text_cond, clip_denoised = False, cond_scale = 1.):
|
|
|
|
|
assert not (cond_scale != 1. and not self.can_classifier_guidance), 'the model was not trained with conditional dropout, and thus one cannot use classifier free guidance (cond_scale anything other than 1)'
|
|
|
|
|
|
|
|
|
|
@@ -978,8 +1039,6 @@ class DiffusionPrior(nn.Module):
|
|
|
|
|
|
|
|
|
|
if self.predict_x_start:
|
|
|
|
|
x_recon = pred
|
|
|
|
|
# not 100% sure of this above line - for any spectators, let me know in the github issues (or through a pull request) if you know how to correctly do this
|
|
|
|
|
# i'll be rereading https://arxiv.org/abs/2111.14822, where i think a similar approach is taken
|
|
|
|
|
else:
|
|
|
|
|
x_recon = self.noise_scheduler.predict_start_from_noise(x, t = t, noise = pred)
|
|
|
|
|
|
|
|
|
|
@@ -1002,21 +1061,81 @@ class DiffusionPrior(nn.Module):
|
|
|
|
|
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
|
|
|
|
|
|
|
|
|
|
@torch.no_grad()
|
|
|
|
|
def p_sample_loop(self, shape, text_cond, cond_scale = 1.):
|
|
|
|
|
device = self.device
|
|
|
|
|
|
|
|
|
|
b = shape[0]
|
|
|
|
|
image_embed = torch.randn(shape, device=device)
def p_sample_loop_ddpm(self, shape, text_cond, cond_scale = 1.):
|
|
|
|
|
batch, device = shape[0], self.device
|
|
|
|
|
image_embed = torch.randn(shape, device = device)
|
|
|
|
|
|
|
|
|
|
if self.init_image_embed_l2norm:
|
|
|
|
|
image_embed = l2norm(image_embed) * self.image_embed_scale
|
|
|
|
|
|
|
|
|
|
for i in tqdm(reversed(range(0, self.noise_scheduler.num_timesteps)), desc='sampling loop time step', total=self.noise_scheduler.num_timesteps):
|
|
|
|
|
times = torch.full((b,), i, device = device, dtype = torch.long)
times = torch.full((batch,), i, device = device, dtype = torch.long)
|
|
|
|
|
image_embed = self.p_sample(image_embed, times, text_cond = text_cond, cond_scale = cond_scale)
|
|
|
|
|
|
|
|
|
|
if self.sampling_final_clamp_l2norm and self.predict_x_start:
|
|
|
|
|
image_embed = self.l2norm_clamp_embed(image_embed)
|
|
|
|
|
|
|
|
|
|
return image_embed
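# a small illustration (assumed numbers, not from the diff): scaling an l2-normalized image embedding
# by sqrt(dim) gives it roughly unit variance per coordinate, which is what
# image_embed_scale = image_embed_dim ** 0.5 above relies on to make the embedding look gaussian

import torch
import torch.nn.functional as F

dim = 512
image_embed = torch.randn(4, dim)
image_embed = F.normalize(image_embed, dim = -1) * dim ** 0.5
print(image_embed.std(dim = -1))  # each row is close to 1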
|
|
|
|
|
|
|
|
|
|
@torch.no_grad()
|
|
|
|
|
def p_sample_loop_ddim(self, shape, text_cond, *, timesteps, eta = 1., cond_scale = 1.):
|
|
|
|
|
batch, device, alphas, total_timesteps = shape[0], self.device, self.noise_scheduler.alphas_cumprod_prev, self.noise_scheduler.num_timesteps
|
|
|
|
|
|
|
|
|
|
times = torch.linspace(0., total_timesteps, steps = timesteps + 2)[:-1]
|
|
|
|
|
|
|
|
|
|
times = list(reversed(times.int().tolist()))
|
|
|
|
|
time_pairs = list(zip(times[:-1], times[1:]))
|
|
|
|
|
|
|
|
|
|
image_embed = torch.randn(shape, device = device)
|
|
|
|
|
|
|
|
|
|
if self.init_image_embed_l2norm:
|
|
|
|
|
image_embed = l2norm(image_embed) * self.image_embed_scale
|
|
|
|
|
|
|
|
|
|
for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step'):
|
|
|
|
|
alpha = alphas[time]
|
|
|
|
|
alpha_next = alphas[time_next]
|
|
|
|
|
|
|
|
|
|
time_cond = torch.full((batch,), time, device = device, dtype = torch.long)
|
|
|
|
|
|
|
|
|
|
pred = self.net.forward_with_cond_scale(image_embed, time_cond, cond_scale = cond_scale, **text_cond)
|
|
|
|
|
|
|
|
|
|
if self.predict_x_start:
|
|
|
|
|
x_start = pred
|
|
|
|
|
pred_noise = self.noise_scheduler.predict_noise_from_start(image_embed, t = time_cond, x0 = pred)
|
|
|
|
|
else:
|
|
|
|
|
x_start = self.noise_scheduler.predict_start_from_noise(image_embed, t = time_cond, noise = pred)
|
|
|
|
|
pred_noise = pred
|
|
|
|
|
|
|
|
|
|
if not self.predict_x_start:
|
|
|
|
|
x_start.clamp_(-1., 1.)
|
|
|
|
|
|
|
|
|
|
if self.predict_x_start and self.sampling_clamp_l2norm:
|
|
|
|
|
x_start = self.l2norm_clamp_embed(x_start)
|
|
|
|
|
|
|
|
|
|
c1 = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
|
|
|
|
|
c2 = ((1 - alpha_next) - torch.square(c1)).sqrt()
|
|
|
|
|
noise = torch.randn_like(image_embed) if time_next > 0 else 0.
|
|
|
|
|
|
|
|
|
|
image_embed = x_start * alpha_next.sqrt() + \
|
|
|
|
|
c1 * noise + \
|
|
|
|
|
c2 * pred_noise
|
|
|
|
|
|
|
|
|
|
if self.predict_x_start and self.sampling_final_clamp_l2norm:
|
|
|
|
|
image_embed = self.l2norm_clamp_embed(image_embed)
|
|
|
|
|
|
|
|
|
|
return image_embed
|
|
|
|
|
|
|
|
|
|
@torch.no_grad()
|
|
|
|
|
def p_sample_loop(self, *args, timesteps = None, **kwargs):
|
|
|
|
|
timesteps = default(timesteps, self.noise_scheduler.num_timesteps)
|
|
|
|
|
assert timesteps <= self.noise_scheduler.num_timesteps
|
|
|
|
|
is_ddim = timesteps < self.noise_scheduler.num_timesteps
|
|
|
|
|
|
|
|
|
|
if not is_ddim:
|
|
|
|
|
return self.p_sample_loop_ddpm(*args, **kwargs)
|
|
|
|
|
|
|
|
|
|
return self.p_sample_loop_ddim(*args, **kwargs, timesteps = timesteps)
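# a quick sketch (toy numbers assumed) of how the ddim branch above strides the schedule: asking for
# fewer sampling timesteps than training timesteps triggers ddim, which walks a subsampled, reversed
# list of (time, time_next) pairs instead of every single step

import torch

total_timesteps, timesteps = 1000, 10
times = torch.linspace(0., total_timesteps, steps = timesteps + 2)[:-1]
times = list(reversed(times.int().tolist()))
time_pairs = list(zip(times[:-1], times[1:]))
print(time_pairs)  # [(909, 818), (818, 727), ..., (90, 0)]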
|
|
|
|
|
|
|
|
|
|
def p_losses(self, image_embed, times, text_cond, noise = None):
|
|
|
|
|
noise = default(noise, lambda: torch.randn_like(image_embed))
|
|
|
|
|
|
|
|
|
|
@@ -1030,7 +1149,7 @@ class DiffusionPrior(nn.Module):
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
if self.predict_x_start and self.training_clamp_l2norm:
|
|
|
|
|
pred = l2norm(pred) * self.image_embed_scale
pred = self.l2norm_clamp_embed(pred)
|
|
|
|
|
|
|
|
|
|
target = noise if not self.predict_x_start else image_embed
|
|
|
|
|
|
|
|
|
|
@@ -1051,7 +1170,15 @@ class DiffusionPrior(nn.Module):
|
|
|
|
|
|
|
|
|
|
@torch.no_grad()
|
|
|
|
|
@eval_decorator
|
|
|
|
|
def sample(self, text, num_samples_per_batch = 2, cond_scale = 1.):
def sample(
|
|
|
|
|
self,
|
|
|
|
|
text,
|
|
|
|
|
num_samples_per_batch = 2,
|
|
|
|
|
cond_scale = 1.,
|
|
|
|
|
timesteps = None
|
|
|
|
|
):
|
|
|
|
|
timesteps = default(timesteps, self.sample_timesteps)
|
|
|
|
|
|
|
|
|
|
# in the paper, what they did was
|
|
|
|
|
# sample 2 image embeddings, choose the top 1 similarity, as judged by CLIP
|
|
|
|
|
text = repeat(text, 'b ... -> (b r) ...', r = num_samples_per_batch)
|
|
|
|
|
@@ -1059,14 +1186,14 @@ class DiffusionPrior(nn.Module):
|
|
|
|
|
batch_size = text.shape[0]
|
|
|
|
|
image_embed_dim = self.image_embed_dim
|
|
|
|
|
|
|
|
|
|
text_embed, text_encodings, text_mask = self.clip.embed_text(text)
text_embed, text_encodings = self.clip.embed_text(text)
|
|
|
|
|
|
|
|
|
|
text_cond = dict(text_embed = text_embed)
|
|
|
|
|
|
|
|
|
|
if self.condition_on_text_encodings:
|
|
|
|
|
text_cond = {**text_cond, 'text_encodings': text_encodings, 'mask': text_mask}
text_cond = {**text_cond, 'text_encodings': text_encodings}
|
|
|
|
|
|
|
|
|
|
image_embeds = self.p_sample_loop((batch_size, image_embed_dim), text_cond = text_cond, cond_scale = cond_scale)
image_embeds = self.p_sample_loop((batch_size, image_embed_dim), text_cond = text_cond, cond_scale = cond_scale, timesteps = timesteps)
|
|
|
|
|
|
|
|
|
|
# retrieve original unscaled image embed
|
|
|
|
|
|
|
|
|
|
@@ -1092,7 +1219,6 @@ class DiffusionPrior(nn.Module):
|
|
|
|
|
text_embed = None, # allow for training on preprocessed CLIP text and image embeddings
|
|
|
|
|
image_embed = None,
|
|
|
|
|
text_encodings = None, # as well as CLIP text encodings
|
|
|
|
|
text_mask = None, # text mask <- may eventually opt for the learned padding tokens technique from DALL-E1 to reduce complexity
|
|
|
|
|
*args,
|
|
|
|
|
**kwargs
|
|
|
|
|
):
|
|
|
|
|
@@ -1106,13 +1232,13 @@ class DiffusionPrior(nn.Module):
|
|
|
|
|
# calculate text conditionings, based on what is passed in
|
|
|
|
|
|
|
|
|
|
if exists(text):
|
|
|
|
|
text_embed, text_encodings, text_mask = self.clip.embed_text(text)
text_embed, text_encodings = self.clip.embed_text(text)
|
|
|
|
|
|
|
|
|
|
text_cond = dict(text_embed = text_embed)
|
|
|
|
|
|
|
|
|
|
if self.condition_on_text_encodings:
|
|
|
|
|
assert exists(text_encodings), 'text encodings must be present for diffusion prior if specified'
|
|
|
|
|
text_cond = {**text_cond, 'text_encodings': text_encodings, 'mask': text_mask}
text_cond = {**text_cond, 'text_encodings': text_encodings}
|
|
|
|
|
|
|
|
|
|
# timestep conditioning from ddpm
|
|
|
|
|
|
|
|
|
|
@@ -1129,17 +1255,44 @@ class DiffusionPrior(nn.Module):
|
|
|
|
|
|
|
|
|
|
# decoder
|
|
|
|
|
|
|
|
|
|
def ConvTransposeUpsample(dim, dim_out = None):
|
|
|
|
|
dim_out = default(dim_out, dim)
|
|
|
|
|
return nn.ConvTranspose2d(dim, dim_out, 4, 2, 1)
|
|
|
|
|
|
|
|
|
|
def NearestUpsample(dim, dim_out = None):
|
|
|
|
|
dim_out = default(dim_out, dim)
|
|
|
|
|
|
|
|
|
|
return nn.Sequential(
|
|
|
|
|
nn.Upsample(scale_factor = 2, mode = 'nearest'),
|
|
|
|
|
nn.Conv2d(dim, dim_out, 3, padding = 1)
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
class PixelShuffleUpsample(nn.Module):
|
|
|
|
|
"""
|
|
|
|
|
code shared by @MalumaDev at DALLE2-pytorch for addressing checkerboard artifacts
|
|
|
|
|
https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf
|
|
|
|
|
"""
|
|
|
|
|
def __init__(self, dim, dim_out = None):
|
|
|
|
|
super().__init__()
|
|
|
|
|
dim_out = default(dim_out, dim)
|
|
|
|
|
conv = nn.Conv2d(dim, dim_out * 4, 1)
|
|
|
|
|
|
|
|
|
|
self.net = nn.Sequential(
|
|
|
|
|
conv,
|
|
|
|
|
nn.SiLU(),
|
|
|
|
|
nn.PixelShuffle(2)
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
self.init_conv_(conv)
|
|
|
|
|
|
|
|
|
|
def init_conv_(self, conv):
|
|
|
|
|
o, i, h, w = conv.weight.shape
|
|
|
|
|
conv_weight = torch.empty(o // 4, i, h, w)
|
|
|
|
|
nn.init.kaiming_uniform_(conv_weight)
|
|
|
|
|
conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')
|
|
|
|
|
|
|
|
|
|
conv.weight.data.copy_(conv_weight)
|
|
|
|
|
nn.init.zeros_(conv.bias.data)
|
|
|
|
|
|
|
|
|
|
def forward(self, x):
|
|
|
|
|
return self.net(x)
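# hypothetical usage sketch (not part of the diff), assuming the class defined above: the 1x1 conv
# expands channels 4x and PixelShuffle(2) folds them back into 2x spatial size; since the init repeats
# one kernel across the four sub-positions, the upsample starts out checkerboard-free

import torch

upsample = PixelShuffleUpsample(dim = 64)            # dim_out defaults to dim
out = upsample(torch.randn(1, 64, 16, 16))
print(out.shape)                                     # torch.Size([1, 64, 32, 32])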
|
|
|
|
|
|
|
|
|
|
def Downsample(dim, *, dim_out = None):
|
|
|
|
|
dim_out = default(dim_out, dim)
|
|
|
|
|
return nn.Conv2d(dim, dim_out, 4, 2, 1)
|
|
|
|
|
@@ -1402,7 +1555,7 @@ class Unet(nn.Module):
|
|
|
|
|
cross_embed_downsample_kernel_sizes = (2, 4),
|
|
|
|
|
memory_efficient = False,
|
|
|
|
|
scale_skip_connection = False,
|
|
|
|
|
nearest_upsample = False,
|
|
|
|
|
pixel_shuffle_upsample = True,
|
|
|
|
|
final_conv_kernel_size = 1,
|
|
|
|
|
**kwargs
|
|
|
|
|
):
|
|
|
|
|
@@ -1468,10 +1621,12 @@ class Unet(nn.Module):
|
|
|
|
|
# text encoding conditioning (optional)
|
|
|
|
|
|
|
|
|
|
self.text_to_cond = None
|
|
|
|
|
self.text_embed_dim = None
|
|
|
|
|
|
|
|
|
|
if cond_on_text_encodings:
|
|
|
|
|
assert exists(text_embed_dim), 'text_embed_dim must be given to the unet if cond_on_text_encodings is True'
|
|
|
|
|
self.text_to_cond = nn.Linear(text_embed_dim, cond_dim)
|
|
|
|
|
self.text_embed_dim = text_embed_dim
|
|
|
|
|
|
|
|
|
|
# finer control over whether to condition on image embeddings and text encodings
|
|
|
|
|
# so one can have the latter unets in the cascading DDPMs only focus on super-resoluting
|
|
|
|
|
@@ -1514,7 +1669,7 @@ class Unet(nn.Module):
|
|
|
|
|
|
|
|
|
|
# upsample klass
|
|
|
|
|
|
|
|
|
|
upsample_klass = ConvTransposeUpsample if not nearest_upsample else NearestUpsample
upsample_klass = NearestUpsample if not pixel_shuffle_upsample else PixelShuffleUpsample
|
|
|
|
|
|
|
|
|
|
# give memory efficient unet an initial resnet block
|
|
|
|
|
|
|
|
|
|
@@ -1578,6 +1733,8 @@ class Unet(nn.Module):
|
|
|
|
|
self.final_resnet_block = ResnetBlock(dim * 2, dim, time_cond_dim = time_cond_dim, groups = top_level_resnet_group)
|
|
|
|
|
self.to_out = nn.Conv2d(dim, self.channels_out, kernel_size = final_conv_kernel_size, padding = final_conv_kernel_size // 2)
|
|
|
|
|
|
|
|
|
|
zero_init_(self.to_out) # since both OpenAI and @crowsonkb are doing it
|
|
|
|
|
|
|
|
|
|
# if the current settings for the unet are not correct
|
|
|
|
|
# for cascading DDPM, then reinit the unet with the right settings
|
|
|
|
|
def cast_model_parameters(
|
|
|
|
|
@@ -1628,7 +1785,6 @@ class Unet(nn.Module):
|
|
|
|
|
image_embed,
|
|
|
|
|
lowres_cond_img = None,
|
|
|
|
|
text_encodings = None,
|
|
|
|
|
text_mask = None,
|
|
|
|
|
image_cond_drop_prob = 0.,
|
|
|
|
|
text_cond_drop_prob = 0.,
|
|
|
|
|
blur_sigma = None,
|
|
|
|
|
@@ -1700,21 +1856,27 @@ class Unet(nn.Module):
|
|
|
|
|
text_tokens = None
|
|
|
|
|
|
|
|
|
|
if exists(text_encodings) and self.cond_on_text_encodings:
|
|
|
|
|
assert text_encodings.shape[0] == batch_size, f'the text encodings being passed into the unet does not have the proper batch size - text encoding shape {text_encodings.shape} - required batch size is {batch_size}'
|
|
|
|
|
assert self.text_embed_dim == text_encodings.shape[-1], f'the text encodings you are passing in have a dimension of {text_encodings.shape[-1]}, but the unet was created with text_embed_dim of {self.text_embed_dim}.'
|
|
|
|
|
|
|
|
|
|
text_mask = torch.any(text_encodings != 0., dim = -1)
|
|
|
|
|
|
|
|
|
|
text_tokens = self.text_to_cond(text_encodings)
|
|
|
|
|
|
|
|
|
|
text_tokens = text_tokens[:, :self.max_text_len]
|
|
|
|
|
text_mask = text_mask[:, :self.max_text_len]
|
|
|
|
|
|
|
|
|
|
text_tokens_len = text_tokens.shape[1]
|
|
|
|
|
remainder = self.max_text_len - text_tokens_len
|
|
|
|
|
|
|
|
|
|
if remainder > 0:
|
|
|
|
|
text_tokens = F.pad(text_tokens, (0, 0, 0, remainder))
|
|
|
|
|
text_mask = F.pad(text_mask, (0, remainder), value = False)
|
|
|
|
|
|
|
|
|
|
if exists(text_mask):
|
|
|
|
|
if remainder > 0:
|
|
|
|
|
text_mask = F.pad(text_mask, (0, remainder), value = False)
|
|
|
|
|
text_mask = rearrange(text_mask, 'b n -> b n 1')
|
|
|
|
|
|
|
|
|
|
text_mask = rearrange(text_mask, 'b n -> b n 1')
|
|
|
|
|
text_keep_mask = text_mask & text_keep_mask
|
|
|
|
|
assert text_mask.shape[0] == text_keep_mask.shape[0], f'text_mask has shape of {text_mask.shape} while text_keep_mask has shape {text_keep_mask.shape}. text encoding is of shape {text_encodings.shape}'
|
|
|
|
|
text_keep_mask = text_mask & text_keep_mask
|
|
|
|
|
|
|
|
|
|
null_text_embed = self.null_text_embed.to(text_tokens.dtype) # for some reason pytorch AMP not working
|
|
|
|
|
|
|
|
|
|
@@ -1795,17 +1957,16 @@ class LowresConditioner(nn.Module):
|
|
|
|
|
def __init__(
|
|
|
|
|
self,
|
|
|
|
|
downsample_first = True,
|
|
|
|
|
downsample_mode_nearest = False,
|
|
|
|
|
blur_prob = 0.5,
|
|
|
|
|
blur_sigma = 0.6,
|
|
|
|
|
blur_kernel_size = 3,
|
|
|
|
|
input_image_range = None
|
|
|
|
|
):
|
|
|
|
|
super().__init__()
|
|
|
|
|
self.downsample_first = downsample_first
|
|
|
|
|
self.downsample_mode_nearest = downsample_mode_nearest
|
|
|
|
|
|
|
|
|
|
self.input_image_range = input_image_range
|
|
|
|
|
|
|
|
|
|
self.blur_prob = blur_prob
|
|
|
|
|
self.blur_sigma = blur_sigma
|
|
|
|
|
self.blur_kernel_size = blur_kernel_size
|
|
|
|
|
|
|
|
|
|
@@ -1818,20 +1979,27 @@ class LowresConditioner(nn.Module):
|
|
|
|
|
blur_sigma = None,
|
|
|
|
|
blur_kernel_size = None
|
|
|
|
|
):
|
|
|
|
|
if self.training and self.downsample_first and exists(downsample_image_size):
|
|
|
|
|
cond_fmap = resize_image_to(cond_fmap, downsample_image_size, clamp_range = self.input_image_range, nearest = self.downsample_mode_nearest)
if self.downsample_first and exists(downsample_image_size):
|
|
|
|
|
cond_fmap = resize_image_to(cond_fmap, downsample_image_size, clamp_range = self.input_image_range, nearest = True)
|
|
|
|
|
|
|
|
|
|
# blur is only applied 50% of the time
|
|
|
|
|
# section 3.1 in https://arxiv.org/abs/2106.15282
|
|
|
|
|
|
|
|
|
|
if random.random() < self.blur_prob:
|
|
|
|
|
|
|
|
|
|
if self.training:
|
|
|
|
|
# when training, blur the low resolution conditional image
|
|
|
|
|
|
|
|
|
|
blur_sigma = default(blur_sigma, self.blur_sigma)
|
|
|
|
|
blur_kernel_size = default(blur_kernel_size, self.blur_kernel_size)
|
|
|
|
|
|
|
|
|
|
# allow for drawing a random sigma between lo and hi float values
|
|
|
|
|
|
|
|
|
|
if isinstance(blur_sigma, tuple):
|
|
|
|
|
blur_sigma = tuple(map(float, blur_sigma))
|
|
|
|
|
blur_sigma = random.uniform(*blur_sigma)
|
|
|
|
|
|
|
|
|
|
# allow for drawing a random kernel size between lo and hi int values
|
|
|
|
|
|
|
|
|
|
if isinstance(blur_kernel_size, tuple):
|
|
|
|
|
blur_kernel_size = tuple(map(int, blur_kernel_size))
|
|
|
|
|
kernel_size_lo, kernel_size_hi = blur_kernel_size
|
|
|
|
|
@@ -1839,8 +2007,7 @@ class LowresConditioner(nn.Module):
|
|
|
|
|
|
|
|
|
|
cond_fmap = gaussian_blur2d(cond_fmap, cast_tuple(blur_kernel_size, 2), cast_tuple(blur_sigma, 2))
|
|
|
|
|
|
|
|
|
|
cond_fmap = resize_image_to(cond_fmap, target_image_size, clamp_range = self.input_image_range)
cond_fmap = resize_image_to(cond_fmap, target_image_size, clamp_range = self.input_image_range, nearest = True)
|
|
|
|
|
return cond_fmap
|
|
|
|
|
|
|
|
|
|
class Decoder(nn.Module):
|
|
|
|
|
@@ -1853,6 +2020,7 @@ class Decoder(nn.Module):
|
|
|
|
|
channels = 3,
|
|
|
|
|
vae = tuple(),
|
|
|
|
|
timesteps = 1000,
|
|
|
|
|
sample_timesteps = None,
|
|
|
|
|
image_cond_drop_prob = 0.1,
|
|
|
|
|
text_cond_drop_prob = 0.5,
|
|
|
|
|
loss_type = 'l2',
|
|
|
|
|
@@ -1862,7 +2030,7 @@ class Decoder(nn.Module):
|
|
|
|
|
image_sizes = None, # for cascading ddpm, image size at each stage
|
|
|
|
|
random_crop_sizes = None, # whether to random crop the image at that stage in the cascade (super resoluting convolutions at the end may be able to generalize on smaller crops)
|
|
|
|
|
lowres_downsample_first = True, # cascading ddpm - resizes to lower resolution, then to next conditional resolution + blur
|
|
|
|
|
lowres_downsample_mode_nearest = False, # cascading ddpm - whether to use nearest mode downsampling for lower resolution
|
|
|
|
|
blur_prob = 0.5, # cascading ddpm - when training, the gaussian blur is only applied 50% of the time
|
|
|
|
|
blur_sigma = 0.6, # cascading ddpm - blur sigma
|
|
|
|
|
blur_kernel_size = 3, # cascading ddpm - blur kernel size
|
|
|
|
|
clip_denoised = True,
|
|
|
|
|
@@ -1876,7 +2044,8 @@ class Decoder(nn.Module):
|
|
|
|
|
use_dynamic_thres = False, # from the Imagen paper
|
|
|
|
|
dynamic_thres_percentile = 0.9,
|
|
|
|
|
p2_loss_weight_gamma = 0., # p2 loss weight, from https://arxiv.org/abs/2204.00227 - 0 is equivalent to weight of 1 across time - 1. is recommended
|
|
|
|
|
p2_loss_weight_k = 1
p2_loss_weight_k = 1,
|
|
|
|
|
ddim_sampling_eta = 1. # can be set to 0. for deterministic sampling afaict
|
|
|
|
|
):
|
|
|
|
|
super().__init__()
|
|
|
|
|
|
|
|
|
|
@@ -1956,6 +2125,11 @@ class Decoder(nn.Module):
|
|
|
|
|
self.unets.append(one_unet)
|
|
|
|
|
self.vaes.append(one_vae.copy_for_eval())
|
|
|
|
|
|
|
|
|
|
# sampling timesteps, defaults to non-ddim with full timesteps sampling
|
|
|
|
|
|
|
|
|
|
self.sample_timesteps = cast_tuple(sample_timesteps, num_unets)
|
|
|
|
|
self.ddim_sampling_eta = ddim_sampling_eta
|
|
|
|
|
|
|
|
|
|
# create noise schedulers per unet
|
|
|
|
|
|
|
|
|
|
if not exists(beta_schedule):
|
|
|
|
|
@@ -1966,7 +2140,9 @@ class Decoder(nn.Module):
|
|
|
|
|
|
|
|
|
|
self.noise_schedulers = nn.ModuleList([])
|
|
|
|
|
|
|
|
|
|
for unet_beta_schedule, unet_p2_loss_weight_gamma in zip(beta_schedule, p2_loss_weight_gamma):
for ind, (unet_beta_schedule, unet_p2_loss_weight_gamma, sample_timesteps) in enumerate(zip(beta_schedule, p2_loss_weight_gamma, self.sample_timesteps)):
|
|
|
|
|
assert not exists(sample_timesteps) or sample_timesteps <= timesteps, f'sampling timesteps {sample_timesteps} must be less than or equal to the number of training timesteps {timesteps} for unet {ind + 1}'
|
|
|
|
|
|
|
|
|
|
noise_scheduler = NoiseScheduler(
|
|
|
|
|
beta_schedule = unet_beta_schedule,
|
|
|
|
|
timesteps = timesteps,
|
|
|
|
|
@@ -2005,7 +2181,7 @@ class Decoder(nn.Module):
|
|
|
|
|
|
|
|
|
|
self.to_lowres_cond = LowresConditioner(
|
|
|
|
|
downsample_first = lowres_downsample_first,
|
|
|
|
|
downsample_mode_nearest = lowres_downsample_mode_nearest,
|
|
|
|
|
blur_prob = blur_prob,
|
|
|
|
|
blur_sigma = blur_sigma,
|
|
|
|
|
blur_kernel_size = blur_kernel_size,
|
|
|
|
|
input_image_range = self.input_image_range
|
|
|
|
|
@@ -2067,10 +2243,30 @@ class Decoder(nn.Module):
|
|
|
|
|
for unet, device in zip(self.unets, devices):
|
|
|
|
|
unet.to(device)
|
|
|
|
|
|
|
|
|
|
def p_mean_variance(self, unet, x, t, image_embed, noise_scheduler, text_encodings = None, text_mask = None, lowres_cond_img = None, clip_denoised = True, predict_x_start = False, learned_variance = False, cond_scale = 1., model_output = None):
def dynamic_threshold(self, x):
|
|
|
|
|
""" proposed in https://arxiv.org/abs/2205.11487 as an improved clamping in the setting of classifier free guidance """
|
|
|
|
|
|
|
|
|
|
# s is the threshold amount
|
|
|
|
|
# static thresholding would just be s = 1
|
|
|
|
|
s = 1.
|
|
|
|
|
if self.use_dynamic_thres:
|
|
|
|
|
s = torch.quantile(
|
|
|
|
|
rearrange(x, 'b ... -> b (...)').abs(),
|
|
|
|
|
self.dynamic_thres_percentile,
|
|
|
|
|
dim = -1
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
s.clamp_(min = 1.)
|
|
|
|
|
s = s.view(-1, *((1,) * (x.ndim - 1)))
|
|
|
|
|
|
|
|
|
|
# clip by threshold, depending on whether static or dynamic
|
|
|
|
|
x = x.clamp(-s, s) / s
|
|
|
|
|
return x
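# a toy illustration (not from the repo, assumed values) of the dynamic thresholding above: the clamp
# threshold s is taken from a high quantile of |x|, so a few large outliers produced by strong guidance
# get squashed while the bulk of values keep their relative scale after the divide by s

import torch
from einops import rearrange

x = torch.randn(2, 3, 8, 8)
x[0, 0, 0, 0] = 25.                           # an outlier

s = torch.quantile(rearrange(x, 'b ... -> b (...)').abs(), 0.9, dim = -1)
s.clamp_(min = 1.)
s = s.view(-1, *((1,) * (x.ndim - 1)))

x = x.clamp(-s, s) / s                        # everything ends up back in [-1, 1]
print(x.abs().max())                          # tensor(1.)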
|
|
|
|
|
|
|
|
|
|
def p_mean_variance(self, unet, x, t, image_embed, noise_scheduler, text_encodings = None, lowres_cond_img = None, clip_denoised = True, predict_x_start = False, learned_variance = False, cond_scale = 1., model_output = None):
|
|
|
|
|
assert not (cond_scale != 1. and not self.can_classifier_guidance), 'the decoder was not trained with conditional dropout, and thus one cannot use classifier free guidance (cond_scale anything other than 1)'
|
|
|
|
|
|
|
|
|
|
pred = default(model_output, lambda: unet.forward_with_cond_scale(x, t, image_embed = image_embed, text_encodings = text_encodings, text_mask = text_mask, cond_scale = cond_scale, lowres_cond_img = lowres_cond_img))
pred = default(model_output, lambda: unet.forward_with_cond_scale(x, t, image_embed = image_embed, text_encodings = text_encodings, cond_scale = cond_scale, lowres_cond_img = lowres_cond_img))
|
|
|
|
|
|
|
|
|
|
if learned_variance:
|
|
|
|
|
pred, var_interp_frac_unnormalized = pred.chunk(2, dim = 1)
|
|
|
|
|
@@ -2081,21 +2277,7 @@ class Decoder(nn.Module):
|
|
|
|
|
x_recon = noise_scheduler.predict_start_from_noise(x, t = t, noise = pred)
|
|
|
|
|
|
|
|
|
|
if clip_denoised:
|
|
|
|
|
# s is the threshold amount
|
|
|
|
|
# static thresholding would just be s = 1
|
|
|
|
|
s = 1.
|
|
|
|
|
if self.use_dynamic_thres:
|
|
|
|
|
s = torch.quantile(
|
|
|
|
|
rearrange(x_recon, 'b ... -> b (...)').abs(),
|
|
|
|
|
self.dynamic_thres_percentile,
|
|
|
|
|
dim = -1
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
s.clamp_(min = 1.)
|
|
|
|
|
s = s.view(-1, *((1,) * (x_recon.ndim - 1)))
|
|
|
|
|
|
|
|
|
|
# clip by threshold, depending on whether static or dynamic
|
|
|
|
|
x_recon = x_recon.clamp(-s, s) / s
x_recon = self.dynamic_threshold(x_recon)
|
|
|
|
|
|
|
|
|
|
model_mean, posterior_variance, posterior_log_variance = noise_scheduler.q_posterior(x_start=x_recon, x_t=x, t=t)
|
|
|
|
|
|
|
|
|
|
@@ -2116,16 +2298,16 @@ class Decoder(nn.Module):
|
|
|
|
|
return model_mean, posterior_variance, posterior_log_variance
|
|
|
|
|
|
|
|
|
|
@torch.no_grad()
|
|
|
|
|
def p_sample(self, unet, x, t, image_embed, noise_scheduler, text_encodings = None, text_mask = None, cond_scale = 1., lowres_cond_img = None, predict_x_start = False, learned_variance = False, clip_denoised = True):
def p_sample(self, unet, x, t, image_embed, noise_scheduler, text_encodings = None, cond_scale = 1., lowres_cond_img = None, predict_x_start = False, learned_variance = False, clip_denoised = True):
|
|
|
|
|
b, *_, device = *x.shape, x.device
|
|
|
|
|
model_mean, _, model_log_variance = self.p_mean_variance(unet, x = x, t = t, image_embed = image_embed, text_encodings = text_encodings, text_mask = text_mask, cond_scale = cond_scale, lowres_cond_img = lowres_cond_img, clip_denoised = clip_denoised, predict_x_start = predict_x_start, noise_scheduler = noise_scheduler, learned_variance = learned_variance)
model_mean, _, model_log_variance = self.p_mean_variance(unet, x = x, t = t, image_embed = image_embed, text_encodings = text_encodings, cond_scale = cond_scale, lowres_cond_img = lowres_cond_img, clip_denoised = clip_denoised, predict_x_start = predict_x_start, noise_scheduler = noise_scheduler, learned_variance = learned_variance)
|
|
|
|
|
noise = torch.randn_like(x)
|
|
|
|
|
# no noise when t == 0
|
|
|
|
|
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
|
|
|
|
|
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
|
|
|
|
|
|
|
|
|
|
@torch.no_grad()
|
|
|
|
|
def p_sample_loop(self, unet, shape, image_embed, noise_scheduler, predict_x_start = False, learned_variance = False, clip_denoised = True, lowres_cond_img = None, text_encodings = None, text_mask = None, cond_scale = 1, is_latent_diffusion = False):
def p_sample_loop_ddpm(self, unet, shape, image_embed, noise_scheduler, predict_x_start = False, learned_variance = False, clip_denoised = True, lowres_cond_img = None, text_encodings = None, cond_scale = 1, is_latent_diffusion = False):
|
|
|
|
|
device = self.device
|
|
|
|
|
|
|
|
|
|
b = shape[0]
|
|
|
|
|
@@ -2141,7 +2323,6 @@ class Decoder(nn.Module):
|
|
|
|
|
torch.full((b,), i, device = device, dtype = torch.long),
|
|
|
|
|
image_embed = image_embed,
|
|
|
|
|
text_encodings = text_encodings,
|
|
|
|
|
text_mask = text_mask,
|
|
|
|
|
cond_scale = cond_scale,
|
|
|
|
|
lowres_cond_img = lowres_cond_img,
|
|
|
|
|
predict_x_start = predict_x_start,
|
|
|
|
|
@@ -2153,7 +2334,66 @@ class Decoder(nn.Module):
|
|
|
|
|
unnormalize_img = self.unnormalize_img(img)
|
|
|
|
|
return unnormalize_img
|
|
|
|
|
|
|
|
|
|
def p_losses(self, unet, x_start, times, *, image_embed, noise_scheduler, lowres_cond_img = None, text_encodings = None, text_mask = None, predict_x_start = False, noise = None, learned_variance = False, clip_denoised = False, is_latent_diffusion = False):
@torch.no_grad()
|
|
|
|
|
def p_sample_loop_ddim(self, unet, shape, image_embed, noise_scheduler, timesteps, eta = 1., predict_x_start = False, learned_variance = False, clip_denoised = True, lowres_cond_img = None, text_encodings = None, cond_scale = 1, is_latent_diffusion = False):
|
|
|
|
|
batch, device, total_timesteps, alphas, eta = shape[0], self.device, noise_scheduler.num_timesteps, noise_scheduler.alphas_cumprod_prev, self.ddim_sampling_eta
|
|
|
|
|
|
|
|
|
|
times = torch.linspace(0., total_timesteps, steps = timesteps + 2)[:-1]
|
|
|
|
|
|
|
|
|
|
times = list(reversed(times.int().tolist()))
|
|
|
|
|
time_pairs = list(zip(times[:-1], times[1:]))
|
|
|
|
|
|
|
|
|
|
img = torch.randn(shape, device = device)
|
|
|
|
|
|
|
|
|
|
if not is_latent_diffusion:
|
|
|
|
|
lowres_cond_img = maybe(self.normalize_img)(lowres_cond_img)
|
|
|
|
|
|
|
|
|
|
for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step'):
|
|
|
|
|
alpha = alphas[time]
|
|
|
|
|
alpha_next = alphas[time_next]
|
|
|
|
|
|
|
|
|
|
time_cond = torch.full((batch,), time, device = device, dtype = torch.long)
|
|
|
|
|
|
|
|
|
|
pred = unet.forward_with_cond_scale(img, time_cond, image_embed = image_embed, text_encodings = text_encodings, cond_scale = cond_scale, lowres_cond_img = lowres_cond_img)
|
|
|
|
|
|
|
|
|
|
if learned_variance:
|
|
|
|
|
pred, _ = pred.chunk(2, dim = 1)
|
|
|
|
|
|
|
|
|
|
if predict_x_start:
|
|
|
|
|
x_start = pred
|
|
|
|
|
pred_noise = noise_scheduler.predict_noise_from_start(img, t = time_cond, x0 = pred)
|
|
|
|
|
else:
|
|
|
|
|
x_start = noise_scheduler.predict_start_from_noise(img, t = time_cond, noise = pred)
|
|
|
|
|
pred_noise = pred
|
|
|
|
|
|
|
|
|
|
if clip_denoised:
|
|
|
|
|
x_start = self.dynamic_threshold(x_start)
|
|
|
|
|
|
|
|
|
|
c1 = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
|
|
|
|
|
c2 = ((1 - alpha_next) - torch.square(c1)).sqrt()
|
|
|
|
|
noise = torch.randn_like(img) if time_next > 0 else 0.
|
|
|
|
|
|
|
|
|
|
img = x_start * alpha_next.sqrt() + \
|
|
|
|
|
c1 * noise + \
|
|
|
|
|
c2 * pred_noise
|
|
|
|
|
|
|
|
|
|
img = self.unnormalize_img(img)
|
|
|
|
|
return img
|
|
|
|
|
|
|
|
|
|
@torch.no_grad()
|
|
|
|
|
def p_sample_loop(self, *args, noise_scheduler, timesteps = None, **kwargs):
|
|
|
|
|
num_timesteps = noise_scheduler.num_timesteps
|
|
|
|
|
|
|
|
|
|
timesteps = default(timesteps, num_timesteps)
|
|
|
|
|
assert timesteps <= num_timesteps
|
|
|
|
|
is_ddim = timesteps < num_timesteps
|
|
|
|
|
|
|
|
|
|
if not is_ddim:
|
|
|
|
|
return self.p_sample_loop_ddpm(*args, noise_scheduler = noise_scheduler, **kwargs)
|
|
|
|
|
|
|
|
|
|
return self.p_sample_loop_ddim(*args, noise_scheduler = noise_scheduler, timesteps = timesteps, **kwargs)
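# a small sketch (assumed values) of how the per-unet sampling settings above get distributed:
# scalars are broadcast across unets by cast_tuple, so one decoder-wide value or a per-unet tuple
# both work for sample_timesteps and cond_scale

num_unets = 2
sample_timesteps = (None, 250)                       # None -> full ddpm, 250 -> ddim for unet 2
cond_scale = cast_tuple(3.5, num_unets)              # -> (3.5, 3.5), using cast_tuple from the top of this file
print(list(zip(sample_timesteps, cond_scale)))       # [(None, 3.5), (250, 3.5)]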
|
|
|
|
|
|
|
|
|
|
def p_losses(self, unet, x_start, times, *, image_embed, noise_scheduler, lowres_cond_img = None, text_encodings = None, predict_x_start = False, noise = None, learned_variance = False, clip_denoised = False, is_latent_diffusion = False):
|
|
|
|
|
noise = default(noise, lambda: torch.randn_like(x_start))
|
|
|
|
|
|
|
|
|
|
# normalize to [-1, 1]
|
|
|
|
|
@@ -2171,7 +2411,6 @@ class Decoder(nn.Module):
|
|
|
|
|
times,
|
|
|
|
|
image_embed = image_embed,
|
|
|
|
|
text_encodings = text_encodings,
|
|
|
|
|
text_mask = text_mask,
|
|
|
|
|
lowres_cond_img = lowres_cond_img,
|
|
|
|
|
image_cond_drop_prob = self.image_cond_drop_prob,
|
|
|
|
|
text_cond_drop_prob = self.text_cond_drop_prob,
|
|
|
|
|
@@ -2231,7 +2470,6 @@ class Decoder(nn.Module):
|
|
|
|
|
self,
|
|
|
|
|
image_embed = None,
|
|
|
|
|
text = None,
|
|
|
|
|
text_mask = None,
|
|
|
|
|
text_encodings = None,
|
|
|
|
|
batch_size = 1,
|
|
|
|
|
cond_scale = 1.,
|
|
|
|
|
@@ -2245,7 +2483,7 @@ class Decoder(nn.Module):
|
|
|
|
|
|
|
|
|
|
if exists(text) and not exists(text_encodings) and not self.unconditional:
|
|
|
|
|
assert exists(self.clip)
|
|
|
|
|
_, text_encodings, text_mask = self.clip.embed_text(text)
_, text_encodings = self.clip.embed_text(text)
|
|
|
|
|
|
|
|
|
|
assert not (self.condition_on_text_encodings and not exists(text_encodings)), 'text or text encodings must be passed into decoder if specified'
|
|
|
|
|
assert not (not self.condition_on_text_encodings and exists(text_encodings)), 'decoder specified not to be conditioned on text, yet it is presented'
|
|
|
|
|
@@ -2253,7 +2491,10 @@ class Decoder(nn.Module):
|
|
|
|
|
img = None
|
|
|
|
|
is_cuda = next(self.parameters()).is_cuda
|
|
|
|
|
|
|
|
|
|
for unet_number, unet, vae, channel, image_size, predict_x_start, learned_variance, noise_scheduler in tqdm(zip(range(1, len(self.unets) + 1), self.unets, self.vaes, self.sample_channels, self.image_sizes, self.predict_x_start, self.learned_variance, self.noise_schedulers)):
num_unets = len(self.unets)
|
|
|
|
|
cond_scale = cast_tuple(cond_scale, num_unets)
|
|
|
|
|
|
|
|
|
|
for unet_number, unet, vae, channel, image_size, predict_x_start, learned_variance, noise_scheduler, sample_timesteps, unet_cond_scale in tqdm(zip(range(1, num_unets + 1), self.unets, self.vaes, self.sample_channels, self.image_sizes, self.predict_x_start, self.learned_variance, self.noise_schedulers, self.sample_timesteps, cond_scale)):
|
|
|
|
|
|
|
|
|
|
context = self.one_unet_in_gpu(unet = unet) if is_cuda and not distributed else null_context()
|
|
|
|
|
|
|
|
|
|
@@ -2262,7 +2503,7 @@ class Decoder(nn.Module):
|
|
|
|
|
shape = (batch_size, channel, image_size, image_size)
|
|
|
|
|
|
|
|
|
|
if unet.lowres_cond:
|
|
|
|
|
lowres_cond_img = self.to_lowres_cond(img, target_image_size = image_size)
lowres_cond_img = resize_image_to(img, target_image_size = image_size, clamp_range = self.input_image_range, nearest = True)
|
|
|
|
|
|
|
|
|
|
is_latent_diffusion = isinstance(vae, VQGanVAE)
|
|
|
|
|
image_size = vae.get_encoded_fmap_size(image_size)
|
|
|
|
|
@@ -2275,14 +2516,14 @@ class Decoder(nn.Module):
|
|
|
|
|
shape,
|
|
|
|
|
image_embed = image_embed,
|
|
|
|
|
text_encodings = text_encodings,
|
|
|
|
|
text_mask = text_mask,
|
|
|
|
|
cond_scale = cond_scale,
cond_scale = unet_cond_scale,
|
|
|
|
|
predict_x_start = predict_x_start,
|
|
|
|
|
learned_variance = learned_variance,
|
|
|
|
|
clip_denoised = not is_latent_diffusion,
|
|
|
|
|
lowres_cond_img = lowres_cond_img,
|
|
|
|
|
is_latent_diffusion = is_latent_diffusion,
|
|
|
|
|
noise_scheduler = noise_scheduler
noise_scheduler = noise_scheduler,
|
|
|
|
|
timesteps = sample_timesteps
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
img = vae.decode(img)
|
|
|
|
|
@@ -2298,7 +2539,6 @@ class Decoder(nn.Module):
|
|
|
|
|
text = None,
|
|
|
|
|
image_embed = None,
|
|
|
|
|
text_encodings = None,
|
|
|
|
|
text_mask = None,
|
|
|
|
|
unet_number = None,
|
|
|
|
|
return_lowres_cond_image = False # whether to return the low resolution conditioning images, for debugging upsampler purposes
|
|
|
|
|
):
|
|
|
|
|
@@ -2327,13 +2567,13 @@ class Decoder(nn.Module):
|
|
|
|
|
|
|
|
|
|
if exists(text) and not exists(text_encodings) and not self.unconditional:
|
|
|
|
|
assert exists(self.clip), 'if you are passing in raw text, you need to supply `clip` to the decoder'
|
|
|
|
|
_, text_encodings, text_mask = self.clip.embed_text(text)
_, text_encodings = self.clip.embed_text(text)
|
|
|
|
|
|
|
|
|
|
assert not (self.condition_on_text_encodings and not exists(text_encodings)), 'text or text encodings must be passed into decoder if specified'
|
|
|
|
|
assert not (not self.condition_on_text_encodings and exists(text_encodings)), 'decoder specified not to be conditioned on text, yet it is presented'
|
|
|
|
|
|
|
|
|
|
lowres_cond_img = self.to_lowres_cond(image, target_image_size = target_image_size, downsample_image_size = self.image_sizes[unet_index - 1]) if unet_number > 1 else None
|
|
|
|
|
image = resize_image_to(image, target_image_size)
image = resize_image_to(image, target_image_size, nearest = True)
|
|
|
|
|
|
|
|
|
|
if exists(random_crop_size):
|
|
|
|
|
aug = K.RandomCrop((random_crop_size, random_crop_size), p = 1.)
|
|
|
|
|
@@ -2350,7 +2590,7 @@ class Decoder(nn.Module):
|
|
|
|
|
image = vae.encode(image)
|
|
|
|
|
lowres_cond_img = maybe(vae.encode)(lowres_cond_img)
|
|
|
|
|
|
|
|
|
|
losses = self.p_losses(unet, image, times, image_embed = image_embed, text_encodings = text_encodings, text_mask = text_mask, lowres_cond_img = lowres_cond_img, predict_x_start = predict_x_start, learned_variance = learned_variance, is_latent_diffusion = is_latent_diffusion, noise_scheduler = noise_scheduler)
losses = self.p_losses(unet, image, times, image_embed = image_embed, text_encodings = text_encodings, lowres_cond_img = lowres_cond_img, predict_x_start = predict_x_start, learned_variance = learned_variance, is_latent_diffusion = is_latent_diffusion, noise_scheduler = noise_scheduler)
|
|
|
|
|
|
|
|
|
|
if not return_lowres_cond_image:
|
|
|
|
|
return losses