@@ -303,7 +303,7 @@ def cosine_beta_schedule(timesteps, s = 0.008):
     as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
     """
     steps = timesteps + 1
-    x = torch.linspace(0, timesteps, steps)
+    x = torch.linspace(0, timesteps, steps, dtype = torch.float64)
     alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
     alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
     betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
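
Note: the hunk above only widens the dtype of the time grid; the schedule itself is the cosine schedule from the linked paper, i.e. alpha_bar(t) = f(t) / f(0) with f(t) = cos^2(((t/T + s) / (1 + s)) * pi / 2), and beta_t = 1 - alpha_bar(t) / alpha_bar(t-1). A minimal standalone sketch of the same computation (the helper name below is illustrative, not part of the library):

    import torch

    def cosine_alphas_cumprod(timesteps, s = 0.008):
        # f(t) = cos^2(((t/T + s) / (1 + s)) * pi / 2), evaluated on a float64 grid,
        # then normalized by f(0) so alpha_bar starts at exactly 1
        t = torch.linspace(0, timesteps, timesteps + 1, dtype = torch.float64)
        f = torch.cos(((t / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
        return f / f[0]

    alphas_cumprod = cosine_alphas_cumprod(1000)
    betas = 1 - alphas_cumprod[1:] / alphas_cumprod[:-1]  # same beta_t as the patched function
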
@@ -314,21 +314,21 @@ def linear_beta_schedule(timesteps):
     scale = 1000 / timesteps
     beta_start = scale * 0.0001
     beta_end = scale * 0.02
-    return torch.linspace(beta_start, beta_end, timesteps)
+    return torch.linspace(beta_start, beta_end, timesteps, dtype = torch.float64)
 
 def quadratic_beta_schedule(timesteps):
     scale = 1000 / timesteps
     beta_start = scale * 0.0001
     beta_end = scale * 0.02
-    return torch.linspace(beta_start**2, beta_end**2, timesteps) ** 2
+    return torch.linspace(beta_start**2, beta_end**2, timesteps, dtype = torch.float64) ** 2
 
 def sigmoid_beta_schedule(timesteps):
     scale = 1000 / timesteps
     beta_start = scale * 0.0001
     beta_end = scale * 0.02
-    betas = torch.linspace(-6, 6, timesteps)
+    betas = torch.linspace(-6, 6, timesteps, dtype = torch.float64)
     return torch.sigmoid(betas) * (beta_end - beta_start) + beta_start
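
All three schedules get the same treatment: build the beta tensor in float64. The apparent motivation (inferred from the change itself, not stated in the diff) is that these betas feed long chains of derived quantities, cumulative products, square roots, logs, and doing that arithmetic in float32 loses relative precision at late timesteps, where alpha_bar is tiny. The buffers are cast back to float32 in the hunks below. A small sketch of the effect:

    import torch

    # illustrative only: relative precision lost when the cumulative product of alphas
    # is built in float32 instead of float64
    betas64 = torch.linspace(1e-4, 0.02, 1000, dtype = torch.float64)
    betas32 = betas64.to(torch.float32)

    ac64 = torch.cumprod(1. - betas64, dim = 0)
    ac32 = torch.cumprod(1. - betas32, dim = 0).double()

    print(((ac32 - ac64) / ac64).abs().max())  # relative error grows toward the end of the chain
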
@@ -368,17 +368,21 @@ class BaseGaussianDiffusion(nn.Module):
         self.loss_type = loss_type
         self.loss_fn = loss_fn
 
-        self.register_buffer('betas', betas)
-        self.register_buffer('alphas_cumprod', alphas_cumprod)
-        self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
+        # register buffer helper function to cast double back to float
+
+        register_buffer = lambda name, val: self.register_buffer(name, val.to(torch.float32))
+
+        register_buffer('betas', betas)
+        register_buffer('alphas_cumprod', alphas_cumprod)
+        register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
 
         # calculations for diffusion q(x_t | x_{t-1}) and others
 
-        self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
-        self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
-        self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
-        self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
-        self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
+        register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
+        register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
+        register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
+        register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
+        register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
 
         # calculations for posterior q(x_{t-1} | x_t, x_0)
 
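
The `register_buffer` lambda is the other half of the float64 change: the schedule math stays in double precision, but what actually gets stored on the module is float32, so the buffers serialize, change devices, and interact with half-precision training like any other float tensor. A minimal sketch of the pattern in isolation (the class name is illustrative):

    import torch
    from torch import nn

    class SchedulePrecisionDemo(nn.Module):
        def __init__(self, betas):
            super().__init__()
            # do the schedule math in float64 ...
            alphas_cumprod = torch.cumprod(1. - betas.double(), dim = 0)

            # ... then store float32 buffers that follow the module around
            register_buffer = lambda name, val: self.register_buffer(name, val.to(torch.float32))
            register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
            register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))

    m = SchedulePrecisionDemo(torch.linspace(1e-4, 0.02, 1000))
    print(m.sqrt_alphas_cumprod.dtype)  # torch.float32
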
@@ -386,13 +390,13 @@ class BaseGaussianDiffusion(nn.Module):
 
         # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
 
-        self.register_buffer('posterior_variance', posterior_variance)
+        register_buffer('posterior_variance', posterior_variance)
 
         # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
 
-        self.register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min =1e-20)))
-        self.register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
-        self.register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
+        register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min =1e-20)))
+        register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
+        register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
 
     def q_mean_variance(self, x_start, t):
         mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
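
For reference, the buffers above are the closed-form DDPM posterior q(x_{t-1} | x_t, x_0): its mean is posterior_mean_coef1 * x_0 + posterior_mean_coef2 * x_t and its variance is posterior_variance (log-clipped because it is 0 at t = 0). They are typically consumed by a q_posterior helper along these lines (sketch only; the exact method body in this file may differ; `extract` is the same gather-and-reshape helper used by q_mean_variance below):

    # sketch of how the registered coefficients are used
    def q_posterior(self, x_start, x_t, t):
        posterior_mean = (
            extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
            extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
        return posterior_mean, posterior_variance, posterior_log_variance_clipped
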
@@ -827,7 +831,7 @@ class DiffusionPrior(BaseGaussianDiffusion):
         image_channels = 3,
         timesteps = 1000,
         cond_drop_prob = 0.,
-        loss_type = "l1",
+        loss_type = "l2",
         predict_x_start = True,
         beta_schedule = "cosine",
         condition_on_text_encodings = True, # the paper suggests this is needed, but you can turn it off for your CLIP preprocessed text embed -> image embed training
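
The only change here is the default objective for DiffusionPrior: L1 -> L2 (mean squared error on the predicted target, matching the original DDPM objective); the Decoder default is flipped the same way in a later hunk. The base class resolves the string roughly as follows (sketch; the exact branching in the repo may differ):

    import torch.nn.functional as F

    def resolve_loss_fn(loss_type):
        # 'l1' -> mean absolute error, 'l2' -> mean squared error
        if loss_type == 'l1':
            return F.l1_loss
        if loss_type == 'l2':
            return F.mse_loss
        raise ValueError(f'unknown loss type {loss_type}')
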
@@ -1488,11 +1492,12 @@ class Unet(nn.Module):
 
         if self.cond_on_image_embeds:
             image_tokens = self.image_to_cond(image_embed)
+            null_image_embed = self.null_image_embed.to(image_tokens.dtype) # for some reason pytorch AMP not working
 
             image_tokens = torch.where(
                 image_keep_mask,
                 image_tokens,
-                self.null_image_embed
+                null_image_embed
             )
 
         # take care of text encodings (optional)
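
The inline comment is terse, but the issue is a dtype mismatch: under mixed precision the projected image_tokens can be float16 while the learned null embedding parameter stays float32, and torch.where with mismatched operand dtypes fails on older PyTorch versions. Casting the null embedding to image_tokens.dtype right before the where sidesteps that. A tiny standalone illustration (shapes are made up):

    import torch

    image_tokens = torch.randn(2, 4, 512, dtype = torch.float16)  # e.g. produced under autocast
    null_image_embed = torch.randn(1, 4, 512)                     # parameter still in float32
    keep = torch.rand(2, 1, 1) < 0.9

    # torch.where(keep, image_tokens, null_image_embed)           # dtype mismatch on older pytorch
    out = torch.where(keep, image_tokens, null_image_embed.to(image_tokens.dtype))  # what the patch does
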
@@ -1516,10 +1521,12 @@ class Unet(nn.Module):
                 text_mask = rearrange(text_mask, 'b n -> b n 1')
                 text_keep_mask = text_mask & text_keep_mask
 
+            null_text_embed = self.null_text_embed.to(text_tokens.dtype) # for some reason pytorch AMP not working
+
             text_tokens = torch.where(
                 text_keep_mask,
                 text_tokens,
-                self.null_text_embed
+                null_text_embed
            )
 
         # main conditioning tokens (c)
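
Same fix for the text branch. Both branches implement classifier-free guidance dropout: a per-sample boolean keep mask decides whether a sample keeps its real conditioning tokens or is swapped for the learned null embedding, so the network also learns the unconditional distribution. The keep mask is drawn with a Bernoulli-style helper along these lines (sketch, not necessarily the repo's exact implementation):

    import torch

    def prob_mask_like(shape, prob, device = None):
        # True = keep the real conditioning, False = replace it with the null embedding
        return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob

    # keep conditioning for ~90% of the batch when the drop probability is 0.1
    image_keep_mask = prob_mask_like((4,), 1 - 0.1, device = 'cpu')
    print(image_keep_mask)
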
@@ -1607,7 +1614,7 @@ class Decoder(BaseGaussianDiffusion):
         timesteps = 1000,
         image_cond_drop_prob = 0.1,
         text_cond_drop_prob = 0.5,
-        loss_type = 'l1',
+        loss_type = 'l2',
         beta_schedule = 'cosine',
         predict_x_start = False,
         predict_x_start_for_latent_diffusion = False,