Mirror of https://github.com/lucidrains/DALLE2-pytorch.git, synced 2026-02-14 12:25:28 +01:00
Compare commits (4 commits):

- 3dda2570ed
- 2f3c02dba8
- 908088cfea
- 8dc8a3de0d
```diff
@@ -1003,6 +1003,7 @@ Once built, images will be saved to the same directory the command is invoked
 - [x] pull logic for training diffusion prior into a class DiffusionPriorTrainer, for eventual script based + CLI based training
 - [x] make sure the cascading ddpm in the repository can be trained unconditionally, offer a one-line CLI tool for training on a folder of images
 - [x] bring in cross-scale embedding from iclr paper https://github.com/lucidrains/vit-pytorch/blob/main/vit_pytorch/crossformer.py#L14
+- [x] cross embed layers for downsampling, as an option
 - [ ] become an expert with unets, cleanup unet code, make it fully configurable, port all learnings over to https://github.com/lucidrains/x-unet (test out unet² in ddpm repo) - consider https://github.com/lucidrains/uformer-pytorch attention-based unet
 - [ ] transcribe code to Jax, which lowers the activation energy for distributed training, given access to TPUs
 - [ ] train on a toy task, offer in colab
```
```diff
@@ -41,9 +41,6 @@ def exists(val):
 def identity(t, *args, **kwargs):
     return t
 
-def is_odd(n):
-    return (n % 2) == 1
-
 def default(val, d):
     if exists(val):
         return val
```
```diff
@@ -306,7 +303,7 @@ def cosine_beta_schedule(timesteps, s = 0.008):
     as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
     """
     steps = timesteps + 1
-    x = torch.linspace(0, timesteps, steps)
+    x = torch.linspace(0, timesteps, steps, dtype = torch.float64)
     alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
     alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
     betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
```
```diff
@@ -317,21 +314,21 @@ def linear_beta_schedule(timesteps):
     scale = 1000 / timesteps
     beta_start = scale * 0.0001
     beta_end = scale * 0.02
-    return torch.linspace(beta_start, beta_end, timesteps)
+    return torch.linspace(beta_start, beta_end, timesteps, dtype = torch.float64)
 
 
 def quadratic_beta_schedule(timesteps):
     scale = 1000 / timesteps
     beta_start = scale * 0.0001
     beta_end = scale * 0.02
-    return torch.linspace(beta_start**2, beta_end**2, timesteps) ** 2
+    return torch.linspace(beta_start**2, beta_end**2, timesteps, dtype = torch.float64) ** 2
 
 
 def sigmoid_beta_schedule(timesteps):
     scale = 1000 / timesteps
     beta_start = scale * 0.0001
     beta_end = scale * 0.02
-    betas = torch.linspace(-6, 6, timesteps)
+    betas = torch.linspace(-6, 6, timesteps, dtype = torch.float64)
     return torch.sigmoid(betas) * (beta_end - beta_start) + beta_start
 
 
```
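All four schedule functions now build their tensors in double precision. A minimal standalone sketch of the motivation (not repo code): near the end of a 1000-step chain alphas_cumprod becomes vanishingly small, and the float32 version of, say, the cosine schedule drifts badly in relative terms exactly where the derived beta and posterior expressions divide by it.

```python
import torch

# standalone sketch: the cosine schedule's cumulative alpha product,
# computed in float32 vs float64
def cosine_alphas_cumprod(timesteps, s = 0.008, dtype = torch.float64):
    steps = timesteps + 1
    x = torch.linspace(0, timesteps, steps, dtype = dtype)
    alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
    return alphas_cumprod / alphas_cumprod[0]

a32 = cosine_alphas_cumprod(1000, dtype = torch.float32).double()
a64 = cosine_alphas_cumprod(1000)

# the absolute error is tiny, but the relative error blows up near the
# tail of the chain, where alphas_cumprod itself approaches zero
rel_err = ((a32 - a64).abs() / a64).max()
print(rel_err)
```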
```diff
@@ -371,17 +368,21 @@ class BaseGaussianDiffusion(nn.Module):
         self.loss_type = loss_type
         self.loss_fn = loss_fn
 
-        self.register_buffer('betas', betas)
-        self.register_buffer('alphas_cumprod', alphas_cumprod)
-        self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
+        # register buffer helper function to cast double back to float
+
+        register_buffer = lambda name, val: self.register_buffer(name, val.to(torch.float32))
+
+        register_buffer('betas', betas)
+        register_buffer('alphas_cumprod', alphas_cumprod)
+        register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
 
         # calculations for diffusion q(x_t | x_{t-1}) and others
 
-        self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
-        self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
-        self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
-        self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
-        self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
+        register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
+        register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
+        register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
+        register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
+        register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
 
         # calculations for posterior q(x_{t-1} | x_t, x_0)
 
```
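The new register_buffer closure is the other half of the dtype change: the derived terms are computed from the float64 schedule, then cast back to float32 on registration so state dicts and forward-pass arithmetic stay in single precision. A minimal standalone sketch of the pattern (class and buffer names hypothetical, not repo code):

```python
import torch
from torch import nn

class SchedulePrecision(nn.Module):
    """Compute schedule constants in float64, store buffers in float32."""
    def __init__(self, timesteps = 1000):
        super().__init__()
        betas = torch.linspace(1e-4, 0.02, timesteps, dtype = torch.float64)
        alphas_cumprod = torch.cumprod(1. - betas, dim = 0)

        # same helper as in the diff: cast double back to float on registration
        register_buffer = lambda name, val: self.register_buffer(name, val.to(torch.float32))

        register_buffer('betas', betas)
        register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))

m = SchedulePrecision()
print(m.betas.dtype)  # torch.float32: the float64 math never leaks out of __init__
```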
```diff
@@ -389,13 +390,13 @@ class BaseGaussianDiffusion(nn.Module):
 
         # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
 
-        self.register_buffer('posterior_variance', posterior_variance)
+        register_buffer('posterior_variance', posterior_variance)
 
         # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
 
-        self.register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min = 1e-20)))
-        self.register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
-        self.register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
+        register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min = 1e-20)))
+        register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
+        register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
 
     def q_mean_variance(self, x_start, t):
         mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
```
```diff
@@ -1235,12 +1236,13 @@ class CrossEmbedLayer(nn.Module):
     def __init__(
         self,
         dim_in,
-        dim_out,
         kernel_sizes,
+        dim_out = None,
         stride = 2
     ):
         super().__init__()
-        assert all([*map(is_odd, kernel_sizes)])
+        assert all([*map(lambda t: (t % 2) == (stride % 2), kernel_sizes)])
+        dim_out = default(dim_out, dim_in)
 
         kernel_sizes = sorted(kernel_sizes)
         num_scales = len(kernel_sizes)
```
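The old assert only allowed odd kernel sizes (hence the removal of is_odd above); the new one allows any kernel whose parity matches the stride, which is what lets the (2, 4) downsampling kernels through. The rule follows from 'same'-style padding of the form padding = (kernel - stride) // 2, as in the crossformer reference linked in the todo list: when kernel and stride share parity, every branch of the cross-embed layer produces the same output size. A small sketch, assuming that padding formula:

```python
import torch
from torch import nn

x = torch.randn(1, 8, 32, 32)

# even stride pairs with even kernels (the new cross-embed downsample case)
stride = 2
for kernel in (2, 4):
    conv = nn.Conv2d(8, 8, kernel, stride = stride, padding = (kernel - stride) // 2)
    print(kernel, tuple(conv(x).shape))  # both branches: (1, 8, 16, 16)

# odd stride pairs with odd kernels (the stride-1 init conv case)
stride = 1
for kernel in (3, 7, 15):
    conv = nn.Conv2d(8, 8, kernel, stride = stride, padding = (kernel - stride) // 2)
    print(kernel, tuple(conv(x).shape))  # all branches: (1, 8, 32, 32)
```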
```diff
@@ -1282,6 +1284,8 @@ class Unet(nn.Module):
         init_conv_kernel_size = 7,
         resnet_groups = 8,
         init_cross_embed_kernel_sizes = (3, 7, 15),
+        cross_embed_downsample = False,
+        cross_embed_downsample_kernel_sizes = (2, 4),
         **kwargs
     ):
         super().__init__()
```
||||||
@@ -1302,7 +1306,7 @@ class Unet(nn.Module):
|
|||||||
init_channels = channels if not lowres_cond else channels * 2 # in cascading diffusion, one concats the low resolution image, blurred, for conditioning the higher resolution synthesis
|
init_channels = channels if not lowres_cond else channels * 2 # in cascading diffusion, one concats the low resolution image, blurred, for conditioning the higher resolution synthesis
|
||||||
init_dim = default(init_dim, dim // 3 * 2)
|
init_dim = default(init_dim, dim // 3 * 2)
|
||||||
|
|
||||||
self.init_conv = CrossEmbedLayer(init_channels, init_dim, init_cross_embed_kernel_sizes, stride = 1)
|
self.init_conv = CrossEmbedLayer(init_channels, dim_out = init_dim, kernel_sizes = init_cross_embed_kernel_sizes, stride = 1)
|
||||||
|
|
||||||
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
|
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
|
||||||
in_out = list(zip(dims[:-1], dims[1:]))
|
in_out = list(zip(dims[:-1], dims[1:]))
|
||||||
@@ -1362,6 +1366,12 @@ class Unet(nn.Module):
|
|||||||
|
|
||||||
assert len(resnet_groups) == len(in_out)
|
assert len(resnet_groups) == len(in_out)
|
||||||
|
|
||||||
|
# downsample klass
|
||||||
|
|
||||||
|
downsample_klass = Downsample
|
||||||
|
if cross_embed_downsample:
|
||||||
|
downsample_klass = partial(CrossEmbedLayer, kernel_sizes = cross_embed_downsample_kernel_sizes)
|
||||||
|
|
||||||
# layers
|
# layers
|
||||||
|
|
||||||
self.downs = nn.ModuleList([])
|
self.downs = nn.ModuleList([])
|
||||||
```diff
@@ -1377,7 +1387,7 @@ class Unet(nn.Module):
                 ResnetBlock(dim_in, dim_out, time_cond_dim = time_cond_dim, groups = groups),
                 Residual(LinearAttention(dim_out, **attn_kwargs)) if sparse_attn else nn.Identity(),
                 ResnetBlock(dim_out, dim_out, cond_dim = layer_cond_dim, time_cond_dim = time_cond_dim, groups = groups),
-                Downsample(dim_out) if not is_last else nn.Identity()
+                downsample_klass(dim_out) if not is_last else nn.Identity()
             ]))
 
         mid_dim = dims[-1]
```
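With the two new hyperparameters wired through, every Downsample(dim_out) call site can transparently become a stride-2 CrossEmbedLayer instead. This also explains the signature change above: downsample_klass is called with a single positional dim, so dim_out has to default to dim_in. A hedged usage sketch of what the Unet now does internally (the import path is assumed and may differ):

```python
from functools import partial

from dalle2_pytorch.dalle2_pytorch import CrossEmbedLayer  # import path assumed

# what cross_embed_downsample = True swaps in for Downsample
downsample_klass = partial(CrossEmbedLayer, kernel_sizes = (2, 4))

# called exactly like Downsample(dim_out): stride defaults to 2, and the
# new dim_out = None default preserves channels (dim_out falls back to dim_in)
down = downsample_klass(64)
```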
```diff
@@ -1482,11 +1492,12 @@ class Unet(nn.Module):
 
         if self.cond_on_image_embeds:
             image_tokens = self.image_to_cond(image_embed)
+            null_image_embed = self.null_image_embed.to(image_tokens.dtype) # for some reason pytorch AMP not working
 
             image_tokens = torch.where(
                 image_keep_mask,
                 image_tokens,
-                self.null_image_embed
+                null_image_embed
             )
 
         # take care of text encodings (optional)
```
```diff
@@ -1510,10 +1521,12 @@ class Unet(nn.Module):
             text_mask = rearrange(text_mask, 'b n -> b n 1')
             text_keep_mask = text_mask & text_keep_mask
 
+            null_text_embed = self.null_text_embed.to(text_tokens.dtype) # for some reason pytorch AMP not working
+
             text_tokens = torch.where(
                 text_keep_mask,
                 text_tokens,
-                self.null_text_embed
+                null_text_embed
             )
 
         # main conditioning tokens (c)
```
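Both conditioning branches get the same fix. Under mixed precision, image_tokens and text_tokens come out of autocast-wrapped layers in float16 while the learned null embeddings remain float32 parameters, and torch.where does not reconcile the two dtypes the way autocast does for a matmul. A standalone sketch of the failure mode and the fix (not repo code):

```python
import torch

tokens = torch.randn(2, 4, 512, dtype = torch.float16)  # as produced under autocast
null_embed = torch.randn(1, 4, 512)                     # learned parameter, float32
keep_mask = torch.rand(2, 1, 1) > 0.5

# torch.where(keep_mask, tokens, null_embed)
# older pytorch raises a dtype mismatch on the line above; newer versions
# silently promote the result to float32, which also defeats the
# half-precision path

out = torch.where(keep_mask, tokens, null_embed.to(tokens.dtype))  # the diff's fix
print(out.dtype)  # torch.float16
```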