Mirror of https://github.com/lucidrains/DALLE2-pytorch.git (synced 2026-02-14 22:44:23 +01:00)

Compare commits (9 commits)
Commits in this comparison:

- fa3bb6ba5c
- 2705e7c9b0
- 77141882c8
- 4075d02139
- de0296106b
- eafb136214
- bfbcc283a3
- c30544b73a
- bdf5e9c009
README.md (64 changed lines)
Hunk `@@ -446,6 +446,55 @@`, placed right after the end of the existing prior-training example (the `loss.backward()` call and the closing comment "now the diffusion prior can generate image embeddings from the text embeddings"), adds a CLIP-less variant of that example to the README:

You can also completely go `CLIP`-less, in which case you will need to pass in the `image_embed_dim` into the `DiffusionPrior` on initialization

```python
import torch
from dalle2_pytorch import DiffusionPriorNetwork, DiffusionPrior

# setup prior network, which contains an autoregressive transformer

prior_network = DiffusionPriorNetwork(
    dim = 512,
    depth = 6,
    dim_head = 64,
    heads = 8
).cuda()

# diffusion prior, which wraps the prior network above
# (no CLIP is passed in, so image_embed_dim must be set)

diffusion_prior = DiffusionPrior(
    net = prior_network,
    image_embed_dim = 512,                # this needs to be set
    timesteps = 100,
    cond_drop_prob = 0.2,
    condition_on_text_encodings = False   # this probably should be true, but just to get Laion started
).cuda()

# mock data

text = torch.randint(0, 49408, (4, 256)).cuda()
images = torch.randn(4, 3, 256, 256).cuda()

# precompute the text and image embeddings
# here they are mocked, but they could be produced by any CLIP

clip_image_embeds = torch.randn(4, 512).cuda()
clip_text_embeds = torch.randn(4, 512).cuda()

# feed the text and image embeddings into the diffusion prior

loss = diffusion_prior(
    text_embed = clip_text_embeds,
    image_embed = clip_image_embeds
)

loss.backward()

# do the above for many many many steps
# now the diffusion prior can generate image embeddings from the text embeddings
```
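As an aside (not part of the README diff), the precomputed embeddings above could come from any CLIP. A minimal sketch using the OpenAI `clip` package follows; the package, the `ViT-B/32` checkpoint, the fact that it yields 512-dim embeddings, and the image path are all assumptions of this sketch rather than something the repository prescribes:

```python
import torch
import clip                 # assumed: pip install git+https://github.com/openai/CLIP.git
from PIL import Image

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model, preprocess = clip.load('ViT-B/32', device = device)   # 512-dim embeddings, matching image_embed_dim above

texts = clip.tokenize(['a corgi wearing a beret']).to(device)
image = preprocess(Image.open('corgi.png')).unsqueeze(0).to(device)   # 'corgi.png' is a placeholder path

with torch.no_grad():
    clip_text_embeds = model.encode_text(texts).float()      # (1, 512)
    clip_image_embeds = model.encode_image(image).float()    # (1, 512)

# optionally l2-normalize, as CLIP embeddings usually are
clip_text_embeds = torch.nn.functional.normalize(clip_text_embeds, dim = -1)
clip_image_embeds = torch.nn.functional.normalize(clip_image_embeds, dim = -1)
```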
## Experimental

### DALL-E2 with Latent Diffusion
Hunk `@@ -594,14 +643,15 @@` (in the todo list, after "Once built, images will be saved to the same directory the command is invoked"): the unchecked item "spend one day cleaning up tech debt in decoder" is replaced by a checked item about sharing logic through inheritance, two new items are added, and the duplicate unchecked vit-vqgan entry at the end of the list is dropped. The list now reads:

- [x] build out latent diffusion architecture, with the vq-reg variant (vqgan-vae), make it completely optional and compatible with cascading ddpms
- [x] for decoder, allow ability to customize objective (predict epsilon vs x0), in case latent diffusion does better with prediction of x0
- [x] use attention-based upsampling https://arxiv.org/abs/2112.11435
- [x] use inheritance just this once for sharing logic between decoder and prior network ddpms
- [x] bring in vit-vqgan https://arxiv.org/abs/2110.04627 for the latent diffusion
- [ ] abstract interface for CLIP adapter class, so other CLIPs can be brought in
- [ ] become an expert with unets, cleanup unet code, make it fully configurable, port all learnings over to https://github.com/lucidrains/x-unet
- [ ] copy the cascading ddpm code to a separate repo (perhaps https://github.com/lucidrains/denoising-diffusion-pytorch) as the main contribution of dalle2 really is just the prior network
- [ ] transcribe code to Jax, which lowers the activation energy for distributed training, given access to TPUs
- [ ] train on a toy task, offer in colab
- [ ] extend diffusion head to use diffusion-gan (potentially using lightweight-gan) to speed up inference
- [ ] bring in tools to train vqgan-vae

## Citations
Hunk `@@ -647,4 +697,14 @@` (at the end of the citations section): a new bibtex entry is added after the closing of the previous one, ahead of the closing quote:

```bibtex
@article{Yu2021VectorquantizedIM,
    title   = {Vector-quantized Image Modeling with Improved VQGAN},
    author  = {Jiahui Yu and Xin Li and Jing Yu Koh and Han Zhang and Ruoming Pang and James Qin and Alexander Ku and Yuanzhong Xu and Jason Baldridge and Yonghui Wu},
    journal = {ArXiv},
    year    = {2021},
    volume  = {abs/2110.04627}
}
```

*Creating noise from data is easy; creating data from noise is generative modeling.* - Yang Song's <a href="https://arxiv.org/abs/2011.13456">paper</a>
A 125-line file is deleted in its entirety (`@@ -1,125 +0,0 @@`). The file header was lost in extraction, but from the imports removed further down it is evidently `dalle2_pytorch/attention.py`, the attention-based upsampling module. Its contents were:

```python
import torch
from torch import nn, einsum
import torch.nn.functional as F

from einops import rearrange, repeat

class LayerNormChan(nn.Module):
    def __init__(
        self,
        dim,
        eps = 1e-5
    ):
        super().__init__()
        self.eps = eps
        self.gamma = nn.Parameter(torch.ones(1, dim, 1, 1))

    def forward(self, x):
        var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
        mean = torch.mean(x, dim = 1, keepdim = True)
        return (x - mean) / (var + self.eps).sqrt() * self.gamma

# attention-based upsampling
# from https://arxiv.org/abs/2112.11435

class QueryAndAttend(nn.Module):
    def __init__(
        self,
        *,
        dim,
        num_queries = 1,
        dim_head = 32,
        heads = 8,
        window_size = 3
    ):
        super().__init__()
        self.scale = dim_head ** -0.5
        inner_dim = dim_head * heads
        self.heads = heads
        self.dim_head = dim_head
        self.window_size = window_size
        self.num_queries = num_queries

        self.rel_pos_bias = nn.Parameter(torch.randn(heads, num_queries, window_size * window_size, 1, 1))

        self.queries = nn.Parameter(torch.randn(heads, num_queries, dim_head))
        self.to_kv = nn.Conv2d(dim, dim_head * 2, 1, bias = False)
        self.to_out = nn.Conv2d(inner_dim, dim, 1, bias = False)

    def forward(self, x):
        """
        einstein notation
        b - batch
        h - heads
        l - num queries
        d - head dimension
        x - height
        y - width
        j - source sequence for attending to (kernel size squared in this case)
        """

        wsz, heads, dim_head, num_queries = self.window_size, self.heads, self.dim_head, self.num_queries
        batch, _, height, width = x.shape

        is_one_query = self.num_queries == 1

        # queries, keys, values

        q = self.queries * self.scale
        k, v = self.to_kv(x).chunk(2, dim = 1)

        # similarities

        sim = einsum('h l d, b d x y -> b h l x y', q, k)
        sim = rearrange(sim, 'b ... x y -> b (...) x y')

        # unfold the similarity scores, with float(-inf) as padding value

        mask_value = -torch.finfo(sim.dtype).max
        sim = F.pad(sim, ((wsz // 2,) * 4), value = mask_value)
        sim = F.unfold(sim, kernel_size = wsz)
        sim = rearrange(sim, 'b (h l j) (x y) -> b h l j x y', h = heads, l = num_queries, x = height, y = width)

        # rel pos bias

        sim = sim + self.rel_pos_bias

        # numerically stable attention

        sim = sim - sim.amax(dim = -3, keepdim = True).detach()
        attn = sim.softmax(dim = -3)

        # unfold values

        v = F.pad(v, ((wsz // 2,) * 4), value = 0.)
        v = F.unfold(v, kernel_size = wsz)
        v = rearrange(v, 'b (d j) (x y) -> b d j x y', d = dim_head, x = height, y = width)

        # aggregate values

        out = einsum('b h l j x y, b d j x y -> b l h d x y', attn, v)

        # combine heads

        out = rearrange(out, 'b l h d x y -> (b l) (h d) x y')
        out = self.to_out(out)
        out = rearrange(out, '(b l) d x y -> b l d x y', b = batch)

        # return original input if one query

        if is_one_query:
            out = rearrange(out, 'b 1 ... -> b ...')

        return out

class QueryAttnUpsample(nn.Module):
    def __init__(self, dim, **kwargs):
        super().__init__()
        self.norm = LayerNormChan(dim)
        self.qna = QueryAndAttend(dim = dim, num_queries = 4, **kwargs)

    def forward(self, x):
        x = self.norm(x)
        out = self.qna(x)
        out = rearrange(out, 'b (w1 w2) c h w -> b c (h w1) (w w2)', w1 = 2, w2 = 2)
        return out
```
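For orientation (not part of the diff), here is a minimal usage sketch of the removed upsampler. It assumes the module above is still importable; the tensor sizes are illustrative. The shapes follow from the final rearrange in `QueryAttnUpsample.forward`, which folds the 4 queries into a 2x upsampling of both spatial dimensions:

```python
import torch

# assumes the deleted module above is available, e.g.
# from dalle2_pytorch.attention import QueryAttnUpsample

upsample = QueryAttnUpsample(dim = 64)

x = torch.randn(2, 64, 32, 32)   # (batch, channels, height, width)
out = upsample(x)

print(out.shape)                 # torch.Size([2, 64, 64, 64]) - height and width doubled
```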
The next set of hunks modifies the main model file (evidently `dalle2_pytorch/dalle2_pytorch.py`; the file header was lost in extraction).

`@@ -17,7 +17,6 @@ from kornia.filters import gaussian_blur2d`: the import of the deleted attention module is dropped:

```diff
 from dalle2_pytorch.tokenizer import tokenizer
 from dalle2_pytorch.vqgan_vae import NullVQGanVAE, VQGanVAE
-from dalle2_pytorch.attention import QueryAttnUpsample
 
 # use x-clip
```
`@@ -36,6 +35,10 @@ def default(val, d):`: a no-op context manager is added (used later so decoder sampling on CPU can skip the GPU context manager):

```diff
 def cast_tuple(val, length = 1):
     return val if isinstance(val, tuple) else ((val,) * length)
 
+@contextmanager
+def null_context(*args, **kwargs):
+    yield
+
 def eval_decorator(fn):
     def inner(model, *args, **kwargs):
         was_training = model.training
```
`@@ -84,7 +87,7 @@ def resize_image_to(t, image_size, mode = 'bilinear')`: `align_corners = False` is now passed explicitly to `F.interpolate`:

```diff
     if orig_image_size == shape:
         return t
 
-    return F.interpolate(t, size = shape, mode = mode)
+    return F.interpolate(t, size = shape, mode = mode, align_corners = False)
 
 # classifier free guidance functions
```
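For illustration (not part of the diff): `align_corners` changes where bilinear sample points are placed, so passing it explicitly pins the behavior. A tiny sketch with made-up values:

```python
import torch
import torch.nn.functional as F

t = torch.tensor([[[[0., 1.], [2., 3.]]]])   # (1, 1, 2, 2)

a = F.interpolate(t, size = (4, 4), mode = 'bilinear', align_corners = False)
b = F.interpolate(t, size = (4, 4), mode = 'bilinear', align_corners = True)

print(torch.allclose(a, b))   # False - the two conventions place the sample points differently
```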
`@@ -143,6 +146,92 @@ def sigmoid_beta_schedule(timesteps):`: a shared `BaseGaussianDiffusion` base class is added (inserted after the beta-schedule functions and before the `# diffusion prior` section), holding the schedule construction and the standard DDPM buffers and helpers that were previously duplicated in `DiffusionPrior` and `Decoder`:

```python
class BaseGaussianDiffusion(nn.Module):
    def __init__(self, *, beta_schedule, timesteps, loss_type):
        super().__init__()

        if beta_schedule == "cosine":
            betas = cosine_beta_schedule(timesteps)
        elif beta_schedule == "linear":
            betas = linear_beta_schedule(timesteps)
        elif beta_schedule == "quadratic":
            betas = quadratic_beta_schedule(timesteps)
        elif beta_schedule == "jsd":
            betas = 1.0 / torch.linspace(timesteps, 1, timesteps)
        elif beta_schedule == "sigmoid":
            betas = sigmoid_beta_schedule(timesteps)
        else:
            raise NotImplementedError()

        alphas = 1. - betas
        alphas_cumprod = torch.cumprod(alphas, axis = 0)
        alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value = 1.)

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.loss_type = loss_type

        self.register_buffer('betas', betas)
        self.register_buffer('alphas_cumprod', alphas_cumprod)
        self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)

        # calculations for diffusion q(x_t | x_{t-1}) and others

        self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
        self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
        self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))

        # calculations for posterior q(x_{t-1} | x_t, x_0)

        posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)

        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)

        self.register_buffer('posterior_variance', posterior_variance)

        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain

        self.register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min = 1e-20)))
        self.register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
        self.register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))

    def q_mean_variance(self, x_start, t):
        mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
        variance = extract(1. - self.alphas_cumprod, t, x_start.shape)
        log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape)
        return mean, variance, log_variance

    def q_posterior(self, x_start, x_t, t):
        posterior_mean = (
            extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
            extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
        return posterior_mean, posterior_variance, posterior_log_variance_clipped

    def q_sample(self, x_start, t, noise = None):
        noise = default(noise, lambda: torch.randn_like(x_start))

        return (
            extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
            extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
        )

    def predict_start_from_noise(self, x_t, t, noise):
        return (
            extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
            extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
        )

    def sample(self, *args, **kwargs):
        raise NotImplementedError

    def forward(self, *args, **kwargs):
        raise NotImplementedError
```
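For reference (an aside, not part of the diff), the buffers registered above implement the standard Gaussian-diffusion quantities from Ho et al. 2020. With $\alpha_t = 1 - \beta_t$ and $\bar\alpha_t = \prod_{s \le t} \alpha_s$ (the `alphas_cumprod` buffer):

```latex
q(x_t \mid x_0) = \mathcal{N}\big(x_t;\ \sqrt{\bar\alpha_t}\,x_0,\ (1-\bar\alpha_t)\,I\big)
    % implemented by q_sample and q_mean_variance

q(x_{t-1} \mid x_t, x_0) = \mathcal{N}\big(x_{t-1};\ \tilde\mu_t(x_t, x_0),\ \tilde\beta_t I\big)

\tilde\mu_t(x_t, x_0)
    = \frac{\beta_t\sqrt{\bar\alpha_{t-1}}}{1-\bar\alpha_t}\,x_0
    + \frac{(1-\bar\alpha_{t-1})\sqrt{\alpha_t}}{1-\bar\alpha_t}\,x_t
    % posterior_mean_coef1 and posterior_mean_coef2

\tilde\beta_t = \frac{\beta_t\,(1-\bar\alpha_{t-1})}{1-\bar\alpha_t}
    % posterior_variance

x_0 = \frac{1}{\sqrt{\bar\alpha_t}}\,x_t - \sqrt{\tfrac{1}{\bar\alpha_t}-1}\;\epsilon
    % predict_start_from_noise
```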
`@@ -481,12 +570,15 @@ class DiffusionPriorNetwork(nn.Module):`: `DiffusionPrior` now inherits from the new base class, and CLIP becomes optional:

```diff
         return pred_image_embed
 
-class DiffusionPrior(nn.Module):
+class DiffusionPrior(BaseGaussianDiffusion):
     def __init__(
         self,
         net,
         *,
-        clip,
+        clip = None,
+        image_embed_dim = None,
+        image_size = None,
+        image_channels = 3,
         timesteps = 1000,
         cond_drop_prob = 0.2,
         loss_type = "l1",
```
`@@ -494,15 +586,23 @@`: the constructor hands the schedule arguments to the base class and handles the CLIP-less case:

```diff
         beta_schedule = "cosine",
         condition_on_text_encodings = True, # the paper suggests this is needed, but you can turn it off for your CLIP preprocessed text embed -> image embed training
     ):
-        super().__init__()
-        assert isinstance(clip, CLIP)
-        freeze_model_and_make_eval_(clip)
-        self.clip = clip
+        super().__init__(
+            beta_schedule = beta_schedule,
+            timesteps = timesteps,
+            loss_type = loss_type
+        )
+
+        if exists(clip):
+            assert isinstance(clip, CLIP)
+            freeze_model_and_make_eval_(clip)
+            self.clip = clip
+        else:
+            assert exists(image_embed_dim), 'latent dimension must be given, if training prior network without CLIP given'
+            self.clip = None
+
         self.net = net
-        self.image_embed_dim = clip.dim_latent
-        self.channels = clip.image_channels
-        self.image_size = clip.image_size
+        self.image_embed_dim = default(image_embed_dim, lambda: clip.dim_latent)
+        self.channels = default(image_channels, lambda: clip.image_channels)
 
         self.cond_drop_prob = cond_drop_prob
         self.condition_on_text_encodings = condition_on_text_encodings
```
`@@ -510,55 +610,10 @@`: the beta-schedule and buffer setup that followed `self.predict_x_start = predict_x_start` is deleted from `DiffusionPrior.__init__` (it is verbatim the code now living in `BaseGaussianDiffusion` above), and `get_image_embed` now asserts that a CLIP was actually supplied:

```diff
     @torch.no_grad()
     def get_image_embed(self, image):
+        assert exists(self.clip)
+
         image_encoding = self.clip.visual_transformer(image)
         image_cls = image_encoding[:, 0]
         image_embed = self.clip.to_visual_latent(image_cls)
```
`@@ -566,6 +621,8 @@`: the same guard is added to `get_text_cond`:

```diff
     @torch.no_grad()
     def get_text_cond(self, text):
+        assert exists(self.clip)
+
         text_encodings = self.clip.text_transformer(text)
         text_cls, text_encodings = text_encodings[:, 0], text_encodings[:, 1:]
         text_embed = self.clip.to_text_latent(text_cls)
```
`@@ -576,27 +633,6 @@` and `@@ -633,14 +669,6 @@`: the `q_mean_variance`, `predict_start_from_noise`, `q_posterior` and `q_sample` methods are removed from `DiffusionPrior`; the identical implementations are now inherited from `BaseGaussianDiffusion`. The surrounding `get_text_cond` return, `p_mean_variance`, `p_sample_loop` and `p_losses` are unchanged.
`@@ -881,6 +909,7 @@ class Unet(nn.Module):`: the unet can now be told the text embedding dimension up front:

```diff
         dim,
         *,
         image_embed_dim,
+        text_embed_dim = None,
         cond_dim = None,
         num_image_tokens = 4,
         num_time_tokens = 2,
```
`@@ -934,7 +963,7 @@`: if `text_embed_dim` is given, an ordinary `nn.Linear` is built immediately instead of a lazily initialized one:

```diff
             Rearrange('b (n d) -> b n d', n = num_image_tokens)
         ) if image_embed_dim != cond_dim else nn.Identity()
 
-        self.text_to_cond = nn.LazyLinear(cond_dim)
+        self.text_to_cond = nn.LazyLinear(cond_dim) if not exists(text_embed_dim) else nn.Linear(text_embed_dim, cond_dim)
 
         # finer control over whether to condition on image embeddings and text encodings
         # so one can have the latter unets in the cascading DDPMs only focus on super-resoluting
```
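For context (not part of the diff), this is the behavioral difference between the two branches; a minimal sketch with assumed sizes:

```python
import torch
from torch import nn

# LazyLinear defers creating its weight until it sees the first input,
# so the input feature size does not need to be known at construction time
lazy = nn.LazyLinear(128)
print(lazy.weight)            # <UninitializedParameter>

x = torch.randn(4, 512)
lazy(x)                       # first forward materializes a (128, 512) weight
print(lazy.weight.shape)      # torch.Size([128, 512])

# with text_embed_dim known up front, a plain Linear can be built immediately
eager = nn.Linear(512, 128)
print(eager.weight.shape)     # torch.Size([128, 512])
```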
`@@ -1153,7 +1182,7 @@ class LowresConditioner(nn.Module):`: `Decoder` also inherits from the new base class:

```diff
         return cond_fmap
 
-class Decoder(nn.Module):
+class Decoder(BaseGaussianDiffusion):
     def __init__(
         self,
         unet,
```
`@@ -1173,7 +1202,12 @@`: as with the prior, the decoder's constructor passes the schedule arguments to the base class:

```diff
         blur_kernel_size = 3, # cascading ddpm - blur kernel size
         condition_on_text_encodings = False, # the paper suggested that this didn't do much in the decoder, but i'm allowing the option for experimentation
     ):
-        super().__init__()
+        super().__init__(
+            beta_schedule = beta_schedule,
+            timesteps = timesteps,
+            loss_type = loss_type
+        )
+
         assert isinstance(clip, CLIP)
         freeze_model_and_make_eval_(clip)
         self.clip = clip
```
`@@ -1237,55 +1271,6 @@`, `@@ -1318,27 +1303,6 @@` and `@@ -1383,14 +1347,6 @@`: the duplicated `# noise schedule` setup in `Decoder.__init__` (everything after `self.cond_drop_prob = cond_drop_prob`) and the `q_mean_variance`, `predict_start_from_noise`, `q_posterior` and `q_sample` methods are deleted from `Decoder`; all of it is now inherited from `BaseGaussianDiffusion`. The surrounding `get_unet`, `get_image_embed` (ending in `return l2norm(image_embed)`), `p_mean_variance` and `p_losses` are unchanged.
`@@ -1430,7 +1386,10 @@`: during sampling, the one-unet-in-GPU context is only entered when the image embedding is actually on CUDA; otherwise the new `null_context` is used, so CPU-only sampling no longer tries to move unets onto a GPU:

```diff
         img = None
 
         for unet, vae, channel, image_size, predict_x_start in tqdm(zip(self.unets, self.vaes, self.sample_channels, self.image_sizes, self.predict_x_start)):
-            with self.one_unet_in_gpu(unet = unet):
+            context = self.one_unet_in_gpu(unet = unet) if image_embed.is_cuda else null_context()
+
+            with context:
                 lowres_cond_img = None
                 shape = (batch_size, channel, image_size, image_size)
```
The last file touched is evidently `dalle2_pytorch/vqgan_vae.py` (again the file header was lost in extraction).

`@@ -12,8 +12,8 @@ from torch.autograd import grad as torch_grad`: imports are updated for the new encoder/decoder classes:

```diff
 import torchvision
 
 from einops import rearrange, reduce, repeat
-from dalle2_pytorch.attention import QueryAttnUpsample
+from einops_exts import rearrange_many
+from einops.layers.torch import Rearrange
 
 # constants
```
`@@ -146,6 +146,8 @@` and `@@ -179,6 +181,8 @@`: two section comments are added, `# discriminator` above `class Discriminator` and `# positional encoding` above `class ContinuousPositionBias` (no functional change).
`@@ -213,6 +217,84 @@ class ContinuousPositionBias(nn.Module):`: the convolutional encoder/decoder that used to be built inline inside `VQGanVAE` is factored out into its own `ResnetEncDec` module, inserted between `ContinuousPositionBias` and `GLUResBlock`:

```python
# resnet encoder / decoder

class ResnetEncDec(nn.Module):
    def __init__(
        self,
        dim,
        *,
        channels = 3,
        layers = 4,
        layer_mults = None,
        num_resnet_blocks = 1,
        resnet_groups = 16,
        first_conv_kernel_size = 5,
        use_attn = True,
        attn_dim_head = 64,
        attn_heads = 8,
        attn_dropout = 0.,
    ):
        super().__init__()
        assert dim % resnet_groups == 0, f'dimension {dim} must be divisible by {resnet_groups} (groups for the groupnorm)'

        self.layers = layers

        self.encoders = MList([])
        self.decoders = MList([])

        layer_mults = default(layer_mults, list(map(lambda t: 2 ** t, range(layers))))
        assert len(layer_mults) == layers, 'layer multipliers must be equal to designated number of layers'

        layer_dims = [dim * mult for mult in layer_mults]
        dims = (dim, *layer_dims)

        self.encoded_dim = dims[-1]

        dim_pairs = zip(dims[:-1], dims[1:])

        append = lambda arr, t: arr.append(t)
        prepend = lambda arr, t: arr.insert(0, t)

        if not isinstance(num_resnet_blocks, tuple):
            num_resnet_blocks = (*((0,) * (layers - 1)), num_resnet_blocks)

        if not isinstance(use_attn, tuple):
            use_attn = (*((False,) * (layers - 1)), use_attn)

        assert len(num_resnet_blocks) == layers, 'number of resnet blocks config must be equal to number of layers'
        assert len(use_attn) == layers

        for layer_index, (dim_in, dim_out), layer_num_resnet_blocks, layer_use_attn in zip(range(layers), dim_pairs, num_resnet_blocks, use_attn):
            append(self.encoders, nn.Sequential(nn.Conv2d(dim_in, dim_out, 4, stride = 2, padding = 1), leaky_relu()))
            prepend(self.decoders, nn.Sequential(nn.ConvTranspose2d(dim_out, dim_in, 4, 2, 1), leaky_relu()))

            if layer_use_attn:
                prepend(self.decoders, VQGanAttention(dim = dim_out, heads = attn_heads, dim_head = attn_dim_head, dropout = attn_dropout))

            for _ in range(layer_num_resnet_blocks):
                append(self.encoders, ResBlock(dim_out, groups = resnet_groups))
                prepend(self.decoders, GLUResBlock(dim_out, groups = resnet_groups))

            if layer_use_attn:
                append(self.encoders, VQGanAttention(dim = dim_out, heads = attn_heads, dim_head = attn_dim_head, dropout = attn_dropout))

        prepend(self.encoders, nn.Conv2d(channels, dim, first_conv_kernel_size, padding = first_conv_kernel_size // 2))
        append(self.decoders, nn.Conv2d(dim, channels, 1))

    def get_encoded_fmap_size(self, image_size):
        return image_size // (2 ** self.layers)

    def encode(self, x):
        for enc in self.encoders:
            x = enc(x)
        return x

    def decode(self, x):
        for dec in self.decoders:
            x = dec(x)
        return x
```

`@@ -246,6 +328,7 @@ class ResBlock(nn.Module):`: whitespace only, a single blank line is added around the `# vqgan attention layer` comment; `ResBlock` and `VQGanAttention` are unchanged.
`@@ -290,6 +373,145 @@ class VQGanAttention(nn.Module):`: a ViT-style encoder/decoder (per https://arxiv.org/abs/2110.04627) is added alongside the resnet one, just before the `# main vqgan-vae classes` section:

```python
# ViT encoder / decoder

class RearrangeImage(nn.Module):
    def forward(self, x):
        n = x.shape[1]
        w = h = int(sqrt(n))
        return rearrange(x, 'b (h w) ... -> b h w ...', h = h, w = w)

class Attention(nn.Module):
    def __init__(
        self,
        dim,
        *,
        heads = 8,
        dim_head = 32
    ):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.heads = heads
        self.scale = dim_head ** -0.5
        inner_dim = dim_head * heads

        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
        self.to_out = nn.Linear(inner_dim, dim)

    def forward(self, x):
        h = self.heads

        x = self.norm(x)

        q, k, v = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)

        q = q * self.scale
        sim = einsum('b h i d, b h j d -> b h i j', q, k)

        sim = sim - sim.amax(dim = -1, keepdim = True).detach()
        attn = sim.softmax(dim = -1)

        out = einsum('b h i j, b h j d -> b h i d', attn, v)

        out = rearrange(out, 'b h n d -> b n (h d)')
        return self.to_out(out)

def FeedForward(dim, mult = 4):
    return nn.Sequential(
        nn.LayerNorm(dim),
        nn.Linear(dim, dim * mult, bias = False),
        nn.GELU(),
        nn.Linear(dim * mult, dim, bias = False)
    )

class Transformer(nn.Module):
    def __init__(
        self,
        dim,
        *,
        layers,
        dim_head = 32,
        heads = 8,
        ff_mult = 4
    ):
        super().__init__()
        self.layers = nn.ModuleList([])
        for _ in range(layers):
            self.layers.append(nn.ModuleList([
                Attention(dim = dim, dim_head = dim_head, heads = heads),
                FeedForward(dim = dim, mult = ff_mult)
            ]))

        self.norm = nn.LayerNorm(dim)

    def forward(self, x):
        for attn, ff in self.layers:
            x = attn(x) + x
            x = ff(x) + x

        return self.norm(x)

class ViTEncDec(nn.Module):
    def __init__(
        self,
        dim,
        channels = 3,
        layers = 4,
        patch_size = 8,
        dim_head = 32,
        heads = 8,
        ff_mult = 4
    ):
        super().__init__()
        self.encoded_dim = dim
        self.patch_size = patch_size

        input_dim = channels * (patch_size ** 2)

        self.encoder = nn.Sequential(
            Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
            nn.Linear(input_dim, dim),
            Transformer(
                dim = dim,
                dim_head = dim_head,
                heads = heads,
                ff_mult = ff_mult,
                layers = layers
            ),
            RearrangeImage(),
            Rearrange('b h w c -> b c h w')
        )

        self.decoder = nn.Sequential(
            Rearrange('b c h w -> b (h w) c'),
            Transformer(
                dim = dim,
                dim_head = dim_head,
                heads = heads,
                ff_mult = ff_mult,
                layers = layers
            ),
            nn.Sequential(
                nn.Linear(dim, dim * 4, bias = False),
                nn.Tanh(),
                nn.Linear(dim * 4, input_dim, bias = False),
            ),
            RearrangeImage(),
            Rearrange('b h w (p1 p2 c) -> b c (h p1) (w p2)', p1 = patch_size, p2 = patch_size)
        )

    def get_encoded_fmap_size(self, image_size):
        return image_size // self.patch_size

    def encode(self, x):
        return self.encoder(x)

    def decode(self, x):
        return self.decoder(x)

# main vqgan-vae classes
```
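For intuition (not part of the diff), a shape check for the ViT encoder/decoder above; the concrete sizes are illustrative, and the direct import is an assumption about where the class lives:

```python
import torch

# assumed import path, matching the file shown above
# from dalle2_pytorch.vqgan_vae import ViTEncDec

vit = ViTEncDec(dim = 256, channels = 3, layers = 2, patch_size = 8)

images = torch.randn(1, 3, 256, 256)

fmap = vit.encode(images)
print(fmap.shape)                       # torch.Size([1, 256, 32, 32]) - spatial size is image_size / patch_size
print(vit.get_encoded_fmap_size(256))   # 32

recon = vit.decode(fmap)
print(recon.shape)                      # torch.Size([1, 3, 256, 256])

# the resnet variant instead halves resolution at each of its `layers` stages,
# so with layers = 4 a 256px image maps to a 16x16 feature map with dim * 2**(layers - 1) channels
```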
`@@ -320,81 +542,43 @@ class VQGanVAE(nn.Module):`: `VQGanVAE.__init__` no longer builds its encoder/decoder inline (that code moved verbatim into `ResnetEncDec` above). Instead it selects an encoder/decoder class via a new `vae_type` argument, forwards `encdec_`-prefixed kwargs to it, and gains a `discr_layers` argument; the encoder/decoder-specific parameters (`layer_mults`, `num_resnet_blocks`, `use_attn`, `attn_dim_head`, `attn_heads`, `attn_dropout`, `resnet_groups`, `first_conv_kernel_size`) are dropped from the signature, along with `self.layers` and `self.fmap_size`. The changed portion of the constructor now reads:

```python
        # (argument list, continued; the hunk begins partway through it)
        image_size,
        channels = 3,
        layers = 4,
        l2_recon_loss = False,
        use_hinge_loss = True,
        vgg = None,
        vq_codebook_size = 512,
        vq_decay = 0.8,
        vq_commitment_weight = 1.,
        vq_kmeans_init = True,
        vq_use_cosine_sim = True,
        use_vgg_and_gan = True,
        vae_type = 'resnet',
        discr_layers = 4,
        **kwargs
    ):
        super().__init__()

        vq_kwargs, kwargs = groupby_prefix_and_trim('vq_', kwargs)
        encdec_kwargs, kwargs = groupby_prefix_and_trim('encdec_', kwargs)

        self.image_size = image_size
        self.channels = channels
        self.codebook_size = vq_codebook_size

        if vae_type == 'resnet':
            enc_dec_klass = ResnetEncDec
        elif vae_type == 'vit':
            enc_dec_klass = ViTEncDec
        else:
            raise ValueError(f'{vae_type} not valid')

        self.enc_dec = enc_dec_klass(
            dim = dim,
            channels = channels,
            layers = layers,
            **encdec_kwargs
        )

        self.vq = VQ(
            dim = self.enc_dec.encoded_dim,
            codebook_size = vq_codebook_size,
            decay = vq_decay,
            commitment_weight = vq_commitment_weight,
            # ... remainder of the VQ call and of __init__ is unchanged
```
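A hedged construction sketch (not taken from the repo's README): the `groupby_prefix_and_trim('encdec_', kwargs)` call implies encoder/decoder-specific options are passed with an `encdec_` prefix and stripped before being forwarded; the argument values below are illustrative assumptions.

```python
from dalle2_pytorch.vqgan_vae import VQGanVAE

# resnet variant, forwarding resnet-specific options with the encdec_ prefix
resnet_vae = VQGanVAE(
    dim = 128,
    image_size = 256,
    layers = 4,
    vae_type = 'resnet',
    encdec_num_resnet_blocks = 2,   # becomes num_resnet_blocks = 2 inside ResnetEncDec
    encdec_resnet_groups = 16
)

# vit variant, forwarding vit-specific options the same way
vit_vae = VQGanVAE(
    dim = 256,
    image_size = 256,
    layers = 4,
    vae_type = 'vit',
    encdec_patch_size = 8           # becomes patch_size = 8 inside ViTEncDec
)
```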
`@@ -427,13 +611,21 @@`: the discriminator's layer dims are now derived from the new `discr_layers` argument (previously they reused the encoder's `dims`), and `encoded_dim` / `get_encoded_fmap_size` delegate to the encoder/decoder module:

```diff
         # gan related losses
 
+        layer_mults = list(map(lambda t: 2 ** t, range(discr_layers)))
+        layer_dims = [dim * mult for mult in layer_mults]
+        dims = (dim, *layer_dims)
+
         self.discr = Discriminator(dims = dims, channels = channels)
 
         self.discr_loss = hinge_discr_loss if use_hinge_loss else bce_discr_loss
         self.gen_loss = hinge_gen_loss if use_hinge_loss else bce_gen_loss
 
+    @property
+    def encoded_dim(self):
+        return self.enc_dec.encoded_dim
+
     def get_encoded_fmap_size(self, image_size):
-        return image_size // (2 ** self.layers)
+        return self.enc_dec.get_encoded_fmap_size(image_size)
 
     def copy_for_eval(self):
         device = next(self.parameters()).device
```
`@@ -459,16 +651,13 @@`: `encode` and `decode` route through the new `enc_dec` module instead of iterating over the old module lists:

```diff
         return self.vq.codebook
 
     def encode(self, fmap):
-        for enc in self.encoders:
-            fmap = enc(fmap)
+        fmap = self.enc_dec.encode(fmap)
         return fmap
 
     def decode(self, fmap, return_indices_and_loss = False):
         fmap, indices, commit_loss = self.vq(fmap)
 
-        for dec in self.decoders:
-            fmap = dec(fmap)
+        fmap = self.enc_dec.decode(fmap)
 
         if not return_indices_and_loss:
             return fmap
```