Mirror of https://github.com/lucidrains/DALLE2-pytorch.git, synced 2026-02-14 22:24:19 +01:00
Compare commits
14 Commits
| SHA1 |
|---|
| cb26187450 |
| 625ce23f6b |
| dbf4a281f1 |
| 4ab527e779 |
| d0cdeb3247 |
| 8c610aad9a |
| 6700381a37 |
| 20377f889a |
| 6edb1c5dd0 |
| b093f92182 |
| fa3bb6ba5c |
| 2705e7c9b0 |
| 77141882c8 |
| 4075d02139 |
README.md (19)
@@ -499,10 +499,12 @@ loss.backward()
 
 ### DALL-E2 with Latent Diffusion
 
-This repository decides to take the next step and offer DALL-E2 combined with <a href="https://huggingface.co/spaces/multimodalart/latentdiffusion">latent diffusion</a>, from Rombach et al.
+This repository decides to take the next step and offer DALL-E v2 combined with <a href="https://huggingface.co/spaces/multimodalart/latentdiffusion">latent diffusion</a>, from Rombach et al.
 
 You can use it as follows. Latent diffusion can be limited to just the first U-Net in the cascade, or to any number you wish.
 
+The repository also comes equipped with all the necessary settings to recreate `ViT-VQGan` from the <a href="https://arxiv.org/abs/2110.04627">Improved VQGans</a> paper. Furthermore, the <a href="https://github.com/lucidrains/vector-quantize-pytorch">vector quantization</a> library also comes equipped to do <a href="https://arxiv.org/abs/2203.01941">residual or multi-headed quantization</a>, which I believe will give an even further boost in performance to the autoencoder.
+
 ```python
 import torch
 from dalle2_pytorch import Unet, Decoder, CLIP, VQGanVAE
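For anyone reading this compare without the rest of the README: a minimal sketch of what the latent-diffusion wiring described above might look like. The CLIP/Unet hyperparameters and the `vae = (...)` argument on `Decoder` follow the README's other examples and should be treated as illustrative assumptions, not as the exact snippet this hunk truncates.

```python
import torch
from dalle2_pytorch import Unet, Decoder, CLIP, VQGanVAE

clip = CLIP(
    dim_text = 512,
    dim_image = 512,
    dim_latent = 512,
    num_text_tokens = 49408,
    text_enc_depth = 6,
    text_seq_len = 256,
    text_heads = 8,
    visual_enc_depth = 6,
    visual_image_size = 256,
    visual_patch_size = 32,
    visual_heads = 8
)

# a VQGan-VAE for the first unet only - this is what makes the first stage a latent diffusion model
vae = VQGanVAE(
    dim = 32,
    image_size = 256,
    layers = 3               # feature map is compressed 2 ** 3 = 8x per side
)

unet1 = Unet(dim = 32, image_embed_dim = 512, cond_dim = 128, channels = 3, dim_mults = (1, 2, 4, 8))
unet2 = Unet(dim = 32, image_embed_dim = 512, cond_dim = 128, channels = 3, dim_mults = (1, 2, 4, 8, 16))

decoder = Decoder(
    clip = clip,
    vae = (vae,),            # assumption: one vae per unet, remaining unets default to pixel space
    unet = (unet1, unet2),
    image_sizes = (256, 512),
    timesteps = 100
)

images = torch.randn(4, 3, 512, 512)

loss = decoder(images, unet_number = 1)
loss.backward()
```

Supplying the VQGan-VAE only for the first unet is what "limited to just the first U-Net in the cascade" means in practice; later unets keep operating on pixels.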
@@ -644,14 +646,15 @@ Once built, images will be saved to the same directory the command is invoked
 - [x] for decoder, allow ability to customize objective (predict epsilon vs x0), in case latent diffusion does better with prediction of x0
 - [x] use attention-based upsampling https://arxiv.org/abs/2112.11435
 - [x] use inheritance just this once for sharing logic between decoder and prior network ddpms
-- [ ] abstract interface for CLIP adapter class, so other CLIPs can be brought in
+- [x] bring in vit-vqgan https://arxiv.org/abs/2110.04627 for the latent diffusion
+- [x] abstract interface for CLIP adapter class, so other CLIPs can be brought in
 - [ ] become an expert with unets, cleanup unet code, make it fully configurable, port all learnings over to https://github.com/lucidrains/x-unet
 - [ ] copy the cascading ddpm code to a separate repo (perhaps https://github.com/lucidrains/denoising-diffusion-pytorch) as the main contribution of dalle2 really is just the prior network
 - [ ] transcribe code to Jax, which lowers the activation energy for distributed training, given access to TPUs
 - [ ] train on a toy task, offer in colab
+- [ ] think about how best to design a declarative training config that handles preencoding for prior and training of multiple networks in decoder
 - [ ] extend diffusion head to use diffusion-gan (potentially using lightweight-gan) to speed up inference
 - [ ] bring in tools to train vqgan-vae
-- [ ] bring in vit-vqgan https://arxiv.org/abs/2110.04627 for the latent diffusion
 
 ## Citations
 
@@ -697,4 +700,14 @@ Once built, images will be saved to the same directory the command is invoked
 }
 ```
 
+```bibtex
+@article{Yu2021VectorquantizedIM,
+    title   = {Vector-quantized Image Modeling with Improved VQGAN},
+    author  = {Jiahui Yu and Xin Li and Jing Yu Koh and Han Zhang and Ruoming Pang and James Qin and Alexander Ku and Yuanzhong Xu and Jason Baldridge and Yonghui Wu},
+    journal = {ArXiv},
+    year    = {2021},
+    volume  = {abs/2110.04627}
+}
+```
+
 *Creating noise from data is easy; creating data from noise is generative modeling.* - Yang Song's <a href="https://arxiv.org/abs/2011.13456">paper</a>
@@ -1,125 +0,0 @@
-import torch
-from torch import nn, einsum
-import torch.nn.functional as F
-
-from einops import rearrange, repeat
-
-class LayerNormChan(nn.Module):
-    def __init__(
-        self,
-        dim,
-        eps = 1e-5
-    ):
-        super().__init__()
-        self.eps = eps
-        self.gamma = nn.Parameter(torch.ones(1, dim, 1, 1))
-
-    def forward(self, x):
-        var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
-        mean = torch.mean(x, dim = 1, keepdim = True)
-        return (x - mean) / (var + self.eps).sqrt() * self.gamma
-
-# attention-based upsampling
-# from https://arxiv.org/abs/2112.11435
-
-class QueryAndAttend(nn.Module):
-    def __init__(
-        self,
-        *,
-        dim,
-        num_queries = 1,
-        dim_head = 32,
-        heads = 8,
-        window_size = 3
-    ):
-        super().__init__()
-        self.scale = dim_head ** -0.5
-        inner_dim = dim_head * heads
-        self.heads = heads
-        self.dim_head = dim_head
-        self.window_size = window_size
-        self.num_queries = num_queries
-
-        self.rel_pos_bias = nn.Parameter(torch.randn(heads, num_queries, window_size * window_size, 1, 1))
-
-        self.queries = nn.Parameter(torch.randn(heads, num_queries, dim_head))
-        self.to_kv = nn.Conv2d(dim, dim_head * 2, 1, bias = False)
-        self.to_out = nn.Conv2d(inner_dim, dim, 1, bias = False)
-
-    def forward(self, x):
-        """
-        einstein notation
-        b - batch
-        h - heads
-        l - num queries
-        d - head dimension
-        x - height
-        y - width
-        j - source sequence for attending to (kernel size squared in this case)
-        """
-
-        wsz, heads, dim_head, num_queries = self.window_size, self.heads, self.dim_head, self.num_queries
-        batch, _, height, width = x.shape
-
-        is_one_query = self.num_queries == 1
-
-        # queries, keys, values
-
-        q = self.queries * self.scale
-        k, v = self.to_kv(x).chunk(2, dim = 1)
-
-        # similarities
-
-        sim = einsum('h l d, b d x y -> b h l x y', q, k)
-        sim = rearrange(sim, 'b ... x y -> b (...) x y')
-
-        # unfold the similarity scores, with float(-inf) as padding value
-
-        mask_value = -torch.finfo(sim.dtype).max
-        sim = F.pad(sim, ((wsz // 2,) * 4), value = mask_value)
-        sim = F.unfold(sim, kernel_size = wsz)
-        sim = rearrange(sim, 'b (h l j) (x y) -> b h l j x y', h = heads, l = num_queries, x = height, y = width)
-
-        # rel pos bias
-
-        sim = sim + self.rel_pos_bias
-
-        # numerically stable attention
-
-        sim = sim - sim.amax(dim = -3, keepdim = True).detach()
-        attn = sim.softmax(dim = -3)
-
-        # unfold values
-
-        v = F.pad(v, ((wsz // 2,) * 4), value = 0.)
-        v = F.unfold(v, kernel_size = wsz)
-        v = rearrange(v, 'b (d j) (x y) -> b d j x y', d = dim_head, x = height, y = width)
-
-        # aggregate values
-
-        out = einsum('b h l j x y, b d j x y -> b l h d x y', attn, v)
-
-        # combine heads
-
-        out = rearrange(out, 'b l h d x y -> (b l) (h d) x y')
-        out = self.to_out(out)
-        out = rearrange(out, '(b l) d x y -> b l d x y', b = batch)
-
-        # return original input if one query
-
-        if is_one_query:
-            out = rearrange(out, 'b 1 ... -> b ...')
-
-        return out
-
-class QueryAttnUpsample(nn.Module):
-    def __init__(self, dim, **kwargs):
-        super().__init__()
-        self.norm = LayerNormChan(dim)
-        self.qna = QueryAndAttend(dim = dim, num_queries = 4, **kwargs)
-
-    def forward(self, x):
-        x = self.norm(x)
-        out = self.qna(x)
-        out = rearrange(out, 'b (w1 w2) c h w -> b c (h w1) (w w2)', w1 = 2, w2 = 2)
-        return out
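The file removed by this hunk (presumably dalle2_pytorch/attention.py, given the `QueryAttnUpsample` imports dropped in the hunks below) held the attention-based upsampler from https://arxiv.org/abs/2112.11435. A rough sketch of its behavior, with illustrative sizes; the import only worked before this deletion:

```python
import torch
from dalle2_pytorch.attention import QueryAttnUpsample   # valid only prior to this commit

# 4 learned queries per position are pixel-shuffled into a 2x2 block, i.e. a learned 2x upsample
upsample = QueryAttnUpsample(dim = 64)

fmap = torch.randn(1, 64, 16, 16)      # (batch, channels, height, width)
out = upsample(fmap)

print(out.shape)                        # torch.Size([1, 64, 32, 32]) - spatial dims doubled
```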
@@ -7,6 +7,7 @@ from contextlib import contextmanager
 import torch
 import torch.nn.functional as F
 from torch import nn, einsum
+import torchvision.transforms as T
 
 from einops import rearrange, repeat
 from einops.layers.torch import Rearrange
@@ -17,7 +18,6 @@ from kornia.filters import gaussian_blur2d
 
 from dalle2_pytorch.tokenizer import tokenizer
 from dalle2_pytorch.vqgan_vae import NullVQGanVAE, VQGanVAE
-from dalle2_pytorch.attention import QueryAttnUpsample
 
 # use x-clip
 
@@ -36,6 +36,10 @@ def default(val, d):
 def cast_tuple(val, length = 1):
     return val if isinstance(val, tuple) else ((val,) * length)
 
+@contextmanager
+def null_context(*args, **kwargs):
+    yield
+
 def eval_decorator(fn):
     def inner(model, *args, **kwargs):
         was_training = model.training
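Why `null_context` exists: it lets later code choose between a real context manager and a no-op without duplicating the `with` block (see the `one_unet_in_gpu` change further down in `Decoder.sample`). A tiny self-contained illustration of the pattern, not taken from the repo:

```python
import torch
from contextlib import contextmanager

@contextmanager
def null_context(*args, **kwargs):
    yield

x = torch.randn(3, requires_grad = True)

for track_gradients in (True, False):
    # pick a real context manager only when needed, otherwise fall through
    context = null_context() if track_gradients else torch.no_grad()

    with context:
        y = (x * 2).sum()

    print(track_gradients, y.requires_grad)   # True True / False False
```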
@@ -86,6 +90,59 @@ def resize_image_to(t, image_size, mode = 'bilinear'): # take a look at https://
 
     return F.interpolate(t, size = shape, mode = mode, align_corners = False)
 
+# clip related adapters
+
+class BaseClipAdapter(nn.Module):
+    def __init__(self, clip):
+        super().__init__()
+        self.clip = clip
+
+    @property
+    def dim_latent(self):
+        raise NotImplementedError
+
+    @property
+    def image_size(self):
+        raise NotImplementedError
+
+    @property
+    def image_channels(self):
+        raise NotImplementedError
+
+    def embed_text(self, text):
+        raise NotImplementedError
+
+    def embed_image(self, image):
+        raise NotImplementedError
+
+class XClipAdapter(BaseClipAdapter):
+    @property
+    def dim_latent(self):
+        return self.clip.dim_latent
+
+    @property
+    def image_size(self):
+        return self.clip.image_size
+
+    @property
+    def image_channels(self):
+        return self.clip.image_channels
+
+    @torch.no_grad()
+    def embed_text(self, text):
+        encoder_output = self.clip.text_transformer(text)
+        text_cls, text_encodings = encoder_output[:, 0], encoder_output[:, 1:]
+        text_embed = self.clip.to_text_latent(text_cls)
+        return l2norm(text_embed), text_encodings
+
+    @torch.no_grad()
+    def embed_image(self, image):
+        image = resize_image_to(image, self.image_size)
+        encoder_output = self.clip.visual_transformer(image)
+        image_cls, image_encodings = encoder_output[:, 0], encoder_output[:, 1:]
+        image_embed = self.clip.to_visual_latent(image_cls)
+        return l2norm(image_embed), image_encodings
+
 # classifier free guidance functions
 
 def prob_mask_like(shape, prob, device):
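This is the abstract CLIP-adapter interface from the TODO list ("so other CLIPs can be brought in"). A hedged sketch of what a third-party adapter could look like; `MyOpenAIClipAdapter`, the wrapped model's `encode_text` / `encode_image` calls, and the import path are hypothetical, only the property/method surface comes from this diff:

```python
import torch
import torch.nn.functional as F
from dalle2_pytorch.dalle2_pytorch import BaseClipAdapter   # import path is an assumption

class MyOpenAIClipAdapter(BaseClipAdapter):
    """hypothetical adapter wrapping some external CLIP, not part of this PR"""

    @property
    def dim_latent(self):
        return 512                      # whatever latent width the wrapped model produces

    @property
    def image_size(self):
        return 224

    @property
    def image_channels(self):
        return 3

    @torch.no_grad()
    def embed_text(self, text):
        # contract from the diff: return (l2-normalized text embedding, per-token encodings)
        text_embed, text_encodings = self.clip.encode_text(text)    # assumed API of the wrapped model
        return F.normalize(text_embed, dim = -1), text_encodings

    @torch.no_grad()
    def embed_image(self, image):
        image_embed, image_encodings = self.clip.encode_image(image)  # assumed API of the wrapped model
        return F.normalize(image_embed, dim = -1), image_encodings
```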
@@ -590,7 +647,10 @@ class DiffusionPrior(BaseGaussianDiffusion):
         )
 
         if exists(clip):
-            assert isinstance(clip, CLIP)
+            if isinstance(clip, CLIP):
+                clip = XClipAdapter(clip)
+
+            assert isinstance(clip, BaseClipAdapter)
             freeze_model_and_make_eval_(clip)
             self.clip = clip
         else:
@@ -607,29 +667,6 @@ class DiffusionPrior(BaseGaussianDiffusion):
         self.predict_x_start = predict_x_start
         # in paper, they do not predict the noise, but predict x0 directly for image embedding, claiming empirically better results. I'll just offer both.
 
-    @torch.no_grad()
-    def get_image_embed(self, image):
-        assert exists(self.clip)
-
-        image_encoding = self.clip.visual_transformer(image)
-        image_cls = image_encoding[:, 0]
-        image_embed = self.clip.to_visual_latent(image_cls)
-        return l2norm(image_embed)
-
-    @torch.no_grad()
-    def get_text_cond(self, text):
-        assert exists(self.clip)
-
-        text_encodings = self.clip.text_transformer(text)
-        text_cls, text_encodings = text_encodings[:, 0], text_encodings[:, 1:]
-        text_embed = self.clip.to_text_latent(text_cls)
-        text_embed = l2norm(text_embed)
-
-        if not self.condition_on_text_encodings:
-            return dict(text_embed = text_embed)
-
-        return dict(text_encodings = text_encodings, text_embed = text_embed, mask = text != 0)
-
     def p_mean_variance(self, x, t, text_cond, clip_denoised: bool):
         pred = self.net(x, t, **text_cond)
 
@@ -701,7 +738,12 @@ class DiffusionPrior(BaseGaussianDiffusion):
         batch_size = text.shape[0]
         image_embed_dim = self.image_embed_dim
 
-        text_cond = self.get_text_cond(text)
+        text_embed, text_encodings = self.clip.embed_text(text)
+
+        text_cond = dict(text_embed = text_embed)
+
+        if self.condition_on_text_encodings:
+            text_cond = {**text_cond, 'text_encodings': text_encodings, 'mask': text != 0}
 
         image_embeds = self.p_sample_loop((batch_size, image_embed_dim), text_cond = text_cond)
         text_embeds = text_cond['text_embed']
@@ -733,18 +775,19 @@ class DiffusionPrior(BaseGaussianDiffusion):
         assert not (self.condition_on_text_encodings and (not exists(text_encodings) and not exists(text))), 'text encodings must be present if you specified you wish to condition on it on initialization'
 
         if exists(image):
-            image_embed = self.get_image_embed(image)
+            image_embed, _ = self.clip.embed_image(image)
 
         # calculate text conditionings, based on what is passed in
 
         if exists(text):
-            text_cond = self.get_text_cond(text)
-        else:
-            text_cond = dict(
-                text_embed = text_embed,
-                text_encodings = text_encodings,
-                mask = text_mask
-            )
+            text_embed, text_encodings = self.clip.embed_text(text)
+            text_mask = text != 0
+
+        text_cond = dict(text_embed = text_embed)
+
+        if self.condition_on_text_encodings:
+            assert exists(text_encodings), 'text encodings must be present for diffusion prior if specified'
+            text_cond = {**text_cond, 'text_encodings': text_encodings, 'mask': text_mask}
 
         # timestep conditioning from ddpm
 
@@ -753,8 +796,7 @@ class DiffusionPrior(BaseGaussianDiffusion):
 
         # calculate forward loss
 
-        loss = self.p_losses(image_embed, times, text_cond = text_cond, *args, **kwargs)
-        return loss
+        return self.p_losses(image_embed, times, text_cond = text_cond, *args, **kwargs)
 
 # decoder
 
@@ -1205,8 +1247,12 @@ class Decoder(BaseGaussianDiffusion):
             loss_type = loss_type
         )
 
-        assert isinstance(clip, CLIP)
+        if isinstance(clip, CLIP):
+            clip = XClipAdapter(clip)
+
         freeze_model_and_make_eval_(clip)
+        assert isinstance(clip, BaseClipAdapter)
+
         self.clip = clip
         self.clip_image_size = clip.image_size
         self.channels = clip.image_channels
@@ -1287,10 +1333,6 @@ class Decoder(BaseGaussianDiffusion):
             yield
             unet.cpu()
 
-    @torch.no_grad()
-    def get_text_encodings(self, text):
-        text_encodings = self.clip.text_transformer(text)
-        return text_encodings[:, 1:]
-
     @torch.no_grad()
     def get_image_embed(self, image):
@@ -1376,14 +1418,20 @@ class Decoder(BaseGaussianDiffusion):
     def sample(self, image_embed, text = None, cond_scale = 1.):
         batch_size = image_embed.shape[0]
 
-        text_encodings = self.get_text_encodings(text) if exists(text) else None
+        text_encodings = None
+
+        if exists(text):
+            _, text_encodings = self.clip.embed_text(text)
 
         assert not (self.condition_on_text_encodings and not exists(text_encodings)), 'text or text encodings must be passed into decoder if specified'
+        assert not (not self.condition_on_text_encodings and exists(text_encodings)), 'decoder specified not to be conditioned on text, yet it is presented'
 
         img = None
 
         for unet, vae, channel, image_size, predict_x_start in tqdm(zip(self.unets, self.vaes, self.sample_channels, self.image_sizes, self.predict_x_start)):
-            with self.one_unet_in_gpu(unet = unet):
+            context = self.one_unet_in_gpu(unet = unet) if image_embed.is_cuda else null_context()
+
+            with context:
                 lowres_cond_img = None
                 shape = (batch_size, channel, image_size, image_size)
 
@@ -1436,11 +1484,14 @@ class Decoder(BaseGaussianDiffusion):
         times = torch.randint(0, self.num_timesteps, (b,), device = device, dtype = torch.long)
 
         if not exists(image_embed):
-            image_embed = self.get_image_embed(image)
+            image_embed, _ = self.clip.embed_image(image)
 
-        text_encodings = self.get_text_encodings(text) if exists(text) and not exists(text_encodings) else None
+        text_encodings = None
+        if exists(text) and not exists(text_encodings):
+            _, text_encodings = self.clip.embed_text(text)
 
         assert not (self.condition_on_text_encodings and not exists(text_encodings)), 'text or text encodings must be passed into decoder if specified'
+        assert not (not self.condition_on_text_encodings and exists(text_encodings)), 'decoder specified not to be conditioned on text, yet it is presented'
 
         lowres_cond_img = self.to_lowres_cond(image, target_image_size = target_image_size, downsample_image_size = self.image_sizes[unet_index - 1]) if unet_number > 1 else None
         image = resize_image_to(image, target_image_size)
@@ -1473,12 +1524,15 @@ class DALLE2(nn.Module):
         self.prior_num_samples = prior_num_samples
         self.decoder_need_text_cond = self.decoder.condition_on_text_encodings
 
+        self.to_pil = T.ToPILImage()
+
     @torch.no_grad()
     @eval_decorator
     def forward(
         self,
         text,
-        cond_scale = 1.
+        cond_scale = 1.,
+        return_pil_images = False
     ):
         device = next(self.parameters()).device
         one_text = isinstance(text, str) or (not is_list_str(text) and text.shape[0] == 1)
@@ -1492,7 +1546,11 @@ class DALLE2(nn.Module):
         text_cond = text if self.decoder_need_text_cond else None
         images = self.decoder.sample(image_embed, text = text_cond, cond_scale = cond_scale)
 
+        if return_pil_images:
+            images = list(map(self.to_pil, images.unbind(dim = 0)))
+
         if one_text:
             return images[0]
 
         return images
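A hedged sketch of the new `return_pil_images` flag in use. Building `prior` and `decoder` is elided and assumed to follow the README examples; only the flag and the list-of-PIL return behavior come from this hunk:

```python
from dalle2_pytorch import DALLE2

# prior = DiffusionPrior(...) and decoder = Decoder(...) are assumed built as in the README
dalle2 = DALLE2(
    prior = prior,
    decoder = decoder
)

# returns PIL images instead of a tensor when the new flag is set
pil_images = dalle2(
    ['a butterfly trying to escape a tornado'],
    cond_scale = 2.,
    return_pil_images = True
)

pil_images[0].save('./butterfly.png')
```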
@@ -35,7 +35,7 @@ class EMA(nn.Module):
 
         self.update_moving_average(self.ema_model, self.online_model)
 
-    def update_moving_average(ma_model, current_model):
+    def update_moving_average(self, ma_model, current_model):
         def calculate_ema(beta, old, new):
             if not exists(old):
                 return new
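Aside from the missing `self` (calling `self.update_moving_average(...)` on the old signature raises a TypeError), the helper being fixed implements a plain exponential moving average. A small, self-contained illustration of the rule with an arbitrary beta:

```python
import torch

def calculate_ema(beta, old, new):
    # same rule as the helper above: keep `beta` of the old value, blend in the rest from the new one
    if old is None:
        return new
    return old * beta + (1 - beta) * new

beta = 0.99
ema_weight = None

for step in range(3):
    online_weight = torch.full((2,), float(step))     # stand-in for a live model parameter
    ema_weight = calculate_ema(beta, ema_weight, online_weight)
    print(step, ema_weight.tolist())
```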
@@ -12,8 +12,8 @@ from torch.autograd import grad as torch_grad
 import torchvision
 
 from einops import rearrange, reduce, repeat
-
-from dalle2_pytorch.attention import QueryAttnUpsample
+from einops_exts import rearrange_many
+from einops.layers.torch import Rearrange
 
 # constants
 
@@ -146,6 +146,8 @@ class LayerNormChan(nn.Module):
         mean = torch.mean(x, dim = 1, keepdim = True)
         return (x - mean) / (var + self.eps).sqrt() * self.gamma
 
+# discriminator
+
 class Discriminator(nn.Module):
     def __init__(
         self,
@@ -179,6 +181,8 @@ class Discriminator(nn.Module):
 
         return self.to_logits(x)
 
+# positional encoding
+
 class ContinuousPositionBias(nn.Module):
     """ from https://arxiv.org/abs/2111.09883 """
 
@@ -213,6 +217,84 @@ class ContinuousPositionBias(nn.Module):
         bias = rearrange(rel_pos, 'i j h -> h i j')
         return x + bias
 
+# resnet encoder / decoder
+
+class ResnetEncDec(nn.Module):
+    def __init__(
+        self,
+        dim,
+        *,
+        channels = 3,
+        layers = 4,
+        layer_mults = None,
+        num_resnet_blocks = 1,
+        resnet_groups = 16,
+        first_conv_kernel_size = 5,
+        use_attn = True,
+        attn_dim_head = 64,
+        attn_heads = 8,
+        attn_dropout = 0.,
+    ):
+        super().__init__()
+        assert dim % resnet_groups == 0, f'dimension {dim} must be divisible by {resnet_groups} (groups for the groupnorm)'
+
+        self.layers = layers
+
+        self.encoders = MList([])
+        self.decoders = MList([])
+
+        layer_mults = default(layer_mults, list(map(lambda t: 2 ** t, range(layers))))
+        assert len(layer_mults) == layers, 'layer multipliers must be equal to designated number of layers'
+
+        layer_dims = [dim * mult for mult in layer_mults]
+        dims = (dim, *layer_dims)
+
+        self.encoded_dim = dims[-1]
+
+        dim_pairs = zip(dims[:-1], dims[1:])
+
+        append = lambda arr, t: arr.append(t)
+        prepend = lambda arr, t: arr.insert(0, t)
+
+        if not isinstance(num_resnet_blocks, tuple):
+            num_resnet_blocks = (*((0,) * (layers - 1)), num_resnet_blocks)
+
+        if not isinstance(use_attn, tuple):
+            use_attn = (*((False,) * (layers - 1)), use_attn)
+
+        assert len(num_resnet_blocks) == layers, 'number of resnet blocks config must be equal to number of layers'
+        assert len(use_attn) == layers
+
+        for layer_index, (dim_in, dim_out), layer_num_resnet_blocks, layer_use_attn in zip(range(layers), dim_pairs, num_resnet_blocks, use_attn):
+            append(self.encoders, nn.Sequential(nn.Conv2d(dim_in, dim_out, 4, stride = 2, padding = 1), leaky_relu()))
+            prepend(self.decoders, nn.Sequential(nn.ConvTranspose2d(dim_out, dim_in, 4, 2, 1), leaky_relu()))
+
+            if layer_use_attn:
+                prepend(self.decoders, VQGanAttention(dim = dim_out, heads = attn_heads, dim_head = attn_dim_head, dropout = attn_dropout))
+
+            for _ in range(layer_num_resnet_blocks):
+                append(self.encoders, ResBlock(dim_out, groups = resnet_groups))
+                prepend(self.decoders, GLUResBlock(dim_out, groups = resnet_groups))
+
+            if layer_use_attn:
+                append(self.encoders, VQGanAttention(dim = dim_out, heads = attn_heads, dim_head = attn_dim_head, dropout = attn_dropout))
+
+        prepend(self.encoders, nn.Conv2d(channels, dim, first_conv_kernel_size, padding = first_conv_kernel_size // 2))
+        append(self.decoders, nn.Conv2d(dim, channels, 1))
+
+    def get_encoded_fmap_size(self, image_size):
+        return image_size // (2 ** self.layers)
+
+    def encode(self, x):
+        for enc in self.encoders:
+            x = enc(x)
+        return x
+
+    def decode(self, x):
+        for dec in self.decoders:
+            x = dec(x)
+        return x
+
 class GLUResBlock(nn.Module):
     def __init__(self, chan, groups = 16):
         super().__init__()
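A quick shape sanity-check for the new `ResnetEncDec`, assuming it is importable from `dalle2_pytorch.vqgan_vae` (the module path is inferred from the imports elsewhere in this compare) and using illustrative sizes:

```python
import torch
from dalle2_pytorch.vqgan_vae import ResnetEncDec   # module path is an inference from this diff

enc_dec = ResnetEncDec(dim = 32, layers = 3)         # each layer halves the feature map

images = torch.randn(1, 3, 256, 256)
fmap = enc_dec.encode(images)

# with default layer_mults, encoded_dim = dim * 2 ** (layers - 1) = 128; spatial size = 256 // 2 ** 3 = 32
print(fmap.shape)                                    # expected: torch.Size([1, 128, 32, 32])
print(enc_dec.get_encoded_fmap_size(256))            # 32

recon = enc_dec.decode(fmap)
print(recon.shape)                                   # back to torch.Size([1, 3, 256, 256])
```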
@@ -246,6 +328,7 @@ class ResBlock(nn.Module):
         return self.net(x) + x
 
 # vqgan attention layer
 
 class VQGanAttention(nn.Module):
     def __init__(
         self,
@@ -290,6 +373,145 @@ class VQGanAttention(nn.Module):
 
         return out + residual
 
+# ViT encoder / decoder
+
+class RearrangeImage(nn.Module):
+    def forward(self, x):
+        n = x.shape[1]
+        w = h = int(sqrt(n))
+        return rearrange(x, 'b (h w) ... -> b h w ...', h = h, w = w)
+
+class Attention(nn.Module):
+    def __init__(
+        self,
+        dim,
+        *,
+        heads = 8,
+        dim_head = 32
+    ):
+        super().__init__()
+        self.norm = nn.LayerNorm(dim)
+        self.heads = heads
+        self.scale = dim_head ** -0.5
+        inner_dim = dim_head * heads
+
+        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
+        self.to_out = nn.Linear(inner_dim, dim)
+
+    def forward(self, x):
+        h = self.heads
+
+        x = self.norm(x)
+
+        q, k, v = self.to_qkv(x).chunk(3, dim = -1)
+        q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)
+
+        q = q * self.scale
+        sim = einsum('b h i d, b h j d -> b h i j', q, k)
+
+        sim = sim - sim.amax(dim = -1, keepdim = True).detach()
+        attn = sim.softmax(dim = -1)
+
+        out = einsum('b h i j, b h j d -> b h i d', attn, v)
+
+        out = rearrange(out, 'b h n d -> b n (h d)')
+        return self.to_out(out)
+
+def FeedForward(dim, mult = 4):
+    return nn.Sequential(
+        nn.LayerNorm(dim),
+        nn.Linear(dim, dim * mult, bias = False),
+        nn.GELU(),
+        nn.Linear(dim * mult, dim, bias = False)
+    )
+
+class Transformer(nn.Module):
+    def __init__(
+        self,
+        dim,
+        *,
+        layers,
+        dim_head = 32,
+        heads = 8,
+        ff_mult = 4
+    ):
+        super().__init__()
+        self.layers = nn.ModuleList([])
+        for _ in range(layers):
+            self.layers.append(nn.ModuleList([
+                Attention(dim = dim, dim_head = dim_head, heads = heads),
+                FeedForward(dim = dim, mult = ff_mult)
+            ]))
+
+        self.norm = nn.LayerNorm(dim)
+
+    def forward(self, x):
+        for attn, ff in self.layers:
+            x = attn(x) + x
+            x = ff(x) + x
+
+        return self.norm(x)
+
+class ViTEncDec(nn.Module):
+    def __init__(
+        self,
+        dim,
+        channels = 3,
+        layers = 4,
+        patch_size = 8,
+        dim_head = 32,
+        heads = 8,
+        ff_mult = 4
+    ):
+        super().__init__()
+        self.encoded_dim = dim
+        self.patch_size = patch_size
+
+        input_dim = channels * (patch_size ** 2)
+
+        self.encoder = nn.Sequential(
+            Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
+            nn.Linear(input_dim, dim),
+            Transformer(
+                dim = dim,
+                dim_head = dim_head,
+                heads = heads,
+                ff_mult = ff_mult,
+                layers = layers
+            ),
+            RearrangeImage(),
+            Rearrange('b h w c -> b c h w')
+        )
+
+        self.decoder = nn.Sequential(
+            Rearrange('b c h w -> b (h w) c'),
+            Transformer(
+                dim = dim,
+                dim_head = dim_head,
+                heads = heads,
+                ff_mult = ff_mult,
+                layers = layers
+            ),
+            nn.Sequential(
+                nn.Linear(dim, dim * 4, bias = False),
+                nn.Tanh(),
+                nn.Linear(dim * 4, input_dim, bias = False),
+            ),
+            RearrangeImage(),
+            Rearrange('b h w (p1 p2 c) -> b c (h p1) (w p2)', p1 = patch_size, p2 = patch_size)
+        )
+
+    def get_encoded_fmap_size(self, image_size):
+        return image_size // self.patch_size
+
+    def encode(self, x):
+        return self.encoder(x)
+
+    def decode(self, x):
+        return self.decoder(x)
+
+# main vqgan-vae classes
+
 class NullVQGanVAE(nn.Module):
     def __init__(
         self,
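The same sanity-check for the ViT variant; again the import path is inferred and the sizes are illustrative. Note the ViT keeps `dim` channels and divides the spatial size by `patch_size` rather than by `2 ** layers`:

```python
import torch
from dalle2_pytorch.vqgan_vae import ViTEncDec   # module path is an inference from this diff

enc_dec = ViTEncDec(dim = 256, patch_size = 8, layers = 2)

images = torch.randn(1, 3, 256, 256)
fmap = enc_dec.encode(images)

# 256 // 8 = 32 patches per side, each carrying a `dim`-wide feature
print(fmap.shape)                                 # expected: torch.Size([1, 256, 32, 32])
print(enc_dec.get_encoded_fmap_size(256))         # 32

recon = enc_dec.decode(fmap)
print(recon.shape)                                # torch.Size([1, 3, 256, 256])
```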
@@ -320,81 +542,45 @@ class VQGanVAE(nn.Module):
         image_size,
         channels = 3,
         layers = 4,
-        layer_mults = None,
         l2_recon_loss = False,
         use_hinge_loss = True,
-        num_resnet_blocks = 1,
         vgg = None,
+        vq_codebook_dim = 256,
         vq_codebook_size = 512,
         vq_decay = 0.8,
         vq_commitment_weight = 1.,
         vq_kmeans_init = True,
         vq_use_cosine_sim = True,
-        use_attn = True,
-        attn_dim_head = 64,
-        attn_heads = 8,
-        resnet_groups = 16,
-        attn_dropout = 0.,
-        first_conv_kernel_size = 5,
         use_vgg_and_gan = True,
+        vae_type = 'resnet',
+        discr_layers = 4,
         **kwargs
     ):
         super().__init__()
-        assert dim % resnet_groups == 0, f'dimension {dim} must be divisible by {resnet_groups} (groups for the groupnorm)'
 
         vq_kwargs, kwargs = groupby_prefix_and_trim('vq_', kwargs)
+        encdec_kwargs, kwargs = groupby_prefix_and_trim('encdec_', kwargs)
 
         self.image_size = image_size
         self.channels = channels
-        self.layers = layers
-        self.fmap_size = image_size // (layers ** 2)
         self.codebook_size = vq_codebook_size
 
-        self.encoders = MList([])
-        self.decoders = MList([])
-
-        layer_mults = default(layer_mults, list(map(lambda t: 2 ** t, range(layers))))
-        assert len(layer_mults) == layers, 'layer multipliers must be equal to designated number of layers'
-
-        layer_dims = [dim * mult for mult in layer_mults]
-        dims = (dim, *layer_dims)
-        codebook_dim = layer_dims[-1]
-
-        self.encoded_dim = dims[-1]
-
-        dim_pairs = zip(dims[:-1], dims[1:])
-
-        append = lambda arr, t: arr.append(t)
-        prepend = lambda arr, t: arr.insert(0, t)
-
-        if not isinstance(num_resnet_blocks, tuple):
-            num_resnet_blocks = (*((0,) * (layers - 1)), num_resnet_blocks)
-
-        if not isinstance(use_attn, tuple):
-            use_attn = (*((False,) * (layers - 1)), use_attn)
-
-        assert len(num_resnet_blocks) == layers, 'number of resnet blocks config must be equal to number of layers'
-        assert len(use_attn) == layers
-
-        for layer_index, (dim_in, dim_out), layer_num_resnet_blocks, layer_use_attn in zip(range(layers), dim_pairs, num_resnet_blocks, use_attn):
-            append(self.encoders, nn.Sequential(nn.Conv2d(dim_in, dim_out, 4, stride = 2, padding = 1), leaky_relu()))
-            prepend(self.decoders, nn.Sequential(nn.ConvTranspose2d(dim_out, dim_in, 4, 2, 1), leaky_relu()))
-
-            if layer_use_attn:
-                prepend(self.decoders, VQGanAttention(dim = dim_out, heads = attn_heads, dim_head = attn_dim_head, dropout = attn_dropout))
-
-            for _ in range(layer_num_resnet_blocks):
-                append(self.encoders, ResBlock(dim_out, groups = resnet_groups))
-                prepend(self.decoders, GLUResBlock(dim_out, groups = resnet_groups))
-
-            if layer_use_attn:
-                append(self.encoders, VQGanAttention(dim = dim_out, heads = attn_heads, dim_head = attn_dim_head, dropout = attn_dropout))
-
-        prepend(self.encoders, nn.Conv2d(channels, dim, first_conv_kernel_size, padding = first_conv_kernel_size // 2))
-        append(self.decoders, nn.Conv2d(dim, channels, 1))
+        if vae_type == 'resnet':
+            enc_dec_klass = ResnetEncDec
+        elif vae_type == 'vit':
+            enc_dec_klass = ViTEncDec
+        else:
+            raise ValueError(f'{vae_type} not valid')
+
+        self.enc_dec = enc_dec_klass(
+            dim = dim,
+            channels = channels,
+            layers = layers,
+            **encdec_kwargs
+        )
 
         self.vq = VQ(
-            dim = codebook_dim,
+            dim = self.enc_dec.encoded_dim,
+            codebook_dim = vq_codebook_dim,
             codebook_size = vq_codebook_size,
             decay = vq_decay,
             commitment_weight = vq_commitment_weight,
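To make the reworked constructor concrete: a hedged sketch of building the ViT-VQGAN flavor. The `encdec_` prefix routing comes from the `groupby_prefix_and_trim('encdec_', kwargs)` call above (so `encdec_patch_size` reaches `ViTEncDec` as `patch_size`); the values themselves are illustrative:

```python
import torch
from dalle2_pytorch import VQGanVAE

vit_vae = VQGanVAE(
    dim = 256,
    image_size = 256,
    layers = 2,                  # transformer depth for the ViT encoder / decoder
    vae_type = 'vit',            # 'resnet' (default) or 'vit' per this diff
    vq_codebook_dim = 256,
    vq_codebook_size = 512,
    use_vgg_and_gan = False,     # skip the perceptual / GAN pieces for this quick check
    encdec_patch_size = 8        # stripped of its 'encdec_' prefix and handed to ViTEncDec
)

images = torch.randn(1, 3, 256, 256)

fmap = vit_vae.encode(images)
print(fmap.shape)                            # torch.Size([1, 256, 32, 32]) before quantization
print(vit_vae.get_encoded_fmap_size(256))    # 32 with an 8-pixel patch
```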
@@ -427,13 +613,21 @@ class VQGanVAE(nn.Module):
 
         # gan related losses
 
+        layer_mults = list(map(lambda t: 2 ** t, range(discr_layers)))
+        layer_dims = [dim * mult for mult in layer_mults]
+        dims = (dim, *layer_dims)
+
         self.discr = Discriminator(dims = dims, channels = channels)
 
         self.discr_loss = hinge_discr_loss if use_hinge_loss else bce_discr_loss
         self.gen_loss = hinge_gen_loss if use_hinge_loss else bce_gen_loss
 
+    @property
+    def encoded_dim(self):
+        return self.enc_dec.encoded_dim
+
     def get_encoded_fmap_size(self, image_size):
-        return image_size // (2 ** self.layers)
+        return self.enc_dec.get_encoded_fmap_size(image_size)
 
     def copy_for_eval(self):
         device = next(self.parameters()).device
@@ -459,16 +653,13 @@ class VQGanVAE(nn.Module):
         return self.vq.codebook
 
     def encode(self, fmap):
-        for enc in self.encoders:
-            fmap = enc(fmap)
-
+        fmap = self.enc_dec.encode(fmap)
         return fmap
 
     def decode(self, fmap, return_indices_and_loss = False):
         fmap, indices, commit_loss = self.vq(fmap)
 
-        for dec in self.decoders:
-            fmap = dec(fmap)
+        fmap = self.enc_dec.decode(fmap)
 
         if not return_indices_and_loss:
             return fmap