Mirror of https://github.com/lucidrains/DALLE2-pytorch.git
Synced 2026-02-14 17:24:25 +01:00
Compare commits

1 commit

| Author | SHA1 | Date |
|---|---|---|
|  | 9232b01ff6 |  |
```diff
@@ -1870,14 +1870,13 @@ class Decoder(BaseGaussianDiffusion):
         return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
 
     @torch.no_grad()
-    def p_sample_loop(self, unet, shape, image_embed, predict_x_start = False, learned_variance = False, clip_denoised = True, lowres_cond_img = None, text_encodings = None, text_mask = None, cond_scale = 1, is_latent_diffusion = False):
+    def p_sample_loop(self, unet, shape, image_embed, predict_x_start = False, learned_variance = False, clip_denoised = True, lowres_cond_img = None, text_encodings = None, text_mask = None, cond_scale = 1):
         device = self.betas.device
 
         b = shape[0]
         img = torch.randn(shape, device = device)
 
-        if not is_latent_diffusion:
-            lowres_cond_img = maybe(normalize_neg_one_to_one)(lowres_cond_img)
+        lowres_cond_img = maybe(normalize_neg_one_to_one)(lowres_cond_img)
 
         for i in tqdm(reversed(range(0, self.num_timesteps)), desc = 'sampling loop time step', total = self.num_timesteps):
             img = self.p_sample(
```
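For context, both sides of this hunk lean on the codebase's small functional helpers. Below is a minimal sketch of the semantics assumed for `maybe`, `normalize_neg_one_to_one`, and `unnormalize_zero_to_one` (reconstructed from how the hunks use them, not copied from this commit):

```python
def exists(val):
    # a value counts as provided when it is not None
    return val is not None

def maybe(fn):
    # wrap fn so that None passes through untouched; this is what lets the
    # optional lowres_cond_img skip normalization when it is absent
    def inner(x, *args, **kwargs):
        if not exists(x):
            return x
        return fn(x, *args, **kwargs)
    return inner

def normalize_neg_one_to_one(img):
    # map [0, 1] images into the [-1, 1] range the diffusion model works in
    return img * 2 - 1

def unnormalize_zero_to_one(normed_img):
    # inverse map, applied to the final sample before returning it
    return (normed_img + 1) * 0.5
```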
```diff
@@ -1897,14 +1896,13 @@ class Decoder(BaseGaussianDiffusion):
         unnormalize_img = unnormalize_zero_to_one(img)
         return unnormalize_img
 
-    def p_losses(self, unet, x_start, times, *, image_embed, lowres_cond_img = None, text_encodings = None, text_mask = None, predict_x_start = False, noise = None, learned_variance = False, clip_denoised = False, is_latent_diffusion = False):
+    def p_losses(self, unet, x_start, times, *, image_embed, lowres_cond_img = None, text_encodings = None, text_mask = None, predict_x_start = False, noise = None, learned_variance = False, clip_denoised = False):
         noise = default(noise, lambda: torch.randn_like(x_start))
 
         # normalize to [-1, 1]
 
-        if not is_latent_diffusion:
-            x_start = normalize_neg_one_to_one(x_start)
-            lowres_cond_img = maybe(normalize_neg_one_to_one)(lowres_cond_img)
+        x_start = normalize_neg_one_to_one(x_start)
+        lowres_cond_img = maybe(normalize_neg_one_to_one)(lowres_cond_img)
 
         # get x_t
 
```
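The `# get x_t` comment above marks the forward-diffusion draw that `p_losses` performs next. A hedged sketch of that step (the helper name and schedule indexing here are illustrative, following the standard DDPM formulation):

```python
import torch

def q_sample(x_start, times, alphas_cumprod, noise):
    # draw x_t ~ q(x_t | x_0): shrink the clean image toward zero and mix in
    # Gaussian noise, with the ratio set by the cumulative alpha at `times`
    alpha_bar = alphas_cumprod[times].view(-1, 1, 1, 1)  # broadcast over (b, c, h, w)
    return alpha_bar.sqrt() * x_start + (1. - alpha_bar).sqrt() * noise
```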
```diff
@@ -1982,7 +1980,7 @@ class Decoder(BaseGaussianDiffusion):
         batch_size = image_embed.shape[0]
 
         if exists(text) and not exists(text_encodings) and not self.unconditional:
-            assert exists(self.clip)
+            assert exist(self.clip)
             _, text_encodings, text_mask = self.clip.embed_text(text)
 
         assert not (self.condition_on_text_encodings and not exists(text_encodings)), 'text or text encodings must be passed into decoder if specified'
```
```diff
@@ -2018,8 +2016,7 @@ class Decoder(BaseGaussianDiffusion):
             predict_x_start = predict_x_start,
             learned_variance = learned_variance,
             clip_denoised = not is_latent_diffusion,
-            lowres_cond_img = lowres_cond_img,
-            is_latent_diffusion = is_latent_diffusion
+            lowres_cond_img = lowres_cond_img
         )
 
         img = vae.decode(img)
```
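The `clip_denoised = not is_latent_diffusion` argument above encodes that clamping is only meaningful in pixel space: normalized images live in [-1, 1], while VQGAN latents are unbounded. A minimal sketch of the convention, assuming the usual clamp on the predicted x_start:

```python
def maybe_clip(x_start, clip_denoised):
    # pixel-space predictions are clamped back to the training range;
    # latent-space predictions have no fixed range and are left untouched
    if clip_denoised:
        x_start = x_start.clamp(-1., 1.)
    return x_start
```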
```diff
@@ -2078,14 +2075,12 @@ class Decoder(BaseGaussianDiffusion):
             image = aug(image)
             lowres_cond_img = aug(lowres_cond_img, params = aug._params)
 
-        is_latent_diffusion = not isinstance(vae, NullVQGanVAE)
-
         vae.eval()
         with torch.no_grad():
             image = vae.encode(image)
             lowres_cond_img = maybe(vae.encode)(lowres_cond_img)
 
-        return self.p_losses(unet, image, times, image_embed = image_embed, text_encodings = text_encodings, text_mask = text_mask, lowres_cond_img = lowres_cond_img, predict_x_start = predict_x_start, learned_variance = learned_variance, is_latent_diffusion = is_latent_diffusion)
+        return self.p_losses(unet, image, times, image_embed = image_embed, text_encodings = text_encodings, text_mask = text_mask, lowres_cond_img = lowres_cond_img, predict_x_start = predict_x_start, learned_variance = learned_variance)
 
 # main class
 
```
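The removed `is_latent_diffusion` assignment derives the flag from the VAE type: a `NullVQGanVAE` means the unet trains directly in pixel space. A sketch of that dispatch, with a hypothetical stub standing in for the repo's `NullVQGanVAE` (the identity `encode` here is an assumption for illustration):

```python
class NullVQGanVAE:
    # stub for illustration: assume the null VAE leaves images untouched,
    # so "encoding" with it keeps the decoder in pixel space
    def encode(self, x):
        return x

def is_latent(vae):
    # latent diffusion is active whenever a real VQGAN-style VAE is attached
    return not isinstance(vae, NullVQGanVAE)
```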
```diff
@@ -1,59 +0,0 @@
-from pathlib import Path
-
-import torch
-from torch.utils import data
-from torchvision import transforms, utils
-
-from PIL import Image
-
-# helpers functions
-
-def cycle(dl):
-    while True:
-        for data in dl:
-            yield data
-
-# dataset and dataloader
-
-class Dataset(data.Dataset):
-    def __init__(
-        self,
-        folder,
-        image_size,
-        exts = ['jpg', 'jpeg', 'png']
-    ):
-        super().__init__()
-        self.folder = folder
-        self.image_size = image_size
-        self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
-
-        self.transform = transforms.Compose([
-            transforms.Resize(image_size),
-            transforms.RandomHorizontalFlip(),
-            transforms.CenterCrop(image_size),
-            transforms.ToTensor()
-        ])
-
-    def __len__(self):
-        return len(self.paths)
-
-    def __getitem__(self, index):
-        path = self.paths[index]
-        img = Image.open(path)
-        return self.transform(img)
-
-def get_images_dataloader(
-    folder,
-    *,
-    batch_size,
-    image_size,
-    shuffle = True,
-    cycle_dl = True,
-    pin_memory = True
-):
-    ds = Dataset(folder, image_size)
-    dl = data.DataLoader(ds, batch_size = batch_size, shuffle = shuffle, pin_memory = pin_memory)
-
-    if cycle_dl:
-        dl = cycle(dl)
-    return dl
```
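For reference, the deleted module was consumed roughly as follows; the folder path and sizes below are placeholders:

```python
# illustrative usage of the removed helper; path and sizes are placeholders
dl = get_images_dataloader(
    './path/to/images',
    batch_size = 16,
    image_size = 256
)

images = next(dl)  # with cycle_dl = True the loader is an endless iterator
```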
```diff
@@ -179,8 +179,8 @@ class EMA(nn.Module):
         self.online_model = model
         self.ema_model = copy.deepcopy(model)
 
+        self.update_after_step = update_after_step # only start EMA after this step number, starting at 0
         self.update_every = update_every
-        self.update_after_step = update_after_step // update_every # only start EMA after this step number, starting at 0
 
         self.register_buffer('initted', torch.Tensor([False]))
         self.register_buffer('step', torch.tensor([0.]))
```
```diff
@@ -189,21 +189,14 @@ class EMA(nn.Module):
         device = self.initted.device
         self.ema_model.to(device)
 
-    def copy_params_from_model_to_ema(self):
-        self.ema_model.state_dict(self.online_model.state_dict())
-
     def update(self):
         self.step += 1
 
-        if (self.step % self.update_every) != 0:
-            return
-
-        if self.step <= self.update_after_step:
-            self.copy_params_from_model_to_ema()
+        if self.step <= self.update_after_step or (self.step % self.update_every) != 0:
             return
 
         if not self.initted:
-            self.copy_params_from_model_to_ema()
+            self.ema_model.state_dict(self.online_model.state_dict())
             self.initted.data.copy_(torch.Tensor([True]))
 
         self.update_moving_average(self.ema_model, self.online_model)
```
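The two sides differ in how warmup interacts with the `update_every` stride: the left-hand version still hard-copies the online weights into the EMA model on warmup steps, while the right-hand version simply returns. One caveat worth flagging: `self.ema_model.state_dict(self.online_model.state_dict())` does not copy weights into the EMA model (it writes the EMA state into the passed dict); an actually-working copy would use `load_state_dict`, as in this sketch of the left-hand scheduling:

```python
def update(self):
    # scheduling from the left-hand version, with the weight copy made explicit
    self.step += 1

    if (self.step % self.update_every) != 0:
        return  # only touch the EMA copy every `update_every` steps

    if self.step <= self.update_after_step:
        # during warmup the EMA model just shadows the online model exactly
        self.ema_model.load_state_dict(self.online_model.state_dict())
        return

    if not self.initted:
        self.ema_model.load_state_dict(self.online_model.state_dict())
        self.initted.data.copy_(torch.Tensor([True]))

    self.update_moving_average(self.ema_model, self.online_model)
```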
```diff
@@ -412,9 +405,6 @@ class DecoderTrainer(nn.Module):
     @torch.no_grad()
     @cast_torch_tensor
     def sample(self, *args, **kwargs):
-        if kwargs.pop('use_non_ema', False):
-            return self.decoder.sample(*args, **kwargs)
-
         if self.use_ema:
             trainable_unets = self.decoder.unets
             self.decoder.unets = self.unets # swap in exponential moving averaged unets for sampling
```
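The hunk is truncated before the unets are swapped back, but the idiom is a temporary substitution: sample with the EMA copies, then restore the trainable unets. A minimal sketch of the full pattern (the restore step is an assumption based on the truncated context):

```python
def sample(self, *args, **kwargs):
    # swap EMA unets in for sampling, then restore the online ones
    if not self.use_ema:
        return self.decoder.sample(*args, **kwargs)

    trainable_unets = self.decoder.unets
    self.decoder.unets = self.unets  # exponential moving averaged unets
    try:
        return self.decoder.sample(*args, **kwargs)
    finally:
        self.decoder.unets = trainable_unets  # hand the online unets back to training
```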