Mirror of https://github.com/lucidrains/DALLE2-pytorch.git, synced 2026-02-23 09:14:22 +01:00

Compare commits: 1 commit, SHA `6520d17215`
`README.md`:

```diff
@@ -1126,7 +1126,6 @@ For detailed information on training the diffusion prior, please refer to the [d
 - [x] add inpainting ability using resampler from repaint paper https://arxiv.org/abs/2201.09865
 - [x] add the final combination of upsample feature maps, used in unet squared, seems to have an effect in local experiments
 - [ ] consider elucidated dalle2 https://arxiv.org/abs/2206.00364
-- [ ] add simple outpainting, text-guided 2x size the image for starters
 - [ ] interface out the vqgan-vae so a pretrained one can be pulled off the shelf to validate latent diffusion + DALL-E2
 
 ## Citations
```
`dalle2_pytorch/dalle2_pytorch.py`:

```diff
@@ -406,10 +406,7 @@ class OpenClipAdapter(BaseClipAdapter):
 
     @property
     def image_size(self):
-        image_size = self.clip.visual.image_size
-        if isinstance(image_size, tuple):
-            return max(image_size)
-        return image_size
+        return self.clip.visual.image_size
 
     @property
     def image_channels(self):
```
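For context on the dropped branch: some open_clip vision towers report `image_size` as an `(H, W)` tuple rather than a single int, which is what the removed `max()` normalization handled. A minimal sketch of that assumption (model name and pretrained tag are illustrative, not taken from this commit):

```python
import open_clip

# recent open_clip ViTs expose image_size as a tuple, e.g. (224, 224)
model, _, _ = open_clip.create_model_and_transforms('ViT-B-32', pretrained = 'laion2b_s34b_b79k')

image_size = model.visual.image_size
print(max(image_size) if isinstance(image_size, tuple) else image_size)  # 224
```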
```diff
@@ -1073,7 +1070,7 @@ class DiffusionPriorNetwork(nn.Module):
 
         null_text_embeds = self.null_text_embeds.to(text_embed.dtype)
 
-        text_embed = torch.where(
+        text_embeds = torch.where(
             text_keep_mask,
             text_embed,
             null_text_embeds
```
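The `torch.where` here implements classifier-free-guidance dropout: rows whose `text_keep_mask` entry is `False` have their text embedding swapped for the learned null embedding. A toy sketch of the mechanism (shapes illustrative):

```python
import torch

batch, dim = 4, 8
text_embed      = torch.randn(batch, 1, dim)      # conditioning that may be dropped
null_text_embed = torch.zeros(1, 1, dim)          # stands in for the learned self.null_text_embeds
text_keep_mask  = torch.rand(batch, 1, 1) > 0.2   # keep conditioning ~80% of the time

# where the mask is False, the null embedding is broadcast in
text_embeds = torch.where(text_keep_mask, text_embed, null_text_embed)
```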
```diff
@@ -1262,7 +1259,7 @@ class DiffusionPrior(nn.Module):
     def p_sample_loop_ddim(self, shape, text_cond, *, timesteps, eta = 1., cond_scale = 1.):
         batch, device, alphas, total_timesteps = shape[0], self.device, self.noise_scheduler.alphas_cumprod_prev, self.noise_scheduler.num_timesteps
 
-        times = torch.linspace(-1., total_timesteps, steps = timesteps + 1)[:-1]
+        times = torch.linspace(0., total_timesteps, steps = timesteps + 2)[:-1]
 
         times = list(reversed(times.int().tolist()))
         time_pairs = list(zip(times[:-1], times[1:]))
```
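The one-line change reshapes the DDIM timestep grid: the old grid started at a sentinel t = -1, while the new one stays non-negative and yields one more denoising pair for the same `timesteps` argument. A quick check (values chosen for illustration):

```python
import torch

total_timesteps, timesteps = 1000, 4

old = torch.linspace(-1., total_timesteps, steps = timesteps + 1)[:-1]
new = torch.linspace(0., total_timesteps, steps = timesteps + 2)[:-1]

print(list(reversed(old.int().tolist())))  # [749, 499, 249, -1] -> 3 pairs, last one ends at t = -1
print(list(reversed(new.int().tolist())))  # [800, 600, 400, 200, 0] -> 4 pairs, last one ends at t = 0
```

This also explains why the `if time_next < 0` early exit disappears two hunks below: the new grid never produces a negative `time_next`, and the existing `time_next > 0` check on the noise already handles the final step.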
```diff
@@ -1284,14 +1281,12 @@
 
             pred = self.net.forward_with_cond_scale(image_embed, time_cond, self_cond = self_cond, cond_scale = cond_scale, **text_cond)
 
-            # derive x0
-
             if self.predict_x_start:
                 x_start = pred
+                pred_noise = self.noise_scheduler.predict_noise_from_start(image_embed, t = time_cond, x0 = pred)
             else:
-                x_start = self.noise_scheduler.predict_start_from_noise(image_embed, t = time_cond, noise = pred_noise)
-
-            # clip x0 before maybe predicting noise
+                x_start = self.noise_scheduler.predict_start_from_noise(image_embed, t = time_cond, noise = pred)
+                pred_noise = pred
 
             if not self.predict_x_start:
                 x_start.clamp_(-1., 1.)
```
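Both branches now produce the `(x_start, pred_noise)` pair immediately. The two scheduler calls are inverses of each other through the standard DDPM forward identity x_t = sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * eps. A self-contained sketch (the function names mirror the scheduler's, but the bodies are re-derived here, not copied from the library):

```python
import torch

# x_t = sqrt(alpha_bar) * x0 + sqrt(1 - alpha_bar) * eps, solved for x0 or for eps
def predict_start_from_noise(x_t, alpha_bar, noise):
    return (x_t - (1 - alpha_bar).sqrt() * noise) / alpha_bar.sqrt()

def predict_noise_from_start(x_t, alpha_bar, x0):
    return (x_t - alpha_bar.sqrt() * x0) / (1 - alpha_bar).sqrt()

x0, eps, alpha_bar = torch.randn(8), torch.randn(8), torch.tensor(0.7)
x_t = alpha_bar.sqrt() * x0 + (1 - alpha_bar).sqrt() * eps

assert torch.allclose(predict_start_from_noise(x_t, alpha_bar, eps), x0, atol = 1e-5)
assert torch.allclose(predict_noise_from_start(x_t, alpha_bar, x0), eps, atol = 1e-5)
```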
```diff
@@ -1299,17 +1294,6 @@
             if self.predict_x_start and self.sampling_clamp_l2norm:
                 x_start = self.l2norm_clamp_embed(x_start)
 
-            # predict noise
-
-            if self.predict_x_start:
-                pred_noise = self.noise_scheduler.predict_noise_from_start(image_embed, t = time_cond, x0 = x_start)
-            else:
-                pred_noise = pred
-
-            if time_next < 0:
-                image_embed = x_start
-                continue
-
             c1 = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
             c2 = ((1 - alpha_next) - torch.square(c1)).sqrt()
             noise = torch.randn_like(image_embed) if time_next > 0 else 0.
```
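With `pred_noise` now set alongside `x_start`, this block is redundant, though there is a subtle behavioral shift: the old code derived `pred_noise` from the clamped (and possibly l2-normed) `x_start`, whereas the new code derives it from the raw `pred`. The surviving `c1`/`c2` lines are the coefficients of the stochastic DDIM update (eta = 0 makes sampling deterministic); the update line itself sits just below this hunk, so the sketch reconstructs it from the standard DDIM formulation rather than quoting the repo:

```python
import torch

# one DDIM step: x_next = sqrt(alpha_next) * x0 + c1 * z + c2 * eps_pred
def ddim_step(x_start, pred_noise, alpha, alpha_next, eta = 1.):
    c1 = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
    c2 = ((1 - alpha_next) - c1 ** 2).sqrt()
    noise = torch.randn_like(x_start)
    return x_start * alpha_next.sqrt() + c1 * noise + c2 * pred_noise
```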
```diff
@@ -2909,25 +2893,16 @@ class Decoder(nn.Module):
 
             pred, _ = self.parse_unet_output(learned_variance, unet_output)
 
-            # predict x0
-
             if predict_x_start:
                 x_start = pred
+                pred_noise = noise_scheduler.predict_noise_from_start(img, t = time_cond, x0 = pred)
             else:
                 x_start = noise_scheduler.predict_start_from_noise(img, t = time_cond, noise = pred)
-
-            # maybe clip x0
+                pred_noise = pred
 
             if clip_denoised:
                 x_start = self.dynamic_threshold(x_start)
 
-            # predict noise
-
-            if predict_x_start:
-                pred_noise = noise_scheduler.predict_noise_from_start(img, t = time_cond, x0 = pred)
-            else:
-                pred_noise = pred
-
             c1 = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
             c2 = ((1 - alpha_next) - torch.square(c1)).sqrt()
             noise = torch.randn_like(img) if not is_last_timestep else 0.
```
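This `Decoder` hunk mirrors the `DiffusionPrior` restructure above, with one difference: here the removed block already derived `pred_noise` from the raw `pred` rather than from the thresholded `x_start`, so for the decoder the change is a pure simplification with no behavioral shift.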
`dalle2_pytorch/version.py`:

```diff
@@ -1 +1 @@
-__version__ = '1.10.6'
+__version__ = '1.10.2'
```
`train_decoder.py`:

```diff
@@ -156,7 +156,7 @@ def generate_samples(trainer, example_data, clip=None, start_unet=1, end_unet=No
     if text_embeddings[0] is None:
         # Generate text embeddings from text
         assert clip is not None, "clip is None, but text_embeddings is None"
-        tokenized_texts = tokenize(txts, truncate=True).to(device=device)
+        tokenized_texts = tokenize(txts, truncate=True)
         text_embed, text_encodings = clip.embed_text(tokenized_texts)
         sample_params["text_encodings"] = text_encodings
     else:
```
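Here (and in the `train` hunk below) the explicit device move on the token ids is dropped, leaving them on CPU for downstream code to place. Assuming `tokenize` is OpenAI CLIP's `clip.tokenize` (an assumption; the import is not shown in this diff), a sketch of what it returns:

```python
# sketch, assuming tokenize comes from https://github.com/openai/CLIP
from clip import tokenize

token_ids = tokenize(["a corgi wearing a top hat"], truncate=True)
print(token_ids.shape, token_ids.device)  # torch.Size([1, 77]) cpu
```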
```diff
@@ -229,8 +229,8 @@ def evaluate_trainer(trainer, dataloader, device, start_unet, end_unet, clip=Non
         metrics["KID_std"] = kid_std.item()
     if exists(LPIPS):
         # Convert from [0, 1] to [-1, 1]
-        renorm_real_images = real_images.mul(2).sub(1).clamp(-1,1)
-        renorm_generated_images = generated_images.mul(2).sub(1).clamp(-1,1)
+        renorm_real_images = real_images.mul(2).sub(1)
+        renorm_generated_images = generated_images.mul(2).sub(1)
         lpips = LearnedPerceptualImagePatchSimilarity(**LPIPS, dist_sync_fn=null_sync)
         lpips.to(device=device)
         lpips.update(renorm_real_images, renorm_generated_images)
```
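The dropped `.clamp(-1, 1)` is a no-op whenever the inputs really are in [0, 1]: the affine map 2x - 1 then already lands in [-1, 1], which is the range torchmetrics' LPIPS expects with its default `normalize=False`. The clamp only ever mattered as a guard against out-of-range pixels. A quick check:

```python
import torch

images = torch.rand(4, 3, 64, 64)   # pixel values in [0, 1]
renorm = images.mul(2).sub(1)       # affine map to [-1, 1]

assert renorm.min() >= -1 and renorm.max() <= 1  # clamp(-1, 1) would change nothing here
```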
```diff
@@ -480,7 +480,7 @@ def train(
             else:
                 # Then we need to pass the text instead
                 assert clip is not None
-                tokenized_texts = tokenize(txt, truncate=True).to(device=inference_device)
+                tokenized_texts = tokenize(txt, truncate=True)
                 assert tokenized_texts.shape[0] == len(img), f"The number of texts ({tokenized_texts.shape[0]}) should be the same as the number of images ({len(img)})"
                 text_embed, text_encodings = clip.embed_text(tokenized_texts)
                 forward_params['text_encodings'] = text_encodings
```