Compare commits


1 commit
0.0.7 ... 0.0.4

3 changed files with 30 additions and 40 deletions

README.md

@@ -34,7 +34,7 @@ Once built, images will be saved to the same directory the command is invoked
To train DALLE-2 is a 3 step process, with the training of CLIP being the most important
To train CLIP, you can either use <a href="https://github.com/lucidrains/x-clip">x-clip</a> package, or join the LAION discord, where a lot of replication efforts are already underway.
To train CLIP, you can either use `x-clip` package, or join the LAION discord, where a lot of replication efforts are already underway.
This repository will demonstrate integration with `x-clip` for starters
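For reference, a minimal sketch of step 1 (training the `CLIP` class bundled with this repository). The `dim_text` / `dim_image` values mirror the constructor shown further down in this diff; the remaining hyperparameters and the `return_loss = True` flag are assumptions following the `x-clip` convention.

```python
import torch
from dalle2_pytorch import CLIP

# dim_text / dim_image match the constructor shown elsewhere in this diff;
# the other hyperparameters are illustrative assumptions
clip = CLIP(
    dim_text = 512,
    dim_image = 512,
    dim_latent = 512,
    num_text_tokens = 49408,
    text_enc_depth = 1,
    text_seq_len = 256,
    text_heads = 8,
    visual_enc_depth = 1,
    visual_image_size = 256,
    visual_patch_size = 32,
    visual_heads = 8
)

# mock text / image batch standing in for a real paired dataset
text = torch.randint(0, 49408, (4, 256))
images = torch.randn(4, 3, 256, 256)

# contrastive loss, assuming the x-clip style return_loss flag
loss = clip(text, images, return_loss = True)
loss.backward()
```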
@@ -136,14 +136,12 @@ loss.backward()
# then it will learn to generate images based on the CLIP image embeddings
```
Finally, the main contribution of the paper. The repository offers the diffusion prior network. It takes the CLIP text embeddings and tries to generate the CLIP image embeddings. Again, you will need the trained CLIP from the first step
Finally, the main contribution of the paper. The repository offers the diffusion prior network. It takes the CLIP text embeddings and tries to generate the CLIP image embeddings. Again, you will need the trained CLIP fron the first step
```python
import torch
from dalle2_pytorch import DiffusionPriorNetwork, DiffusionPrior, CLIP
# get trained CLIP from step one
clip = CLIP(
dim_text = 512,
dim_image = 512,
@@ -201,7 +199,7 @@ dalle2 = DALLE2(
decoder = decoder
)
# send the text as a string if you want to use the simple tokenizer from DALLE v1
# send the text as a string if you want to use the simple tokenizer from DALL-E1
# or you can do it as token ids, if you have your own tokenizer
texts = ['glistening morning dew on a flower petal']
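As a sketch of the token-id path mentioned in the comment above, the bundled simple tokenizer could be used to produce ids up front; the `dalle2_pytorch.tokenizer` import path is an assumption based on the `tokenizer.tokenize(...)` call that appears later in this diff.

```python
from dalle2_pytorch.tokenizer import tokenizer  # assumed module path for the bundled simple tokenizer

# returns a LongTensor of token ids, padded to the tokenizer's context length (e.g. 256)
token_ids = tokenizer.tokenize(['glistening morning dew on a flower petal'])
print(token_ids.shape)
```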
@@ -214,7 +212,10 @@ Let's see the whole script below
```python
import torch
from dalle2_pytorch import DALLE2, DiffusionPriorNetwork, DiffusionPrior, Unet, Decoder, CLIP
from dalle2_pytorch.dalle2_pytorch import DALLE2
from dalle2_pytorch import DiffusionPriorNetwork, DiffusionPrior, Unet, Decoder, CLIP
import torch
clip = CLIP(
dim_text = 512,
@@ -296,18 +297,13 @@ dalle2 = DALLE2(
decoder = decoder
)
images = dalle2(
['cute puppy chasing after a squirrel'],
cond_scale = 2. # classifier free guidance strength (> 1 would strengthen the condition)
)
images = dalle2(['cute puppy chasing after a squirrel'])
# save your image
```
Everything in this readme should run without error
For the layperson, no worries, training will all be automated into a CLI tool, at least for small scale training.
## Training CLI (wip)
<a href="https://github.com/lucidrains/stylegan2-pytorch">template</a>
@@ -322,7 +318,6 @@ For the layperson, no worries, training will all be automated into a CLI tool, a
- [ ] figure out all the current bag of tricks needed to make DDPMs great (starting with the blur trick mentioned in paper)
- [ ] train on a toy task, offer in colab
- [ ] add attention to unet - apply some personal tricks with efficient attention
- [ ] figure out the big idea behind latent diffusion and what can be ported over
## Citations
@@ -370,5 +365,3 @@ For the layperson, no worries, training will all be automated into a CLI tool, a
primaryClass = {cs.LG}
}
```
*Creating noise from data is easy; creating data from noise is generative modeling.* - Yang Song's <a href="https://arxiv.org/abs/2011.13456">paper</a>

dalle2_pytorch/dalle2_pytorch.py

@@ -246,16 +246,15 @@ class DiffusionPriorNetwork(nn.Module):
def forward_with_cond_scale(
self,
x,
*args,
*,
cond_scale = 1.,
**kwargs
):
logits = self.forward(x, *args, **kwargs)
if cond_scale == 1:
return logits
return self.forward(x, **kwargs)
null_logits = self.forward(x, *args, cond_drop_prob = 1., **kwargs)
logits = self.forward(x, **kwargs)
null_logits = self.forward(x, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
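Both sides of this hunk combine the conditional and unconditional predictions the same way: the network is run once normally and once with conditioning dropped (`cond_drop_prob = 1.`), then the two outputs are linearly extrapolated by `cond_scale`, i.e. classifier-free guidance. A standalone sketch of that combination, with a hypothetical helper name:

```python
import torch

def guided(cond_out, null_out, cond_scale = 1.):
    # cond_scale = 1. reproduces the conditional prediction;
    # cond_scale > 1. pushes further away from the unconditional prediction
    return null_out + (cond_out - null_out) * cond_scale

cond_out, null_out = torch.randn(2, 4, 512)
print(guided(cond_out, null_out, cond_scale = 2.).shape)  # torch.Size([4, 512])
```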
def forward(
@@ -375,13 +374,12 @@ class DiffusionPrior(nn.Module):
image_encoding = self.clip.visual_transformer(image)
image_cls = image_encoding[:, 0]
image_embed = self.clip.to_visual_latent(image_cls)
return l2norm(image_embed)
return image_embed
def get_text_cond(self, text):
text_encodings = self.clip.text_transformer(text)
text_cls, text_encodings = text_encodings[:, 0], text_encodings[:, 1:]
text_embed = self.clip.to_text_latent(text_cls)
text_embed = l2norm(text_embed)
return dict(text_encodings = text_encodings, text_embed = text_embed, mask = text != 0)
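The substantive change in these two methods is whether the CLIP latents are unit-normalized before being handed to the diffusion model. `l2norm` is presumably the usual normalization over the feature dimension, roughly:

```python
import torch.nn.functional as F

def l2norm(t):
    # presumed definition: scale each embedding to unit L2 norm along the last dimension
    return F.normalize(t, dim = -1)
```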
def q_mean_variance(self, x_start, t):
@@ -636,16 +634,15 @@ class Unet(nn.Module):
def forward_with_cond_scale(
self,
x,
*args,
*,
cond_scale = 1.,
**kwargs
):
logits = self.forward(x, *args, **kwargs)
if cond_scale == 1:
return logits
return self.forward(x, **kwargs)
null_logits = self.forward(x, *args, cond_drop_prob = 1., **kwargs)
logits = self.forward(x, **kwargs)
null_logits = self.forward(x, cond_drop_prob = 1., **kwargs)
return null_logits + (logits - null_logits) * cond_scale
def forward(
@@ -753,7 +750,7 @@ class Decoder(nn.Module):
image_encoding = self.clip.visual_transformer(image)
image_cls = image_encoding[:, 0]
image_embed = self.clip.to_visual_latent(image_cls)
return l2norm(image_embed)
return image_embed
def q_mean_variance(self, x_start, t):
mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
@@ -776,8 +773,8 @@ class Decoder(nn.Module):
posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, x, t, image_embed, clip_denoised = True, cond_scale = 1.):
x_recon = self.predict_start_from_noise(x, t = t, noise = self.net.forward_with_cond_scale(x, t, image_embed = image_embed, cond_scale = cond_scale))
def p_mean_variance(self, x, t, image_embed, clip_denoised: bool):
x_recon = self.predict_start_from_noise(x, t = t, noise = self.net(x, t, image_embed = image_embed))
if clip_denoised:
x_recon.clamp_(-1., 1.)
@@ -786,31 +783,31 @@ class Decoder(nn.Module):
return model_mean, posterior_variance, posterior_log_variance
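`predict_start_from_noise` is not shown in this diff; in standard DDPM terms it inverts the forward-noising identity `x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise` to recover an estimate of `x_0` from the predicted noise. A self-contained sketch of that identity (the repository's own version presumably reads precomputed buffers instead):

```python
import torch

def predict_start_from_noise(x_t, alpha_bar_t, noise):
    # solve the forward-noising identity for x_0
    return torch.rsqrt(alpha_bar_t) * x_t - torch.sqrt(1. / alpha_bar_t - 1.) * noise
```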
@torch.no_grad()
def p_sample(self, x, t, image_embed, cond_scale = 1., clip_denoised = True, repeat_noise = False):
def p_sample(self, x, t, image_embed, clip_denoised = True, repeat_noise = False):
b, *_, device = *x.shape, x.device
model_mean, _, model_log_variance = self.p_mean_variance(x = x, t = t, image_embed = image_embed, cond_scale = cond_scale, clip_denoised = clip_denoised)
model_mean, _, model_log_variance = self.p_mean_variance(x = x, t = t, image_embed = image_embed, clip_denoised = clip_denoised)
noise = noise_like(x.shape, device, repeat_noise)
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
@torch.no_grad()
def p_sample_loop(self, shape, image_embed, cond_scale = 1):
def p_sample_loop(self, shape, image_embed):
device = self.betas.device
b = shape[0]
img = torch.randn(shape, device=device)
for i in tqdm(reversed(range(0, self.num_timesteps)), desc='sampling loop time step', total=self.num_timesteps):
img = self.p_sample(img, torch.full((b,), i, device = device, dtype = torch.long), image_embed = image_embed, cond_scale = cond_scale)
img = self.p_sample(img, torch.full((b,), i, device = device, dtype = torch.long), image_embed = image_embed)
return img
@torch.no_grad()
def sample(self, image_embed, cond_scale = 1.):
def sample(self, image_embed):
batch_size = image_embed.shape[0]
image_size = self.image_size
channels = self.channels
return self.p_sample_loop((batch_size, channels, image_size, image_size), image_embed = image_embed, cond_scale = cond_scale)
return self.p_sample_loop((batch_size, channels, image_size, image_size), image_embed = image_embed)
def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
@@ -871,8 +868,7 @@ class DALLE2(nn.Module):
@torch.no_grad()
def forward(
self,
text,
cond_scale = 1.
text
):
device = next(self.parameters()).device
@@ -880,6 +876,7 @@ class DALLE2(nn.Module):
text = [text] if not isinstance(text, (list, tuple)) else text
text = tokenizer.tokenize(text).to(device)
print(text.shape, type(text))
image_embed = self.prior.sample(text, num_samples_per_batch = self.prior_num_samples)
images = self.decoder.sample(image_embed, cond_scale = cond_scale)
images = self.decoder.sample(image_embed)
return images

setup.py

@@ -10,7 +10,7 @@ setup(
'dream = dalle2_pytorch.cli:dream'
],
},
version = '0.0.7',
version = '0.0.4',
license='MIT',
description = 'DALL-E 2',
author = 'Phil Wang',