Mirror of https://github.com/lucidrains/DALLE2-pytorch.git (synced 2026-02-12 11:34:29 +01:00)
Compare commits
6 Commits
| Author | SHA1 | Date |
|---|---|---|
| | c18c080128 | |
| | b39653cf96 | |
| | 39f8b6cf16 | |
| | d0c11b30b0 | |
| | 86e2d5ba84 | |
| | 0d82dff9c5 | |
@@ -634,10 +634,12 @@ Alternatively, you can also use <a href="https://github.com/mlfoundations/open_c
$ pip install open-clip-torch
```

Ex. using the <a href="https://laion.ai/blog/large-openclip/">SOTA Open Clip</a> model trained by <a href="https://github.com/rom1504">Romain</a>

```python
from dalle2_pytorch import OpenClipAdapter

clip = OpenClipAdapter()
clip = OpenClipAdapter('ViT-H/14')
```

Now you'll just have to worry about training the Prior and the Decoder!

@@ -1066,7 +1068,7 @@ dataloader = create_image_embedding_dataloader(
)
for img, emb in dataloader:
    print(img.shape) # torch.Size([32, 3, 256, 256])
    print(emb.shape) # torch.Size([32, 512])
    print(emb["img"].shape) # torch.Size([32, 512])
    # Train decoder only as shown above

# Or create a dataset without a loader so you can configure it manually

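The changed line in this hunk accesses the batch's embedding by key (`emb["img"]`) instead of treating `emb` as a bare tensor. A self-contained toy loop, with a hard-coded stand-in in place of the real `create_image_embedding_dataloader` output, purely to show the access pattern:

```python
import torch

# Hypothetical stand-in for the image-embedding dataloader above: each batch
# yields the images plus a mapping of embeddings keyed by modality.
fake_dataloader = [
    (torch.randn(32, 3, 256, 256), {"img": torch.randn(32, 512)})
    for _ in range(2)
]

for img, emb in fake_dataloader:
    image_embed = emb["img"]  # embeddings are looked up by key, not used as a bare tensor
    assert image_embed.shape == (32, 512)
```
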
@@ -314,7 +314,10 @@ class OpenAIClipAdapter(BaseClipAdapter):
        self.eos_id = 49407 # for handling 0 being also '!'

        text_attention_final = self.find_layer('ln_final')

        self.dim_latent_ = text_attention_final.weight.shape[0]
        self.handle = text_attention_final.register_forward_hook(self._hook)

        self.clip_normalize = preprocess.transforms[-1]
        self.cleared = False

@@ -333,7 +336,7 @@ class OpenAIClipAdapter(BaseClipAdapter):

    @property
    def dim_latent(self):
        return 512
        return self.dim_latent_

    @property
    def image_size(self):

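The two `OpenAIClipAdapter` hunks above stop hard-coding the text latent dimension: it is now read off the `ln_final` layer's weight, and a forward hook on that layer captures its output. A toy illustration of the same pattern, with a plain `nn.LayerNorm` standing in for CLIP's final layer norm (not the adapter's actual code):

```python
import torch
from torch import nn

text_final_ln = nn.LayerNorm(512)           # stands in for CLIP's 'ln_final'
dim_latent = text_final_ln.weight.shape[0]  # 512, read from the layer instead of hard-coded

captured = {}

def hook(module, inputs, output):
    captured['text_encodings'] = output     # saved on every forward pass through the layer

handle = text_final_ln.register_forward_hook(hook)

text_final_ln(torch.randn(1, 77, 512))      # any forward pass now populates `captured`
print(dim_latent, captured['text_encodings'].shape)
handle.remove()
```
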
@@ -406,7 +409,10 @@ class OpenClipAdapter(BaseClipAdapter):

    @property
    def image_size(self):
        return self.clip.visual.image_size
        image_size = self.clip.visual.image_size
        if isinstance(image_size, tuple):
            return max(image_size)
        return image_size

    @property
    def image_channels(self):

@@ -1281,12 +1287,14 @@ class DiffusionPrior(nn.Module):

            pred = self.net.forward_with_cond_scale(image_embed, time_cond, self_cond = self_cond, cond_scale = cond_scale, **text_cond)

            # derive x0

            if self.predict_x_start:
                x_start = pred
                pred_noise = self.noise_scheduler.predict_noise_from_start(image_embed, t = time_cond, x0 = pred)
            else:
                x_start = self.noise_scheduler.predict_start_from_noise(image_embed, t = time_cond, noise = pred)
                pred_noise = pred
                x_start = self.noise_scheduler.predict_start_from_noise(image_embed, t = time_cond, noise = pred_noise)

            # clip x0 before maybe predicting noise

            if not self.predict_x_start:
                x_start.clamp_(-1., 1.)

@@ -1294,6 +1302,13 @@ class DiffusionPrior(nn.Module):
            if self.predict_x_start and self.sampling_clamp_l2norm:
                x_start = self.l2norm_clamp_embed(x_start)

            # predict noise

            if self.predict_x_start:
                pred_noise = self.noise_scheduler.predict_noise_from_start(image_embed, t = time_cond, x0 = x_start)
            else:
                pred_noise = pred

            if time_next < 0:
                image_embed = x_start
                continue

@@ -2897,16 +2912,25 @@ class Decoder(nn.Module):

            pred, _ = self.parse_unet_output(learned_variance, unet_output)

            # predict x0

            if predict_x_start:
                x_start = pred
                pred_noise = noise_scheduler.predict_noise_from_start(img, t = time_cond, x0 = pred)
            else:
                x_start = noise_scheduler.predict_start_from_noise(img, t = time_cond, noise = pred)
                pred_noise = pred

            # maybe clip x0

            if clip_denoised:
                x_start = self.dynamic_threshold(x_start)

            # predict noise

            if predict_x_start:
                pred_noise = noise_scheduler.predict_noise_from_start(img, t = time_cond, x0 = pred)
            else:
                pred_noise = pred

            c1 = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
            c2 = ((1 - alpha_next) - torch.square(c1)).sqrt()
            noise = torch.randn_like(img) if not is_last_timestep else 0.

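The three DDIM hunks above (two in `DiffusionPrior`, one in `Decoder`) move the derivation of the predicted noise so that it happens after x0 has been clipped or clamped, instead of before. A minimal, hypothetical sketch of one DDIM update in that order, with a toy two-number schedule rather than the library's noise scheduler (`alpha` and `alpha_next` stand for cumulative alphas at the current and next timestep):

```python
import torch

def ddim_step(x_t, pred_x0, alpha, alpha_next, eta = 0.):
    x0 = pred_x0.clamp(-1., 1.)                                   # clip x0 first
    pred_noise = (x_t - alpha.sqrt() * x0) / (1. - alpha).sqrt()  # noise consistent with the clamped x0
    c1 = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
    c2 = ((1 - alpha_next) - c1 ** 2).sqrt()
    return x0 * alpha_next.sqrt() + c1 * torch.randn_like(x_t) + c2 * pred_noise

x_t = torch.randn(2, 512)                       # hypothetical noisy sample
x0_hat = torch.tanh(torch.randn(2, 512))        # hypothetical network prediction of x0
alpha, alpha_next = torch.tensor(0.90), torch.tensor(0.95)
x_next = ddim_step(x_t, x0_hat, alpha, alpha_next)
```
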
@@ -1 +1 @@
__version__ = '1.10.4'
__version__ = '1.10.7'

@@ -156,7 +156,7 @@ def generate_samples(trainer, example_data, clip=None, start_unet=1, end_unet=No
        if text_embeddings[0] is None:
            # Generate text embeddings from text
            assert clip is not None, "clip is None, but text_embeddings is None"
            tokenized_texts = tokenize(txts, truncate=True)
            tokenized_texts = tokenize(txts, truncate=True).to(device=device)
            text_embed, text_encodings = clip.embed_text(tokenized_texts)
            sample_params["text_encodings"] = text_encodings
        else:

@@ -229,8 +229,8 @@ def evaluate_trainer(trainer, dataloader, device, start_unet, end_unet, clip=Non
        metrics["KID_std"] = kid_std.item()
    if exists(LPIPS):
        # Convert from [0, 1] to [-1, 1]
        renorm_real_images = real_images.mul(2).sub(1)
        renorm_generated_images = generated_images.mul(2).sub(1)
        renorm_real_images = real_images.mul(2).sub(1).clamp(-1,1)
        renorm_generated_images = generated_images.mul(2).sub(1).clamp(-1,1)
        lpips = LearnedPerceptualImagePatchSimilarity(**LPIPS, dist_sync_fn=null_sync)
        lpips.to(device=device)
        lpips.update(renorm_real_images, renorm_generated_images)

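The evaluation hunk above adds a `clamp` so the rescaled images are guaranteed to stay inside [-1, 1], the range the LPIPS metric is fed here. A tiny sketch of just that renormalization, on a hypothetical batch and independent of torchmetrics:

```python
import torch

images_01 = torch.rand(8, 3, 256, 256)             # hypothetical batch of images in [0, 1]
images_m11 = images_01.mul(2).sub(1).clamp(-1, 1)  # rescaled to [-1, 1], clamp as a safety net
assert images_m11.min() >= -1 and images_m11.max() <= 1
```
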
@@ -480,7 +480,7 @@ def train(
            else:
                # Then we need to pass the text instead
                assert clip is not None
                tokenized_texts = tokenize(txt, truncate=True)
                tokenized_texts = tokenize(txt, truncate=True).to(device=inference_device)
                assert tokenized_texts.shape[0] == len(img), f"The number of texts ({tokenized_texts.shape[0]}) should be the same as the number of images ({len(img)})"
                text_embed, text_encodings = clip.embed_text(tokenized_texts)
                forward_params['text_encodings'] = text_encodings

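The `tokenize(...)` hunks in `generate_samples` and `train` above move the token ids onto the model's device before `clip.embed_text` runs, since tensors created by the tokenizer start out on the CPU. A toy illustration of the device move, with an `nn.Embedding` standing in for the text encoder (a sketch only, not the project's code):

```python
import torch
from torch import nn

inference_device = "cuda" if torch.cuda.is_available() else "cpu"

text_encoder = nn.Embedding(49408, 512).to(inference_device)  # toy stand-in for the CLIP text encoder
tokens = torch.randint(0, 49408, (4, 77))                     # token ids start out on the CPU
tokens = tokens.to(device=inference_device)                   # match the model's device before the forward pass
out = text_encoder(tokens)
print(out.shape, out.device)
```
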