Mirror of https://github.com/lucidrains/DALLE2-pytorch.git (synced 2026-02-23 17:54:20 +01:00)

Compare commits

5 Commits
| SHA1 |
|---|
| c18c080128 |
| b39653cf96 |
| 39f8b6cf16 |
| d0c11b30b0 |
| 86e2d5ba84 |
@@ -634,10 +634,12 @@ Alternatively, you can also use <a href="https://github.com/mlfoundations/open_c
 $ pip install open-clip-torch
 ```
 
+Ex. using the <a href="https://laion.ai/blog/large-openclip/">SOTA Open Clip</a> model trained by <a href="https://github.com/rom1504">Romain</a>
+
 ```python
 from dalle2_pytorch import OpenClipAdapter
 
-clip = OpenClipAdapter()
+clip = OpenClipAdapter('ViT-H/14')
 ```
 
 Now you'll just have to worry about training the Prior and the Decoder!
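
A note on this README hunk: the default `OpenClipAdapter()` loads a smaller open-clip model, whereas `'ViT-H/14'` selects the large LAION-trained checkpoint referenced in the linked blog post, which changes the latent width the prior and decoder must be configured for. A minimal sketch of checking that after the swap (the printed values are assumptions about ViT-H/14, not taken from this diff):

```python
from dalle2_pytorch import OpenClipAdapter

clip = OpenClipAdapter('ViT-H/14')

# the prior and decoder must be built against this latent width,
# so it is worth inspecting it after switching models
print(clip.dim_latent)   # expected 1024 for ViT-H/14 (assumption)
print(clip.image_size)   # expected 224 (assumption)
```
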
@@ -1066,7 +1068,7 @@ dataloader = create_image_embedding_dataloader(
 )
 for img, emb in dataloader:
     print(img.shape) # torch.Size([32, 3, 256, 256])
-    print(emb.shape) # torch.Size([32, 512])
+    print(emb["img"].shape) # torch.Size([32, 512])
     # Train decoder only as shown above
 
 # Or create a dataset without a loader so you can configure it manually
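
On the second README hunk: `create_image_embedding_dataloader` now yields the embedding as a mapping keyed by modality rather than a bare tensor, so `emb.shape` becomes `emb["img"].shape`. A self-contained illustration with dummy tensors (the 512-dim size comes from the README comment above; any other keys would be assumptions):

```python
import torch

# shape of one batch as printed in the README example above
emb = {"img": torch.randn(32, 512)}   # image embeddings now live under the "img" key

print(emb["img"].shape)               # torch.Size([32, 512])
```
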
@@ -314,7 +314,10 @@ class OpenAIClipAdapter(BaseClipAdapter):
         self.eos_id = 49407 # for handling 0 being also '!'
 
         text_attention_final = self.find_layer('ln_final')
+
+        self.dim_latent_ = text_attention_final.weight.shape[0]
+
         self.handle = text_attention_final.register_forward_hook(self._hook)
 
         self.clip_normalize = preprocess.transforms[-1]
         self.cleared = False
@@ -333,7 +336,7 @@ class OpenAIClipAdapter(BaseClipAdapter):
 
     @property
     def dim_latent(self):
-        return 512
+        return self.dim_latent_
 
     @property
     def image_size(self):
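
Taken together, the two hunks above replace the hard-coded `return 512` with a latent width read off the `ln_final` LayerNorm captured in `__init__`, so larger OpenAI CLIP checkpoints report the correct `dim_latent`. A sketch of the same lookup against the OpenAI `clip` package (model name and printed width are illustrative assumptions, not taken from this diff):

```python
import clip  # OpenAI CLIP package

# load a larger variant whose latent width is not 512
model, preprocess = clip.load('ViT-L/14')

# the final LayerNorm's weight length tracks the text transformer width,
# which is what OpenAIClipAdapter now records as dim_latent_
ln_final = model.ln_final
print(ln_final.weight.shape[0])   # 768 for ViT-L/14, vs the old hard-coded 512
```
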
@@ -406,7 +409,10 @@ class OpenClipAdapter(BaseClipAdapter):
 
     @property
     def image_size(self):
-        return self.clip.visual.image_size
+        image_size = self.clip.visual.image_size
+        if isinstance(image_size, tuple):
+            return max(image_size)
+        return image_size
 
     @property
     def image_channels(self):
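
On the `OpenClipAdapter.image_size` change: newer open_clip releases store the vision tower's `image_size` as an `(H, W)` tuple rather than an int, which breaks callers expecting a scalar. A small sketch of the behaviour the adapter now normalizes (model and pretrained tag are illustrative; the exact return type depends on the open_clip version):

```python
import open_clip

# create_model_and_transforms returns (model, preprocess_train, preprocess_val)
model, _, _ = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')

size = model.visual.image_size        # int in older releases, (H, W) tuple in newer ones
if isinstance(size, tuple):
    size = max(size)                  # mirrors the adapter's new normalization
print(size)                           # 224
```
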
@@ -1 +1 @@
-__version__ = '1.10.5'
+__version__ = '1.10.7'
@@ -156,7 +156,7 @@ def generate_samples(trainer, example_data, clip=None, start_unet=1, end_unet=No
         if text_embeddings[0] is None:
             # Generate text embeddings from text
             assert clip is not None, "clip is None, but text_embeddings is None"
-            tokenized_texts = tokenize(txts, truncate=True)
+            tokenized_texts = tokenize(txts, truncate=True).to(device=device)
             text_embed, text_encodings = clip.embed_text(tokenized_texts)
             sample_params["text_encodings"] = text_encodings
         else:
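
The `.to(device=device)` addition fixes a device mismatch: the tokenizer returns CPU tensors, while the CLIP model used for embedding typically lives on the GPU. A sketch of the failure mode and fix, assuming the OpenAI `clip.tokenize` helper used by the training script:

```python
import torch
from clip import tokenize  # assumption: the same tokenize helper the script imports

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

tokens = tokenize(["a corgi wearing a top hat"], truncate=True)
print(tokens.device)                 # cpu - embedding with a GPU model would raise a device mismatch

tokens = tokens.to(device=device)    # the fix: move the tokens next to the model before clip.embed_text
```
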
@@ -229,8 +229,8 @@ def evaluate_trainer(trainer, dataloader, device, start_unet, end_unet, clip=Non
         metrics["KID_std"] = kid_std.item()
     if exists(LPIPS):
         # Convert from [0, 1] to [-1, 1]
-        renorm_real_images = real_images.mul(2).sub(1)
-        renorm_generated_images = generated_images.mul(2).sub(1)
+        renorm_real_images = real_images.mul(2).sub(1).clamp(-1,1)
+        renorm_generated_images = generated_images.mul(2).sub(1).clamp(-1,1)
         lpips = LearnedPerceptualImagePatchSimilarity(**LPIPS, dist_sync_fn=null_sync)
         lpips.to(device=device)
         lpips.update(renorm_real_images, renorm_generated_images)
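
The added `.clamp(-1,1)` guards the LPIPS metric: torchmetrics validates that its inputs lie in [-1, 1], and decoder outputs can overshoot [0, 1] slightly, which makes the plain `mul(2).sub(1)` rescale trip that check. A minimal sketch (the import path and `net_type` are assumptions about the torchmetrics setup, not taken from this diff):

```python
import torch
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity

# pretend decoder output that slightly overshoots [0, 1]
generated = torch.rand(4, 3, 64, 64) * 1.01
real = torch.rand(4, 3, 64, 64)

# rescale to [-1, 1] and clamp, as the patched evaluate_trainer now does
renorm_generated = generated.mul(2).sub(1).clamp(-1, 1)
renorm_real = real.mul(2).sub(1).clamp(-1, 1)

lpips = LearnedPerceptualImagePatchSimilarity(net_type='squeeze')
lpips.update(renorm_real, renorm_generated)
print(lpips.compute())
```
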
@@ -480,7 +480,7 @@ def train(
                 else:
                     # Then we need to pass the text instead
                     assert clip is not None
-                    tokenized_texts = tokenize(txt, truncate=True)
+                    tokenized_texts = tokenize(txt, truncate=True).to(device=inference_device)
                     assert tokenized_texts.shape[0] == len(img), f"The number of texts ({tokenized_texts.shape[0]}) should be the same as the number of images ({len(img)})"
                     text_embed, text_encodings = clip.embed_text(tokenized_texts)
                     forward_params['text_encodings'] = text_encodings