Make sure the OpenAI CLIP adapter outputs L2-normed embeddings

This commit is contained in:
Phil Wang
2022-05-06 10:12:03 -07:00
parent 14e63a3f67
commit 28e944f328
2 changed files with 3 additions and 3 deletions

View File

@@ -264,7 +264,7 @@ class OpenAIClipAdapter(BaseClipAdapter):
text_embed = self.clip.encode_text(text)
text_encodings = self.text_encodings
del self.text_encodings
return EmbeddedText(text_embed.float(), text_encodings.float(), text_mask)
return EmbeddedText(l2norm(text_embed.float()), text_encodings.float(), text_mask)
@torch.no_grad()
def embed_image(self, image):
@@ -272,7 +272,7 @@ class OpenAIClipAdapter(BaseClipAdapter):
image = resize_image_to(image, self.image_size)
image = self.clip_normalize(unnormalize_img(image))
image_embed = self.clip.encode_image(image)
return EmbeddedImage(image_embed.float(), None)
return EmbeddedImage(l2norm(image_embed.float()), None)
# classifier free guidance functions

View File

@@ -10,7 +10,7 @@ setup(
'dream = dalle2_pytorch.cli:dream'
],
},
version = '0.1.2',
version = '0.1.4',
license='MIT',
description = 'DALL-E 2',
author = 'Phil Wang',