fix for use with larger openai clip models by extracting dimension of last layernorm in clip

This commit is contained in:
Phil Wang
2022-09-29 09:09:41 -07:00
parent b39653cf96
commit c18c080128
2 changed files with 5 additions and 2 deletions


@@ -314,7 +314,10 @@ class OpenAIClipAdapter(BaseClipAdapter):
        self.eos_id = 49407 # for handling 0 being also '!'
        text_attention_final = self.find_layer('ln_final')
+       self.dim_latent_ = text_attention_final.weight.shape[0]
        self.handle = text_attention_final.register_forward_hook(self._hook)
        self.clip_normalize = preprocess.transforms[-1]
        self.cleared = False
@@ -333,7 +336,7 @@ class OpenAIClipAdapter(BaseClipAdapter):
    @property
    def dim_latent(self):
-       return 512
+       return self.dim_latent_

    @property
    def image_size(self):


@@ -1 +1 @@
-__version__ = '1.10.6'
+__version__ = '1.10.7'
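
For context, a minimal sketch (outside this repo, assuming OpenAI's `clip` package is installed and that weights can be downloaded) of why the latent dimension is now read from `ln_final` instead of being hardcoded to 512: larger CLIP variants use a wider text transformer, and the model names below are only illustrative examples.

# sketch: inspect the last layernorm of the text transformer for two CLIP sizes
import clip

for name in ("ViT-B/32", "ViT-L/14"):
    model, preprocess = clip.load(name, device = "cpu")
    # ln_final is the final layernorm of CLIP's text transformer;
    # its weight length equals the text latent dimension
    print(name, model.ln_final.weight.shape[0])  # expected: 512 for ViT-B/32, 768 for ViT-L/14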