Add data flexibility to decoder trainer (#165)

* Added the ability to train decoder with text embeddings

* Added the ability to train using on-the-fly embeddings generated with CLIP

* Clip now generates embeddings for whatever is not precomputed
This commit is contained in:
Aidan Dempster
2022-06-25 22:05:20 -04:00
committed by GitHub
parent c453f468b1
commit f5760bdb92
4 changed files with 228 additions and 59 deletions

View File

@@ -578,6 +578,18 @@ class DecoderTrainer(nn.Module):
return output
@torch.no_grad()
@cast_torch_tensor
@prior_sample_in_chunks
def embed_text(self, *args, **kwargs):
    """Embed text by delegating to the decoder's CLIP text encoder.

    The decoder is unwrapped from the accelerator's distributed wrapper
    first so the underlying module's ``clip`` attribute is reachable.
    Runs with gradients disabled; inputs are cast to tensors and chunked
    by the decorators. All arguments are forwarded unchanged.
    """
    bare_decoder = self.accelerator.unwrap_model(self.decoder)
    return bare_decoder.clip.embed_text(*args, **kwargs)
@torch.no_grad()
@cast_torch_tensor
@prior_sample_in_chunks
def embed_image(self, *args, **kwargs):
    """Embed images by delegating to the decoder's CLIP image encoder.

    Mirrors ``embed_text``: the decoder is unwrapped from the accelerator
    wrapper to reach its ``clip`` attribute, gradients are disabled, and
    the decorators handle tensor casting and chunked evaluation. All
    arguments are forwarded unchanged.
    """
    bare_decoder = self.accelerator.unwrap_model(self.decoder)
    return bare_decoder.clip.embed_image(*args, **kwargs)
@cast_torch_tensor
def forward(
self,