Mirror of https://github.com/lucidrains/DALLE2-pytorch.git (synced 2026-02-14 08:14:21 +01:00)
Compare commits
4 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 924455d97d | |
| | 6021945fc8 | |
| | 6f76652d11 | |
| | 3dda2570ed | |
```diff
@@ -508,7 +508,7 @@ To use a pretrained OpenAI CLIP, simply import `OpenAIClipAdapter` and pass it i
 import torch
 from dalle2_pytorch import DALLE2, DiffusionPriorNetwork, DiffusionPrior, Unet, Decoder, OpenAIClipAdapter
 
-# openai pretrained clip - defaults to ViT/B-32
+# openai pretrained clip - defaults to ViT-B/32
 
 clip = OpenAIClipAdapter()
 
```
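For context, this hunk only fixes the model name in a README comment (the adapter wraps OpenAI's ViT-B/32 checkpoint by default). The sketch below follows the README snippet the hunk touches: the adapter is constructed and passed straight into the diffusion prior. The `DiffusionPriorNetwork`/`DiffusionPrior` hyperparameters are illustrative values, not part of this diff.

```python
import torch
from dalle2_pytorch import DiffusionPriorNetwork, DiffusionPrior, OpenAIClipAdapter

# openai pretrained clip - defaults to ViT-B/32
clip = OpenAIClipAdapter()

# a small prior network; dimensions here are illustrative
prior_network = DiffusionPriorNetwork(
    dim = 512,
    depth = 6,
    dim_head = 64,
    heads = 8
)

# the pretrained CLIP adapter is passed directly to the diffusion prior
diffusion_prior = DiffusionPrior(
    net = prior_network,
    clip = clip,
    timesteps = 100,
    cond_drop_prob = 0.2
)

# mock text tokens and images, shapes as in the README examples
text = torch.randint(0, 49408, (4, 256))
images = torch.randn(4, 3, 256, 256)

loss = diffusion_prior(text, images)
loss.backward()
```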
```diff
@@ -831,7 +831,7 @@ class DiffusionPrior(BaseGaussianDiffusion):
         image_channels = 3,
         timesteps = 1000,
         cond_drop_prob = 0.,
-        loss_type = "l1",
+        loss_type = "l2",
         predict_x_start = True,
         beta_schedule = "cosine",
         condition_on_text_encodings = True, # the paper suggests this is needed, but you can turn it off for your CLIP preprocessed text embed -> image embed training
```
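The only change here is the default reconstruction loss moving from L1 to L2. A minimal sketch of how such a `loss_type` string is typically resolved; the helper name below is hypothetical, the actual dispatch presumably lives in the shared Gaussian diffusion base class:

```python
import torch.nn.functional as F

def resolve_loss_fn(loss_type: str):
    # hypothetical helper illustrating the l1 -> l2 distinction the new default prefers
    if loss_type == 'l1':
        return F.l1_loss   # mean absolute error
    elif loss_type == 'l2':
        return F.mse_loss  # mean squared error, the new default
    raise ValueError(f'unknown loss_type: {loss_type}')
```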
```diff
@@ -1492,11 +1492,12 @@ class Unet(nn.Module):
 
         if self.cond_on_image_embeds:
             image_tokens = self.image_to_cond(image_embed)
+            null_image_embed = self.null_image_embed.to(image_tokens.dtype) # for some reason pytorch AMP not working
 
             image_tokens = torch.where(
                 image_keep_mask,
                 image_tokens,
-                self.null_image_embed
+                null_image_embed
             )
 
         # take care of text encodings (optional)
```
```diff
@@ -1520,10 +1521,12 @@ class Unet(nn.Module):
             text_mask = rearrange(text_mask, 'b n -> b n 1')
             text_keep_mask = text_mask & text_keep_mask
 
+            null_text_embed = self.null_text_embed.to(text_tokens.dtype) # for some reason pytorch AMP not working
+
             text_tokens = torch.where(
                 text_keep_mask,
                 text_tokens,
-                self.null_text_embed
+                null_text_embed
             )
 
         # main conditioning tokens (c)
```
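Both Unet hunks above apply the same fix: under mixed precision the conditioning tokens come out of a projection in float16, while the registered null embeddings remain float32, and `torch.where` then trips over the dtype mismatch (hence the "pytorch AMP not working" comment). Casting the null embedding to the tokens' dtype sidesteps that. A standalone reproduction of the pattern, with all tensor shapes invented for illustration:

```python
import torch

# tokens as they would come out of a projection under autocast (float16)
image_tokens = torch.randn(2, 4, 512).half()

# the registered null embedding stays float32 unless cast explicitly
null_image_embed = torch.zeros(1, 4, 512)

# per-sample keep mask, broadcast over token and feature dims
image_keep_mask = torch.rand(2, 1, 1) > 0.1

image_tokens = torch.where(
    image_keep_mask,
    image_tokens,
    null_image_embed.to(image_tokens.dtype)  # the cast this commit adds
)

assert image_tokens.dtype == torch.float16
```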
```diff
@@ -1611,7 +1614,7 @@ class Decoder(BaseGaussianDiffusion):
         timesteps = 1000,
         image_cond_drop_prob = 0.1,
         text_cond_drop_prob = 0.5,
-        loss_type = 'l1',
+        loss_type = 'l2',
         beta_schedule = 'cosine',
         predict_x_start = False,
         predict_x_start_for_latent_diffusion = False,
```
```diff
@@ -105,6 +105,10 @@ class EMA(nn.Module):
         self.register_buffer('initted', torch.Tensor([False]))
         self.register_buffer('step', torch.tensor([0.]))
 
+    def restore_ema_model_device(self):
+        device = self.initted.device
+        self.ema_model.to(device)
+
     def update(self):
         self.step += 1
 
```
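The new `restore_ema_model_device` helper uses the device of the registered `initted` buffer as the source of truth for where the wrapper currently lives, then moves the EMA copy of the model back there. A trimmed sketch of the surrounding class, with the constructor shape assumed rather than taken from this diff:

```python
import copy
import torch
import torch.nn as nn

class EMA(nn.Module):
    # trimmed sketch: only the pieces relevant to restore_ema_model_device
    def __init__(self, model):
        super().__init__()
        self.online_model = model
        self.ema_model = copy.deepcopy(model)  # the averaged copy may be moved off-device elsewhere
        self.register_buffer('initted', torch.Tensor([False]))
        self.register_buffer('step', torch.tensor([0.]))

    def restore_ema_model_device(self):
        # buffers travel with the wrapper on .to(device), so the buffer's device
        # tells us where the EMA copy should be brought back to
        device = self.initted.device
        self.ema_model.to(device)
```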
```diff
@@ -305,6 +309,11 @@ class DecoderTrainer(nn.Module):
 
         if self.use_ema:
             self.decoder.unets = trainable_unets # restore original training unets
 
+            # cast the ema_model unets back to original device
+            for ema in self.ema_unets:
+                ema.restore_ema_model_device()
+
         return output
 
     def forward(
```