@@ -782,17 +782,13 @@ class CausalTransformer(nn.Module):
         self.norm = LayerNorm(dim) if norm_out else nn.Identity() # unclear in the paper whether they projected after the classic layer norm for the final denoised image embedding, or just had the transformer output it directly; offering both options
         self.project_out = nn.Linear(dim, dim, bias = False) if final_proj else nn.Identity()
 
-    def forward(
-        self,
-        x,
-        mask = None # we need a mask here due to the variable length of the text encodings - also offer the dalle1 strategy of learned padding token embeddings
-    ):
+    def forward(self, x):
         n, device = x.shape[1], x.device
 
         attn_bias = self.rel_pos_bias(n, n + 1, device = device)
 
         for attn, ff in self.layers:
-            x = attn(x, mask = mask, attn_bias = attn_bias) + x
+            x = attn(x, attn_bias = attn_bias) + x
             x = ff(x) + x
 
         out = self.norm(x)
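
The comment on `self.norm` flags a genuine ambiguity in the paper: it is unclear whether the final denoised image embedding passes through one more linear projection after the closing layer norm, or comes straight out of the transformer. Below is a minimal sketch of the two configurable output heads this diff keeps (`norm_out` and `final_proj` mirror the flags above; `nn.LayerNorm` stands in for the repo's own `LayerNorm`):

```python
import torch
import torch.nn as nn

class TransformerOutputHead(nn.Module):
    # a minimal sketch of the two output options kept configurable above:
    # norm_out   - apply a final layer norm before returning
    # final_proj - additionally project with a bias-free linear layer
    def __init__(self, dim, norm_out = True, final_proj = True):
        super().__init__()
        self.norm = nn.LayerNorm(dim) if norm_out else nn.Identity()
        self.project_out = nn.Linear(dim, dim, bias = False) if final_proj else nn.Identity()

    def forward(self, x):
        return self.project_out(self.norm(x))

head = TransformerOutputHead(dim = 512)
print(head(torch.randn(2, 77, 512)).shape)  # torch.Size([2, 77, 512])
```

With both flags off, the head reduces to `nn.Identity()`, i.e. the transformer output is returned as is.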
@@ -806,6 +802,7 @@ class DiffusionPriorNetwork(nn.Module):
         num_time_embeds = 1,
         num_image_embeds = 1,
         num_text_embeds = 1,
+        max_text_len = 256,
         **kwargs
     ):
         super().__init__()
@@ -831,6 +828,11 @@ class DiffusionPriorNetwork(nn.Module):
         self.learned_query = nn.Parameter(torch.randn(dim))
         self.causal_transformer = CausalTransformer(dim = dim, **kwargs)
 
+        # dalle1 learned padding strategy
+
+        self.max_text_len = max_text_len
+        self.null_text_embed = nn.Parameter(torch.randn(1, max_text_len, dim))
+
     def forward_with_cond_scale(
         self,
         *args,
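
This is the heart of the change: rather than passing an attention mask into the causal transformer, the dalle1 strategy replaces every padded position in the text encodings with a learned embedding unique to that position, so attention can safely run unmasked. A hedged, self-contained sketch of the idea (`null_text_embed` and `max_text_len` mirror the parameters above; the encoder output shapes are made up for illustration):

```python
import torch
import torch.nn.functional as F
from einops import rearrange

max_text_len, dim = 256, 512
null_text_embed = torch.randn(1, max_text_len, dim)   # an nn.Parameter in the diff

text_encodings = torch.randn(2, 100, dim)
text_encodings[0, 60:] = 0.                           # simulate zero padding from the text encoder

mask = torch.any(text_encodings != 0., dim = -1)      # True wherever a real token lives
remainder = max_text_len - text_encodings.shape[-2]
text_encodings = F.pad(text_encodings, (0, 0, 0, remainder), value = 0.)
mask = F.pad(mask, (0, remainder), value = False)

# padded positions receive the learned, position-specific null embedding
text_encodings = torch.where(rearrange(mask, 'b n -> b n 1'), text_encodings, null_text_embed)
print(text_encodings.shape)                           # torch.Size([2, 256, 512])
```

Because each padding slot gets its own vector, the transformer can still distinguish positions, which is what lets the mask argument disappear from `CausalTransformer.forward` above.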
@@ -852,7 +854,6 @@ class DiffusionPriorNetwork(nn.Module):
         *,
         text_embed,
         text_encodings = None,
-        mask = None,
         cond_drop_prob = 0.
     ):
         batch, dim, device, dtype = *image_embed.shape, image_embed.device, image_embed.dtype
@@ -870,9 +871,29 @@ class DiffusionPriorNetwork(nn.Module):
 
         if not exists(text_encodings):
             text_encodings = torch.empty((batch, 0, dim), device = device, dtype = dtype)
 
-        if not exists(mask):
-            mask = torch.any(text_encodings != 0., dim = -1)
+        mask = torch.any(text_encodings != 0., dim = -1)
+
+        # replace any padding in the text encodings with learned padding tokens unique across position
+
+        text_encodings = text_encodings[:, :self.max_text_len]
+        mask = mask[:, :self.max_text_len]
+
+        text_len = text_encodings.shape[-2]
+        remainder = self.max_text_len - text_len
+
+        if remainder > 0:
+            text_encodings = F.pad(text_encodings, (0, 0, 0, remainder), value = 0.)
+            mask = F.pad(mask, (0, remainder), value = 0.)
+
+        null_text_embeds = self.null_text_embed.to(text_encodings.dtype)
+
+        text_encodings = torch.where(
+            rearrange(mask, 'b n -> b n 1'),
+            text_encodings,
+            null_text_embeds
+        )
+
         # classifier free guidance
 
         keep_mask = prob_mask_like((batch,), 1 - cond_drop_prob, device = device)
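
For classifier-free guidance, conditioning is randomly dropped during training: `keep_mask` is True for batch elements that keep their text conditioning, and the rest are swapped for learned null embeddings. A sketch of a `prob_mask_like` helper in the style this codebase uses (the repo's actual helper may handle the prob = 0 and prob = 1 edge cases explicitly):

```python
import torch

def prob_mask_like(shape, prob, device):
    # boolean mask that is True with probability `prob`, per element
    return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob

keep_mask = prob_mask_like((4,), 1 - 0.2, device = torch.device('cpu'))
print(keep_mask)   # e.g. tensor([ True,  True, False,  True])
```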
@@ -905,7 +926,7 @@ class DiffusionPriorNetwork(nn.Module):
 
         # attend
 
-        tokens = self.causal_transformer(tokens, mask = mask)
+        tokens = self.causal_transformer(tokens)
 
         # get learned query, which should predict the image embedding (per DDPM timestep)
 
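
The learned query is the token whose output the network reads off as its prediction. A rough sketch of the mechanics, with made-up shapes (the actual token packing in this file concatenates the text encodings and the text, time, and image embeddings before the query):

```python
import torch
from einops import repeat

batch, dim = 2, 512
tokens = torch.randn(batch, 260, dim)             # packed conditioning tokens (illustrative)
learned_query = torch.randn(dim)                  # an nn.Parameter in the diff

queries = repeat(learned_query, 'd -> b 1 d', b = batch)
tokens = torch.cat((tokens, queries), dim = -2)   # the query sits last, attending to all conditioning

# after the causal transformer, the last position carries the predicted image embedding
# pred_image_embed = causal_transformer(tokens)[..., -1, :]
```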
@@ -1812,6 +1833,7 @@ class Unet(nn.Module):
         text_tokens = None
 
         if exists(text_encodings) and self.cond_on_text_encodings:
             assert text_encodings.shape[0] == batch_size, f'the text encodings being passed into the unet do not have the proper batch size - text encoding shape {text_encodings.shape} - required batch size is {batch_size}'
             assert self.text_embed_dim == text_encodings.shape[-1], f'the text encodings you are passing in have a dimension of {text_encodings.shape[-1]}, but the unet was created with text_embed_dim of {self.text_embed_dim}'
 
+            text_mask = torch.any(text_encodings != 0., dim = -1)
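
One caveat worth noting about deriving the mask this way: `torch.any(text_encodings != 0., dim = -1)` treats any all-zero row as padding, so a (vanishingly unlikely) genuine all-zero encoding would be masked too. A tiny demonstration of the semantics:

```python
import torch

text_encodings = torch.randn(1, 4, 8)
text_encodings[0, 2:] = 0.   # positions 2 and 3 are zero padding

text_mask = torch.any(text_encodings != 0., dim = -1)
print(text_mask)             # tensor([[ True,  True, False, False]])
```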