mirror of
https://github.com/lucidrains/DALLE2-pytorch.git
synced 2025-12-19 17:54:20 +01:00
normalize conditioning tokens outside of cross attention blocks
This commit is contained in:
@@ -1017,6 +1017,7 @@ Once built, images will be saved to the same directory the command is invoked
|
|||||||
- [ ] offer save / load methods on the trainer classes to automatically take care of state dicts for scalers / optimizers / saving versions and checking for breaking changes
|
- [ ] offer save / load methods on the trainer classes to automatically take care of state dicts for scalers / optimizers / saving versions and checking for breaking changes
|
||||||
- [ ] bring in skip-layer excitations (from lightweight gan paper) to see if it helps for either decoder of unet or vqgan-vae training
|
- [ ] bring in skip-layer excitations (from lightweight gan paper) to see if it helps for either decoder of unet or vqgan-vae training
|
||||||
- [ ] decoder needs one day worth of refactor for tech debt
|
- [ ] decoder needs one day worth of refactor for tech debt
|
||||||
|
- [ ] allow for unet to be able to condition non-cross attention style as well
|
||||||
|
|
||||||
## Citations
|
## Citations
|
||||||
|
|
||||||
|
|||||||
@@ -1163,6 +1163,7 @@ class CrossAttention(nn.Module):
|
|||||||
dim_head = 64,
|
dim_head = 64,
|
||||||
heads = 8,
|
heads = 8,
|
||||||
dropout = 0.,
|
dropout = 0.,
|
||||||
|
norm_context = False
|
||||||
):
|
):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.scale = dim_head ** -0.5
|
self.scale = dim_head ** -0.5
|
||||||
@@ -1172,7 +1173,7 @@ class CrossAttention(nn.Module):
|
|||||||
context_dim = default(context_dim, dim)
|
context_dim = default(context_dim, dim)
|
||||||
|
|
||||||
self.norm = LayerNorm(dim)
|
self.norm = LayerNorm(dim)
|
||||||
self.norm_context = LayerNorm(context_dim)
|
self.norm_context = LayerNorm(context_dim) if norm_context else nn.Identity()
|
||||||
self.dropout = nn.Dropout(dropout)
|
self.dropout = nn.Dropout(dropout)
|
||||||
|
|
||||||
self.null_kv = nn.Parameter(torch.randn(2, dim_head))
|
self.null_kv = nn.Parameter(torch.randn(2, dim_head))
|
||||||
@@ -1378,6 +1379,9 @@ class Unet(nn.Module):
|
|||||||
Rearrange('b (n d) -> b n d', n = num_image_tokens)
|
Rearrange('b (n d) -> b n d', n = num_image_tokens)
|
||||||
) if image_embed_dim != cond_dim else nn.Identity()
|
) if image_embed_dim != cond_dim else nn.Identity()
|
||||||
|
|
||||||
|
self.norm_cond = nn.LayerNorm(cond_dim)
|
||||||
|
self.norm_mid_cond = nn.LayerNorm(cond_dim)
|
||||||
|
|
||||||
# text encoding conditioning (optional)
|
# text encoding conditioning (optional)
|
||||||
|
|
||||||
self.text_to_cond = None
|
self.text_to_cond = None
|
||||||
@@ -1593,6 +1597,11 @@ class Unet(nn.Module):
|
|||||||
|
|
||||||
mid_c = c if not exists(text_tokens) else torch.cat((c, text_tokens), dim = -2)
|
mid_c = c if not exists(text_tokens) else torch.cat((c, text_tokens), dim = -2)
|
||||||
|
|
||||||
|
# normalize conditioning tokens
|
||||||
|
|
||||||
|
c = self.norm_cond(c)
|
||||||
|
mid_c = self.norm_mid_cond(mid_c)
|
||||||
|
|
||||||
# go through the layers of the unet, down and up
|
# go through the layers of the unet, down and up
|
||||||
|
|
||||||
hiddens = []
|
hiddens = []
|
||||||
|
|||||||
Reference in New Issue
Block a user