bring in the skip connection scaling factor used by Imagen in their unets, and cite the original paper that used it

Phil Wang
2022-06-26 21:59:55 -07:00
parent b90364695d
commit 6a11b9678b
3 changed files with 19 additions and 2 deletions


@@ -1359,6 +1359,7 @@ class Unet(nn.Module):
         cross_embed_downsample = False,
         cross_embed_downsample_kernel_sizes = (2, 4),
         memory_efficient = False,
+        scale_skip_connection = False,
         **kwargs
     ):
         super().__init__()
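
As a usage note (not part of this commit): the new flag is just another keyword argument to the Unet constructor. A hypothetical call could look like the following; the argument set other than scale_skip_connection is assumed for illustration, with everything else left at its defaults.

    # illustrative only -- arguments other than scale_skip_connection are assumed
    unet = Unet(
        dim = 128,
        scale_skip_connection = True   # enables the Imagen-style 2 ** -0.5 skip scaling
    )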
@@ -1440,6 +1441,10 @@ class Unet(nn.Module):
         self.max_text_len = max_text_len
         self.null_text_embed = nn.Parameter(torch.randn(1, max_text_len, cond_dim))
+        # whether to scale skip connection, adopted in Imagen
+        self.skip_connect_scale = 1. if not scale_skip_connection else (2 ** -0.5)
         # attention related params
         attn_kwargs = dict(heads = attn_heads, dim_head = attn_dim_head)
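
For reference (an added note, not in the commit itself): the expression above resolves to one of two constants, so enabling the flag scales every skip connection by roughly 0.707, i.e. 1 / sqrt(2), the factor Imagen reports using. A tiny standalone check of the two branches:

    # assumed, for illustration: the two possible values of the scale
    for scale_skip_connection in (False, True):
        skip_connect_scale = 1. if not scale_skip_connection else (2 ** -0.5)
        print(scale_skip_connection, skip_connect_scale)
    # False 1.0
    # True 0.7071067811865476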
@@ -1687,7 +1692,9 @@ class Unet(nn.Module):
         x = self.mid_block2(x, mid_c, t)
         for init_block, sparse_attn, resnet_blocks, upsample in self.ups:
-            x = torch.cat((x, hiddens.pop()), dim = 1)
+            skip_connect = hiddens.pop() * self.skip_connect_scale
+            x = torch.cat((x, skip_connect), dim = 1)
             x = init_block(x, c, t)
             x = sparse_attn(x)
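
Taken together, the hunks above amount to the pattern below. This is a self-contained sketch, not code from this repository; the toy module name, layer choices, and tensor shapes are made up for illustration, and only the handling of the popped skip connection mirrors the diff.

    import torch
    from torch import nn

    class ToySkipUnet(nn.Module):
        # minimal sketch of the scaled-skip-connection pattern from this commit
        def __init__(self, dim = 16, scale_skip_connection = True):
            super().__init__()
            # same expression as in the diff: identity scale unless the flag is set
            self.skip_connect_scale = 1. if not scale_skip_connection else (2 ** -0.5)
            self.down = nn.Conv2d(dim, dim, 3, padding = 1)
            self.up = nn.Conv2d(dim * 2, dim, 3, padding = 1)  # consumes the concatenated channels

        def forward(self, x):
            hiddens = []

            x = self.down(x)
            hiddens.append(x)  # stash the feature map on the way down

            # on the way up, scale the popped hidden before concatenating,
            # mirroring the change in the upsampling loop
            skip_connect = hiddens.pop() * self.skip_connect_scale
            x = torch.cat((x, skip_connect), dim = 1)
            return self.up(x)

    x = torch.randn(1, 16, 8, 8)
    print(ToySkipUnet()(x).shape)  # torch.Size([1, 16, 8, 8])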