@@ -278,6 +278,7 @@ class OpenAIClipAdapter(BaseClipAdapter):
         import clip
         openai_clip, preprocess = clip.load(name)
         super().__init__(openai_clip)
+        self.eos_id = 49407 # for handling 0 being also '!'

         text_attention_final = self.find_layer('ln_final')
         self.handle = text_attention_final.register_forward_hook(self._hook)
@@ -316,7 +317,10 @@ class OpenAIClipAdapter(BaseClipAdapter):
     @torch.no_grad()
     def embed_text(self, text):
         text = text[..., :self.max_text_len]
-        text_mask = text != 0
+
+        is_eos_id = (text == self.eos_id)
+        text_mask_excluding_eos = is_eos_id.cumsum(dim = -1) == 0
+        text_mask = F.pad(text_mask_excluding_eos, (1, -1), value = True)
         assert not self.cleared

         text_embed = self.clip.encode_text(text)
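The new mask keeps every token position up to and including the first end-of-text token, instead of keying off token id 0 as the old `text_mask = text != 0` did (per the new comment, id 0 is also '!' in CLIP's vocabulary, hence the explicit `eos_id`). A minimal standalone sketch of the cumsum/pad trick; the word-token ids here are made up:

import torch
import torch.nn.functional as F

eos_id = 49407                                          # CLIP's <|endoftext|> id
text = torch.tensor([[49406, 320, 1125, 49407, 0, 0]])  # <sot>, two made-up word ids, <eot>, padding

is_eos_id = (text == eos_id)
mask_excluding_eos = is_eos_id.cumsum(dim = -1) == 0           # True strictly before the first <eot>
text_mask = F.pad(mask_excluding_eos, (1, -1), value = True)   # shift right by one, so <eot> itself stays True

print(text_mask)  # tensor([[ True,  True,  True,  True, False, False]])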
@@ -900,7 +904,7 @@ class DiffusionPriorNetwork(nn.Module):
         null_text_embeds = self.null_text_embed.to(text_encodings.dtype)

         text_encodings = torch.where(
-            rearrange(mask, 'b n -> b n 1'),
+            rearrange(mask, 'b n -> b n 1').clone(),
             text_encodings,
             null_text_embeds
         )
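Here the boolean mask decides, per token position, whether to keep the real text encoding or fall back to the learned null embedding; the `rearrange` to `b n 1` only adds a trailing axis so the mask broadcasts over the embedding dimension inside `torch.where`. A toy shape check (all sizes and values invented):

import torch
from einops import rearrange

b, n, d = 2, 4, 8
text_encodings = torch.randn(b, n, d)
null_text_embeds = torch.zeros(n, d)   # stand-in for the learned null text embedding
mask = torch.tensor([[True, True, False, False],
                     [True, False, False, False]])

out = torch.where(rearrange(mask, 'b n -> b n 1'), text_encodings, null_text_embeds)
print(out.shape)  # torch.Size([2, 4, 8])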
@@ -1251,6 +1255,14 @@ class DiffusionPrior(nn.Module):

 # decoder

+def NearestUpsample(dim, dim_out = None):
+    dim_out = default(dim_out, dim)
+
+    return nn.Sequential(
+        nn.Upsample(scale_factor = 2, mode = 'nearest'),
+        nn.Conv2d(dim, dim_out, 3, padding = 1)
+    )
+
 class PixelShuffleUpsample(nn.Module):
     """
     code shared by @MalumaDev at DALLE2-pytorch for addressing checkerboard artifacts
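The new `NearestUpsample` pairs a nearest-neighbor resize with a 3x3 convolution; like the pixel-shuffle variant below it, the usual motivation is avoiding the checkerboard artifacts that strided transposed convolutions can produce (Odena et al., "Deconvolution and Checkerboard Artifacts", Distill 2016), which is presumably also why the Unet's default `upsample_klass` is swapped in the hunk further down. A quick standalone shape check of the same two layers, with `default` resolved by hand and arbitrary channel counts:

import torch
from torch import nn

up = nn.Sequential(
    nn.Upsample(scale_factor = 2, mode = 'nearest'),
    nn.Conv2d(64, 32, 3, padding = 1)
)

x = torch.randn(1, 64, 16, 16)
print(up(x).shape)  # torch.Size([1, 32, 32, 32]) - spatial dims doubled, channels remapped by the conv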
@@ -1657,7 +1669,7 @@ class Unet(nn.Module):

         # upsample klass

-        upsample_klass = ConvTransposeUpsample if not pixel_shuffle_upsample else PixelShuffleUpsample
+        upsample_klass = NearestUpsample if not pixel_shuffle_upsample else PixelShuffleUpsample

         # give memory efficient unet an initial resnet block
@@ -1946,6 +1958,7 @@ class LowresConditioner(nn.Module):
         self,
         downsample_first = True,
         downsample_mode_nearest = False,
+        blur_prob = 0.5,
         blur_sigma = 0.6,
         blur_kernel_size = 3,
         input_image_range = None
@@ -1956,6 +1969,7 @@ class LowresConditioner(nn.Module):
         self.input_image_range = input_image_range

+        self.blur_prob = blur_prob
         self.blur_sigma = blur_sigma
         self.blur_kernel_size = blur_kernel_size
@@ -1968,20 +1982,27 @@ class LowresConditioner(nn.Module):
         blur_sigma = None,
         blur_kernel_size = None
     ):
-        if self.training and self.downsample_first and exists(downsample_image_size):
+        if self.downsample_first and exists(downsample_image_size):
             cond_fmap = resize_image_to(cond_fmap, downsample_image_size, clamp_range = self.input_image_range, nearest = self.downsample_mode_nearest)

         if self.training:
+            # blur is only applied 50% of the time
+            # section 3.1 in https://arxiv.org/abs/2106.15282
+
+            if random.random() < self.blur_prob:
+
                 # when training, blur the low resolution conditional image

                 blur_sigma = default(blur_sigma, self.blur_sigma)
                 blur_kernel_size = default(blur_kernel_size, self.blur_kernel_size)

                 # allow for drawing a random sigma between lo and hi float values

                 if isinstance(blur_sigma, tuple):
                     blur_sigma = tuple(map(float, blur_sigma))
                     blur_sigma = random.uniform(*blur_sigma)

                 # allow for drawing a random kernel size between lo and hi int values

                 if isinstance(blur_kernel_size, tuple):
                     blur_kernel_size = tuple(map(int, blur_kernel_size))
                     kernel_size_lo, kernel_size_hi = blur_kernel_size
@@ -1990,7 +2011,6 @@ class LowresConditioner(nn.Module):
                 cond_fmap = gaussian_blur2d(cond_fmap, cast_tuple(blur_kernel_size, 2), cast_tuple(blur_sigma, 2))

         cond_fmap = resize_image_to(cond_fmap, target_image_size, clamp_range = self.input_image_range)

         return cond_fmap
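Taken together, these `LowresConditioner` hunks make the low-resolution conditioning blur probabilistic: `blur_prob` (default 0.5) gates a Gaussian blur applied only during training, following section 3.1 of https://arxiv.org/abs/2106.15282, and the `Decoder` hunks below simply expose and forward the new knob. A self-contained sketch of the training-time behaviour calling kornia's `gaussian_blur2d` directly; the sigma range is invented for illustration, and the repo's exact kernel-size draw falls outside these hunks:

import random
import torch
from kornia.filters import gaussian_blur2d

blur_prob = 0.5
blur_sigma = (0.4, 0.6)    # hypothetical lo / hi range, drawn uniformly per call
blur_kernel_size = 3

cond_fmap = torch.rand(1, 3, 64, 64)

if random.random() < blur_prob:
    sigma = random.uniform(*blur_sigma)
    cond_fmap = gaussian_blur2d(cond_fmap, (blur_kernel_size, blur_kernel_size), (sigma, sigma))

print(cond_fmap.shape)  # torch.Size([1, 3, 64, 64]) - blurring keeps the spatial size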
@@ -2014,6 +2034,7 @@ class Decoder(nn.Module):
         random_crop_sizes = None,                  # whether to random crop the image at that stage in the cascade (super-resolving convolutions at the end may be able to generalize on smaller crops)
         lowres_downsample_first = True,            # cascading ddpm - resizes to lower resolution, then to next conditional resolution + blur
         lowres_downsample_mode_nearest = False,    # cascading ddpm - whether to use nearest mode downsampling for lower resolution
+        blur_prob = 0.5,                           # cascading ddpm - when training, the gaussian blur is only applied 50% of the time
         blur_sigma = 0.6,                          # cascading ddpm - blur sigma
         blur_kernel_size = 3,                      # cascading ddpm - blur kernel size
         clip_denoised = True,
@@ -2162,9 +2183,12 @@ class Decoder(nn.Module):
         lowres_conditions = tuple(map(lambda t: t.lowres_cond, self.unets))
         assert lowres_conditions == (False, *((True,) * (len(self.unets) - 1))), 'the first unet must be unconditioned (by low resolution image), and the rest of the unets must have `lowres_cond` set to True'

+        self.lowres_downsample_mode_nearest = lowres_downsample_mode_nearest
+
         self.to_lowres_cond = LowresConditioner(
             downsample_first = lowres_downsample_first,
             downsample_mode_nearest = lowres_downsample_mode_nearest,
+            blur_prob = blur_prob,
             blur_sigma = blur_sigma,
             blur_kernel_size = blur_kernel_size,
             input_image_range = self.input_image_range
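From the user's side, `blur_prob` is now just another `Decoder` keyword argument that gets forwarded into `LowresConditioner`. A hypothetical construction in the style of the repo's README; `unet1`, `unet2` and `clip` are assumed to exist already, and every non-blur argument here is illustrative rather than copied from the diff:

decoder = Decoder(
    clip = clip,
    unet = (unet1, unet2),
    image_sizes = (64, 256),
    timesteps = 1000,
    blur_prob = 0.,         # e.g. turn the low-res blur augmentation off entirely
    blur_sigma = 0.6,
    blur_kernel_size = 3
)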
@@ -2328,6 +2352,9 @@ class Decoder(nn.Module):

         img = torch.randn(shape, device = device)

+        if not is_latent_diffusion:
+            lowres_cond_img = maybe(self.normalize_img)(lowres_cond_img)
+
         for time, time_next in tqdm(time_pairs, desc = 'sampling loop time step'):
             alpha = alphas[time]
             alpha_next = alphas[time_next]
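`maybe(self.normalize_img)` is a None-passthrough wrapper: the first unet in the cascade has no low-resolution conditioning image (see the `lowres_cond` assert above), so `lowres_cond_img` may be `None` and normalization has to skip it silently. Roughly the idea, though not necessarily the repo's exact helper:

def maybe(fn):
    def inner(x, *args, **kwargs):
        if x is None:
            return x
        return fn(x, *args, **kwargs)
    return inner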
@@ -2480,7 +2507,7 @@ class Decoder(nn.Module):
             shape = (batch_size, channel, image_size, image_size)

             if unet.lowres_cond:
-                lowres_cond_img = self.to_lowres_cond(img, target_image_size = image_size)
+                lowres_cond_img = resize_image_to(img, target_image_size = image_size, clamp_range = self.input_image_range, nearest = self.lowres_downsample_mode_nearest)

             is_latent_diffusion = isinstance(vae, VQGanVAE)
             image_size = vae.get_encoded_fmap_size(image_size)
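On the sampling path the conditioner is bypassed entirely: the previous stage's output is only resized (nearest-neighbor when `lowres_downsample_mode_nearest` is set) and clamped to the input range, so the training-only blur never touches inference. A rough stand-in for that resize using plain `F.interpolate` instead of the repo's `resize_image_to`:

import torch
import torch.nn.functional as F

def resize_for_next_stage(img, target_image_size, clamp_range = None, nearest = False):
    # hypothetical helper mirroring how resize_image_to is called above
    mode = 'nearest' if nearest else 'bilinear'
    kwargs = {} if nearest else {'align_corners': False}
    out = F.interpolate(img, size = target_image_size, mode = mode, **kwargs)
    if clamp_range is not None:
        out = out.clamp(*clamp_range)
    return out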