@@ -175,12 +175,34 @@ def save_diffusion_model(save_path, model, optimizer, scaler, config, image_embe
 # exponential moving average wrapper
 
 class EMA(nn.Module):
+    """
+    Implements exponential moving average shadowing for your model.
+
+    Utilizes an inverse decay schedule to manage longer term training runs.
+    By adjusting the power, you can control how fast EMA will ramp up to your specified beta.
+
+    @crowsonkb's notes on EMA Warmup:
+
+    If gamma=1 and power=1, implements a simple average. gamma=1, power=2/3 are
+    good values for models you plan to train for a million or more steps (reaches decay
+    factor 0.999 at 31.6K steps, 0.9999 at 1M steps), gamma=1, power=3/4 for models
+    you plan to train for less (reaches decay factor 0.999 at 10K steps, 0.9999 at
+    215.4k steps).
+
+    Args:
+        inv_gamma (float): Inverse multiplicative factor of EMA warmup. Default: 1.
+        power (float): Exponential factor of EMA warmup. Default: 1.
+        min_value (float): The minimum EMA decay rate. Default: 0.
+    """
     def __init__(
         self,
         model,
-        beta = 0.99,
-        update_after_step = 1000,
+        beta = 0.9999,
+        update_after_step = 10000,
         update_every = 10,
+        inv_gamma = 1.0,
+        power = 2/3,
+        min_value = 0.0,
     ):
         super().__init__()
         self.beta = beta
@@ -188,7 +210,11 @@ class EMA(nn.Module):
         self.ema_model = copy.deepcopy(model)
 
         self.update_every = update_every
-        self.update_after_step = update_after_step // update_every # only start EMA after this step number, starting at 0
+        self.update_after_step = update_after_step
+
+        self.inv_gamma = inv_gamma
+        self.power = power
+        self.min_value = min_value
 
         self.register_buffer('initted', torch.Tensor([False]))
         self.register_buffer('step', torch.tensor([0]))
@@ -198,37 +224,44 @@ class EMA(nn.Module):
         self.ema_model.to(device)
 
     def copy_params_from_model_to_ema(self):
-        self.ema_model.state_dict(self.online_model.state_dict())
+        for ma_param, current_param in zip(list(self.ema_model.parameters()), list(self.online_model.parameters())):
+            ma_param.data.copy_(current_param.data)
+
+    def get_current_decay(self):
+        epoch = max(0, self.step.item() - self.update_after_step - 1)
+        value = 1 - (1 + epoch / self.inv_gamma) ** - self.power
+        return 0. if epoch < 0 else min(self.beta, max(self.min_value, value))
 
     def update(self):
+        step = self.step.item()
         self.step += 1
-        if (self.step % self.update_every) != 0:
+
+        if (step % self.update_every) != 0:
             return
 
-        if self.step <= self.update_after_step:
+        if step <= self.update_after_step:
             self.copy_params_from_model_to_ema()
             return
 
-        if not self.initted:
+        if not self.initted.item():
             self.copy_params_from_model_to_ema()
             self.initted.data.copy_(torch.Tensor([True]))
 
         self.update_moving_average(self.ema_model, self.online_model)
 
     @torch.no_grad()
     def update_moving_average(self, ma_model, current_model):
-        def calculate_ema(beta, old, new):
-            if not exists(old):
-                return new
-            return old * beta + (1 - beta) * new
+        current_decay = self.get_current_decay()
 
-        for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
-            old_weight, up_weight = ma_params.data, current_params.data
-            ma_params.data = calculate_ema(self.beta, old_weight, up_weight)
+        for current_params, ma_params in zip(list(current_model.parameters()), list(ma_model.parameters())):
+            difference = ma_params.data - current_params.data
+            difference.mul_(1.0 - current_decay)
+            ma_params.sub_(difference)
 
-        for current_buffer, ma_buffer in zip(current_model.buffers(), ma_model.buffers()):
-            new_buffer_value = calculate_ema(self.beta, ma_buffer, current_buffer)
-            ma_buffer.copy_(new_buffer_value)
+        for current_buffer, ma_buffer in zip(list(current_model.buffers()), list(ma_model.buffers())):
+            difference = ma_buffer - current_buffer
+            difference.mul_(1.0 - current_decay)
+            ma_buffer.sub_(difference)
 
     def __call__(self, *args, **kwargs):
         return self.ema_model(*args, **kwargs)
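
The decay-factor milestones quoted in the new docstring follow directly from the schedule that get_current_decay implements: decay(t) = 1 - (1 + t / inv_gamma) ** -power, clamped to [min_value, beta]. A minimal standalone sketch (not part of the diff) that reproduces those numbers:

# Sketch only: reproduces the docstring's warmup milestones.
def warmup_decay(step, inv_gamma = 1.0, power = 2/3, min_value = 0.0, beta = 0.9999):
    value = 1 - (1 + step / inv_gamma) ** -power
    return min(beta, max(min_value, value))

for power, milestones in ((2/3, (31_600, 1_000_000)), (3/4, (10_000, 215_400))):
    for step in milestones:
        print(f"power = {power:.2f}  step = {step:>9,}  decay = {warmup_decay(step, power = power):.5f}")

# power = 0.67  step =    31,600  decay ≈ 0.99900
# power = 0.67  step = 1,000,000  decay ≈ 0.99990 (clamped at beta)
# power = 0.75  step =    10,000  decay ≈ 0.99900
# power = 0.75  step =   215,400  decay ≈ 0.99990 (clamped at beta)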
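
For context, a hypothetical usage sketch of the updated wrapper (the toy network, data, and loss below are placeholders, not from the diff): parameters are copied verbatim until update_after_step, the shadow is then updated every update_every steps, and the warmup schedule ramps the decay toward beta.

# Hypothetical usage; assumes the EMA class from the patched module is in scope.
import torch
from torch import nn

model = nn.Sequential(nn.Linear(16, 64), nn.ReLU(), nn.Linear(64, 16))  # placeholder network
ema = EMA(model, beta = 0.9999, update_after_step = 10000, update_every = 10)
opt = torch.optim.Adam(model.parameters(), lr = 3e-4)

for _ in range(100):
    x = torch.randn(8, 16)
    loss = (model(x) - x).pow(2).mean()  # toy reconstruction objective
    opt.zero_grad()
    loss.backward()
    opt.step()
    ema.update()  # no-op on 9 of every 10 steps; pure copy until update_after_step

with torch.no_grad():
    out = ema(torch.randn(8, 16))  # __call__ forwards to the shadow ema_model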