Mirror of https://github.com/lucidrains/DALLE2-pytorch.git (synced 2026-02-14 11:54:22 +01:00)

Compare commits

16 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 680dfc4d93 | |
| | b6fecae91a | |
| | dab2f74650 | |
| | 1e173f4c66 | |
| | 410a6144e1 | |
| | c6c3882dc1 | |
| | 512b52bd78 | |
| | 147c156c8a | |
| | 40843bcc21 | |
| | 00e07b7d61 | |
| | 0069857cf8 | |
| | 580274be79 | |
| | 848e8a480a | |
| | cc58f75474 | |
| | 3b2cf7b0bc | |
| | 984d62a373 | |
@@ -9,7 +9,7 @@
         "dim_mults": [1, 2, 4, 8],
         "attn_dim_head": 16,
         "attn_heads": 4,
         "self_attn": [false, true, true, true]
       }
     ],
     "clip": {
@@ -12,10 +12,8 @@ from torch.utils.checkpoint import checkpoint
 from torch import nn, einsum
 import torchvision.transforms as T
 
-from einops import rearrange, repeat, reduce
+from einops import rearrange, repeat, reduce, pack, unpack
 from einops.layers.torch import Rearrange
-from einops_exts import rearrange_many, repeat_many, check_shape
-from einops_exts.torch import EinopsToAndFrom
 
 from kornia.filters import gaussian_blur2d
 import kornia.augmentation as K
@@ -669,6 +667,23 @@ class NoiseScheduler(nn.Module):
             return loss
         return loss * extract(self.p2_loss_weight, times, loss.shape)
 
+# rearrange image to sequence
+
+class RearrangeToSequence(nn.Module):
+    def __init__(self, fn):
+        super().__init__()
+        self.fn = fn
+
+    def forward(self, x):
+        x = rearrange(x, 'b c ... -> b ... c')
+        x, ps = pack([x], 'b * c')
+
+        x = self.fn(x)
+
+        x, = unpack(x, ps, 'b * c')
+        x = rearrange(x, 'b ... c -> b c ...')
+        return x
+
 # diffusion prior
 
 class LayerNorm(nn.Module):
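The new RearrangeToSequence module above replaces einops_exts' EinopsToAndFrom wrapper: it flattens a feature map's spatial axes into one sequence axis with einops pack, applies the wrapped function, then restores the original shape with unpack. A minimal sketch of the same round trip (an identity step stands in for attention, shapes purely illustrative):

```python
import torch
from einops import rearrange, pack, unpack

x = torch.randn(2, 64, 16, 16)          # (batch, channels, height, width)

x = rearrange(x, 'b c ... -> b ... c')  # channels last: (2, 16, 16, 64)
x, ps = pack([x], 'b * c')              # collapse spatial dims: (2, 256, 64); ps remembers (16, 16)

x = x * 1.0                             # the wrapped fn (e.g. attention) would run here

x, = unpack(x, ps, 'b * c')             # back to (2, 16, 16, 64)
x = rearrange(x, 'b ... c -> b c ...')  # back to (2, 64, 16, 16)
assert x.shape == (2, 64, 16, 16)
```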
@@ -867,7 +882,7 @@ class Attention(nn.Module):
 
         # add null key / value for classifier free guidance in prior net
 
-        nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b 1 d', b = b)
+        nk, nv = map(lambda t: repeat(t, 'd -> b 1 d', b = b), self.null_kv.unbind(dim = -2))
         k = torch.cat((nk, k), dim = -2)
         v = torch.cat((nv, v), dim = -2)
 
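This hunk, and the similar ones further down, drop the einops_exts helpers (repeat_many, rearrange_many) in favor of mapping plain einops calls over a tuple; the two spellings are equivalent. A small illustration with a stand-in for self.null_kv (dimensions made up):

```python
import torch
from einops import repeat

b = 4
null_kv = torch.randn(2, 32)    # stand-in for self.null_kv: one null key and one null value of dim 32

# previously: nk, nv = repeat_many(null_kv.unbind(dim = -2), 'd -> b 1 d', b = b)
nk, nv = map(lambda t: repeat(t, 'd -> b 1 d', b = b), null_kv.unbind(dim = -2))

assert nk.shape == (b, 1, 32) and nv.shape == (b, 1, 32)
```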
@@ -1124,7 +1139,7 @@ class DiffusionPriorNetwork(nn.Module):
         learned_queries = repeat(self.learned_query, 'd -> b 1 d', b = batch)
 
         if self.self_cond:
-            learned_queries = torch.cat((image_embed, self_cond), dim = -2)
+            learned_queries = torch.cat((self_cond, learned_queries), dim = -2)
 
         tokens = torch.cat((
             text_encodings,
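The changed line fixes the self-conditioning path of the prior network: the old code replaced the repeated learned query with a concatenation of image_embed and self_cond, so the learned query token itself was dropped; the new code prepends the self-conditioning token and keeps the queries. A shape-only sketch (batch size and embedding dim are illustrative):

```python
import torch

b, d = 4, 512
self_cond = torch.zeros(b, 1, d)          # previous x_start prediction (zeros when absent)
learned_queries = torch.randn(b, 1, d)    # the repeated learned query token

learned_queries = torch.cat((self_cond, learned_queries), dim = -2)
assert learned_queries.shape == (b, 2, d)  # both tokens survive and get appended to the token sequence
```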
@@ -1334,10 +1349,7 @@ class DiffusionPrior(nn.Module):
 
             # predict noise
 
-            if self.predict_x_start or self.predict_v:
-                pred_noise = self.noise_scheduler.predict_noise_from_start(image_embed, t = time_cond, x0 = x_start)
-            else:
-                pred_noise = pred
+            pred_noise = self.noise_scheduler.predict_noise_from_start(image_embed, t = time_cond, x0 = x_start)
 
             if time_next < 0:
                 image_embed = x_start

@@ -1632,14 +1644,10 @@ class ResnetBlock(nn.Module):
         self.cross_attn = None
 
         if exists(cond_dim):
-            self.cross_attn = EinopsToAndFrom(
-                'b c h w',
-                'b (h w) c',
-                CrossAttention(
-                    dim = dim_out,
-                    context_dim = cond_dim,
-                    cosine_sim = cosine_sim_cross_attn
-                )
+            self.cross_attn = CrossAttention(
+                dim = dim_out,
+                context_dim = cond_dim,
+                cosine_sim = cosine_sim_cross_attn
             )
 
         self.block1 = Block(dim, dim_out, groups = groups, weight_standardization = weight_standardization)
@@ -1658,8 +1666,15 @@ class ResnetBlock(nn.Module):
 
         if exists(self.cross_attn):
             assert exists(cond)
+
+            h = rearrange(h, 'b c ... -> b ... c')
+            h, ps = pack([h], 'b * c')
+
             h = self.cross_attn(h, context = cond) + h
+
+            h, = unpack(h, ps, 'b * c')
+            h = rearrange(h, 'b ... c -> b c ...')
 
         h = self.block2(h)
         return h + self.res_conv(x)
 
@@ -1705,11 +1720,11 @@ class CrossAttention(nn.Module):
 
         q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
 
-        q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = self.heads)
+        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
 
         # add null key / value for classifier free guidance in prior net
 
-        nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b h 1 d', h = self.heads, b = b)
+        nk, nv = map(lambda t: repeat(t, 'd -> b h 1 d', h = self.heads, b = b), self.null_kv.unbind(dim = -2))
 
         k = torch.cat((nk, k), dim = -2)
         v = torch.cat((nv, v), dim = -2)

@@ -1762,7 +1777,7 @@ class LinearAttention(nn.Module):
 
         fmap = self.norm(fmap)
         q, k, v = self.to_qkv(fmap).chunk(3, dim = 1)
-        q, k, v = rearrange_many((q, k, v), 'b (h c) x y -> (b h) (x y) c', h = h)
+        q, k, v = map(lambda t: rearrange(t, 'b (h c) x y -> (b h) (x y) c', h = h), (q, k, v))
 
         q = q.softmax(dim = -1)
         k = k.softmax(dim = -2)

@@ -1996,7 +2011,7 @@ class Unet(nn.Module):
 
         self_attn = cast_tuple(self_attn, num_stages)
 
-        create_self_attn = lambda dim: EinopsToAndFrom('b c h w', 'b (h w) c', Residual(Attention(dim, **attn_kwargs)))
+        create_self_attn = lambda dim: RearrangeToSequence(Residual(Attention(dim, **attn_kwargs)))
 
         # resnet block klass
 
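With RearrangeToSequence defined earlier in this diff, the per-stage self-attention no longer needs EinopsToAndFrom: the same Residual(Attention(...)) core is wrapped so it sees a (batch, sequence, dim) view of the feature map. A rough usage sketch, assuming the Attention, Residual and RearrangeToSequence classes from this file are in scope and that Attention's defaults are acceptable (shapes illustrative):

```python
import torch

# same shape as the lambda in the hunk above, minus the extra attn_kwargs
create_self_attn = lambda dim: RearrangeToSequence(Residual(Attention(dim)))

attn = create_self_attn(64)
fmap = torch.randn(2, 64, 16, 16)   # (b, c, h, w)
out = attn(fmap)                    # attention runs over the 16 * 16 = 256 spatial positions
assert out.shape == fmap.shape
```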
@@ -2496,7 +2511,7 @@ class Decoder(nn.Module):
         dynamic_thres_percentile = 0.95,
         p2_loss_weight_gamma = 0., # p2 loss weight, from https://arxiv.org/abs/2204.00227 - 0 is equivalent to weight of 1 across time - 1. is recommended
         p2_loss_weight_k = 1,
-        ddim_sampling_eta = 1. # can be set to 0. for deterministic sampling afaict
+        ddim_sampling_eta = 0. # can be set to 0. for deterministic sampling afaict
     ):
         super().__init__()
 
@@ -2730,11 +2745,16 @@ class Decoder(nn.Module):
         if exists(unet_number):
             unet = self.get_unet(unet_number)
 
+        # devices
+
+        cuda, cpu = torch.device('cuda'), torch.device('cpu')
+
         self.cuda()
 
         devices = [module_device(unet) for unet in self.unets]
-        self.unets.cpu()
-        unet.cuda()
+
+        self.unets.to(cpu)
+        unet.to(cuda)
 
         yield
 
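The hunk above reworks the one-unet-on-GPU context manager to move modules with explicit torch.device objects instead of bare .cpu()/.cuda() calls. The general pattern, sketched independently of the Decoder class (names and structure here are illustrative, not the repo's exact code):

```python
from contextlib import contextmanager

import torch
import torch.nn as nn

@contextmanager
def one_module_on_gpu(modules: nn.ModuleList, active: nn.Module):
    """Temporarily keep only `active` on the GPU, then restore every module's original device."""
    cuda, cpu = torch.device('cuda'), torch.device('cpu')
    devices = [next(m.parameters()).device for m in modules]  # remember where each module lived

    modules.to(cpu)
    active.to(cuda)

    yield

    for module, device in zip(modules, devices):
        module.to(device)
```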
@@ -2975,10 +2995,7 @@ class Decoder(nn.Module):
 
             # predict noise
 
-            if predict_x_start or predict_v:
-                pred_noise = noise_scheduler.predict_noise_from_start(img, t = time_cond, x0 = x_start)
-            else:
-                pred_noise = pred
+            pred_noise = noise_scheduler.predict_noise_from_start(img, t = time_cond, x0 = x_start)
 
             c1 = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
             c2 = ((1 - alpha_next) - torch.square(c1)).sqrt()
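Read together with the ddim_sampling_eta default flipping from 1. to 0. earlier in this diff: c1 is the stochastic DDIM coefficient, so with eta = 0 it vanishes and the update in this file reduces to sqrt(alpha_next) * x_start + c2 * pred_noise, i.e. deterministic DDIM sampling. A quick numeric check with made-up cumulative alphas:

```python
import torch

alpha, alpha_next = torch.tensor(0.90), torch.tensor(0.95)  # illustrative values

for eta in (1.0, 0.0):
    c1 = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
    c2 = ((1 - alpha_next) - torch.square(c1)).sqrt()
    print(f'eta = {eta}: c1 = {c1.item():.4f}, c2 = {c2.item():.4f}')

# eta = 0.0 gives c1 = 0, so no fresh noise is injected at each sampling step
```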
@@ -3120,7 +3137,8 @@ class Decoder(nn.Module):
         distributed = False,
         inpaint_image = None,
         inpaint_mask = None,
-        inpaint_resample_times = 5
+        inpaint_resample_times = 5,
+        one_unet_in_gpu_at_time = True
     ):
         assert self.unconditional or exists(image_embed), 'image embed must be present on sampling from decoder unless if trained unconditionally'
 
@@ -3143,6 +3161,7 @@ class Decoder(nn.Module):
             assert image.shape[0] == batch_size, 'image must have batch size of {} if starting at unet number > 1'.format(batch_size)
             prev_unet_output_size = self.image_sizes[start_at_unet_number - 2]
             img = resize_image_to(image, prev_unet_output_size, nearest = True)
 
         is_cuda = next(self.parameters()).is_cuda
 
         num_unets = self.num_unets
@@ -3152,7 +3171,7 @@ class Decoder(nn.Module):
             if unet_number < start_at_unet_number:
                 continue # It's the easiest way to do it
 
-            context = self.one_unet_in_gpu(unet = unet) if is_cuda else null_context()
+            context = self.one_unet_in_gpu(unet = unet) if is_cuda and one_unet_in_gpu_at_time else null_context()
 
             with context:
                 # prepare low resolution conditioning for upsamplers

@@ -3229,7 +3248,7 @@ class Decoder(nn.Module):
         learned_variance = self.learned_variance[unet_index]
         b, c, h, w, device, = *image.shape, image.device
 
-        check_shape(image, 'b c h w', c = self.channels)
+        assert image.shape[1] == self.channels
         assert h >= target_image_size and w >= target_image_size
 
         times = torch.randint(0, noise_scheduler.num_timesteps, (b,), device = device, dtype = torch.long)
@@ -1,6 +1,6 @@
 import json
 from torchvision import transforms as T
-from pydantic import BaseModel, validator, root_validator
+from pydantic import BaseModel, validator, model_validator
 from typing import List, Optional, Union, Tuple, Dict, Any, TypeVar
 
 from x_clip import CLIP as XCLIP
@@ -38,12 +38,12 @@ class TrainSplitConfig(BaseModel):
     val: float = 0.15
     test: float = 0.1
 
-    @root_validator
-    def validate_all(cls, fields):
-        actual_sum = sum([*fields.values()])
+    @model_validator(mode = 'after')
+    def validate_all(self, m):
+        actual_sum = sum([*dict(self).values()])
         if actual_sum != 1.:
-            raise ValueError(f'{fields.keys()} must sum to 1.0. Found: {actual_sum}')
-        return fields
+            raise ValueError(f'{dict(self).keys()} must sum to 1.0. Found: {actual_sum}')
+        return self
 
 class TrackerLogConfig(BaseModel):
     log_type: str = 'console'
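This is the pydantic v2 migration pattern used throughout the config file: the v1 class-level @root_validator, which received a dict of field values, becomes an @model_validator(mode = 'after') method that inspects the constructed model and returns it. A standalone equivalent, using the single-argument form of the after-validator (assumes pydantic >= 2; the class name here is a stand-in):

```python
from pydantic import BaseModel, model_validator

class SplitConfig(BaseModel):
    train: float = 0.75
    val: float = 0.15
    test: float = 0.1

    @model_validator(mode = 'after')
    def validate_all(self):
        actual_sum = sum([*dict(self).values()])
        if actual_sum != 1.:
            raise ValueError(f'{dict(self).keys()} must sum to 1.0. Found: {actual_sum}')
        return self

SplitConfig()               # defaults sum to 1.0, passes
# SplitConfig(train = 0.9)  # would raise: splits sum to 1.15
```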
@@ -59,6 +59,7 @@ class TrackerLogConfig(BaseModel):
         kwargs = self.dict()
         return create_logger(self.log_type, data_path, **kwargs)
 
+
 class TrackerLoadConfig(BaseModel):
     load_from: Optional[str] = None
     only_auto_resume: bool = False # Only attempt to load if the logger is auto-resuming
@@ -89,7 +90,7 @@ class TrackerConfig(BaseModel):
     data_path: str = '.tracker_data'
     overwrite_data_path: bool = False
     log: TrackerLogConfig
-    load: Optional[TrackerLoadConfig]
+    load: Optional[TrackerLoadConfig] = None
     save: Union[List[TrackerSaveConfig], TrackerSaveConfig]
 
     def create(self, full_config: BaseModel, extra_config: dict, dummy_mode: bool = False) -> Tracker:

@@ -114,7 +115,7 @@ class TrackerConfig(BaseModel):
 class AdapterConfig(BaseModel):
     make: str = "openai"
     model: str = "ViT-L/14"
-    base_model_kwargs: Dict[str, Any] = None
+    base_model_kwargs: Optional[Dict[str, Any]] = None
 
     def create(self):
         if self.make == "openai":
@@ -133,8 +134,8 @@ class AdapterConfig(BaseModel):
 class DiffusionPriorNetworkConfig(BaseModel):
     dim: int
     depth: int
-    max_text_len: int = None
-    num_timesteps: int = None
+    max_text_len: Optional[int] = None
+    num_timesteps: Optional[int] = None
     num_time_embeds: int = 1
     num_image_embeds: int = 1
     num_text_embeds: int = 1
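The repeated `x: int = None` to `x: Optional[int] = None` edits in this file are forced by pydantic v2, which dropped v1's implicit conversion of fields with a None default into Optional fields; with v2, a null supplied for a plain `int` annotation is a validation error. A minimal illustration (pydantic >= 2 assumed):

```python
from typing import Optional

from pydantic import BaseModel, ValidationError

class Explicit(BaseModel):
    max_text_len: Optional[int] = None   # None is an accepted value

class Bare(BaseModel):
    max_text_len: int = None             # v1 silently treated this as Optional[int]; v2 keeps it as int

Explicit(max_text_len = None)            # fine

try:
    Bare(max_text_len = None)            # explicitly passing None now fails validation
except ValidationError as err:
    print(err.errors()[0]['type'])       # e.g. 'int_type'
```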
@@ -157,7 +158,7 @@ class DiffusionPriorNetworkConfig(BaseModel):
         return DiffusionPriorNetwork(**kwargs)
 
 class DiffusionPriorConfig(BaseModel):
-    clip: AdapterConfig = None
+    clip: Optional[AdapterConfig] = None
     net: DiffusionPriorNetworkConfig
     image_embed_dim: int
     image_size: int

@@ -194,7 +195,7 @@ class DiffusionPriorTrainConfig(BaseModel):
     use_ema: bool = True
     ema_beta: float = 0.99
     amp: bool = False
-    warmup_steps: int = None # number of warmup steps
+    warmup_steps: Optional[int] = None # number of warmup steps
     save_every_seconds: int = 3600 # how often to save
     eval_timesteps: List[int] = [64] # which sampling timesteps to evaluate with
     best_validation_loss: float = 1e9 # the current best valudation loss observed

@@ -227,12 +228,12 @@ class TrainDiffusionPriorConfig(BaseModel):
 class UnetConfig(BaseModel):
     dim: int
     dim_mults: ListOrTuple[int]
-    image_embed_dim: int = None
-    text_embed_dim: int = None
-    cond_on_text_encodings: bool = None
-    cond_dim: int = None
+    image_embed_dim: Optional[int] = None
+    text_embed_dim: Optional[int] = None
+    cond_on_text_encodings: Optional[bool] = None
+    cond_dim: Optional[int] = None
     channels: int = 3
-    self_attn: ListOrTuple[int]
+    self_attn: SingularOrIterable[bool] = False
     attn_dim_head: int = 32
     attn_heads: int = 16
     init_cross_embed: bool = True
@@ -242,14 +243,14 @@ class UnetConfig(BaseModel):
 
 class DecoderConfig(BaseModel):
     unets: ListOrTuple[UnetConfig]
-    image_size: int = None
+    image_size: Optional[int] = None
     image_sizes: ListOrTuple[int] = None
-    clip: Optional[AdapterConfig] # The clip model to use if embeddings are not provided
+    clip: Optional[AdapterConfig] = None # The clip model to use if embeddings are not provided
     channels: int = 3
     timesteps: int = 1000
     sample_timesteps: Optional[SingularOrIterable[Optional[int]]] = None
     loss_type: str = 'l2'
-    beta_schedule: ListOrTuple[str] = None # None means all cosine
+    beta_schedule: Optional[ListOrTuple[str]] = None # None means all cosine
     learned_variance: SingularOrIterable[bool] = True
     image_cond_drop_prob: float = 0.1
     text_cond_drop_prob: float = 0.5

@@ -277,9 +278,9 @@ class DecoderConfig(BaseModel):
         extra = "allow"
 
 class DecoderDataConfig(BaseModel):
     webdataset_base_url: str # path to a webdataset with jpg images
-    img_embeddings_url: Optional[str] # path to .npy files with embeddings
-    text_embeddings_url: Optional[str] # path to .npy files with embeddings
+    img_embeddings_url: Optional[str] = None # path to .npy files with embeddings
+    text_embeddings_url: Optional[str] = None # path to .npy files with embeddings
     num_workers: int = 4
     batch_size: int = 64
     start_shard: int = 0

@@ -319,20 +320,20 @@ class DecoderTrainConfig(BaseModel):
     n_sample_images: int = 6 # The number of example images to produce when sampling the train and test dataset
     cond_scale: Union[float, List[float]] = 1.0
     device: str = 'cuda:0'
-    epoch_samples: int = None # Limits the number of samples per epoch. None means no limit. Required if resample_train is true as otherwise the number of samples per epoch is infinite.
-    validation_samples: int = None # Same as above but for validation.
+    epoch_samples: Optional[int] = None # Limits the number of samples per epoch. None means no limit. Required if resample_train is true as otherwise the number of samples per epoch is infinite.
+    validation_samples: Optional[int] = None # Same as above but for validation.
     save_immediately: bool = False
     use_ema: bool = True
     ema_beta: float = 0.999
     amp: bool = False
-    unet_training_mask: ListOrTuple[bool] = None # If None, use all unets
+    unet_training_mask: Optional[ListOrTuple[bool]] = None # If None, use all unets
 
 class DecoderEvaluateConfig(BaseModel):
     n_evaluation_samples: int = 1000
-    FID: Dict[str, Any] = None
-    IS: Dict[str, Any] = None
-    KID: Dict[str, Any] = None
-    LPIPS: Dict[str, Any] = None
+    FID: Optional[Dict[str, Any]] = None
+    IS: Optional[Dict[str, Any]] = None
+    KID: Optional[Dict[str, Any]] = None
+    LPIPS: Optional[Dict[str, Any]] = None
 
 class TrainDecoderConfig(BaseModel):
     decoder: DecoderConfig
@@ -346,11 +347,14 @@ class TrainDecoderConfig(BaseModel):
     def from_json_path(cls, json_path):
         with open(json_path) as f:
             config = json.load(f)
+            print(config)
         return cls(**config)
 
-    @root_validator
-    def check_has_embeddings(cls, values):
+    @model_validator(mode = 'after')
+    def check_has_embeddings(self, m):
         # Makes sure that enough information is provided to get the embeddings specified for training
+        values = dict(self)
+
         data_config, decoder_config = values.get('data'), values.get('decoder')
 
         if not exists(data_config) or not exists(decoder_config):

@@ -375,4 +379,4 @@ class TrainDecoderConfig(BaseModel):
         if text_emb_url:
             assert using_text_embeddings, "Text embeddings are being loaded, but text embeddings are not being conditioned on. This will slow down the dataloader for no reason."
 
-        return values
+        return m
@@ -1 +1 @@
-__version__ = '1.11.4'
+__version__ = '1.15.6'
@@ -11,8 +11,7 @@ import torch.nn.functional as F
 from torch.autograd import grad as torch_grad
 import torchvision
 
-from einops import rearrange, reduce, repeat
-from einops_exts import rearrange_many
+from einops import rearrange, reduce, repeat, pack, unpack
 from einops.layers.torch import Rearrange
 
 # constants
@@ -408,7 +407,7 @@ class Attention(nn.Module):
         x = self.norm(x)
 
         q, k, v = self.to_qkv(x).chunk(3, dim = -1)
-        q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)
+        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
 
         q = q * self.scale
         sim = einsum('b h i d, b h j d -> b h i j', q, k)
setup.py (7 changes)
@@ -27,17 +27,16 @@ setup(
     'accelerate',
     'click',
     'open-clip-torch>=2.0.0,<3.0.0',
-    'clip-anytorch>=2.4.0',
+    'clip-anytorch>=2.5.2',
     'coca-pytorch>=0.0.5',
     'ema-pytorch>=0.0.7',
-    'einops>=0.4',
-    'einops-exts>=0.0.3',
+    'einops>=0.7.0',
     'embedding-reader',
     'kornia>=0.5.4',
     'numpy',
     'packaging',
     'pillow',
-    'pydantic',
+    'pydantic>=2',
     'pytorch-warmup',
     'resize-right>=0.0.2',
     'rotary-embedding-torch',
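The dependency changes track the code changes above: einops is bumped to a release that ships pack/unpack, einops-exts is removed entirely, and pydantic is pinned to v2 for the model_validator / model_dump API. A quick post-install sanity check one could run (assumption: these are the attributes the new code relies on):

```python
import einops
import pydantic

# einops >= 0.6 exposes pack / unpack, which replace the einops_exts helpers
assert all(hasattr(einops, name) for name in ('pack', 'unpack', 'rearrange', 'repeat'))

# pydantic v2 provides model_validator and BaseModel.model_dump used by the train configs
assert int(pydantic.VERSION.split('.')[0]) >= 2
assert hasattr(pydantic, 'model_validator') and hasattr(pydantic.BaseModel, 'model_dump')
```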
@@ -511,7 +511,7 @@ def train(
         if next_task == 'eval':
             if exists(evaluate_config):
                 accelerator.print(print_ribbon(f"Starting Evaluation {epoch}", repeat=40))
-                evaluation = evaluate_trainer(trainer, dataloaders["val"], inference_device, first_trainable_unet, last_trainable_unet, clip=clip, inference_device=inference_device, **evaluate_config.dict(), condition_on_text_encodings=condition_on_text_encodings, cond_scale=cond_scale)
+                evaluation = evaluate_trainer(trainer, dataloaders["val"], inference_device, first_trainable_unet, last_trainable_unet, clip=clip, inference_device=inference_device, **evaluate_config.model_dump(), condition_on_text_encodings=condition_on_text_encodings, cond_scale=cond_scale)
                 if is_master:
                     tracker.log(evaluation, step=step())
             next_task = 'sample'
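.dict() still exists in pydantic v2 but is deprecated in favor of .model_dump(), so the training script switches wherever a config is splatted into keyword arguments. A minimal equivalent with a stand-in config class (not the repo's actual one):

```python
from typing import Any, Dict, Optional

from pydantic import BaseModel

class EvaluateConfig(BaseModel):             # hypothetical stand-in for the evaluate config
    n_evaluation_samples: int = 1000
    FID: Optional[Dict[str, Any]] = None

def evaluate_trainer(**kwargs):              # hypothetical consumer, for illustration only
    return kwargs

cfg = EvaluateConfig()
print(evaluate_trainer(**cfg.model_dump()))  # pydantic v2 spelling of **cfg.dict()
# -> {'n_evaluation_samples': 1000, 'FID': None}
```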
@@ -548,7 +548,7 @@ def create_tracker(accelerator: Accelerator, config: TrainDecoderConfig, config_
     accelerator.wait_for_everyone() # If nodes arrive at this point at different times they might try to autoresume the current run which makes no sense and will cause errors
     tracker: Tracker = tracker_config.create(config, accelerator_config, dummy_mode=dummy)
     tracker.save_config(config_path, config_name='decoder_config.json')
-    tracker.add_save_metadata(state_dict_key='config', metadata=config.dict())
+    tracker.add_save_metadata(state_dict_key='config', metadata=config.model_dump())
     return tracker
 
 def initialize_training(config: TrainDecoderConfig, config_path):
@@ -577,6 +577,7 @@ def initialize_training(config: TrainDecoderConfig, config_path):
     shards_per_process = len(all_shards) // world_size
     assert shards_per_process > 0, "Not enough shards to split evenly"
     my_shards = all_shards[rank * shards_per_process: (rank + 1) * shards_per_process]
 
     dataloaders = create_dataloaders (
         available_shards=my_shards,
         img_preproc = config.data.img_preproc,
@@ -584,7 +585,7 @@ def initialize_training(config: TrainDecoderConfig, config_path):
         val_prop = config.data.splits.val,
         test_prop = config.data.splits.test,
         n_sample_images=config.train.n_sample_images,
-        **config.data.dict(),
+        **config.data.model_dump(),
         rank = rank,
         seed = config.seed,
     )

@@ -635,7 +636,7 @@ def initialize_training(config: TrainDecoderConfig, config_path):
         inference_device=accelerator.device,
         evaluate_config=config.evaluate,
         condition_on_text_encodings=conditioning_on_text,
-        **config.train.dict(),
+        **config.train.model_dump(),
     )
 
     # Create a simple click command line interface to load the config and start the training