Mirror of https://github.com/lucidrains/DALLE2-pytorch.git (synced 2026-02-22 14:54:28 +01:00)

Compare commits (11 commits)
Commit SHA1s in this range:

- 6161b61c55
- 1ed0f9d80b
- f326a95e26
- d7a0a2ce4b
- f23fab7ef7
- 857b9fbf1e
- 8864fd0aa7
- 72bf159331
- e5e47cfecb
- fa533962bd
- 276abf337b

README.md (12 changed lines)
@@ -12,7 +12,7 @@ This model is SOTA for text-to-image for now.
 
 Please join <a href="https://discord.gg/xBPBXfcFHd"><img alt="Join us on Discord" src="https://img.shields.io/discord/823813159592001537?color=5865F2&logo=discord&logoColor=white"></a> if you are interested in helping out with the replication with the <a href="https://laion.ai/">LAION</a> community | <a href="https://www.youtube.com/watch?v=AIOE1l1W0Tw">Yannic Interview</a>
 
-There was enough interest for a <a href="https://github.com/lucidrains/dalle2-jax">Jax version</a>. I will also eventually extend this to <a href="https://github.com/lucidrains/dalle2-video">text to video</a>, once the repository is in a good place.
+As of 5/23/22, it is no longer SOTA. SOTA will be <a href="https://github.com/lucidrains/imagen-pytorch">here</a>. Jax versions as well as text-to-video project will be shifted towards the Imagen architecture, as it is way simpler.
 
 ## Status
 
@@ -26,7 +26,7 @@ There was enough interest for a <a href="https://github.com/lucidrains/dalle2-ja
 
 ## Pre-Trained Models
 - LAION is training prior models. Checkpoints are available on <a href="https://huggingface.co/zenglishuci/conditioned-prior">🤗huggingface</a> and the training statistics are available on <a href="https://wandb.ai/nousr_laion/conditioned-prior/reports/LAION-DALLE2-PyTorch-Prior--VmlldzoyMDI2OTIx">🐝WANDB</a>.
-- Decoder 🚧
+- Decoder - <a href="https://wandb.ai/veldrovive/dalle2_train_decoder/runs/jkrtg0so?workspace=user-veldrovive">In-progress test run</a> 🚧
 - DALL-E 2 🚧
 
 ## Install
@@ -1195,4 +1195,12 @@ This library would not have gotten to this working state without the help of
 }
 ```
 
+```bibtex
+@misc{Saharia2022,
+    title   = {Imagen: unprecedented photorealism × deep level of language understanding},
+    author  = {Chitwan Saharia*, William Chan*, Saurabh Saxena†, Lala Li†, Jay Whang†, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S. Sara Mahdavi, Rapha Gontijo Lopes, Tim Salimans, Jonathan Ho†, David Fleet†, Mohammad Norouzi*},
+    year    = {2022}
+}
+```
+
 *Creating noise from data is easy; creating data from noise is generative modeling.* - <a href="https://arxiv.org/abs/2011.13456">Yang Song's paper</a>

configs/train_prior_config.example.json (new file, 70 lines)
@@ -0,0 +1,70 @@
+{
+    "prior": {
+        "clip": {
+            "make": "x-clip",
+            "model": "ViT-L/14",
+            "base_model_kwargs": {
+                "dim_text": 768,
+                "dim_image": 768,
+                "dim_latent": 768
+            }
+        },
+        "net": {
+            "dim": 768,
+            "depth": 12,
+            "num_timesteps": 1000,
+            "num_time_embeds": 1,
+            "num_image_embeds": 1,
+            "num_text_embeds": 1,
+            "dim_head": 64,
+            "heads": 12,
+            "ff_mult": 4,
+            "norm_out": true,
+            "attn_dropout": 0.0,
+            "ff_dropout": 0.0,
+            "final_proj": true,
+            "normformer": true,
+            "rotary_emb": true
+        },
+        "image_embed_dim": 768,
+        "image_size": 224,
+        "image_channels": 3,
+        "timesteps": 1000,
+        "cond_drop_prob": 0.1,
+        "loss_type": "l2",
+        "predict_x_start": true,
+        "beta_schedule": "cosine",
+        "condition_on_text_encodings": true
+    },
+    "data": {
+        "image_url": "https://mystic.the-eye.eu/public/AI/cah/laion5b/embeddings/laion2B-en/img_emb/",
+        "text_url": "https://mystic.the-eye.eu/public/AI/cah/laion5b/embeddings/laion2B-en/text_emb/",
+        "meta_url": "https://mystic.the-eye.eu/public/AI/cah/laion5b/embeddings/laion2B-en/laion2B-en-metadata/",
+        "batch_size": 256,
+        "splits": {
+            "train": 0.9,
+            "val": 1e-7,
+            "test": 0.0999999
+        }
+    },
+    "train": {
+        "epochs": 1,
+        "lr": 1.1e-4,
+        "wd": 6.02e-2,
+        "max_grad_norm": 0.5,
+        "use_ema": true,
+        "amp": false,
+        "save_every": 10000
+    },
+    "load": {
+        "source": null,
+        "resume": false
+    },
+    "tracker": {
+        "tracker_type": "wandb",
+        "data_path": "./prior_checkpoints",
+        "wandb_entity": "laion",
+        "wandb_project": "diffusion-prior",
+        "verbose": true
+    }
+}
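
This example config is consumed by the `TrainDiffusionPriorConfig` pydantic class added further down in this commit range. A minimal loading sketch, assuming the config classes live in `dalle2_pytorch.train_configs` (only `from_json_path` and `create` are taken from the diff itself):

```python
# Sketch only: load the example prior config and instantiate the model.
# The dalle2_pytorch.train_configs module path is an assumption.
from dalle2_pytorch.train_configs import TrainDiffusionPriorConfig

config = TrainDiffusionPriorConfig.from_json_path('configs/train_prior_config.example.json')
diffusion_prior = config.prior.create()  # builds the CLIP adapter, the prior network, then DiffusionPrior
```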
@@ -890,6 +890,8 @@ class DiffusionPrior(BaseGaussianDiffusion):
         )
 
         if exists(clip):
+            assert image_channels == clip.image_channels, f'channels of image ({image_channels}) should be equal to the channels that CLIP accepts ({clip.image_channels})'
+
             if isinstance(clip, CLIP):
                 clip = XClipAdapter(clip, **clip_adapter_overrides)
             elif isinstance(clip, CoCa):
@@ -1105,13 +1107,20 @@ class Block(nn.Module):
         groups = 8
     ):
         super().__init__()
-        self.block = nn.Sequential(
-            nn.Conv2d(dim, dim_out, 3, padding = 1),
-            nn.GroupNorm(groups, dim_out),
-            nn.SiLU()
-        )
+        self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)
+        self.norm = nn.GroupNorm(groups, dim_out)
+        self.act = nn.SiLU()
 
-    def forward(self, x):
-        return self.block(x)
+    def forward(self, x, scale_shift = None):
+        x = self.project(x)
+        x = self.norm(x)
+
+        if exists(scale_shift):
+            scale, shift = scale_shift
+            x = x * (scale + 1) + shift
+
+        x = self.act(x)
+        return x
 
 class ResnetBlock(nn.Module):
     def __init__(
class ResnetBlock(nn.Module):
|
class ResnetBlock(nn.Module):
|
||||||
def __init__(
|
def __init__(
|
||||||
@@ -1130,7 +1139,7 @@ class ResnetBlock(nn.Module):
         if exists(time_cond_dim):
             self.time_mlp = nn.Sequential(
                 nn.SiLU(),
-                nn.Linear(time_cond_dim, dim_out)
+                nn.Linear(time_cond_dim, dim_out * 2)
             )
 
         self.cross_attn = None
@@ -1150,11 +1159,14 @@ class ResnetBlock(nn.Module):
         self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
 
     def forward(self, x, cond = None, time_emb = None):
-        h = self.block1(x)
 
+        scale_shift = None
         if exists(self.time_mlp) and exists(time_emb):
             time_emb = self.time_mlp(time_emb)
-            h = rearrange(time_emb, 'b c -> b c 1 1') + h
+            time_emb = rearrange(time_emb, 'b c -> b c 1 1')
+            scale_shift = time_emb.chunk(2, dim = 1)
+
+        h = self.block1(x, scale_shift = scale_shift)
 
         if exists(self.cross_attn):
             assert exists(cond)
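
The Block and ResnetBlock hunks above switch from additive time conditioning (`h = h + time_emb`) to FiLM-style scale-shift modulation: the time MLP now emits `dim_out * 2` channels, which are split into a scale and a shift applied after the group norm. A self-contained sketch of the same pattern (illustrative names, not the repository's exact classes):

```python
import torch
from torch import nn

class FiLMBlock(nn.Module):
    # illustrative re-implementation of the conv -> norm -> (scale, shift) -> act pattern
    def __init__(self, dim, dim_out, time_cond_dim, groups = 8):
        super().__init__()
        self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)
        self.norm = nn.GroupNorm(groups, dim_out)
        self.act = nn.SiLU()
        self.time_mlp = nn.Sequential(nn.SiLU(), nn.Linear(time_cond_dim, dim_out * 2))

    def forward(self, x, time_emb):
        x = self.norm(self.project(x))
        # time embedding -> (scale, shift), broadcast over the spatial dimensions
        scale, shift = self.time_mlp(time_emb)[..., None, None].chunk(2, dim = 1)
        x = x * (scale + 1) + shift
        return self.act(x)

x = torch.randn(2, 32, 16, 16)
t = torch.randn(2, 128)
out = FiLMBlock(32, 64, time_cond_dim = 128)(x, t)  # shape (2, 64, 16, 16)
```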
@@ -1702,6 +1714,8 @@ class Decoder(BaseGaussianDiffusion):
         vb_loss_weight = 0.001,
         unconditional = False,
         auto_normalize_img = True, # whether to take care of normalizing the image from [0, 1] to [-1, 1] and back automatically - you can turn this off if you want to pass in the [-1, 1] ranged image yourself from the dataloader
+        use_dynamic_thres = False, # from the Imagen paper
+        dynamic_thres_percentile = 0.9
     ):
         super().__init__(
             beta_schedule = beta_schedule,
@@ -1710,12 +1724,19 @@ class Decoder(BaseGaussianDiffusion):
         )
 
         self.unconditional = unconditional
-        assert not (condition_on_text_encodings and unconditional), 'unconditional decoder image generation cannot be set to True if conditioning on text is present'
 
-        assert self.unconditional or (exists(clip) or exists(image_size) or exists(image_sizes)), 'either CLIP is supplied, or you must give the image_size and channels (usually 3 for RGB)'
+        # text conditioning
+
+        assert not (condition_on_text_encodings and unconditional), 'unconditional decoder image generation cannot be set to True if conditioning on text is present'
+        self.condition_on_text_encodings = condition_on_text_encodings
+
+        # clip
 
         self.clip = None
         if exists(clip):
+            assert not unconditional, 'clip must not be given if doing unconditional image training'
+            assert channels == clip.image_channels, f'channels of image ({channels}) should be equal to the channels that CLIP accepts ({clip.image_channels})'
+
             if isinstance(clip, CLIP):
                 clip = XClipAdapter(clip, **clip_adapter_overrides)
             elif isinstance(clip, CoCa):
@@ -1725,13 +1746,20 @@ class Decoder(BaseGaussianDiffusion):
             assert isinstance(clip, BaseClipAdapter)
 
             self.clip = clip
-            self.clip_image_size = clip.image_size
-            self.channels = clip.image_channels
-        else:
-            self.clip_image_size = default(image_size, lambda: image_sizes[-1])
-            self.channels = channels
 
-        self.condition_on_text_encodings = condition_on_text_encodings
+        # determine image size, with image_size and image_sizes taking precedence
+
+        if exists(image_size) or exists(image_sizes):
+            assert exists(image_size) ^ exists(image_sizes), 'only one of image_size or image_sizes must be given'
+            image_size = default(image_size, lambda: image_sizes[-1])
+        elif exists(clip):
+            image_size = clip.image_size
+        else:
+            raise Error('either image_size, image_sizes, or clip must be given to decoder')
+
+        # channels
+
+        self.channels = channels
 
         # automatically take care of ensuring that first unet is unconditional
         # while the rest of the unets are conditioned on the low resolution image produced by previous unet
@@ -1773,7 +1801,7 @@ class Decoder(BaseGaussianDiffusion):
 
         # unet image sizes
 
-        image_sizes = default(image_sizes, (self.clip_image_size,))
+        image_sizes = default(image_sizes, (image_size,))
         image_sizes = tuple(sorted(set(image_sizes)))
 
         assert len(self.unets) == len(image_sizes), f'you did not supply the correct number of u-nets ({len(self.unets)}) for resolutions {image_sizes}'
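
These Decoder changes decouple the image resolution from CLIP: `image_size`/`image_sizes` now take precedence, and CLIP becomes optional (and is disallowed for unconditional training). A hedged sketch of what that enables, assuming `unet` is a dalle2-pytorch Unet built as in the repository README:

```python
# Sketch under assumptions: `unet` is constructed elsewhere (see the README);
# the keyword names image_size, channels, and unconditional are taken from this diff.
decoder = Decoder(
    unet = unet,
    image_size = 64,        # resolution no longer has to come from clip.image_size
    channels = 3,
    timesteps = 1000,
    unconditional = True    # no CLIP and no text conditioning
)
```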
@@ -1810,7 +1838,13 @@ class Decoder(BaseGaussianDiffusion):
         self.clip_denoised = clip_denoised
         self.clip_x_start = clip_x_start
 
+        # dynamic thresholding settings, if clipping denoised during sampling
+
+        self.use_dynamic_thres = use_dynamic_thres
+        self.dynamic_thres_percentile = dynamic_thres_percentile
+
         # normalize and unnormalize image functions
 
         self.normalize_img = normalize_neg_one_to_one if auto_normalize_img else identity
         self.unnormalize_img = unnormalize_zero_to_one if auto_normalize_img else identity
@@ -1851,7 +1885,21 @@ class Decoder(BaseGaussianDiffusion):
         x_recon = self.predict_start_from_noise(x, t = t, noise = pred)
 
         if clip_denoised:
-            x_recon.clamp_(-1., 1.)
+            # s is the threshold amount
+            # static thresholding would just be s = 1
+            s = 1.
+            if self.use_dynamic_thres:
+                s = torch.quantile(
+                    rearrange(x_recon, 'b ... -> b (...)').abs(),
+                    self.dynamic_thres_percentile,
+                    dim = -1
+                )
+
+                s.clamp_(min = 1.)
+                s = s.view(-1, *((1,) * (x_recon.ndim - 1)))
+
+            # clip by threshold, depending on whether static or dynamic
+            x_recon = x_recon.clamp(-s, s) / s
 
         model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
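
Dynamic thresholding (from the Imagen paper) replaces the fixed clamp of the predicted x_0 to [-1, 1] with a per-sample clamp at a quantile of its absolute values, then rescales back into [-1, 1], which keeps heavily guided samples from saturating. A standalone sketch of the idea in plain PyTorch (the function name is illustrative):

```python
import torch

def dynamic_threshold(x_recon, percentile = 0.9):
    # per-sample threshold s: the given quantile of |x0| over all pixels, but at least 1
    s = torch.quantile(x_recon.abs().flatten(1), percentile, dim = -1)
    s = s.clamp(min = 1.).view(-1, *((1,) * (x_recon.ndim - 1)))
    # clamp to [-s, s] and rescale so the result stays within [-1, 1]
    return x_recon.clamp(-s, s) / s
```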
@@ -12,6 +12,7 @@ def get_optimizer(
     betas = (0.9, 0.999),
     eps = 1e-8,
     filter_by_requires_grad = False,
+    group_wd_params = True,
     **kwargs
 ):
     if filter_by_requires_grad:
@@ -20,12 +21,12 @@ def get_optimizer(
     if wd == 0:
         return Adam(params, lr = lr, betas = betas, eps = eps)
 
-    params = set(params)
-    wd_params, no_wd_params = separate_weight_decayable_params(params)
+    if group_wd_params:
+        wd_params, no_wd_params = separate_weight_decayable_params(params)
 
-    param_groups = [
-        {'params': list(wd_params)},
-        {'params': list(no_wd_params), 'weight_decay': 0},
-    ]
+        params = [
+            {'params': list(wd_params)},
+            {'params': list(no_wd_params), 'weight_decay': 0},
+        ]
 
-    return AdamW(param_groups, lr = lr, weight_decay = wd, betas = betas, eps = eps)
+    return AdamW(params, lr = lr, weight_decay = wd, betas = betas, eps = eps)
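
The new `group_wd_params` flag makes `get_optimizer` split parameters into a weight-decayed group and a zero-decay group (via `separate_weight_decayable_params`) before handing them to AdamW. A usage sketch (the import path is assumed; the keyword names come from the diff):

```python
import torch.nn as nn
from dalle2_pytorch.optimizer import get_optimizer  # module path assumed

model = nn.Sequential(nn.Conv2d(3, 16, 3, padding = 1), nn.GroupNorm(4, 16), nn.SiLU())

# conv weights get wd = 6.02e-2, while biases / norm parameters land in a weight_decay = 0 group
opt = get_optimizer(list(model.parameters()), lr = 1.1e-4, wd = 6.02e-2, group_wd_params = True)
```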
@@ -3,7 +3,18 @@ from torchvision import transforms as T
 from pydantic import BaseModel, validator, root_validator
 from typing import List, Iterable, Optional, Union, Tuple, Dict, Any
 
-from dalle2_pytorch.dalle2_pytorch import Unet, Decoder, DiffusionPrior, DiffusionPriorNetwork
+from x_clip import CLIP as XCLIP
+from coca_pytorch import CoCa
+
+from dalle2_pytorch.dalle2_pytorch import (
+    CoCaAdapter,
+    OpenAIClipAdapter,
+    Unet,
+    Decoder,
+    DiffusionPrior,
+    DiffusionPriorNetwork,
+    XClipAdapter,
+)
 
 # helper functions
@@ -16,7 +27,44 @@ def default(val, d):
 def ListOrTuple(inner_type):
     return Union[List[inner_type], Tuple[inner_type]]
 
-# pydantic classes
+# general pydantic classes
+
+class TrainSplitConfig(BaseModel):
+    train: float = 0.75
+    val: float = 0.15
+    test: float = 0.1
+
+    @root_validator
+    def validate_all(cls, fields):
+        actual_sum = sum([*fields.values()])
+        if actual_sum != 1.:
+            raise ValueError(f'{fields.keys()} must sum to 1.0. Found: {actual_sum}')
+        return fields
+
+class TrackerConfig(BaseModel):
+    tracker_type: str = 'console' # Decoder currently supports console and wandb
+    data_path: str = './models' # The path where files will be saved locally
+    init_config: Dict[str, Any] = None
+    wandb_entity: str = '' # Only needs to be set if tracker_type is wandb
+    wandb_project: str = ''
+    verbose: bool = False # Whether to print console logging for non-console trackers
+
+# diffusion prior pydantic classes
+
+class AdapterConfig(BaseModel):
+    make: str = "openai"
+    model: str = "ViT-L/14"
+    base_model_kwargs: Dict[str, Any] = None
+
+    def create(self):
+        if self.make == "openai":
+            return OpenAIClipAdapter(self.model)
+        elif self.make == "x-clip":
+            return XClipAdapter(XCLIP(**self.base_model_kwargs))
+        elif self.make == "coca":
+            return CoCaAdapter(CoCa(**self.base_model_kwargs))
+        else:
+            raise AttributeError("No adapter with that name is available.")
 
 class DiffusionPriorNetworkConfig(BaseModel):
     dim: int
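
`AdapterConfig.create()` is a small factory: the `make` field selects which CLIP adapter to wrap (OpenAI CLIP, x-clip, or CoCa). A brief usage sketch, assuming the class is importable from `dalle2_pytorch.train_configs`:

```python
from dalle2_pytorch.train_configs import AdapterConfig  # module path assumed

# make = "openai" wraps OpenAI CLIP; "x-clip" and "coca" build the model from base_model_kwargs first
clip_adapter = AdapterConfig(make = "openai", model = "ViT-L/14").create()
```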
@@ -35,8 +83,12 @@ class DiffusionPriorNetworkConfig(BaseModel):
     normformer: bool = False
     rotary_emb: bool = True
 
+    def create(self):
+        kwargs = self.dict()
+        return DiffusionPriorNetwork(**kwargs)
+
 class DiffusionPriorConfig(BaseModel):
-    # only clip-less diffusion prior config for now
+    clip: AdapterConfig
     net: DiffusionPriorNetworkConfig
     image_embed_dim: int
     image_size: int
@@ -46,15 +98,52 @@ class DiffusionPriorConfig(BaseModel):
     loss_type: str = 'l2'
     predict_x_start: bool = True
     beta_schedule: str = 'cosine'
-
-    def create(self):
-        kwargs = self.dict()
-        diffusion_prior_network = DiffusionPriorNetwork(**kwargs.pop('net'))
-        return DiffusionPrior(net = diffusion_prior_network, **kwargs)
+    condition_on_text_encodings: bool = True
 
     class Config:
         extra = "allow"
 
+    def create(self):
+        kwargs = self.dict()
+        clip = AdapterConfig(**kwargs.pop('clip')).create()
+        diffusion_prior_network = DiffusionPriorNetworkConfig(**kwargs.pop('net')).create()
+        return DiffusionPrior(net = diffusion_prior_network, clip=clip, **kwargs)
+
+class DiffusionPriorTrainConfig(BaseModel):
+    epochs: int = 1
+    lr: float = 1.1e-4
+    wd: float = 6.02e-2
+    max_grad_norm: float = 0.5
+    use_ema: bool = True
+    ema_beta: float = 0.99
+    amp: bool = False
+    save_every: int = 10000 # what steps to save on
+
+class DiffusionPriorDataConfig(BaseModel):
+    image_url: str # path to embeddings folder
+    meta_url: str # path to metadata (captions) for images
+    splits: TrainSplitConfig
+    batch_size: int = 64
+
+class DiffusionPriorLoadConfig(BaseModel):
+    source: str = None
+    resume: bool = False
+
+class TrainDiffusionPriorConfig(BaseModel):
+    prior: DiffusionPriorConfig
+    data: DiffusionPriorDataConfig
+    train: DiffusionPriorTrainConfig
+    load: DiffusionPriorLoadConfig
+    tracker: TrackerConfig
+
+    @classmethod
+    def from_json_path(cls, json_path):
+        with open(json_path) as f:
+            config = json.load(f)
+        return cls(**config)
+
+# decoder pydantic classes
+
 class UnetConfig(BaseModel):
     dim: int
     dim_mults: ListOrTuple(int)
@@ -94,17 +183,6 @@ class DecoderConfig(BaseModel):
     class Config:
         extra = "allow"
 
-class TrainSplitConfig(BaseModel):
-    train: float = 0.75
-    val: float = 0.15
-    test: float = 0.1
-
-    @root_validator
-    def validate_all(cls, fields):
-        if sum([*fields.values()]) != 1.:
-            raise ValueError(f'{fields.keys()} must sum to 1.0')
-        return fields
-
 class DecoderDataConfig(BaseModel):
     webdataset_base_url: str # path to a webdataset with jpg images
     embeddings_url: str # path to .npy files with embeddings
@@ -160,14 +238,6 @@ class DecoderEvaluateConfig(BaseModel):
     KID: Dict[str, Any] = None
     LPIPS: Dict[str, Any] = None
 
-class TrackerConfig(BaseModel):
-    tracker_type: str = 'console' # Decoder currently supports console and wandb
-    data_path: str = './models' # The path where files will be saved locally
-    init_config: Dict[str, Any] = None
-    wandb_entity: str = '' # Only needs to be set if tracker_type is wandb
-    wandb_project: str = ''
-    verbose: bool = False # Whether to print console logging for non-console trackers
-
 class DecoderLoadConfig(BaseModel):
     source: str = None # Supports file and wandb
     run_path: str = '' # Used only if source is wandb
@@ -254,6 +254,7 @@ class DiffusionPriorTrainer(nn.Module):
         eps = 1e-6,
         max_grad_norm = None,
         amp = False,
+        group_wd_params = True,
         **kwargs
     ):
         super().__init__()
@@ -279,6 +280,7 @@ class DiffusionPriorTrainer(nn.Module):
             lr = lr,
             wd = wd,
             eps = eps,
+            group_wd_params = group_wd_params,
             **kwargs
         )
 
@@ -410,6 +412,7 @@ class DecoderTrainer(nn.Module):
         eps = 1e-8,
         max_grad_norm = 0.5,
         amp = False,
+        group_wd_params = True,
         **kwargs
     ):
         super().__init__()
@@ -435,6 +438,7 @@ class DecoderTrainer(nn.Module):
             lr = unet_lr,
             wd = unet_wd,
             eps = unet_eps,
+            group_wd_params = group_wd_params,
             **kwargs
         )
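
Both trainers now forward a `group_wd_params` flag into `get_optimizer`. A hedged usage sketch (only `group_wd_params`, `lr`, `wd`, and `max_grad_norm` are taken from this diff; the import path and the `diffusion_prior` argument follow the repository README and are assumptions here):

```python
# Sketch only: enable grouped weight decay when building the prior trainer.
from dalle2_pytorch.trainer import DiffusionPriorTrainer  # module path assumed

trainer = DiffusionPriorTrainer(
    diffusion_prior,          # a DiffusionPrior instance, e.g. config.prior.create()
    lr = 1.1e-4,
    wd = 6.02e-2,
    max_grad_norm = 0.5,
    group_wd_params = True    # new flag, passed through to get_optimizer
)
```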