Compare commits

10 Commits
1.14.1...main

Author · SHA1 · Message · Date
lucidrains · 680dfc4d93 · yet more pydantic v2 stuff · 2023-10-19 07:40:57 -07:00
lucidrains · b6fecae91a · fix another pydantic 2 migration error · 2023-10-18 21:07:47 -07:00
lucidrains · dab2f74650 · fix self_attn type on unetconfig · 2023-10-18 21:02:50 -07:00
lucidrains · 1e173f4c66 · more fixes to config · 2023-10-18 20:27:32 -07:00
lucidrains · 410a6144e1 · new einops is torch compile friendly · 2023-10-18 15:45:09 -07:00
lucidrains · c6c3882dc1 · fix all optional types in train config · 2023-10-07 11:34:34 -07:00
Phil Wang · 512b52bd78 · 1.15.2 · 2023-10-04 09:38:46 -07:00
Neil Kim Nielsen · 147c156c8a · Make TrackerLoadConfig optional (#306) · 2023-10-04 09:38:30 -07:00
Phil Wang · 40843bcc21 · pydantic 2 · 2023-07-15 09:32:44 -07:00
Phil Wang · 00e07b7d61 · force einops 0.6.1 or greater and call allow_ops_in_compiled_graph · 2023-04-20 14:08:52 -07:00
5 changed files with 46 additions and 41 deletions
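Most of the churn below is the mechanical half of the pydantic v1 to v2 migration described by the commit messages. Two v2 behavior changes account for nearly every one-line edit in train_configs.py: a `None` default no longer implicitly widens a field's annotation to Optional, and an `Optional[...]` annotation no longer implies a `None` default. A minimal sketch of both rules (class and field names here are illustrative, not from the repo):

```python
from typing import Optional
from pydantic import BaseModel

class ExampleConfig(BaseModel):
    # pydantic v1 accepted `steps: int = None` and silently treated the
    # field as Optional[int]; v2 keeps the annotation as written, so the
    # Optional must be spelled out explicitly
    steps: Optional[int] = None

    # pydantic v1 gave Optional fields an implicit default of None; in v2
    # an Optional field with no default is required, hence the `= None`
    # added throughout this diff (e.g. on TrackerConfig.load)
    load_path: Optional[str] = None
```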

configs/train_decoder_config.example.json

@@ -9,7 +9,7 @@
                 "dim_mults": [1, 2, 4, 8],
                 "attn_dim_head": 16,
                 "attn_heads": 4,
-                "self_attn": [false, true, true, true]
+                "self_attn": [false, true, true, true]
             }
         ],
         "clip": {

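The hunk above pairs with the UnetConfig change further down, where `self_attn` loosens from `ListOrTuple[int]` to `SingularOrIterable[bool] = False`. Under the new annotation the example config's per-stage list still validates, and a single boolean should as well. A hedged sketch (assumes `UnetConfig` imports from `dalle2_pytorch.train_configs`, and that a scalar is broadcast to every stage per the repo's usual cast-to-tuple convention):

```python
from dalle2_pytorch.train_configs import UnetConfig

# per-stage list, as in the example config above
unet_a = UnetConfig(dim = 128, dim_mults = [1, 2, 4, 8], self_attn = [False, True, True, True])

# single boolean, presumably applied to every stage
unet_b = UnetConfig(dim = 128, dim_mults = [1, 2, 4, 8], self_attn = True)
```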
dalle2_pytorch/train_configs.py

@@ -1,6 +1,6 @@
 import json
 from torchvision import transforms as T
-from pydantic import BaseModel, validator, root_validator
+from pydantic import BaseModel, validator, model_validator
 from typing import List, Optional, Union, Tuple, Dict, Any, TypeVar
 from x_clip import CLIP as XCLIP
@@ -38,12 +38,12 @@ class TrainSplitConfig(BaseModel):
     val: float = 0.15
     test: float = 0.1
 
-    @root_validator
-    def validate_all(cls, fields):
-        actual_sum = sum([*fields.values()])
+    @model_validator(mode = 'after')
+    def validate_all(self, m):
+        actual_sum = sum([*dict(self).values()])
         if actual_sum != 1.:
-            raise ValueError(f'{fields.keys()} must sum to 1.0. Found: {actual_sum}')
-        return fields
+            raise ValueError(f'{dict(self).keys()} must sum to 1.0. Found: {actual_sum}')
+        return self
 
 class TrackerLogConfig(BaseModel):
     log_type: str = 'console'
@@ -59,6 +59,7 @@ class TrackerLogConfig(BaseModel):
         kwargs = self.dict()
         return create_logger(self.log_type, data_path, **kwargs)
 
 class TrackerLoadConfig(BaseModel):
     load_from: Optional[str] = None
     only_auto_resume: bool = False # Only attempt to load if the logger is auto-resuming
@@ -89,7 +90,7 @@ class TrackerConfig(BaseModel):
     data_path: str = '.tracker_data'
     overwrite_data_path: bool = False
     log: TrackerLogConfig
-    load: Optional[TrackerLoadConfig]
+    load: Optional[TrackerLoadConfig] = None
     save: Union[List[TrackerSaveConfig], TrackerSaveConfig]
 
     def create(self, full_config: BaseModel, extra_config: dict, dummy_mode: bool = False) -> Tracker:
@@ -114,7 +115,7 @@
 class AdapterConfig(BaseModel):
     make: str = "openai"
     model: str = "ViT-L/14"
-    base_model_kwargs: Dict[str, Any] = None
+    base_model_kwargs: Optional[Dict[str, Any]] = None
 
     def create(self):
         if self.make == "openai":
@@ -133,8 +134,8 @@ class AdapterConfig(BaseModel):
 class DiffusionPriorNetworkConfig(BaseModel):
     dim: int
     depth: int
-    max_text_len: int = None
-    num_timesteps: int = None
+    max_text_len: Optional[int] = None
+    num_timesteps: Optional[int] = None
     num_time_embeds: int = 1
     num_image_embeds: int = 1
     num_text_embeds: int = 1
@@ -157,7 +158,7 @@ class DiffusionPriorNetworkConfig(BaseModel):
         return DiffusionPriorNetwork(**kwargs)
 
 class DiffusionPriorConfig(BaseModel):
-    clip: AdapterConfig = None
+    clip: Optional[AdapterConfig] = None
     net: DiffusionPriorNetworkConfig
     image_embed_dim: int
     image_size: int
@@ -194,7 +195,7 @@ class DiffusionPriorTrainConfig(BaseModel):
     use_ema: bool = True
     ema_beta: float = 0.99
     amp: bool = False
-    warmup_steps: int = None # number of warmup steps
+    warmup_steps: Optional[int] = None # number of warmup steps
     save_every_seconds: int = 3600 # how often to save
     eval_timesteps: List[int] = [64] # which sampling timesteps to evaluate with
     best_validation_loss: float = 1e9 # the current best validation loss observed
@@ -227,12 +228,12 @@ class TrainDiffusionPriorConfig(BaseModel):
 class UnetConfig(BaseModel):
     dim: int
     dim_mults: ListOrTuple[int]
-    image_embed_dim: int = None
-    text_embed_dim: int = None
-    cond_on_text_encodings: bool = None
-    cond_dim: int = None
+    image_embed_dim: Optional[int] = None
+    text_embed_dim: Optional[int] = None
+    cond_on_text_encodings: Optional[bool] = None
+    cond_dim: Optional[int] = None
     channels: int = 3
-    self_attn: ListOrTuple[int]
+    self_attn: SingularOrIterable[bool] = False
     attn_dim_head: int = 32
     attn_heads: int = 16
     init_cross_embed: bool = True
@@ -242,14 +243,14 @@
 class DecoderConfig(BaseModel):
     unets: ListOrTuple[UnetConfig]
-    image_size: int = None
+    image_size: Optional[int] = None
     image_sizes: ListOrTuple[int] = None
-    clip: Optional[AdapterConfig] # The clip model to use if embeddings are not provided
+    clip: Optional[AdapterConfig] = None # The clip model to use if embeddings are not provided
     channels: int = 3
     timesteps: int = 1000
     sample_timesteps: Optional[SingularOrIterable[Optional[int]]] = None
     loss_type: str = 'l2'
-    beta_schedule: ListOrTuple[str] = None # None means all cosine
+    beta_schedule: Optional[ListOrTuple[str]] = None # None means all cosine
     learned_variance: SingularOrIterable[bool] = True
     image_cond_drop_prob: float = 0.1
     text_cond_drop_prob: float = 0.5
@@ -277,9 +278,9 @@
         extra = "allow"
 
 class DecoderDataConfig(BaseModel):
-    webdataset_base_url: str # path to a webdataset with jpg images
-    img_embeddings_url: Optional[str] # path to .npy files with embeddings
-    text_embeddings_url: Optional[str] # path to .npy files with embeddings
+    webdataset_base_url: str # path to a webdataset with jpg images
+    img_embeddings_url: Optional[str] = None # path to .npy files with embeddings
+    text_embeddings_url: Optional[str] = None # path to .npy files with embeddings
     num_workers: int = 4
     batch_size: int = 64
     start_shard: int = 0
@@ -319,20 +320,20 @@ class DecoderTrainConfig(BaseModel):
     n_sample_images: int = 6 # The number of example images to produce when sampling the train and test dataset
     cond_scale: Union[float, List[float]] = 1.0
     device: str = 'cuda:0'
-    epoch_samples: int = None # Limits the number of samples per epoch. None means no limit. Required if resample_train is true as otherwise the number of samples per epoch is infinite.
-    validation_samples: int = None # Same as above but for validation.
+    epoch_samples: Optional[int] = None # Limits the number of samples per epoch. None means no limit. Required if resample_train is true as otherwise the number of samples per epoch is infinite.
+    validation_samples: Optional[int] = None # Same as above but for validation.
     save_immediately: bool = False
     use_ema: bool = True
     ema_beta: float = 0.999
     amp: bool = False
-    unet_training_mask: ListOrTuple[bool] = None # If None, use all unets
+    unet_training_mask: Optional[ListOrTuple[bool]] = None # If None, use all unets
 
 class DecoderEvaluateConfig(BaseModel):
     n_evaluation_samples: int = 1000
-    FID: Dict[str, Any] = None
-    IS: Dict[str, Any] = None
-    KID: Dict[str, Any] = None
-    LPIPS: Dict[str, Any] = None
+    FID: Optional[Dict[str, Any]] = None
+    IS: Optional[Dict[str, Any]] = None
+    KID: Optional[Dict[str, Any]] = None
+    LPIPS: Optional[Dict[str, Any]] = None
 
 class TrainDecoderConfig(BaseModel):
     decoder: DecoderConfig
@@ -346,11 +347,14 @@ class TrainDecoderConfig(BaseModel):
     def from_json_path(cls, json_path):
         with open(json_path) as f:
             config = json.load(f)
+        print(config)
         return cls(**config)
 
-    @root_validator
-    def check_has_embeddings(cls, values):
+    @model_validator(mode = 'after')
+    def check_has_embeddings(self, m):
         # Makes sure that enough information is provided to get the embeddings specified for training
+        values = dict(self)
         data_config, decoder_config = values.get('data'), values.get('decoder')
         if not exists(data_config) or not exists(decoder_config):
@@ -375,4 +379,4 @@ class TrainDecoderConfig(BaseModel):
         if text_emb_url:
             assert using_text_embeddings, "Text embeddings are being loaded, but text embeddings are not being conditioned on. This will slow down the dataloader for no reason."
 
-        return values
+        return m
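The validator rewrites in this file track pydantic v2's API: `@root_validator`, which received a raw `values` dict, becomes `@model_validator(mode = 'after')`, which runs on the fully constructed instance and must return it; the code wraps `dict(self)` to keep the old dict-style lookups. A self-contained sketch of the pattern (the `train` field and its 0.75 default are assumed from context so the splits sum to 1.0; only `val` and `test` appear in the hunk above):

```python
from pydantic import BaseModel, model_validator

class SplitConfig(BaseModel):
    train: float = 0.75
    val: float = 0.15
    test: float = 0.1

    @model_validator(mode = 'after')
    def validate_all(self):
        # iterating a v2 model yields (field, value) pairs, so dict(self)
        # recovers the mapping the old root_validator received as `values`
        actual_sum = sum(dict(self).values())
        if actual_sum != 1.:
            raise ValueError(f'{list(dict(self))} must sum to 1.0. Found: {actual_sum}')
        return self # 'after' validators return the validated instance

SplitConfig(train = 0.8, val = 0.1, test = 0.1) # passes validation
```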

dalle2_pytorch/version.py

@@ -1 +1 @@
-__version__ = '1.14.0'
+__version__ = '1.15.6'

setup.py

@@ -30,13 +30,13 @@ setup(
     'clip-anytorch>=2.5.2',
     'coca-pytorch>=0.0.5',
     'ema-pytorch>=0.0.7',
-    'einops>=0.6',
+    'einops>=0.7.0',
     'embedding-reader',
     'kornia>=0.5.4',
     'numpy',
     'packaging',
     'pillow',
-    'pydantic',
+    'pydantic>=2',
     'pytorch-warmup',
     'resize-right>=0.0.2',
     'rotary-embedding-torch',
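The two pins track the commit trail: `pydantic>=2` for the migration above, and `einops>=0.7.0` because 0.7 traces cleanly under `torch.compile`, removing the explicit opt-in that commit 00e07b7d61 added for einops 0.6.1. A hedged sketch of the difference (assumes einops >= 0.6.1, where the opt-in helper first appeared):

```python
import einops
import torch
from packaging import version

# einops >= 0.7.0 is torch.compile friendly out of the box; on 0.6.1 the
# explicit registration below was needed to avoid graph breaks
if version.parse(einops.__version__) < version.parse('0.7.0'):
    from einops._torch_specific import allow_ops_in_compiled_graph
    allow_ops_in_compiled_graph()

class Net(torch.nn.Module):
    def forward(self, x):
        return einops.rearrange(x, 'b c h w -> b (h w) c')

compiled = torch.compile(Net())
out = compiled(torch.randn(2, 3, 8, 8)) # shape (2, 64, 3)
```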

train_decoder.py

@@ -511,7 +511,7 @@ def train(
         if next_task == 'eval':
             if exists(evaluate_config):
                 accelerator.print(print_ribbon(f"Starting Evaluation {epoch}", repeat=40))
-                evaluation = evaluate_trainer(trainer, dataloaders["val"], inference_device, first_trainable_unet, last_trainable_unet, clip=clip, inference_device=inference_device, **evaluate_config.dict(), condition_on_text_encodings=condition_on_text_encodings, cond_scale=cond_scale)
+                evaluation = evaluate_trainer(trainer, dataloaders["val"], inference_device, first_trainable_unet, last_trainable_unet, clip=clip, inference_device=inference_device, **evaluate_config.model_dump(), condition_on_text_encodings=condition_on_text_encodings, cond_scale=cond_scale)
             if is_master:
                 tracker.log(evaluation, step=step())
             next_task = 'sample'
@@ -548,7 +548,7 @@ def create_tracker(accelerator: Accelerator, config: TrainDecoderConfig, config_
     accelerator.wait_for_everyone() # If nodes arrive at this point at different times they might try to autoresume the current run which makes no sense and will cause errors
     tracker: Tracker = tracker_config.create(config, accelerator_config, dummy_mode=dummy)
     tracker.save_config(config_path, config_name='decoder_config.json')
-    tracker.add_save_metadata(state_dict_key='config', metadata=config.dict())
+    tracker.add_save_metadata(state_dict_key='config', metadata=config.model_dump())
     return tracker
 
 def initialize_training(config: TrainDecoderConfig, config_path):
@@ -577,6 +577,7 @@ def initialize_training(config: TrainDecoderConfig, config_path):
     shards_per_process = len(all_shards) // world_size
     assert shards_per_process > 0, "Not enough shards to split evenly"
     my_shards = all_shards[rank * shards_per_process: (rank + 1) * shards_per_process]
+
     dataloaders = create_dataloaders (
         available_shards=my_shards,
         img_preproc = config.data.img_preproc,
@@ -584,7 +585,7 @@ def initialize_training(config: TrainDecoderConfig, config_path):
         val_prop = config.data.splits.val,
         test_prop = config.data.splits.test,
         n_sample_images=config.train.n_sample_images,
-        **config.data.dict(),
+        **config.data.model_dump(),
         rank = rank,
         seed = config.seed,
     )
@@ -635,7 +636,7 @@ def initialize_training(config: TrainDecoderConfig, config_path):
         inference_device=accelerator.device,
         evaluate_config=config.evaluate,
         condition_on_text_encodings=conditioning_on_text,
-        **config.train.dict(),
+        **config.train.model_dump(),
     )
 
 # Create a simple click command line interface to load the config and start the training
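The train_decoder.py edits are the call-site half of the same migration: pydantic v2 renames `BaseModel.dict()` to `model_dump()` (the v1 name still works but emits a deprecation warning). The pattern in isolation, with a stand-in model:

```python
from pydantic import BaseModel

class EvaluateConfig(BaseModel): # stand-in for the repo's DecoderEvaluateConfig
    n_evaluation_samples: int = 1000

cfg = EvaluateConfig()
kwargs = cfg.model_dump() # v2 spelling of the old cfg.dict()
assert kwargs == {'n_evaluation_samples': 1000}
```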