Compare commits

4 Commits
1.15.4 ... main

Author      SHA1        Message                                  Date
lucidrains  680dfc4d93  yet more pydantic v2 stuff               2023-10-19 07:40:57 -07:00
lucidrains  b6fecae91a  fix another pydantic 2 migration error   2023-10-18 21:07:47 -07:00
lucidrains  dab2f74650  fix self_attn type on unetconfig         2023-10-18 21:02:50 -07:00
lucidrains  1e173f4c66  more fixes to config                     2023-10-18 20:27:32 -07:00
4 changed files with 8 additions and 8 deletions

View File

@@ -9,7 +9,7 @@
"dim_mults": [1, 2, 4, 8],
"attn_dim_head": 16,
"attn_heads": 4,
"self_attn": [false, true, true, true]
"self_attn": [false, true, true, true]
}
],
"clip": {

View File

@@ -233,7 +233,7 @@ class UnetConfig(BaseModel):
cond_on_text_encodings: Optional[bool] = None
cond_dim: Optional[int] = None
channels: int = 3
- self_attn: ListOrTuple[int]
+ self_attn: SingularOrIterable[bool] = False
attn_dim_head: int = 32
attn_heads: int = 16
init_cross_embed: bool = True
@@ -245,7 +245,7 @@ class DecoderConfig(BaseModel):
unets: ListOrTuple[UnetConfig]
image_size: Optional[int] = None
image_sizes: ListOrTuple[int] = None
- clip: Optional[AdapterConfig]  # The clip model to use if embeddings are not provided
+ clip: Optional[AdapterConfig] = None  # The clip model to use if embeddings are not provided
channels: int = 3
timesteps: int = 1000
sample_timesteps: Optional[SingularOrIterable[Optional[int]]] = None
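
These two field changes track pydantic v2's stricter treatment of defaults: in v2, annotating a field as Optional no longer implies a default of None, so a field without an explicit default becomes required, and self_attn is relaxed to accept either a single flag or one flag per unet stage. A minimal sketch of both behaviors, using a plain Union as a stand-in for the repo's SingularOrIterable helper (an assumption about what that alias does):

from typing import List, Optional, Union
from pydantic import BaseModel, ValidationError

# Stand-in for SingularOrIterable[bool]: one flag, or one flag per stage (assumption).
SingularOrIterableBool = Union[bool, List[bool]]

class UnetConfigSketch(BaseModel):
    channels: int = 3
    self_attn: SingularOrIterableBool = False   # single bool or per-stage list
    clip: Optional[dict] = None                 # pydantic v2 needs the explicit default

UnetConfigSketch()                                       # ok, every field has a default
UnetConfigSketch(self_attn=[False, True, True, True])    # ok, per-stage list as in the JSON config

class WithoutDefault(BaseModel):
    clip: Optional[dict]                        # no default: required under pydantic v2

try:
    WithoutDefault()                            # raises, 'clip' is now a missing field
except ValidationError as err:
    print(err)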

View File

@@ -1 +1 @@
- __version__ = '1.15.4'
+ __version__ = '1.15.6'

View File

@@ -511,7 +511,7 @@ def train(
if next_task == 'eval':
if exists(evaluate_config):
accelerator.print(print_ribbon(f"Starting Evaluation {epoch}", repeat=40))
- evaluation = evaluate_trainer(trainer, dataloaders["val"], inference_device, first_trainable_unet, last_trainable_unet, clip=clip, inference_device=inference_device, **evaluate_config.dict(), condition_on_text_encodings=condition_on_text_encodings, cond_scale=cond_scale)
+ evaluation = evaluate_trainer(trainer, dataloaders["val"], inference_device, first_trainable_unet, last_trainable_unet, clip=clip, inference_device=inference_device, **evaluate_config.model_dump(), condition_on_text_encodings=condition_on_text_encodings, cond_scale=cond_scale)
if is_master:
tracker.log(evaluation, step=step())
next_task = 'sample'
@@ -548,7 +548,7 @@ def create_tracker(accelerator: Accelerator, config: TrainDecoderConfig, config_
accelerator.wait_for_everyone() # If nodes arrive at this point at different times they might try to autoresume the current run which makes no sense and will cause errors
tracker: Tracker = tracker_config.create(config, accelerator_config, dummy_mode=dummy)
tracker.save_config(config_path, config_name='decoder_config.json')
- tracker.add_save_metadata(state_dict_key='config', metadata=config.dict())
+ tracker.add_save_metadata(state_dict_key='config', metadata=config.model_dump())
return tracker
def initialize_training(config: TrainDecoderConfig, config_path):
@@ -585,7 +585,7 @@ def initialize_training(config: TrainDecoderConfig, config_path):
val_prop = config.data.splits.val,
test_prop = config.data.splits.test,
n_sample_images=config.train.n_sample_images,
- **config.data.dict(),
+ **config.data.model_dump(),
rank = rank,
seed = config.seed,
)
@@ -636,7 +636,7 @@ def initialize_training(config: TrainDecoderConfig, config_path):
inference_device=accelerator.device,
evaluate_config=config.evaluate,
condition_on_text_encodings=conditioning_on_text,
- **config.train.dict(),
+ **config.train.model_dump(),
)
# Create a simple click command line interface to load the config and start the training
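
The hunks in the training script are the mechanical part of the pydantic v2 migration: BaseModel.dict() is deprecated in v2 in favor of model_dump(), which returns the same plain-dict representation and can still be unpacked into keyword arguments with **. A minimal sketch of the pattern, with a hypothetical config model rather than the repo's TrainDecoderConfig:

from pydantic import BaseModel

class TrainSectionSketch(BaseModel):
    epochs: int = 20
    lr: float = 1e-4
    use_ema: bool = True

def train(epochs: int, lr: float, use_ema: bool, **kwargs):
    print(f"training for {epochs} epochs at lr={lr}, ema={use_ema}")

cfg = TrainSectionSketch()

# pydantic v1 style, still works under v2 but emits a deprecation warning:
#   train(**cfg.dict())

# pydantic v2 style, as used in these commits:
train(**cfg.model_dump())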