mirror of https://github.com/lucidrains/DALLE2-pytorch.git
synced 2025-12-19 09:44:19 +01:00
make training splits into its own pydantic base model, validate it sums to 1, make decoder script cleaner
@@ -1,5 +1,5 @@
 from torchvision import transforms as T
-from pydantic import BaseModel, validator
+from pydantic import BaseModel, validator, root_validator
 from typing import List, Iterable, Optional, Union, Tuple, Dict, Any
 
 def exists(val):
@@ -38,6 +38,17 @@ class DecoderConfig(BaseModel):
     class Config:
         extra = "allow"
 
+class TrainSplitConfig(BaseModel):
+    train: float = 0.75
+    val: float = 0.15
+    test: float = 0.1
+
+    @root_validator
+    def validate_all(cls, fields):
+        if sum([*fields.values()]) != 1.:
+            raise ValueError(f'{fields.keys()} must sum to 1.0')
+        return fields
+
 class DecoderDataConfig(BaseModel):
     webdataset_base_url: str  # path to a webdataset with jpg images
     embeddings_url: str       # path to .npy files with embeddings
@@ -47,11 +58,7 @@ class DecoderDataConfig(BaseModel):
     end_shard: int = 9999999
     shard_width: int = 6
     index_width: int = 4
-    splits: Dict[str, float] = {
-        'train': 0.75,
-        'val': 0.15,
-        'test': 0.1
-    }
+    splits: TrainSplitConfig
     shuffle_train: bool = True
     resample_train: bool = False
     preprocessing: Dict[str, Any] = {'ToTensor': True}
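For reference, a minimal sketch of how the new split validation behaves (assumes pydantic v1, where a ValueError raised inside a root validator surfaces as a ValidationError at construction time; the non-default split values below are made up):

    from pydantic import BaseModel, root_validator, ValidationError

    # Same model as in the diff above, restated so the sketch is self-contained.
    class TrainSplitConfig(BaseModel):
        train: float = 0.75
        val: float = 0.15
        test: float = 0.1

        @root_validator
        def validate_all(cls, fields):
            if sum([*fields.values()]) != 1.:
                raise ValueError(f'{fields.keys()} must sum to 1.0')
            return fields

    TrainSplitConfig()                                      # ok: defaults 0.75 / 0.15 / 0.1
    TrainSplitConfig(train = 0.5, val = 0.25, test = 0.25)  # ok: sums to 1.0
    try:
        TrainSplitConfig(train = 0.7, val = 0.15, test = 0.1)  # sums to 0.95
    except ValidationError as err:
        print(err)  # the root validator's ValueError is reported here

Note the check uses exact float equality against 1.0, so split values whose binary-float sum is not exactly 1.0 would also be rejected.
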
setup.py
@@ -10,7 +10,7 @@ setup(
       'dream = dalle2_pytorch.cli:dream'
     ],
   },
-  version = '0.4.0',
+  version = '0.4.1',
   license='MIT',
   description = 'DALL-E 2',
   author = 'Phil Wang',

@@ -422,9 +422,9 @@ def initialize_training(config):
     dataloaders = create_dataloaders (
         available_shards=all_shards,
         img_preproc = config.img_preproc,
-        train_prop = config.data["splits"]["train"],
-        val_prop = config.data["splits"]["val"],
-        test_prop = config.data["splits"]["test"],
+        train_prop = config.data.splits.train,
+        val_prop = config.data.splits.val,
+        test_prop = config.data.splits.test,
         n_sample_images=config.train.n_sample_images,
         **config.data.dict()
     )
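The attribute access above works because splits is now a nested pydantic model rather than a plain dict: pydantic coerces the splits mapping from the config into a TrainSplitConfig. A minimal sketch of just that coercion (the DataSection wrapper is hypothetical and far smaller than the real DecoderDataConfig; TrainSplitConfig is as defined in the diff above):

    from pydantic import BaseModel

    # Hypothetical, trimmed-down stand-in for DecoderDataConfig.
    class DataSection(BaseModel):
        splits: TrainSplitConfig  # nested model instead of Dict[str, float]

    # A nested dict (e.g. loaded from the training config file) is coerced into
    # TrainSplitConfig and validated to sum to 1.0 on the way in.
    data = DataSection(splits = {'train': 0.75, 'val': 0.15, 'test': 0.1})
    print(data.splits.train)  # 0.75 -- replaces data["splits"]["train"]

In pydantic v1, .dict() serializes nested models back to plain dicts, so the **config.data.dict() expansion in the call above still passes splits through as a mapping.
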