Mirror of https://github.com/lucidrains/DALLE2-pytorch.git (synced 2025-12-19 01:34:19 +01:00)

Commit: pydantic 2
dalle2_pytorch/train_configs.py
@@ -1,6 +1,6 @@
 import json
 from torchvision import transforms as T
-from pydantic import BaseModel, validator, root_validator
+from pydantic import BaseModel, validator, model_validator
 from typing import List, Optional, Union, Tuple, Dict, Any, TypeVar

 from x_clip import CLIP as XCLIP
@@ -38,12 +38,12 @@ class TrainSplitConfig(BaseModel):
     val: float = 0.15
     test: float = 0.1

-    @root_validator
-    def validate_all(cls, fields):
-        actual_sum = sum([*fields.values()])
+    @model_validator(mode = 'after')
+    def validate_all(self, m):
+        actual_sum = sum([*dict(self).values()])
         if actual_sum != 1.:
-            raise ValueError(f'{fields.keys()} must sum to 1.0. Found: {actual_sum}')
-        return fields
+            raise ValueError(f'{dict(self).keys()} must sum to 1.0. Found: {actual_sum}')
+        return self

 class TrackerLogConfig(BaseModel):
     log_type: str = 'console'
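For context on the hunk above: pydantic 2 replaces `@root_validator` with `@model_validator`, and in `mode = 'after'` the hook runs on the already-constructed instance rather than on a raw field dict. A minimal standalone sketch of the same pattern (the class name is a hypothetical stand-in, not the repo's class):

from pydantic import BaseModel, model_validator

class SplitConfig(BaseModel):   # hypothetical stand-in for TrainSplitConfig
    train: float = 0.75
    val: float = 0.15
    test: float = 0.1

    @model_validator(mode = 'after')
    def validate_all(self):
        # iterating a BaseModel yields (field, value) pairs, so dict(self)
        # recovers the mapping the old root_validator used to receive
        actual_sum = sum(dict(self).values())
        if actual_sum != 1.:
            raise ValueError(f'{dict(self).keys()} must sum to 1.0. Found: {actual_sum}')
        return self

SplitConfig()                   # ok: 0.75 + 0.15 + 0.1 == 1.0
# SplitConfig(train = 0.9)      # would raise: fields sum to 1.15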
@@ -59,6 +59,7 @@ class TrackerLogConfig(BaseModel):
         kwargs = self.dict()
         return create_logger(self.log_type, data_path, **kwargs)

+
 class TrackerLoadConfig(BaseModel):
     load_from: Optional[str] = None
     only_auto_resume: bool = False # Only attempt to load if the logger is auto-resuming
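Aside: `self.dict()` in the context lines above still works under pydantic 2, but only as a deprecated alias; the supported spelling is `model_dump()`. For any pydantic 2 model instance `cfg`:

kwargs = cfg.model_dump()   # pydantic 2 replacement for the deprecated cfg.dict()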
@@ -277,9 +278,9 @@ class DecoderConfig(BaseModel):
         extra = "allow"

 class DecoderDataConfig(BaseModel):
     webdataset_base_url: str                     # path to a webdataset with jpg images
-    img_embeddings_url: Optional[str]            # path to .npy files with embeddings
-    text_embeddings_url: Optional[str]           # path to .npy files with embeddings
+    img_embeddings_url: Optional[str] = None     # path to .npy files with embeddings
+    text_embeddings_url: Optional[str] = None    # path to .npy files with embeddings
     num_workers: int = 4
     batch_size: int = 64
     start_shard: int = 0
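The `= None` additions are required because pydantic 2, unlike pydantic 1, no longer treats an `Optional[...]` annotation as implicitly defaulted: without an explicit default the field becomes required (its value may be None, but it must be passed). A minimal repro, with hypothetical field names:

from typing import Optional
from pydantic import BaseModel

class WithDefault(BaseModel):
    url: Optional[str] = None    # optional in pydantic 2

class WithoutDefault(BaseModel):
    url: Optional[str]           # required in pydantic 2, despite Optional

WithDefault()                    # ok, url is None
# WithoutDefault()               # raises ValidationError: Field required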
@@ -346,11 +347,14 @@ class TrainDecoderConfig(BaseModel):
     def from_json_path(cls, json_path):
         with open(json_path) as f:
             config = json.load(f)
+            print(config)
         return cls(**config)

-    @root_validator
-    def check_has_embeddings(cls, values):
+    @model_validator(mode = 'after')
+    def check_has_embeddings(self, m):
         # Makes sure that enough information is provided to get the embeddings specified for training
+        values = dict(self)
+
         data_config, decoder_config = values.get('data'), values.get('decoder')

         if not exists(data_config) or not exists(decoder_config):
@@ -375,4 +379,4 @@ class TrainDecoderConfig(BaseModel):
         if text_emb_url:
             assert using_text_embeddings, "Text embeddings are being loaded, but text embeddings are not being conditioned on. This will slow down the dataloader for no reason."

-        return values
+        return m
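The interesting line in this validator is `values = dict(self)`: the old `@root_validator` hook received the raw field dict as `values`, while the `mode = 'after'` hook runs on the constructed model, so rebuilding the dict from the instance lets the `values.get(...)` checks below survive the migration untouched. A minimal sketch of the same bridge pattern, with hypothetical field names (note that pydantic 2's documented convention is to return the model instance from an after-validator, as `validate_all` does with `return self`):

from typing import Optional
from pydantic import BaseModel, model_validator

class Cfg(BaseModel):                # hypothetical stand-in for TrainDecoderConfig
    data: Optional[dict] = None
    decoder: Optional[dict] = None

    @model_validator(mode = 'after')
    def check_linked_fields(self):
        values = dict(self)          # same mapping the pydantic 1 root_validator saw
        if values.get('data') is None or values.get('decoder') is None:
            raise ValueError('data and decoder must both be set')
        return self

Cfg(data = {}, decoder = {})         # ok
# Cfg(data = {})                     # raises: decoder missing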
dalle2_pytorch/version.py
@@ -1 +1 @@
-__version__ = '1.14.2'
+__version__ = '1.15.1'
setup.py
@@ -36,7 +36,7 @@ setup(
     'numpy',
     'packaging',
     'pillow',
-    'pydantic',
+    'pydantic>=2',
     'pytorch-warmup',
     'resize-right>=0.0.2',
     'rotary-embedding-torch',
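Pinning `pydantic>=2` matches the code changes: `model_validator` simply does not exist in pydantic 1, so an environment that resolved to 1.x would now fail at import time. A quick sanity check for an existing environment:

import pydantic
print(pydantic.VERSION)   # should print a 2.x version for this commit to work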
train_decoder.py
@@ -577,6 +577,7 @@ def initialize_training(config: TrainDecoderConfig, config_path):
     shards_per_process = len(all_shards) // world_size
     assert shards_per_process > 0, "Not enough shards to split evenly"
     my_shards = all_shards[rank * shards_per_process: (rank + 1) * shards_per_process]

     dataloaders = create_dataloaders (
         available_shards=my_shards,
         img_preproc = config.data.img_preproc,
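For context on the lines above: shards are dealt out contiguously by rank, and the integer division means up to `world_size - 1` trailing shards can go unassigned. A small worked example with hypothetical numbers:

all_shards = list(range(10))                          # 10 shards
world_size = 4                                        # 4 training processes
shards_per_process = len(all_shards) // world_size    # 10 // 4 == 2
for rank in range(world_size):
    my_shards = all_shards[rank * shards_per_process: (rank + 1) * shards_per_process]
    print(rank, my_shards)                            # rank 0 -> [0, 1], ..., rank 3 -> [6, 7]
# shards 8 and 9 are never assigned, which the assert above does not catch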