Mirror of https://github.com/lucidrains/DALLE2-pytorch.git, synced 2026-02-14 18:04:26 +01:00

Compare commits (6 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 680dfc4d93 | |
| | b6fecae91a | |
| | dab2f74650 | |
| | 1e173f4c66 | |
| | 410a6144e1 | |
| | c6c3882dc1 | |
```diff
@@ -9,7 +9,7 @@
         "dim_mults": [1, 2, 4, 8],
         "attn_dim_head": 16,
         "attn_heads": 4,
         "self_attn": [false, true, true, true]
       }
     ],
     "clip": {
```
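The block above is the per-unet section of the example decoder config; files like it are parsed into the pydantic models changed below. A minimal hedged sketch of loading one (the path and the printed field access are illustrative, not taken from this diff):

```python
import json
from dalle2_pytorch.train_configs import TrainDecoderConfig

# hedged sketch: the path below is illustrative, assuming the repo's config layout
with open('configs/train_decoder_config.example.json') as f:
    config = TrainDecoderConfig(**json.load(f))

# per-unet flags such as self_attn line up with dim_mults,
# one entry per resolution stage: [false, true, true, true]
print(config.decoder.unets[0].self_attn)
```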
dalle2_pytorch/__init__.py

```diff
@@ -1,10 +1,3 @@
-import torch
-from packaging import version
-
-if version.parse(torch.__version__) >= version.parse('2.0.0'):
-    from einops._torch_specific import allow_ops_in_compiled_graph
-    allow_ops_in_compiled_graph()
-
 from dalle2_pytorch.version import __version__
 from dalle2_pytorch.dalle2_pytorch import DALLE2, DiffusionPriorNetwork, DiffusionPrior, Unet, Decoder
 from dalle2_pytorch.dalle2_pytorch import OpenAIClipAdapter, OpenClipAdapter
```
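The deleted shim existed because older einops releases broke under torch.compile; einops 0.7.0 handles compiled graphs itself, which is presumably why the shim is dropped here together with the einops>=0.7.0 floor in setup.py below. A self-contained sketch, assuming torch >= 2.0 and einops >= 0.7.0 (the PatchPool module is a toy, not from the repo):

```python
import torch
from torch import nn
from einops import rearrange

class PatchPool(nn.Module):
    # toy module that uses einops inside forward
    def forward(self, x):
        # average over 2x2 spatial patches: (b, c, h, w) -> (b, c, h/2, w/2)
        return rearrange(x, 'b c (h p1) (w p2) -> b c h w (p1 p2)', p1=2, p2=2).mean(dim=-1)

# with einops >= 0.7.0 this traces cleanly under torch.compile,
# without the old allow_ops_in_compiled_graph() shim
model = torch.compile(PatchPool())
out = model(torch.randn(1, 3, 8, 8))
```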
dalle2_pytorch/train_configs.py

```diff
@@ -115,7 +115,7 @@ class TrackerConfig(BaseModel):
 class AdapterConfig(BaseModel):
     make: str = "openai"
     model: str = "ViT-L/14"
-    base_model_kwargs: Dict[str, Any] = None
+    base_model_kwargs: Optional[Dict[str, Any]] = None
 
     def create(self):
         if self.make == "openai":
@@ -134,8 +134,8 @@ class AdapterConfig(BaseModel):
 class DiffusionPriorNetworkConfig(BaseModel):
     dim: int
     depth: int
-    max_text_len: int = None
-    num_timesteps: int = None
+    max_text_len: Optional[int] = None
+    num_timesteps: Optional[int] = None
     num_time_embeds: int = 1
     num_image_embeds: int = 1
     num_text_embeds: int = 1
@@ -158,7 +158,7 @@ class DiffusionPriorNetworkConfig(BaseModel):
         return DiffusionPriorNetwork(**kwargs)
 
 class DiffusionPriorConfig(BaseModel):
-    clip: AdapterConfig = None
+    clip: Optional[AdapterConfig] = None
     net: DiffusionPriorNetworkConfig
     image_embed_dim: int
     image_size: int
@@ -195,7 +195,7 @@ class DiffusionPriorTrainConfig(BaseModel):
     use_ema: bool = True
     ema_beta: float = 0.99
     amp: bool = False
-    warmup_steps: int = None           # number of warmup steps
+    warmup_steps: Optional[int] = None # number of warmup steps
     save_every_seconds: int = 3600     # how often to save
     eval_timesteps: List[int] = [64]   # which sampling timesteps to evaluate with
     best_validation_loss: float = 1e9  # the current best valudation loss observed
@@ -228,12 +228,12 @@ class TrainDiffusionPriorConfig(BaseModel):
 class UnetConfig(BaseModel):
     dim: int
     dim_mults: ListOrTuple[int]
-    image_embed_dim: int = None
-    text_embed_dim: int = None
-    cond_on_text_encodings: bool = None
-    cond_dim: int = None
+    image_embed_dim: Optional[int] = None
+    text_embed_dim: Optional[int] = None
+    cond_on_text_encodings: Optional[bool] = None
+    cond_dim: Optional[int] = None
     channels: int = 3
-    self_attn: ListOrTuple[int]
+    self_attn: SingularOrIterable[bool] = False
     attn_dim_head: int = 32
     attn_heads: int = 16
     init_cross_embed: bool = True
@@ -243,14 +243,14 @@ class UnetConfig(BaseModel):
 
 class DecoderConfig(BaseModel):
     unets: ListOrTuple[UnetConfig]
-    image_size: int = None
+    image_size: Optional[int] = None
     image_sizes: ListOrTuple[int] = None
-    clip: Optional[AdapterConfig]        # The clip model to use if embeddings are not provided
+    clip: Optional[AdapterConfig] = None # The clip model to use if embeddings are not provided
     channels: int = 3
     timesteps: int = 1000
     sample_timesteps: Optional[SingularOrIterable[Optional[int]]] = None
     loss_type: str = 'l2'
-    beta_schedule: ListOrTuple[str] = None           # None means all cosine
+    beta_schedule: Optional[ListOrTuple[str]] = None # None means all cosine
     learned_variance: SingularOrIterable[bool] = True
     image_cond_drop_prob: float = 0.1
     text_cond_drop_prob: float = 0.5
@@ -320,20 +320,20 @@ class DecoderTrainConfig(BaseModel):
     n_sample_images: int = 6  # The number of example images to produce when sampling the train and test dataset
     cond_scale: Union[float, List[float]] = 1.0
     device: str = 'cuda:0'
-    epoch_samples: int = None           # Limits the number of samples per epoch. None means no limit. Required if resample_train is true as otherwise the number of samples per epoch is infinite.
-    validation_samples: int = None      # Same as above but for validation.
+    epoch_samples: Optional[int] = None # Limits the number of samples per epoch. None means no limit. Required if resample_train is true as otherwise the number of samples per epoch is infinite.
+    validation_samples: Optional[int] = None # Same as above but for validation.
     save_immediately: bool = False
     use_ema: bool = True
     ema_beta: float = 0.999
     amp: bool = False
-    unet_training_mask: ListOrTuple[bool] = None # If None, use all unets
+    unet_training_mask: Optional[ListOrTuple[bool]] = None # If None, use all unets
 
 class DecoderEvaluateConfig(BaseModel):
     n_evaluation_samples: int = 1000
-    FID: Dict[str, Any] = None
-    IS: Dict[str, Any] = None
-    KID: Dict[str, Any] = None
-    LPIPS: Dict[str, Any] = None
+    FID: Optional[Dict[str, Any]] = None
+    IS: Optional[Dict[str, Any]] = None
+    KID: Optional[Dict[str, Any]] = None
+    LPIPS: Optional[Dict[str, Any]] = None
 
 class TrainDecoderConfig(BaseModel):
     decoder: DecoderConfig
```
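All of these annotation changes follow the same pydantic v2 rule: v1 silently widened a field like `warmup_steps: int = None` to `Optional[int]`, while v2 keeps the annotation exactly as written, so a `None` default needs an explicit `Optional[...]`. A minimal standalone sketch, assuming pydantic v2 (the class name is a hypothetical stand-in, not repo code):

```python
from typing import Optional
from pydantic import BaseModel, ValidationError

class PriorTrain(BaseModel):  # hypothetical stand-in for DiffusionPriorTrainConfig
    # v1 implicitly treated `warmup_steps: int = None` as Optional[int];
    # v2 keeps the annotation as written, so Optional must be explicit
    warmup_steps: Optional[int] = None

print(PriorTrain().warmup_steps)                  # None
print(PriorTrain(warmup_steps=500).warmup_steps)  # 500

try:
    PriorTrain(warmup_steps='soon')  # provided values are still type-checked
except ValidationError as err:
    print(err.errors()[0]['type'])   # 'int_parsing'
```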
dalle2_pytorch/version.py

```diff
@@ -1 +1 @@
-__version__ = '1.15.2'
+__version__ = '1.15.6'
```
setup.py

```diff
@@ -30,7 +30,7 @@ setup(
     'clip-anytorch>=2.5.2',
     'coca-pytorch>=0.0.5',
     'ema-pytorch>=0.0.7',
-    'einops>=0.6.1',
+    'einops>=0.7.0',
     'embedding-reader',
     'kornia>=0.5.4',
     'numpy',
```
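The einops floor moves in lockstep with the __init__.py cleanup above: dropping the shim is only safe once 0.7.0 is guaranteed. An illustrative sanity check one could run in an existing environment (not repo code):

```python
from importlib.metadata import version
from packaging.version import parse

# illustrative check: the shim removed from __init__.py is only
# safe to drop when the installed einops meets the new floor
assert parse(version('einops')) >= parse('0.7.0')
```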
train_decoder.py

```diff
@@ -511,7 +511,7 @@ def train(
         if next_task == 'eval':
             if exists(evaluate_config):
                 accelerator.print(print_ribbon(f"Starting Evaluation {epoch}", repeat=40))
-                evaluation = evaluate_trainer(trainer, dataloaders["val"], inference_device, first_trainable_unet, last_trainable_unet, clip=clip, inference_device=inference_device, **evaluate_config.dict(), condition_on_text_encodings=condition_on_text_encodings, cond_scale=cond_scale)
+                evaluation = evaluate_trainer(trainer, dataloaders["val"], inference_device, first_trainable_unet, last_trainable_unet, clip=clip, inference_device=inference_device, **evaluate_config.model_dump(), condition_on_text_encodings=condition_on_text_encodings, cond_scale=cond_scale)
                 if is_master:
                     tracker.log(evaluation, step=step())
             next_task = 'sample'
@@ -548,7 +548,7 @@ def create_tracker(accelerator: Accelerator, config: TrainDecoderConfig, config_
     accelerator.wait_for_everyone()  # If nodes arrive at this point at different times they might try to autoresume the current run which makes no sense and will cause errors
     tracker: Tracker = tracker_config.create(config, accelerator_config, dummy_mode=dummy)
     tracker.save_config(config_path, config_name='decoder_config.json')
-    tracker.add_save_metadata(state_dict_key='config', metadata=config.dict())
+    tracker.add_save_metadata(state_dict_key='config', metadata=config.model_dump())
     return tracker
 
 def initialize_training(config: TrainDecoderConfig, config_path):
@@ -585,7 +585,7 @@ def initialize_training(config: TrainDecoderConfig, config_path):
         val_prop = config.data.splits.val,
         test_prop = config.data.splits.test,
         n_sample_images=config.train.n_sample_images,
-        **config.data.dict(),
+        **config.data.model_dump(),
         rank = rank,
         seed = config.seed,
     )
@@ -636,7 +636,7 @@ def initialize_training(config: TrainDecoderConfig, config_path):
         inference_device=accelerator.device,
         evaluate_config=config.evaluate,
         condition_on_text_encodings=conditioning_on_text,
-        **config.train.dict(),
+        **config.train.model_dump(),
     )
 
 # Create a simple click command line interface to load the config and start the training
```
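Every change in this file is the same mechanical rename: pydantic v2 deprecates `BaseModel.dict()` in favor of `model_dump()`, which returns the same plain dict suitable for `**kwargs` splatting. A minimal sketch, assuming pydantic v2 (the model is a hypothetical stand-in for DecoderEvaluateConfig):

```python
from typing import Any, Dict, Optional
from pydantic import BaseModel

class EvalConfig(BaseModel):  # hypothetical stand-in, not repo code
    n_evaluation_samples: int = 1000
    FID: Optional[Dict[str, Any]] = None

cfg = EvalConfig(FID={'feature': 64})

# v2 renamed .dict() to .model_dump(); .dict() still works for now
# but emits a deprecation warning on every call
kwargs = cfg.model_dump()
print(kwargs)  # {'n_evaluation_samples': 1000, 'FID': {'feature': 64}}
```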