Compare commits


2 Commits

Author      SHA1        Message                                Date
lucidrains  1e173f4c66  more fixes to config                   2023-10-18 20:27:32 -07:00
lucidrains  410a6144e1  new einops is torch compile friendly   2023-10-18 15:45:09 -07:00
5 changed files with 5 additions and 12 deletions

View File

@@ -9,7 +9,7 @@
       "dim_mults": [1, 2, 4, 8],
       "attn_dim_head": 16,
       "attn_heads": 4,
-      "self_attn": [false, true, true, true]
+      "self_attn": [false, true, true, true]
     }
   ],
   "clip": {

View File

@@ -1,10 +1,3 @@
-import torch
-from packaging import version
-
-if version.parse(torch.__version__) >= version.parse('2.0.0'):
-    from einops._torch_specific import allow_ops_in_compiled_graph
-    allow_ops_in_compiled_graph()
-
 from dalle2_pytorch.version import __version__
 from dalle2_pytorch.dalle2_pytorch import DALLE2, DiffusionPriorNetwork, DiffusionPrior, Unet, Decoder
 from dalle2_pytorch.dalle2_pytorch import OpenAIClipAdapter, OpenClipAdapter
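
Background for the removal above: per the commit message, einops 0.7.0 traces under torch.compile natively, so the allow_ops_in_compiled_graph() shim can be dropped once setup.py requires einops>=0.7.0 (see the last file below). A minimal sketch, assuming torch >= 2.0 and einops >= 0.7 are installed:

import torch
from einops import rearrange

@torch.compile
def to_tokens(images):
    # flatten spatial dims: (b, c, h, w) -> (b, h * w, c)
    return rearrange(images, 'b c h w -> b (h w) c')

# compiles without the old allow_ops_in_compiled_graph() workaround
print(to_tokens(torch.randn(2, 3, 8, 8)).shape)  # torch.Size([2, 64, 3])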

View File

@@ -233,7 +233,7 @@ class UnetConfig(BaseModel):
     cond_on_text_encodings: Optional[bool] = None
     cond_dim: Optional[int] = None
     channels: int = 3
-    self_attn: ListOrTuple[int]
+    self_attn: ListOrTuple[bool]
     attn_dim_head: int = 32
     attn_heads: int = 16
     init_cross_embed: bool = True
@@ -245,7 +245,7 @@ class DecoderConfig(BaseModel):
     unets: ListOrTuple[UnetConfig]
     image_size: Optional[int] = None
     image_sizes: ListOrTuple[int] = None
-    clip: Optional[AdapterConfig]          # The clip model to use if embeddings are not provided
+    clip: Optional[AdapterConfig] = None   # The clip model to use if embeddings are not provided
     channels: int = 3
     timesteps: int = 1000
     sample_timesteps: Optional[SingularOrIterable[Optional[int]]] = None
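
Both fixes above are pydantic field corrections: self_attn holds booleans (matching the config's [false, true, true, true]), and under pydantic v2 an Optional field with no default is still required, so clip needs an explicit = None to stay omittable. A minimal sketch with stand-in types (ClipStandIn and DecoderSketch are hypothetical; the repo uses AdapterConfig and its ListOrTuple helper):

from typing import List, Optional
from pydantic import BaseModel

class ClipStandIn(BaseModel):           # hypothetical stand-in for AdapterConfig
    make: str = 'openai'

class DecoderSketch(BaseModel):
    self_attn: List[bool]               # one flag per unet stage, as in the example config
    clip: Optional[ClipStandIn] = None  # without '= None', omitting clip would fail validation

# validates with clip omitted, thanks to the explicit default
print(DecoderSketch(self_attn=[False, True, True, True]))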

View File

@@ -1 +1 @@
-__version__ = '1.15.3'
+__version__ = '1.15.5'

View File

@@ -30,7 +30,7 @@ setup(
     'clip-anytorch>=2.5.2',
     'coca-pytorch>=0.0.5',
     'ema-pytorch>=0.0.7',
-    'einops>=0.6.1',
+    'einops>=0.7.0',
     'embedding-reader',
     'kornia>=0.5.4',
     'numpy',
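
A hedged sketch of a runtime guard mirroring the new dependency floor, for environments installed before this bump (einops exposes __version__, and packaging was already imported by the old __init__.py above):

from packaging import version
import einops

# per the commit message, the new einops (0.7.0) is the torch.compile friendly one
assert version.parse(einops.__version__) >= version.parse('0.7.0')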