Mirror of https://github.com/lucidrains/DALLE2-pytorch.git (synced 2026-02-12 11:34:29 +01:00)

Compare commits (6 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 984d62a373 | |
| | 683dd98b96 | |
| | 067ac323da | |
| | 91c8d1ca13 | |
| | 08238a7200 | |
| | 7166ad6711 | |
```diff
@@ -360,6 +360,7 @@ class OpenAIClipAdapter(BaseClipAdapter):
         is_eos_id = (text == self.eos_id)
         text_mask_excluding_eos = is_eos_id.cumsum(dim = -1) == 0
         text_mask = F.pad(text_mask_excluding_eos, (1, -1), value = True)
+        text_mask = text_mask & (text != 0)
         assert not self.cleared
 
         text_embed = self.clip.encode_text(text)
```
```diff
@@ -434,6 +435,7 @@ class OpenClipAdapter(BaseClipAdapter):
         is_eos_id = (text == self.eos_id)
         text_mask_excluding_eos = is_eos_id.cumsum(dim = -1) == 0
         text_mask = F.pad(text_mask_excluding_eos, (1, -1), value = True)
+        text_mask = text_mask & (text != 0)
         assert not self.cleared
 
         text_embed = self.clip.encode_text(text)
```
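Both adapter hunks add the same `text_mask = text_mask & (text != 0)` line, so zero padding is dropped from the text mask even when the EOS-cumsum trick alone would keep it. Below is a toy sketch of the mask construction, not repo code; the token ids and the missing-EOS scenario are invented for illustration:

```python
# Toy walk-through of the text mask logic above; ids and the missing-EOS case are made up.
import torch
import torch.nn.functional as F

eos_id = 49407                                       # CLIP's end-of-text id
text   = torch.tensor([[49406, 320, 2368, 0, 0]])    # start token, two word tokens, zero padding, no EOS

is_eos_id = (text == eos_id)
text_mask_excluding_eos = is_eos_id.cumsum(dim = -1) == 0            # True up to (but excluding) EOS
text_mask = F.pad(text_mask_excluding_eos, (1, -1), value = True)    # shift right so EOS itself stays True

print(text_mask)                   # tensor([[True, True, True, True, True]])   <- padding leaks in (no EOS found)
print(text_mask & (text != 0))     # tensor([[True, True, True, False, False]]) <- the added line masks the padding
```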
```diff
@@ -1320,7 +1322,7 @@ class DiffusionPrior(nn.Module):
         elif self.predict_x_start:
             x_start = pred
         else:
-            x_start = self.noise_scheduler.predict_start_from_noise(image_embed, t = time_cond, noise = pred_noise)
+            x_start = self.noise_scheduler.predict_start_from_noise(image_embed, t = time_cond, noise = pred)
 
         # clip x0 before maybe predicting noise
 
```
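In this branch the network output `pred` is the predicted noise, so it is what must be fed to `predict_start_from_noise`. For context, a minimal sketch of the standard DDPM relation such a helper computes; the signature below is an assumption for illustration, not the repo's scheduler API:

```python
# Standard DDPM reconstruction of x_0 from a noisy sample and predicted noise:
#     x_0 = sqrt(1 / alpha_bar_t) * x_t - sqrt(1 / alpha_bar_t - 1) * noise
# Hypothetical helper, shown only to illustrate why `pred` (the noise) is the right argument.
import torch

def predict_start_from_noise(x_t, alphas_cumprod, t, noise):
    alpha_bar = alphas_cumprod[t].view(-1, *([1] * (x_t.ndim - 1)))   # broadcast per-sample alpha_bar
    return torch.sqrt(1. / alpha_bar) * x_t - torch.sqrt(1. / alpha_bar - 1.) * noise
```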
```diff
@@ -2494,7 +2496,7 @@ class Decoder(nn.Module):
         dynamic_thres_percentile = 0.95,
         p2_loss_weight_gamma = 0., # p2 loss weight, from https://arxiv.org/abs/2204.00227 - 0 is equivalent to weight of 1 across time - 1. is recommended
         p2_loss_weight_k = 1,
-        ddim_sampling_eta = 1. # can be set to 0. for deterministic sampling afaict
+        ddim_sampling_eta = 0. # can be set to 0. for deterministic sampling afaict
     ):
         super().__init__()
 
```
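The default now matches the deterministic DDIM setting. As a reminder of how eta enters DDIM sampling (standard formulation from Song et al., https://arxiv.org/abs/2010.02502; shown for context, not taken from the Decoder code):

```python
# eta scales the per-step noise in DDIM:
#     sigma_t = eta * sqrt((1 - alpha_bar_prev) / (1 - alpha_bar_t)) * sqrt(1 - alpha_bar_t / alpha_bar_prev)
# With eta = 0 the noise term vanishes and sampling becomes deterministic; eta = 1 is DDPM-like.
import math

def ddim_sigma(eta, alpha_bar_t, alpha_bar_prev):
    return eta * math.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar_t)) \
               * math.sqrt(1 - alpha_bar_t / alpha_bar_prev)

print(ddim_sigma(0., alpha_bar_t = 0.5, alpha_bar_prev = 0.8))   # 0.0, deterministic step
print(ddim_sigma(1., alpha_bar_t = 0.5, alpha_bar_prev = 0.8))   # > 0, stochastic step
```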
```diff
@@ -4,11 +4,13 @@ from pydantic import BaseModel, validator, root_validator
 from typing import List, Optional, Union, Tuple, Dict, Any, TypeVar
 
 from x_clip import CLIP as XCLIP
+from open_clip import list_pretrained
 from coca_pytorch import CoCa
 
 from dalle2_pytorch.dalle2_pytorch import (
     CoCaAdapter,
     OpenAIClipAdapter,
+    OpenClipAdapter,
     Unet,
     Decoder,
     DiffusionPrior,
```
```diff
@@ -117,6 +119,10 @@ class AdapterConfig(BaseModel):
     def create(self):
         if self.make == "openai":
             return OpenAIClipAdapter(self.model)
+        elif self.make == "open_clip":
+            pretrained = dict(list_pretrained())
+            checkpoint = pretrained[self.model]
+            return OpenClipAdapter(name=self.model, pretrained=checkpoint)
         elif self.make == "x-clip":
             return XClipAdapter(XCLIP(**self.base_model_kwargs))
         elif self.make == "coca":
```
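`open_clip.list_pretrained()` returns `(model_name, pretrained_tag)` pairs, so the `dict()` lookup resolves a default checkpoint tag for the chosen architecture. A hedged sketch of how the new branch might be exercised; the model name is only an example, and any config fields beyond `make` and `model` (which appear in the diff) are assumptions:

```python
# Hypothetical usage of the new "open_clip" branch (model name is an example).
from dalle2_pytorch.train_configs import AdapterConfig

adapter_config = AdapterConfig(make = "open_clip", model = "ViT-B-32")
clip = adapter_config.create()   # builds an OpenClipAdapter with the resolved pretrained checkpoint
```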
```diff
@@ -236,7 +236,7 @@ class DiffusionPriorTrainer(nn.Module):
         )
 
         if exists(cosine_decay_max_steps):
-            self.scheduler = CosineAnnealingLR(optimizer, T_max = cosine_decay_max_steps)
+            self.scheduler = CosineAnnealingLR(self.optimizer, T_max = cosine_decay_max_steps)
         else:
             self.scheduler = LambdaLR(self.optimizer, lr_lambda = lambda _: 1.0)
 
```
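The fix makes the cosine branch wrap the optimizer stored on the trainer (`self.optimizer`), the same instance the constant-LR branch already uses, instead of a bare `optimizer` name. A self-contained sketch of the corrected pattern with placeholder model and hyperparameters:

```python
# Generic sketch of the scheduler setup after the fix; model, lr and step count are placeholders.
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR

model = torch.nn.Linear(8, 8)
optimizer = torch.optim.AdamW(model.parameters(), lr = 1e-4)

cosine_decay_max_steps = 10_000   # set to None for a constant learning rate
if cosine_decay_max_steps is not None:
    scheduler = CosineAnnealingLR(optimizer, T_max = cosine_decay_max_steps)
else:
    scheduler = LambdaLR(optimizer, lr_lambda = lambda _: 1.0)
```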
```diff
@@ -1 +1 @@
-__version__ = '1.11.1'
+__version__ = '1.12.0'
```