add SV4D 2.0 (#440)

* add SV4D 2.0

* Combined sv4dv2 and sv4dv2_8views sampling scripts
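
A minimal invocation sketch of the combined script; the module path scripts/sampling/simple_video_sample_4d2.py is an assumption (file names are not shown in this diff), and the checkpoint filename selects between the sv4d2 and sv4d2_8views configs:

# Hypothetical direct call to the combined sampling entry point added in this
# commit; the module path is assumed, not shown in this diff.
from scripts.sampling.simple_video_sample_4d2 import sample

sample(
    input_path="assets/sv4d_videos/camel.gif",
    model_path="checkpoints/sv4d2_8views.safetensors",  # picks the 5-frame x 8-view config
    output_folder="outputs",
    num_steps=50,
    decoding_t=4,  # reduce if you run out of VRAM
)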

---------

Co-authored-by: Vikram Voleti <vikram@ip-26-0-153-234.us-west-2.compute.internal>
Author: chunhanyao-stable
Date: 2025-05-20 07:38:11 -07:00
Committed by: GitHub
Parent: 1659a1c09b
Commit: c3147b86db
44 changed files with 1000 additions and 116 deletions

@@ -13,9 +13,6 @@ from einops import rearrange, repeat
from omegaconf import ListConfig, OmegaConf
from PIL import Image, ImageSequence
from rembg import remove
from torch import autocast
from torchvision.transforms import ToTensor
from scripts.util.detection.nsfw_and_watermark_dectection import DeepFloydDataFiltering
from sgm.modules.autoencoding.temporal_ae import VideoDecoder
from sgm.modules.diffusionmodules.guiders import (
@@ -34,6 +31,8 @@ from sgm.modules.diffusionmodules.sampling import (
LinearMultistepSampler,
)
from sgm.util import default, instantiate_from_config
from torch import autocast
from torchvision.transforms import ToTensor
def load_module_gpu(model):
@@ -165,7 +164,16 @@ def read_video(
return images_v0
def preprocess_video(input_path, remove_bg=False, n_frames=21, W=576, H=576, output_folder=None, image_frame_ratio = 0.917):
def preprocess_video(
input_path,
remove_bg=False,
n_frames=21,
W=576,
H=576,
output_folder=None,
image_frame_ratio=0.917,
base_count=0,
):
print(f"preprocess {input_path}")
if output_folder is None:
output_folder = os.path.dirname(input_path)
@@ -199,7 +207,9 @@ def preprocess_video(input_path, remove_bg=False, n_frames=21, W=576, H=576, out
images = [Image.open(img_path) for img_path in all_img_paths]
if len(images) != n_frames:
raise ValueError(f"Input video contains {len(images)} frames, fewer than {n_frames} frames.")
raise ValueError(
f"Input video contains {len(images)} frames, fewer than {n_frames} frames."
)
# Remove background
for i, image in enumerate(images):
@@ -226,18 +236,28 @@ def preprocess_video(input_path, remove_bg=False, n_frames=21, W=576, H=576, out
else:
# assume the input image has white background
ret, mask = cv2.threshold(
(np.array(image).mean(-1) <= white_thresh).astype(np.uint8) * 255, 0, 255, cv2.THRESH_BINARY
(np.array(image).mean(-1) <= white_thresh).astype(np.uint8) * 255,
0,
255,
cv2.THRESH_BINARY,
)
x, y, w, h = cv2.boundingRect(mask)
box_coord[0] = min(box_coord[0], x)
box_coord[1] = min(box_coord[1], y)
box_coord[2] = max(box_coord[2], x + w)
box_coord[3] = max(box_coord[3], y + h)
box_square = max(original_center[0] - box_coord[0], original_center[1] - box_coord[1])
box_square = max(
original_center[0] - box_coord[0], original_center[1] - box_coord[1]
)
box_square = max(box_square, box_coord[2] - original_center[0])
box_square = max(box_square, box_coord[3] - original_center[1])
x, y, w, h = original_center[0] - box_square, original_center[1] - box_square, 2 * box_square, 2 * box_square
x, y = max(0, original_center[0] - box_square), max(
0, original_center[1] - box_square
)
w, h = min(image_arr.shape[0], 2 * box_square), min(
image_arr.shape[1], 2 * box_square
)
box_size = box_square * 2
for image in images:
@@ -245,15 +265,15 @@ def preprocess_video(input_path, remove_bg=False, n_frames=21, W=576, H=576, out
image = image.convert("RGBA")
image_arr = np.array(image)
side_len = (
int(box_size / image_frame_ratio)
if image_frame_ratio is not None
else in_w
int(box_size / image_frame_ratio) if image_frame_ratio is not None else in_w
)
padded_image = np.zeros((side_len, side_len, 4), dtype=np.uint8)
center = side_len // 2
box_size_w = min(w, box_size)
box_size_h = min(h, box_size)
padded_image[
center - box_size // 2 : center - box_size // 2 + box_size,
center - box_size // 2 : center - box_size // 2 + box_size,
center - box_size_w // 2 : center - box_size_w // 2 + box_size_w,
center - box_size_h // 2 : center - box_size_h // 2 + box_size_h,
] = image_arr[x : x + w, y : y + h]
rgba = Image.fromarray(padded_image).resize((W, H), Image.LANCZOS)
@@ -261,14 +281,14 @@ def preprocess_video(input_path, remove_bg=False, n_frames=21, W=576, H=576, out
rgba_arr = np.array(rgba) / 255.0
rgb = rgba_arr[..., :3] * rgba_arr[..., -1:] + (1 - rgba_arr[..., -1:])
image = (rgb * 255).astype(np.uint8)
images_v0.append(image)
base_count = len(glob(os.path.join(output_folder, "*.mp4"))) // 12
processed_file = os.path.join(output_folder, f"{base_count:06d}_process_input.mp4")
imageio.mimwrite(processed_file, images_v0, fps=10)
return processed_file
def sample_sv3d(
image,
num_frames: Optional[int] = None, # 21 for SV3D
@@ -326,6 +346,7 @@ def sample_sv3d(
with torch.no_grad():
with torch.autocast(device):
load_module_gpu(model.conditioner)
batch, batch_uc = get_batch_sv3d(
get_unique_embedder_keys_from_conditioner(model.conditioner),
value_dict,
@@ -341,6 +362,7 @@ def sample_sv3d(
"cond_frames_without_noise",
],
)
unload_module_gpu(model.conditioner)
for k in ["crossattn", "concat"]:
uc[k] = repeat(uc[k], "b ... -> b t ...", t=num_frames)
@@ -361,11 +383,17 @@ def sample_sv3d(
model.model, input, sigma, c, **additional_model_inputs
)
load_module_gpu(model.model)
load_module_gpu(model.denoiser)
samples_z = model.sampler(denoiser, randn, cond=c, uc=uc)
unload_module_gpu(model.model)
unload_module_gpu(model.denoiser)
unload_module_gpu(model.model)
load_module_gpu(model.first_stage_model)
model.en_and_decode_n_samples_a_time = decoding_t
samples_x = model.decode_first_stage(samples_z)
unload_module_gpu(model.first_stage_model)
samples_x[-1:] = value_dict["cond_frames_without_noise"]
samples = torch.clamp(samples_x, min=-1.0, max=1.0)
@@ -373,13 +401,17 @@ def sample_sv3d(
return samples
def decode_latents(model, samples_z, img_matrix, frame_indices, view_indices, timesteps):
def decode_latents(
model, samples_z, img_matrix, frame_indices, view_indices, timesteps
):
load_module_gpu(model.first_stage_model)
for t in frame_indices:
for v in view_indices:
if t != 0 and v != 0:
if True: # t != 0 and v != 0:
if isinstance(model.first_stage_model.decoder, VideoDecoder):
samples_x = model.decode_first_stage(samples_z[t, v][None], timesteps=timesteps)
samples_x = model.decode_first_stage(
samples_z[t, v][None], timesteps=timesteps
)
else:
samples_x = model.decode_first_stage(samples_z[t, v][None])
samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0)
@@ -555,12 +587,15 @@ def get_guider_no_st(options, key):
}
elif guider == "SpatiotemporalPredictionGuider":
max_scale = options.get("cfg", 1.5)
min_scale = options.get("min_cfg", 1.0)
guider_config = {
"target": "sgm.modules.diffusionmodules.guiders.SpatiotemporalPredictionGuider",
"params": {
"max_scale": max_scale,
"min_scale": min_scale,
"num_frames": options["num_frames"],
"num_views": options["num_views"],
**additional_guider_kwargs,
},
}
@@ -652,7 +687,7 @@ def init_sampling_no_st(
options = {} if options is None else options
num_rows, num_cols = 1, 1
steps = options.get("num_steps", 40)
steps = options.get("num_steps", 50)
sampler = [
"EulerEDMSampler",
"HeunEDMSampler",
@@ -688,6 +723,7 @@ def run_img2vid(
cond_motion=None,
cond_view=None,
decoding_t=None,
cond_mv=True,
):
options = version_dict["options"]
H = version_dict["H"]
@@ -714,7 +750,10 @@ def run_img2vid(
value_dict["is_image"] = 0
value_dict["is_webvid"] = 0
value_dict["image_only_indicator"] = 0
if cond_mv:
value_dict["image_only_indicator"] = 1.0
else:
value_dict["image_only_indicator"] = 0.0
cond_aug = 0.00
if cond_motion is not None:
@@ -722,8 +761,6 @@ def run_img2vid(
value_dict["cond_frames"] = (
cond_motion[:, None].repeat(1, cond_view.shape[0], 1, 1, 1).flatten(0, 1)
)
value_dict["cond_motion"] = cond_motion
value_dict["cond_view"] = cond_view
else:
value_dict["cond_frames_without_noise"] = image
value_dict["cond_frames"] = image + cond_aug * torch.randn_like(image)
@@ -760,46 +797,112 @@ def run_img2vid(
return samples
def prepare_inputs(frame_indices, img_matrix, v0, view_indices, model, version_dict, seed, polars, azims):
load_module_gpu(model.conditioner)
def prepare_inputs_forward_backward(
img_matrix,
view_indices,
frame_indices,
v0,
t0,
t1,
model,
version_dict,
seed,
polars,
azims,
):
# forward sampling
forward_frame_indices = frame_indices.copy()
image = img_matrix[t0][v0]
cond_motion = torch.cat([img_matrix[t][v0] for t in forward_frame_indices], 0)
cond_view = torch.cat([img_matrix[t0][v] for v in view_indices], 0)
forward_inputs = prepare_sampling(
version_dict,
model,
image,
seed,
polars,
azims,
cond_motion,
cond_view,
)
# backward sampling
backward_frame_indices = frame_indices[::-1].copy()
image = img_matrix[t1][v0]
cond_motion = torch.cat([img_matrix[t][v0] for t in backward_frame_indices], 0)
cond_view = torch.cat([img_matrix[t1][v] for v in view_indices], 0)
backward_inputs = prepare_sampling(
version_dict,
model,
image,
seed,
polars,
azims,
cond_motion,
cond_view,
)
return (
forward_inputs,
forward_frame_indices,
backward_inputs,
backward_frame_indices,
)
def prepare_inputs(
frame_indices,
img_matrix,
v0,
view_indices,
model,
version_dict,
seed,
polars,
azims,
):
load_module_gpu(model.conditioner)
# forward sampling
forward_frame_indices = frame_indices.copy()
t0 = forward_frame_indices[0]
image = img_matrix[t0][v0]
cond_motion = torch.cat([img_matrix[t][v0] for t in forward_frame_indices], 0)
cond_view = torch.cat([img_matrix[t0][v] for v in view_indices], 0)
forward_inputs = prepare_sampling(
version_dict,
model,
image,
seed,
polars,
azims,
cond_motion,
cond_view,
)
version_dict,
model,
image,
seed,
polars,
azims,
cond_motion,
cond_view,
)
# backward sampling
backward_frame_indices = frame_indices[
::-1
].copy()
backward_frame_indices = frame_indices[::-1].copy()
t0 = backward_frame_indices[0]
image = img_matrix[t0][v0]
cond_motion = torch.cat([img_matrix[t][v0] for t in backward_frame_indices], 0)
cond_view = torch.cat([img_matrix[t0][v] for v in view_indices], 0)
backward_inputs = prepare_sampling(
version_dict,
model,
image,
seed,
polars,
azims,
cond_motion,
cond_view,
)
version_dict,
model,
image,
seed,
polars,
azims,
cond_motion,
cond_view,
)
unload_module_gpu(model.conditioner)
return forward_inputs, forward_frame_indices, backward_inputs, backward_frame_indices
return (
forward_inputs,
forward_frame_indices,
backward_inputs,
backward_frame_indices,
)
def do_sample(
model,
@@ -854,6 +957,10 @@ def do_sample(
lambda y: y[k][: math.prod(num_samples)].to("cuda"), (c, uc)
)
if value_dict["image_only_indicator"] == 0:
c["cond_view"] *= 0
uc["cond_view"] *= 0
additional_model_inputs = {}
for k in batch2model_input:
if k == "image_only_indicator":
@@ -869,9 +976,12 @@ def do_sample(
SpatiotemporalPredictionGuider,
),
):
additional_model_inputs[k] = torch.zeros(
num_samples[0] * 2, num_samples[1]
).to("cuda")
additional_model_inputs[k] = (
torch.zeros(num_samples[0] * 2, num_samples[1]).to(
"cuda"
)
+ value_dict["image_only_indicator"]
)
else:
additional_model_inputs[k] = torch.zeros(num_samples).to(
"cuda"
@@ -886,11 +996,13 @@ def do_sample(
return model.denoiser(
model.model, input, sigma, c, **additional_model_inputs
)
load_module_gpu(model.model)
load_module_gpu(model.denoiser)
samples_z = sampler(denoiser, randn, cond=c, uc=uc)
unload_module_gpu(model.model)
unload_module_gpu(model.denoiser)
unload_module_gpu(model.model)
load_module_gpu(model.first_stage_model)
if isinstance(model.first_stage_model.decoder, VideoDecoder):
samples_x = model.decode_first_stage(
@@ -900,11 +1012,13 @@ def do_sample(
samples_x = model.decode_first_stage(samples_z)
samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0)
unload_module_gpu(model.first_stage_model)
if filter is not None:
samples = filter(samples)
if return_latents:
return samples, samples_z
return samples
@@ -931,6 +1045,7 @@ def prepare_sampling_(
num_samples = [num_samples, T]
else:
num_samples = [num_samples]
load_module_gpu(model.conditioner)
batch, batch_uc = get_batch(
get_unique_embedder_keys_from_conditioner(model.conditioner),
value_dict,
@@ -944,6 +1059,8 @@ def prepare_sampling_(
force_uc_zero_embeddings=force_uc_zero_embeddings,
force_cond_zero_embeddings=force_cond_zero_embeddings,
)
unload_module_gpu(model.conditioner)
for k in c:
if not k == "crossattn":
c[k], uc[k] = map(
@@ -965,19 +1082,25 @@ def prepare_sampling_(
SpatiotemporalPredictionGuider,
),
):
additional_model_inputs[k] = torch.zeros(
num_samples[0] * 2, num_samples[1]
).to("cuda")
additional_model_inputs[k] = (
torch.zeros(num_samples[0] * 2, num_samples[1]).to(
"cuda"
)
+ value_dict["image_only_indicator"]
)
else:
additional_model_inputs[k] = torch.zeros(num_samples).to(
"cuda"
)
else:
additional_model_inputs[k] = batch[k]
return c, uc, additional_model_inputs
def do_sample_per_step(model, sampler, noisy_latents, c, uc, step, additional_model_inputs):
def do_sample_per_step(
model, sampler, noisy_latents, c, uc, step, additional_model_inputs
):
precision_scope = autocast
with torch.no_grad():
with precision_scope("cuda"):
@@ -1015,6 +1138,8 @@ def do_sample_per_step(model, sampler, noisy_latents, c, uc, step, additional_mo
uc,
gamma,
)
unload_module_gpu(model.denoiser)
unload_module_gpu(model.model)
return samples_z
@@ -1053,7 +1178,7 @@ def prepare_sampling(
value_dict["is_image"] = 0
value_dict["is_webvid"] = 0
value_dict["image_only_indicator"] = 0
value_dict["image_only_indicator"] = 1.0
cond_aug = 0.00
if cond_motion is not None:
@@ -1061,8 +1186,6 @@ def prepare_sampling(
value_dict["cond_frames"] = (
cond_motion[:, None].repeat(1, cond_view.shape[0], 1, 1, 1).flatten(0, 1)
)
value_dict["cond_motion"] = cond_motion
value_dict["cond_view"] = cond_view
else:
value_dict["cond_frames_without_noise"] = image
value_dict["cond_frames"] = image + cond_aug * torch.randn_like(image)
@@ -1073,8 +1196,6 @@ def prepare_sampling(
value_dict["cond_motion"] = cond_motion
value_dict["cond_view"] = cond_view
# seed_everything(seed)
options["num_frames"] = T
sampler, num_rows, num_cols = init_sampling_no_st(options=options)
num_samples = num_rows * num_cols
@@ -1269,6 +1390,7 @@ def load_model(
num_frames: int,
num_steps: int,
verbose: bool = False,
ckpt_path: str = None,
):
config = OmegaConf.load(config)
if device == "cuda":
@@ -1281,6 +1403,8 @@ def load_model(
config.model.params.sampler_config.params.guider_config.params.num_frames = (
num_frames
)
if ckpt_path is not None:
config.model.params.ckpt_path = ckpt_path
if device == "cuda":
with torch.device(device):
model = instantiate_from_config(config.model).to(device).eval()

@@ -23,6 +23,7 @@ model:
attention_resolutions: [4, 2, 1]
channel_mult: [1, 2, 4, 4]
context_dim: 1024
motion_context_dim: 4
extra_ff_mix_layer: True
in_channels: 8
legacy: False

@@ -0,0 +1,208 @@
N_TIME: 12
N_VIEW: 4
N_FRAMES: 48
model:
target: sgm.models.diffusion.DiffusionEngine
params:
scale_factor: 0.18215
en_and_decode_n_samples_a_time: 8
disable_first_stage_autocast: True
ckpt_path: checkpoints/sv4d2.safetensors
denoiser_config:
target: sgm.modules.diffusionmodules.denoiser.Denoiser
params:
scaling_config:
target: sgm.modules.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise
network_config:
target: sgm.modules.diffusionmodules.video_model.SpatialUNetModelWithTime
params:
adm_in_channels: 1280
attention_resolutions: [4, 2, 1]
channel_mult: [1, 2, 4, 4]
context_dim: 1024
motion_context_dim: 4
extra_ff_mix_layer: True
in_channels: 8
legacy: False
model_channels: 320
num_classes: sequential
num_head_channels: 64
num_res_blocks: 2
out_channels: 4
replicate_time_mix_bug: True
spatial_transformer_attn_type: softmax-xformers
time_block_merge_factor: 0.0
time_block_merge_strategy: learned_with_images
time_kernel_size: [3, 1, 1]
time_mix_legacy: False
transformer_depth: 1
use_checkpoint: False
use_linear_in_transformer: True
use_spatial_context: True
use_spatial_transformer: True
separate_motion_merge_factor: True
use_motion_attention: True
use_3d_attention: True
use_camera_emb: True
conditioner_config:
target: sgm.modules.GeneralConditioner
params:
emb_models:
- input_key: cond_frames_without_noise
target: sgm.modules.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
is_trainable: False
params:
n_cond_frames: ${N_TIME}
n_copies: 1
open_clip_embedding_config:
target: sgm.modules.encoders.modules.FrozenOpenCLIPImageEmbedder
params:
freeze: True
- input_key: cond_frames
target: sgm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
is_trainable: False
params:
is_ae: True
n_cond_frames: ${N_FRAMES}
n_copies: 1
encoder_config:
target: sgm.models.autoencoder.AutoencoderKLModeOnly
params:
ddconfig:
attn_resolutions: []
attn_type: vanilla-xformers
ch: 128
ch_mult: [1, 2, 4, 4]
double_z: True
dropout: 0.0
in_channels: 3
num_res_blocks: 2
out_ch: 3
resolution: 256
z_channels: 4
embed_dim: 4
lossconfig:
target: torch.nn.Identity
monitor: val/rec_loss
sigma_cond_config:
target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
params:
outdim: 256
sigma_sampler_config:
target: sgm.modules.diffusionmodules.sigma_sampling.ZeroSampler
- input_key: polar_rad
is_trainable: False
target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
params:
outdim: 512
- input_key: azimuth_rad
is_trainable: False
target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
params:
outdim: 512
- input_key: cond_view
is_trainable: False
target: sgm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
params:
is_ae: True
n_cond_frames: ${N_VIEW}
n_copies: 1
encoder_config:
target: sgm.models.autoencoder.AutoencoderKLModeOnly
params:
embed_dim: 4
monitor: val/rec_loss
ddconfig:
attn_resolutions: []
attn_type: vanilla-xformers
ch: 128
ch_mult: [1, 2, 4, 4]
double_z: True
dropout: 0.0
in_channels: 3
num_res_blocks: 2
out_ch: 3
resolution: 256
z_channels: 4
lossconfig:
target: torch.nn.Identity
sigma_sampler_config:
target: sgm.modules.diffusionmodules.sigma_sampling.ZeroSampler
- input_key: cond_motion
is_trainable: False
target: sgm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
params:
is_ae: True
n_cond_frames: ${N_TIME}
n_copies: 1
encoder_config:
target: sgm.models.autoencoder.AutoencoderKLModeOnly
params:
embed_dim: 4
monitor: val/rec_loss
ddconfig:
attn_resolutions: []
attn_type: vanilla-xformers
ch: 128
ch_mult: [1, 2, 4, 4]
double_z: True
dropout: 0.0
in_channels: 3
num_res_blocks: 2
out_ch: 3
resolution: 256
z_channels: 4
lossconfig:
target: torch.nn.Identity
sigma_sampler_config:
target: sgm.modules.diffusionmodules.sigma_sampling.ZeroSampler
first_stage_config:
target: sgm.models.autoencoder.AutoencodingEngine
params:
loss_config:
target: torch.nn.Identity
regularizer_config:
target: sgm.modules.autoencoding.regularizers.DiagonalGaussianRegularizer
encoder_config:
target: torch.nn.Identity
decoder_config:
target: sgm.modules.diffusionmodules.model.Decoder
params:
attn_resolutions: []
attn_type: vanilla-xformers
ch: 128
ch_mult: [1, 2, 4, 4]
double_z: True
dropout: 0.0
in_channels: 3
num_res_blocks: 2
out_ch: 3
resolution: 256
z_channels: 4
sampler_config:
target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
params:
num_steps: 50
discretization_config:
target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization
params:
sigma_max: 500.0
guider_config:
target: sgm.modules.diffusionmodules.guiders.SpatiotemporalPredictionGuider
params:
max_scale: 1.5
min_scale: 1.5
num_frames: ${N_FRAMES}
num_views: ${N_VIEW}
additional_cond_keys: [ cond_view, cond_motion ]
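
For reference, a minimal sketch of how a config like the one above is loaded and instantiated, mirroring the load_model changes earlier in this diff (assumes a CUDA device; the paths are the ones used in this commit):

import torch
from omegaconf import OmegaConf
from sgm.util import instantiate_from_config

# Load the SV4D 2.0 config; the checkpoint override mirrors the new ckpt_path
# argument added to load_model, and num_frames mirrors its guider_config
# override (N_TIME * N_VIEW = 48 for this config).
config = OmegaConf.load("scripts/sampling/configs/sv4d2.yaml")
config.model.params.ckpt_path = "checkpoints/sv4d2.safetensors"
config.model.params.sampler_config.params.guider_config.params.num_frames = 48

with torch.device("cuda"):
    model = instantiate_from_config(config.model).to("cuda").eval()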

@@ -0,0 +1,208 @@
N_TIME: 5
N_VIEW: 8
N_FRAMES: 40
model:
target: sgm.models.diffusion.DiffusionEngine
params:
scale_factor: 0.18215
en_and_decode_n_samples_a_time: 8
disable_first_stage_autocast: True
ckpt_path: checkpoints/sv4d2_8views.safetensors
denoiser_config:
target: sgm.modules.diffusionmodules.denoiser.Denoiser
params:
scaling_config:
target: sgm.modules.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise
network_config:
target: sgm.modules.diffusionmodules.video_model.SpatialUNetModelWithTime
params:
adm_in_channels: 1280
attention_resolutions: [4, 2, 1]
channel_mult: [1, 2, 4, 4]
context_dim: 1024
motion_context_dim: 4
extra_ff_mix_layer: True
in_channels: 8
legacy: False
model_channels: 320
num_classes: sequential
num_head_channels: 64
num_res_blocks: 2
out_channels: 4
replicate_time_mix_bug: True
spatial_transformer_attn_type: softmax-xformers
time_block_merge_factor: 0.0
time_block_merge_strategy: learned_with_images
time_kernel_size: [3, 1, 1]
time_mix_legacy: False
transformer_depth: 1
use_checkpoint: False
use_linear_in_transformer: True
use_spatial_context: True
use_spatial_transformer: True
separate_motion_merge_factor: True
use_motion_attention: True
use_3d_attention: False
use_camera_emb: True
conditioner_config:
target: sgm.modules.GeneralConditioner
params:
emb_models:
- input_key: cond_frames_without_noise
target: sgm.modules.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
is_trainable: False
params:
n_cond_frames: ${N_TIME}
n_copies: 1
open_clip_embedding_config:
target: sgm.modules.encoders.modules.FrozenOpenCLIPImageEmbedder
params:
freeze: True
- input_key: cond_frames
target: sgm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
is_trainable: False
params:
is_ae: True
n_cond_frames: ${N_FRAMES}
n_copies: 1
encoder_config:
target: sgm.models.autoencoder.AutoencoderKLModeOnly
params:
ddconfig:
attn_resolutions: []
attn_type: vanilla-xformers
ch: 128
ch_mult: [1, 2, 4, 4]
double_z: True
dropout: 0.0
in_channels: 3
num_res_blocks: 2
out_ch: 3
resolution: 256
z_channels: 4
embed_dim: 4
lossconfig:
target: torch.nn.Identity
monitor: val/rec_loss
sigma_cond_config:
target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
params:
outdim: 256
sigma_sampler_config:
target: sgm.modules.diffusionmodules.sigma_sampling.ZeroSampler
- input_key: polar_rad
is_trainable: False
target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
params:
outdim: 512
- input_key: azimuth_rad
is_trainable: False
target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
params:
outdim: 512
- input_key: cond_view
is_trainable: False
target: sgm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
params:
is_ae: True
n_cond_frames: ${N_VIEW}
n_copies: 1
encoder_config:
target: sgm.models.autoencoder.AutoencoderKLModeOnly
params:
embed_dim: 4
monitor: val/rec_loss
ddconfig:
attn_resolutions: []
attn_type: vanilla-xformers
ch: 128
ch_mult: [1, 2, 4, 4]
double_z: True
dropout: 0.0
in_channels: 3
num_res_blocks: 2
out_ch: 3
resolution: 256
z_channels: 4
lossconfig:
target: torch.nn.Identity
sigma_sampler_config:
target: sgm.modules.diffusionmodules.sigma_sampling.ZeroSampler
- input_key: cond_motion
is_trainable: False
target: sgm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
params:
is_ae: True
n_cond_frames: ${N_TIME}
n_copies: 1
encoder_config:
target: sgm.models.autoencoder.AutoencoderKLModeOnly
params:
embed_dim: 4
monitor: val/rec_loss
ddconfig:
attn_resolutions: []
attn_type: vanilla-xformers
ch: 128
ch_mult: [1, 2, 4, 4]
double_z: True
dropout: 0.0
in_channels: 3
num_res_blocks: 2
out_ch: 3
resolution: 256
z_channels: 4
lossconfig:
target: torch.nn.Identity
sigma_sampler_config:
target: sgm.modules.diffusionmodules.sigma_sampling.ZeroSampler
first_stage_config:
target: sgm.models.autoencoder.AutoencodingEngine
params:
loss_config:
target: torch.nn.Identity
regularizer_config:
target: sgm.modules.autoencoding.regularizers.DiagonalGaussianRegularizer
encoder_config:
target: torch.nn.Identity
decoder_config:
target: sgm.modules.diffusionmodules.model.Decoder
params:
attn_resolutions: []
attn_type: vanilla-xformers
ch: 128
ch_mult: [1, 2, 4, 4]
double_z: True
dropout: 0.0
in_channels: 3
num_res_blocks: 2
out_ch: 3
resolution: 256
z_channels: 4
sampler_config:
target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
params:
num_steps: 50
discretization_config:
target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization
params:
sigma_max: 500.0
guider_config:
target: sgm.modules.diffusionmodules.guiders.SpatiotemporalPredictionGuider
params:
max_scale: 2.0
min_scale: 1.5
num_frames: ${N_FRAMES}
num_views: ${N_VIEW}
additional_cond_keys: [ cond_view, cond_motion ]

@@ -163,7 +163,7 @@ def sample(
else:
with Image.open(input_img_path) as image:
if image.mode == "RGBA":
input_image = image.convert("RGB")
image = image.convert("RGB")
w, h = image.size
if h % 64 != 0 or w % 64 != 0:
@@ -172,7 +172,8 @@ def sample(
print(
f"WARNING: Your image is of size {h}x{w} which is not divisible by 64. We are resizing to {height}x{width}!"
)
input_image = np.array(image)
image = ToTensor()(input_image)
image = image * 2.0 - 1.0

@@ -27,7 +27,7 @@ from scripts.demo.sv4d_helpers import (
def sample(
input_path: str = "assets/test_video.mp4", # Can either be image file or folder with image files
input_path: str = "assets/sv4d_videos/test_video1.mp4", # Can either be image file or folder with image files
output_folder: Optional[str] = "outputs/sv4d",
num_steps: Optional[int] = 20,
sv3d_version: str = "sv3d_u", # sv3d_u or sv3d_p
@@ -71,7 +71,8 @@ def sample(
"f": F,
"options": {
"discretization": 1,
"cfg": 3.0,
"cfg": 2.0,
"num_views": V,
"sigma_min": 0.002,
"sigma_max": 700.0,
"rho": 7.0,
@@ -94,6 +95,7 @@ def sample(
# Read input video frames i.e. images at view 0
print(f"Reading {input_path}")
base_count = len(glob(os.path.join(output_folder, "*.mp4"))) // 11
processed_input_path = preprocess_video(
input_path,
remove_bg=remove_bg,
@@ -102,6 +104,7 @@ def sample(
H=H,
output_folder=output_folder,
image_frame_ratio=image_frame_ratio,
base_count=base_count,
)
images_v0 = read_video(processed_input_path, n_frames=n_frames, device=device)
@@ -145,15 +148,14 @@ def sample(
for t in range(n_frames):
img_matrix[t][0] = images_v0[t]
base_count = len(glob(os.path.join(output_folder, "*.mp4"))) // 12
save_video(
os.path.join(output_folder, f"{base_count:06d}_t000.mp4"),
img_matrix[0],
)
save_video(
os.path.join(output_folder, f"{base_count:06d}_v000.mp4"),
[img_matrix[t][0] for t in range(n_frames)],
)
# save_video(
# os.path.join(output_folder, f"{base_count:06d}_v000.mp4"),
# [img_matrix[t][0] for t in range(n_frames)],
# )
# Load SV4D model
model, filter = load_model(

@@ -0,0 +1,235 @@
import os
import sys
from glob import glob
from typing import List, Optional
from tqdm import tqdm
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), "../../")))
import numpy as np
import torch
from fire import Fire
from scripts.demo.sv4d_helpers import (
load_model,
preprocess_video,
read_video,
run_img2vid,
save_video,
)
from sgm.modules.encoders.modules import VideoPredictionEmbedderWithEncoder
sv4d2_configs = {
"sv4d2": {
"T": 12, # number of frames per sample
"V": 4, # number of views per sample
"model_config": "scripts/sampling/configs/sv4d2.yaml",
"version_dict": {
"T": 12 * 4,
"options": {
"discretization": 1,
"cfg": 2.0,
"min_cfg": 2.0,
"num_views": 4,
"sigma_min": 0.002,
"sigma_max": 700.0,
"rho": 7.0,
"guider": 2,
"force_uc_zero_embeddings": [
"cond_frames",
"cond_frames_without_noise",
"cond_view",
"cond_motion",
],
"additional_guider_kwargs": {
"additional_cond_keys": ["cond_view", "cond_motion"]
},
},
},
},
"sv4d2_8views": {
"T": 5, # number of frames per sample
"V": 8, # number of views per sample
"model_config": "scripts/sampling/configs/sv4d2_8views.yaml",
"version_dict": {
"T": 5 * 8,
"options": {
"discretization": 1,
"cfg": 2.5,
"min_cfg": 1.5,
"num_views": 8,
"sigma_min": 0.002,
"sigma_max": 700.0,
"rho": 7.0,
"guider": 5,
"force_uc_zero_embeddings": [
"cond_frames",
"cond_frames_without_noise",
"cond_view",
"cond_motion",
],
"additional_guider_kwargs": {
"additional_cond_keys": ["cond_view", "cond_motion"]
},
},
},
},
}
def sample(
input_path: str = "assets/sv4d_videos/camel.gif", # Can either be image file or folder with image files
model_path: Optional[str] = "checkpoints/sv4d2.safetensors",
output_folder: Optional[str] = "outputs",
num_steps: Optional[int] = 50,
img_size: int = 576, # image resolution
n_frames: int = 21, # number of input and output video frames
seed: int = 23,
encoding_t: int = 8, # Number of frames encoded at a time! This eats most VRAM. Reduce if necessary.
decoding_t: int = 4, # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
device: str = "cuda",
elevations_deg: Optional[List[float]] = 0.0,
azimuths_deg: Optional[List[float]] = None,
image_frame_ratio: Optional[float] = 0.9,
verbose: Optional[bool] = False,
remove_bg: bool = False,
):
"""
Simple script to generate multiple novel-view videos conditioned on a video `input_path` or multiple frames, one for each
image file in folder `input_path`. If you run out of VRAM, try decreasing `decoding_t` and `encoding_t`.
"""
# Set model config
assert os.path.basename(model_path) in [
"sv4d2.safetensors",
"sv4d2_8views.safetensors",
]
sv4d2_model = os.path.splitext(os.path.basename(model_path))[0]
config = sv4d2_configs[sv4d2_model]
print(sv4d2_model, config)
T = config["T"]
V = config["V"]
model_config = config["model_config"]
version_dict = config["version_dict"]
F = 8 # vae factor to downsize image->latent
C = 4
H, W = img_size, img_size
n_views = V + 1 # number of output video views (1 input view + V novel views)
subsampled_views = np.arange(n_views)
version_dict["H"] = H
version_dict["W"] = W
version_dict["C"] = C
version_dict["f"] = F
version_dict["options"]["num_steps"] = num_steps
torch.manual_seed(seed)
output_folder = os.path.join(output_folder, sv4d2_model)
os.makedirs(output_folder, exist_ok=True)
# Read input video frames i.e. images at view 0
print(f"Reading {input_path}")
base_count = len(glob(os.path.join(output_folder, "*.mp4"))) // n_views
processed_input_path = preprocess_video(
input_path,
remove_bg=remove_bg,
n_frames=n_frames,
W=W,
H=H,
output_folder=output_folder,
image_frame_ratio=image_frame_ratio,
base_count=base_count,
)
images_v0 = read_video(processed_input_path, n_frames=n_frames, device=device)
images_t0 = torch.zeros(n_views, 3, H, W).float().to(device)
# Get camera viewpoints
if isinstance(elevations_deg, float) or isinstance(elevations_deg, int):
elevations_deg = [elevations_deg] * n_views
assert (
len(elevations_deg) == n_views
), f"Please provide 1 value, or a list of {n_views} values for elevations_deg! Given {len(elevations_deg)}"
if azimuths_deg is None:
# azimuths_deg = np.linspace(0, 360, n_views + 1)[1:] % 360
azimuths_deg = (
np.array([0, 60, 120, 180, 240])
if sv4d2_model == "sv4d2"
else np.array([0, 30, 75, 120, 165, 210, 255, 300, 330])
)
assert (
len(azimuths_deg) == n_views
), f"Please provide a list of {n_views} values for azimuths_deg! Given {len(azimuths_deg)}"
polars_rad = np.array([np.deg2rad(90 - e) for e in elevations_deg])
azimuths_rad = np.array(
[np.deg2rad((a - azimuths_deg[-1]) % 360) for a in azimuths_deg]
)
# Initialize image matrix
img_matrix = [[None] * n_views for _ in range(n_frames)]
for i, v in enumerate(subsampled_views):
img_matrix[0][i] = images_t0[v].unsqueeze(0)
for t in range(n_frames):
img_matrix[t][0] = images_v0[t]
# Load SV4D 2.0 model
model, _ = load_model(
model_config,
device,
version_dict["T"],
num_steps,
verbose,
model_path,
)
model.en_and_decode_n_samples_a_time = decoding_t
for emb in model.conditioner.embedders:
if isinstance(emb, VideoPredictionEmbedderWithEncoder):
emb.en_and_decode_n_samples_a_time = encoding_t
# Sampling novel-view videos
v0 = 0
view_indices = np.arange(V) + 1
t0_list = (
range(0, n_frames, T)
if sv4d2_model == "sv4d2"
else range(0, n_frames - T + 1, T - 1)
)
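# With the default n_frames=21: sv4d2 (T=12) visits t0 in {0, 12}, and the clamp
# below shifts the second window to t0 = 21 - 12 = 9; sv4d2_8views (T=5) slides by
# T - 1 = 4, so t0 takes {0, 4, 8, 12, 16} and each window starts on the previous
# window's last frame, whose freshly generated novel views serve as cond_view for
# the next window (cond_mv is True for every window after the first).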
for t0 in tqdm(t0_list):
if t0 + T > n_frames:
t0 = n_frames - T
frame_indices = t0 + np.arange(T)
print(f"Sampling frames {frame_indices}")
image = img_matrix[t0][v0]
cond_motion = torch.cat([img_matrix[t][v0] for t in frame_indices], 0)
cond_view = torch.cat([img_matrix[t0][v] for v in view_indices], 0)
polars = polars_rad[subsampled_views[1:]][None].repeat(T, 0).flatten()
azims = azimuths_rad[subsampled_views[1:]][None].repeat(T, 0).flatten()
polars = (polars - polars_rad[v0] + torch.pi / 2) % (torch.pi * 2)
azims = (azims - azimuths_rad[v0]) % (torch.pi * 2)
cond_mv = False if t0 == 0 else True
samples = run_img2vid(
version_dict,
model,
image,
seed,
polars,
azims,
cond_motion,
cond_view,
decoding_t,
cond_mv=cond_mv,
)
samples = samples.view(T, V, 3, H, W)
for i, t in enumerate(frame_indices):
for j, v in enumerate(view_indices):
img_matrix[t][v] = samples[i, j][None] * 2 - 1
# Save output videos
for v in view_indices:
vid_file = os.path.join(output_folder, f"{base_count:06d}_v{v:03d}.mp4")
print(f"Saving {vid_file}")
save_video(
vid_file,
[img_matrix[t][v] for t in range(n_frames) if img_matrix[t][v] is not None],
)
if __name__ == "__main__":
Fire(sample)