Mirror of https://github.com/Stability-AI/generative-models.git
SV4D: reduce memory consumption and speed up sampling
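The hunks below appear to touch the SV4D helper functions, the SV4D model config, and the 4D sampling script, and they combine three memory-saving tactics: sub-modules are moved onto the GPU only for the stage that needs them (`load_module_gpu` / `unload_module_gpu`), the autoencoder encodes and decodes only a few frames per call (`decoding_t`, `en_and_decode_n_samples_a_time`), and the conditioning for the dense sampling loop is prepared once up front (`prepare_inputs`) instead of being recomputed every denoising step. As a rough sketch of the first tactic only — the helper names are borrowed from the diff, but this is not the repository's implementation:

```python
import torch


def load_module_gpu(module: torch.nn.Module, device: str = "cuda") -> None:
    # Move a sub-module onto the GPU just before the stage that needs it.
    module.to(device)


def unload_module_gpu(module: torch.nn.Module) -> None:
    # Push it back to CPU afterwards so the cached blocks can be released.
    module.to("cpu")
    torch.cuda.empty_cache()
```

In the diff, this pattern brackets each stage in turn (conditioner, denoising UNet, first-stage decoder), so only one large sub-module occupies VRAM at a time.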
@@ -121,10 +121,6 @@ def save_video(file_name, imgs, fps=10):
 def read_video(
     input_path: str,
     n_frames: int,
-    W: int,
-    H: int,
-    remove_bg: bool = False,
-    image_frame_ratio: Optional[float] = None,
     device: str = "cuda",
 ):
     path = Path(input_path)
@@ -158,47 +154,121 @@ def read_video(
 
     if len(images) < n_frames:
         images = (images + images[::-1])[:n_frames]
 
     if len(images) != n_frames:
         raise ValueError(f"Input video contains fewer than {n_frames} frames.")
 
-    # Remove background and crop video frames
     images_v0 = []
-    for t, image in enumerate(images):
-        if remove_bg:
-            if image.mode != "RGBA":
-                image.thumbnail([W, H], Image.Resampling.LANCZOS)
-                image = remove(image.convert("RGBA"), alpha_matting=True)
-            image_arr = np.array(image)
-            in_w, in_h = image_arr.shape[:2]
-            ret, mask = cv2.threshold(
-                np.array(image.split()[-1]), 0, 255, cv2.THRESH_BINARY
-            )
-            x, y, w, h = cv2.boundingRect(mask)
-            max_size = max(w, h)
-            if t == 0:
-                side_len = (
-                    int(max_size / image_frame_ratio)
-                    if image_frame_ratio is not None
-                    else in_w
-                )
-            padded_image = np.zeros((side_len, side_len, 4), dtype=np.uint8)
-            center = side_len // 2
-            padded_image[
-                center - h // 2 : center - h // 2 + h,
-                center - w // 2 : center - w // 2 + w,
-            ] = image_arr[y : y + h, x : x + w]
-            rgba = Image.fromarray(padded_image).resize((W, H), Image.LANCZOS)
-            rgba_arr = np.array(rgba) / 255.0
-            rgb = rgba_arr[..., :3] * rgba_arr[..., -1:] + (1 - rgba_arr[..., -1:])
-            image = Image.fromarray((rgb * 255).astype(np.uint8))
-        else:
-            image = image.convert("RGB").resize((W, H), Image.LANCZOS)
+    for image in images:
         image = ToTensor()(image).unsqueeze(0).to(device)
         images_v0.append(image * 2.0 - 1.0)
     return images_v0
 
 
+def preprocess_video(input_path, remove_bg=False, n_frames=21, W=576, H=576, output_folder=None, image_frame_ratio=0.917):
+    print(f"preprocess {input_path}")
+    if output_folder is None:
+        output_folder = os.path.dirname(input_path)
+    path = Path(input_path)
+    is_video_file = False
+    all_img_paths = []
+    if path.is_file():
+        if any([input_path.endswith(x) for x in [".gif", ".mp4"]]):
+            is_video_file = True
+        else:
+            raise ValueError("Path is not a valid video file.")
+    elif path.is_dir():
+        all_img_paths = sorted(
+            [
+                f
+                for f in path.iterdir()
+                if f.is_file() and f.suffix.lower() in [".jpg", ".jpeg", ".png"]
+            ]
+        )[:n_frames]
+    elif "*" in input_path:
+        all_img_paths = sorted(glob(input_path))[:n_frames]
+    else:
+        raise ValueError
+
+    if is_video_file and input_path.endswith(".gif"):
+        images = read_gif(input_path, n_frames)[:n_frames]
+    elif is_video_file and input_path.endswith(".mp4"):
+        images = read_mp4(input_path, n_frames)[:n_frames]
+    else:
+        print(f"Loading {len(all_img_paths)} video frames...")
+        images = [Image.open(img_path) for img_path in all_img_paths]
+
+    if len(images) != n_frames:
+        raise ValueError(f"Input video contains {len(images)} frames, fewer than {n_frames} frames.")
+
+    # Remove background
+    for i, image in enumerate(images):
+        if remove_bg:
+            if image.mode == "RGBA":
+                pass
+            else:
+                # image.thumbnail([W, H], Image.Resampling.LANCZOS)
+                image = remove(image.convert("RGBA"), alpha_matting=True)
+        images[i] = image
+
+    # Crop video frames, assume the object is already in the center of the image
+    white_thresh = 250
+    images_v0 = []
+    box_coord = [np.inf, np.inf, 0, 0]
+    for image in images:
+        image_arr = np.array(image)
+        in_w, in_h = image_arr.shape[:2]
+        original_center = (in_w // 2, in_h // 2)
+        if image.mode == "RGBA":
+            ret, mask = cv2.threshold(
+                np.array(image.split()[-1]), 0, 255, cv2.THRESH_BINARY
+            )
+        else:
+            # assume the input image has white background
+            ret, mask = cv2.threshold(
+                (np.array(image).mean(-1) <= white_thresh).astype(np.uint8) * 255, 0, 255, cv2.THRESH_BINARY
+            )
+
+        x, y, w, h = cv2.boundingRect(mask)
+        box_coord[0] = min(box_coord[0], x)
+        box_coord[1] = min(box_coord[1], y)
+        box_coord[2] = max(box_coord[2], x + w)
+        box_coord[3] = max(box_coord[3], y + h)
+    box_square = max(original_center[0] - box_coord[0], original_center[1] - box_coord[1])
+    box_square = max(box_square, box_coord[2] - original_center[0])
+    box_square = max(box_square, box_coord[3] - original_center[1])
+    x, y, w, h = original_center[0] - box_square, original_center[1] - box_square, 2 * box_square, 2 * box_square
+    box_size = box_square * 2
+
+    for image in images:
+        if image.mode == "RGB":
+            image = image.convert("RGBA")
+        image_arr = np.array(image)
+        side_len = (
+            int(box_size / image_frame_ratio)
+            if image_frame_ratio is not None
+            else in_w
+        )
+        padded_image = np.zeros((side_len, side_len, 4), dtype=np.uint8)
+        center = side_len // 2
+        padded_image[
+            center - box_size // 2 : center - box_size // 2 + box_size,
+            center - box_size // 2 : center - box_size // 2 + box_size,
+        ] = image_arr[x : x + w, y : y + h]
+
+        rgba = Image.fromarray(padded_image).resize((W, H), Image.LANCZOS)
+        # rgba = image.resize((W, H), Image.LANCZOS)
+        rgba_arr = np.array(rgba) / 255.0
+        rgb = rgba_arr[..., :3] * rgba_arr[..., -1:] + (1 - rgba_arr[..., -1:])
+        image = (rgb * 255).astype(np.uint8)
+
+        images_v0.append(image)
+
+    base_count = len(glob(os.path.join(output_folder, "*.mp4"))) // 10
+    processed_file = os.path.join(output_folder, f"{base_count:06d}_process_input.mp4")
+    imageio.mimwrite(processed_file, images_v0, fps=10)
+    return processed_file
+
+
 def sample_sv3d(
     image,
     num_frames: Optional[int] = None,  # 21 for SV3D
@@ -212,26 +282,32 @@ def sample_sv3d(
     polar_rad: Optional[Union[float, List[float]]] = None,
     azim_rad: Optional[List[float]] = None,
     verbose: Optional[bool] = False,
+    sv3d_model=None,
 ):
     """
     Simple script to generate a single sample conditioned on an image `input_path` or multiple images, one for each
     image file in folder `input_path`. If you run out of VRAM, try decreasing `decoding_t`.
     """
 
-    if version == "sv3d_u":
-        model_config = "scripts/sampling/configs/sv3d_u.yaml"
-    elif version == "sv3d_p":
-        model_config = "scripts/sampling/configs/sv3d_p.yaml"
-    else:
-        raise ValueError(f"Version {version} does not exist.")
+    if sv3d_model is None:
+        if version == "sv3d_u":
+            model_config = "scripts/sampling/configs/sv3d_u.yaml"
+        elif version == "sv3d_p":
+            model_config = "scripts/sampling/configs/sv3d_p.yaml"
+        else:
+            raise ValueError(f"Version {version} does not exist.")
 
-    model, filter = load_model(
-        model_config,
-        device,
-        num_frames,
-        num_steps,
-        verbose,
-    )
+        model, filter = load_model(
+            model_config,
+            device,
+            num_frames,
+            num_steps,
+            verbose,
+        )
+    else:
+        model = sv3d_model
+
+    load_module_gpu(model)
 
     H, W = image.shape[2:]
     F = 8
@@ -286,25 +362,32 @@ def sample_sv3d(
     )
 
     samples_z = model.sampler(denoiser, randn, cond=c, uc=uc)
+    unload_module_gpu(model.model)
+    unload_module_gpu(model.denoiser)
     model.en_and_decode_n_samples_a_time = decoding_t
     samples_x = model.decode_first_stage(samples_z)
     samples_x[-1:] = value_dict["cond_frames_without_noise"]
     samples = torch.clamp(samples_x, min=-1.0, max=1.0)
 
-    return samples
-
-
-def decode_latents(model, samples_z, timesteps):
-    load_module_gpu(model.first_stage_model)
-    if isinstance(model.first_stage_model.decoder, VideoDecoder):
-        samples_x = model.decode_first_stage(samples_z, timesteps=timesteps)
-    else:
-        samples_x = model.decode_first_stage(samples_z)
-    samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0)
-    unload_module_gpu(model.first_stage_model)
+    unload_module_gpu(model)
     return samples
 
 
+def decode_latents(model, samples_z, img_matrix, frame_indices, view_indices, timesteps):
+    load_module_gpu(model.first_stage_model)
+    for t in frame_indices:
+        for v in view_indices:
+            if t != 0 and v != 0:
+                if isinstance(model.first_stage_model.decoder, VideoDecoder):
+                    samples_x = model.decode_first_stage(samples_z[t, v][None], timesteps=timesteps)
+                else:
+                    samples_x = model.decode_first_stage(samples_z[t, v][None])
+                samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0)
+                img_matrix[t][v] = samples * 2 - 1
+    unload_module_gpu(model.first_stage_model)
+    return img_matrix
+
+
 def init_embedder_options_no_st(keys, init_dict, prompt=None, negative_prompt=None):
     # Hardcoded demo settings; might undergo some changes in the future
 
@@ -604,6 +687,7 @@ def run_img2vid(
     azim_rad=np.linspace(0, 360, 21 + 1)[1:],
     cond_motion=None,
     cond_view=None,
+    decoding_t=None,
 ):
     options = version_dict["options"]
     H = version_dict["H"]
@@ -670,12 +754,53 @@ def run_img2vid(
         force_uc_zero_embeddings=options.get("force_uc_zero_embeddings", None),
         force_cond_zero_embeddings=options.get("force_cond_zero_embeddings", None),
         return_latents=False,
-        decoding_t=options.get("decoding_T", T),
+        decoding_t=decoding_t,
     )
 
     return samples
 
 
+def prepare_inputs(frame_indices, img_matrix, v0, view_indices, model, version_dict, seed, polars, azims):
+    load_module_gpu(model.conditioner)
+
+    forward_frame_indices = frame_indices.copy()
+    t0 = forward_frame_indices[0]
+    image = img_matrix[t0][v0]
+    cond_motion = torch.cat([img_matrix[t][v0] for t in forward_frame_indices], 0)
+    cond_view = torch.cat([img_matrix[t0][v] for v in view_indices], 0)
+    forward_inputs = prepare_sampling(
+        version_dict,
+        model,
+        image,
+        seed,
+        polars,
+        azims,
+        cond_motion,
+        cond_view,
+    )
+
+    # backward sampling
+    backward_frame_indices = frame_indices[
+        ::-1
+    ].copy()
+    t0 = backward_frame_indices[0]
+    image = img_matrix[t0][v0]
+    cond_motion = torch.cat([img_matrix[t][v0] for t in backward_frame_indices], 0)
+    cond_view = torch.cat([img_matrix[t0][v] for v in view_indices], 0)
+    backward_inputs = prepare_sampling(
+        version_dict,
+        model,
+        image,
+        seed,
+        polars,
+        azims,
+        cond_motion,
+        cond_view,
+    )
+
+    unload_module_gpu(model.conditioner)
+    return forward_inputs, forward_frame_indices, backward_inputs, backward_frame_indices
+
+
 def do_sample(
     model,
     sampler,
@@ -722,6 +847,8 @@ def do_sample(
                     force_cond_zero_embeddings=force_cond_zero_embeddings,
                 )
                 unload_module_gpu(model.conditioner)
+                print("anchor_after_condition {}".format(torch.cuda.memory_reserved() / (1024 ** 3)))
+                # torch.cuda.empty_cache()
 
                 for k in c:
                     if not k == "crossattn":
@@ -761,14 +888,15 @@ def do_sample(
                     return model.denoiser(
                         model.model, input, sigma, c, **additional_model_inputs
                     )
 
                 load_module_gpu(model.model)
                 load_module_gpu(model.denoiser)
                 samples_z = sampler(denoiser, randn, cond=c, uc=uc)
                 unload_module_gpu(model.model)
                 unload_module_gpu(model.denoiser)
+                print("anchor_after_denoiser {}".format(torch.cuda.memory_reserved() / (1024 ** 3)))
+                # torch.cuda.empty_cache()
                 load_module_gpu(model.first_stage_model)
+                model.en_and_decode_n_samples_a_time = decoding_t
                 if isinstance(model.first_stage_model.decoder, VideoDecoder):
                     samples_x = model.decode_first_stage(
                         samples_z, timesteps=default(decoding_t, T)
@@ -777,17 +905,16 @@ def do_sample(
                     samples_x = model.decode_first_stage(samples_z)
                 samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0)
                 unload_module_gpu(model.first_stage_model)
 
                 if filter is not None:
                     samples = filter(samples)
 
                 if return_latents:
                     return samples, samples_z
+                # torch.cuda.empty_cache()
                 return samples
 
 
-def do_sample_per_step(
+def prepare_sampling_(
     model,
     sampler,
     value_dict,
@@ -797,8 +924,6 @@ def do_sample_per_step(
     batch2model_input: List = None,
     T=None,
     additional_batch_uc_fields=None,
-    step=None,
-    noisy_latents=None,
 ):
     force_uc_zero_embeddings = default(force_uc_zero_embeddings, [])
     batch2model_input = default(batch2model_input, [])
@@ -812,8 +937,6 @@ def do_sample_per_step(
                     num_samples = [num_samples, T]
                 else:
                     num_samples = [num_samples]
-
-                load_module_gpu(model.conditioner)
                 batch, batch_uc = get_batch(
                     get_unique_embedder_keys_from_conditioner(model.conditioner),
                     value_dict,
@@ -827,8 +950,7 @@ def do_sample_per_step(
                     force_uc_zero_embeddings=force_uc_zero_embeddings,
                     force_cond_zero_embeddings=force_cond_zero_embeddings,
                 )
-                unload_module_gpu(model.conditioner)
+                print("dense_after_condition {}".format(torch.cuda.memory_reserved() / (1024 ** 3)))
 
                 for k in c:
                     if not k == "crossattn":
                         c[k], uc[k] = map(
@@ -859,7 +981,14 @@ def do_sample_per_step(
                         )
                     else:
                         additional_model_inputs[k] = batch[k]
+                return c, uc, additional_model_inputs
+
+
+def do_sample_per_step(model, sampler, noisy_latents, c, uc, step, additional_model_inputs):
+    precision_scope = autocast
+    with torch.no_grad():
+        with precision_scope("cuda"):
+            with model.ema_scope():
                 noisy_latents_scaled, s_in, sigmas, num_sigmas, _, _ = (
                     sampler.prepare_sampling_loop(
                         noisy_latents.clone(), c, uc, sampler.num_steps
@@ -893,13 +1022,11 @@ def do_sample_per_step(
                     uc,
                     gamma,
                 )
-                unload_module_gpu(model.model)
-                unload_module_gpu(model.denoiser)
+                print("dense_after_sampling {}".format(torch.cuda.memory_reserved() / (1024 ** 3)))
 
                 return samples_z
 
 
-def run_img2vid_per_step(
+def prepare_sampling(
     version_dict,
     model,
     image,
@@ -908,8 +1035,6 @@ def run_img2vid_per_step(
     azim_rad=np.linspace(0, 360, 21 + 1)[1:],
     cond_motion=None,
     cond_view=None,
-    step=None,
-    noisy_latents=None,
 ):
     options = version_dict["options"]
     H = version_dict["H"]
@@ -962,7 +1087,7 @@ def run_img2vid_per_step(
     sampler, num_rows, num_cols = init_sampling_no_st(options=options)
     num_samples = num_rows * num_cols
 
-    samples = do_sample_per_step(
+    c, uc, additional_model_inputs = prepare_sampling_(
         model,
         sampler,
         value_dict,
@@ -971,11 +1096,9 @@ def run_img2vid_per_step(
         force_cond_zero_embeddings=options.get("force_cond_zero_embeddings", None),
         batch2model_input=["num_video_frames", "image_only_indicator"],
         T=T,
-        step=step,
-        noisy_latents=noisy_latents,
     )
 
-    return samples
+    return c, uc, additional_model_inputs, sampler
 
 
 def get_unique_embedder_keys_from_conditioner(conditioner):
@@ -1,6 +1,7 @@
 N_TIME: 5
 N_VIEW: 8
 N_FRAMES: 40
+ENCODE_N_A_TIME: 8
 
 model:
   target: sgm.models.diffusion.DiffusionEngine
@@ -67,6 +68,7 @@ model:
           is_ae: True
           n_cond_frames: ${N_FRAMES}
           n_copies: 1
+          en_and_decode_n_samples_a_time: ${ENCODE_N_A_TIME}
           encoder_config:
             target: sgm.models.autoencoder.AutoencoderKLModeOnly
             params:
@@ -131,6 +133,7 @@ model:
           is_ae: True
           n_cond_frames: ${N_VIEW}
           n_copies: 1
+          en_and_decode_n_samples_a_time: ${ENCODE_N_A_TIME}
           sigma_sampler_config:
             target: sgm.modules.diffusionmodules.sigma_sampling.ZeroSampler
 
@@ -141,6 +144,7 @@ model:
           is_ae: True
           n_cond_frames: ${N_TIME}
           n_copies: 1
+          en_and_decode_n_samples_a_time: ${ENCODE_N_A_TIME}
           encoder_config:
             target: sgm.models.autoencoder.AutoencoderKLModeOnly
             params:
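The `ENCODE_N_A_TIME` / `en_and_decode_n_samples_a_time` values above make the frozen conditioning autoencoders process their inputs in chunks of 8 samples instead of all `N_FRAMES` at once. The idea is plain micro-batching, roughly as in this hypothetical helper (illustrative only, not the sgm implementation):

```python
import torch


def encode_n_a_time(encode_fn, x: torch.Tensor, n_a_time: int = 8) -> torch.Tensor:
    # Encode a large batch a few samples at a time so peak activation memory
    # scales with n_a_time rather than with the full batch size.
    chunks = [encode_fn(x[i : i + n_a_time]) for i in range(0, x.shape[0], n_a_time)]
    return torch.cat(chunks, dim=0)
```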
@@ -16,9 +16,12 @@ from scripts.demo.sv4d_helpers import (
     initial_model_load,
     read_video,
     run_img2vid,
-    run_img2vid_per_step,
+    prepare_sampling,
+    prepare_inputs,
+    do_sample_per_step,
     sample_sv3d,
     save_video,
+    preprocess_video,
 )
 
 
@@ -32,11 +35,11 @@ def sample(
     motion_bucket_id: int = 127,
     cond_aug: float = 1e-5,
     seed: int = 23,
-    decoding_t: int = 14,  # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
+    decoding_t: int = 4,  # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
     device: str = "cuda",
     elevations_deg: Optional[Union[float, List[float]]] = 10.0,
     azimuths_deg: Optional[List[float]] = None,
-    image_frame_ratio: Optional[float] = None,
+    image_frame_ratio: Optional[float] = 0.917,
     verbose: Optional[bool] = False,
     remove_bg: bool = False,
 ):
@@ -89,15 +92,16 @@ def sample(
 
     # Read input video frames i.e. images at view 0
     print(f"Reading {input_path}")
-    images_v0 = read_video(
+    processed_input_path = preprocess_video(
         input_path,
+        remove_bg=remove_bg,
         n_frames=n_frames,
         W=W,
         H=H,
-        remove_bg=remove_bg,
+        output_folder=output_folder,
         image_frame_ratio=image_frame_ratio,
-        device=device,
     )
+    images_v0 = read_video(processed_input_path, n_frames=n_frames, device=device)
 
     # Get camera viewpoints
     if isinstance(elevations_deg, float) or isinstance(elevations_deg, int):
@@ -139,7 +143,7 @@ def sample(
     for t in range(n_frames):
         img_matrix[t][0] = images_v0[t]
 
-    base_count = len(glob(os.path.join(output_folder, "*.mp4"))) // 11
+    base_count = len(glob(os.path.join(output_folder, "*.mp4"))) // 12
     save_video(
         os.path.join(output_folder, f"{base_count:06d}_t000.mp4"),
         img_matrix[0],
@@ -171,7 +175,7 @@ def sample(
         azims = azimuths_rad[subsampled_views[1:]][None].repeat(T, 0).flatten()
         azims = (azims - azimuths_rad[v0]) % (torch.pi * 2)
         samples = run_img2vid(
-            version_dict, model, image, seed, polars, azims, cond_motion, cond_view
+            version_dict, model, image, seed, polars, azims, cond_motion, cond_view, decoding_t
         )
         samples = samples.view(T, V, 3, H, W)
         for i, t in enumerate(frame_indices):
@@ -185,40 +189,48 @@ def sample(
     frame_indices = t0 + np.arange(T)
     print(f"Sampling dense frames {frame_indices}")
     latent_matrix = torch.randn(n_frames, n_views, C, H // F, W // F).to("cuda")
 
+    polars = polars_rad[subsampled_views[1:]][None].repeat(T, 0).flatten()
+    azims = azimuths_rad[subsampled_views[1:]][None].repeat(T, 0).flatten()
+    azims = (azims - azimuths_rad[v0]) % (torch.pi * 2)
+
+    # alternate between forward and backward conditioning
+    forward_inputs, forward_frame_indices, backward_inputs, backward_frame_indices = prepare_inputs(
+        frame_indices,
+        img_matrix,
+        v0,
+        view_indices,
+        model,
+        version_dict,
+        seed,
+        polars,
+        azims
+    )
+
     for step in tqdm(range(num_steps)):
-        frame_indices = frame_indices[
-            ::-1
-        ].copy()  # alternate between forward and backward conditioning
-        t0 = frame_indices[0]
-        image = img_matrix[t0][v0]
-        cond_motion = torch.cat([img_matrix[t][v0] for t in frame_indices], 0)
-        cond_view = torch.cat([img_matrix[t0][v] for v in view_indices], 0)
-        polars = polars_rad[subsampled_views[1:]][None].repeat(T, 0).flatten()
-        azims = azimuths_rad[subsampled_views[1:]][None].repeat(T, 0).flatten()
-        azims = (azims - azimuths_rad[v0]) % (torch.pi * 2)
+        if step % 2 == 1:
+            c, uc, additional_model_inputs, sampler = forward_inputs
+            frame_indices = forward_frame_indices
+        else:
+            c, uc, additional_model_inputs, sampler = backward_inputs
+            frame_indices = backward_frame_indices
         noisy_latents = latent_matrix[frame_indices][:, view_indices].flatten(0, 1)
-        samples = run_img2vid_per_step(
-            version_dict,
+        samples = do_sample_per_step(
             model,
-            image,
-            seed,
-            polars,
-            azims,
-            cond_motion,
-            cond_view,
-            step,
+            sampler,
             noisy_latents,
+            c,
+            uc,
+            step,
+            additional_model_inputs,
         )
         samples = samples.view(T, V, C, H // F, W // F)
         for i, t in enumerate(frame_indices):
            for j, v in enumerate(view_indices):
                latent_matrix[t, v] = samples[i, j]
 
-    for t in frame_indices:
-        for v in view_indices:
-            if t != 0 and v != 0:
-                img = decode_latents(model, latent_matrix[t, v][None], T)
-                img_matrix[t][v] = img * 2 - 1
+    img_matrix = decode_latents(model, latent_matrix, img_matrix, frame_indices, view_indices, T)
 
     # Save output videos
     for v in view_indices:
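For reference, the dense sampling loop above now flips between two precomputed conditioning sets rather than rebuilding `cond_motion` / `cond_view` on every denoising step; the alternation itself is just a parity check on the step index, as in this stand-alone illustration (stand-in names, not the script's code):

```python
import numpy as np

forward_idx = np.arange(5)        # frame order used on odd steps
backward_idx = forward_idx[::-1]  # reversed order used on even steps

for step in range(4):
    # Pick the precomputed direction for this step instead of recomputing it.
    frame_indices = forward_idx if step % 2 == 1 else backward_idx
    print(step, frame_indices)
```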