import os
import time
from pprint import pformat

import colossalai
import torch
import torch.distributed as dist
from colossalai.cluster import DistCoordinator
from mmengine.runner import set_random_seed
from tqdm import tqdm

from opensora.acceleration.parallel_states import set_sequence_parallel_group
from opensora.datasets import save_sample
from opensora.datasets.aspect import get_image_size, get_num_frames
from opensora.datasets.utils import to_pil_images
from opensora.models.text_encoder.t5 import text_preprocessing
from opensora.registry import MODELS, SCHEDULERS, build_module
from opensora.utils.config_utils import parse_configs
from opensora.utils.inference_utils import (
    add_watermark,
    append_generated,
    append_score_to_prompts,
    apply_mask_strategy,
    collect_references_batch,
    dframe_to_frame,
    extract_json_from_prompts,
    extract_prompts_loop,
    get_save_path_name,
    initialize_condition_video,
    load_prompts,
    merge_prompt,
    prepare_multi_resolution_info,
    refine_prompts_by_openai,
    split_prompt,
)
from opensora.utils.misc import all_exists, create_logger, is_distributed, is_main_process, to_torch_dtype


def main():
    torch.set_grad_enabled(False)

    # == parse configs ==
    cfg = parse_configs(training=False)

    # == device and dtype ==
    device = "cuda" if torch.cuda.is_available() else "cpu"
    cfg_dtype = cfg.get("dtype", "fp32")
    assert cfg_dtype in ["fp16", "bf16", "fp32"], f"Unknown mixed precision {cfg_dtype}"
    dtype = to_torch_dtype(cfg_dtype)
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True

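    # == init distributed environment ==
    # diff_rank_seed=True gives each rank a different seed, so different
    # ranks draw independent noise for their prompt shards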
    if is_distributed():
        colossalai.launch_from_torch({})
        coordinator = DistCoordinator()
        # sequence parallelism is hard-coded off in this script; the flag is
        # still passed through to the model builder below
        enable_sequence_parallelism = False
        if enable_sequence_parallelism:
            set_sequence_parallel_group(dist.group.WORLD)
    else:
        coordinator = None
        enable_sequence_parallelism = False
    set_random_seed(seed=cfg.get("seed", 1024), diff_rank_seed=True)

    # == init logger ==
    logger = create_logger()
    logger.info("Inference configuration:\n %s", pformat(cfg.to_dict()))
    verbose = cfg.get("verbose", 1)
    progress_wrap = tqdm if verbose == 1 else (lambda x: x)  # tqdm only at verbose == 1

    # == build text encoder and VAE ==
    logger.info("Building models...")
    text_encoder = build_module(cfg.text_encoder, MODELS, device=device)
    vae = build_module(cfg.vae, MODELS).to(device, dtype).eval()

    # == prepare video size ==
    image_size = cfg.get("image_size", None)
    if image_size is None:
        resolution = cfg.get("resolution", None)
        aspect_ratio = cfg.get("aspect_ratio", None)
        assert (
            resolution is not None and aspect_ratio is not None
        ), "resolution and aspect_ratio must be provided if image_size is not provided"
        image_size = get_image_size(resolution, aspect_ratio)
    num_frames = get_num_frames(cfg.num_frames)
    num_ar_frames = None
    if cfg.get("pa_vdm", False):
        num_ar_frames = get_num_frames(cfg.get("num_ar_frames", "16s"))

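    # == compute latent size ==
    # for PA-VDM runs, `num_frames` is the latent window the model sees at
    # once, while `num_ar_frames` is the total length generated
    # autoregressively (reading based on how both values are used below)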
    input_size = (num_frames, *image_size)
    latent_size = vae.get_latent_size(input_size)
    if cfg.get("pa_vdm", False):
        num_ar_latent_frames = vae.get_latent_size((num_ar_frames, *image_size))[0]
        logger.info(
            "num_frames: %s, num_ar_frames: %s, num_ar_latent_frames: %s, latent_size: %s",
            num_frames,
            num_ar_frames,
            num_ar_latent_frames,
            latent_size,
        )

    # == build diffusion model ==
    model = (
        build_module(
            cfg.model,
            MODELS,
            input_size=latent_size,
            in_channels=vae.out_channels,
            caption_channels=text_encoder.output_dim,
            model_max_length=text_encoder.model_max_length,
            enable_sequence_parallelism=enable_sequence_parallelism,
        )
        .to(device, dtype)
        .eval()
    )
    text_encoder.y_embedder = model.y_embedder  # share the caption embedder for classifier-free guidance

    # == build scheduler ==
    scheduler = build_module(cfg.scheduler, SCHEDULERS)
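    # the scheduler is expected to expose `sample` and, for PA-VDM runs,
    # `sample_pavdm`; both entry points are dispatched in the loop below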

    # == load prompts ==
    prompts = cfg.get("prompt", None)
    start_idx = cfg.get("start_index", 0)
    if prompts is None:
        if cfg.get("prompt_path", None) is not None:
            prompts = load_prompts(cfg.prompt_path, start_idx, cfg.get("end_index", None))
        else:
            prompts = [cfg.get("prompt_generator", "")] * 1_000_000  # effectively endless for generator-driven runs

    # == prepare reference videos and mask strategies ==
    reference_path = cfg.get("reference_path", [""] * len(prompts))
    mask_strategy = cfg.get("mask_strategy", [""] * len(prompts))
    assert len(reference_path) == len(prompts), "Length of reference must be the same as prompts"
    assert len(mask_strategy) == len(prompts), "Length of mask_strategy must be the same as prompts"

    # == prepare runtime arguments ==
    fps = cfg.fps
    save_fps = cfg.get("save_fps", fps // cfg.get("frame_interval", 1))
    multi_resolution = cfg.get("multi_resolution", None)
    batch_size = cfg.get("batch_size", 1)
    num_sample = cfg.get("num_sample", 1)
    loop = cfg.get("loop", 1)
    condition_frame_length = cfg.get("condition_frame_length", 5)
    condition_frame_edit = cfg.get("condition_frame_edit", 0.0)
    align = cfg.get("align", None)

    # == prepare save directory ==
    # `save_dir` must be defined on every rank, since all ranks build save
    # paths below; only the main process creates the directory
    save_dir = cfg.save_dir
    save_dir += "" if save_dir.endswith("/") else "/"
    if is_main_process():
        os.makedirs(save_dir, exist_ok=True)
    sample_name = cfg.get("sample_name", None)
    prompt_as_path = cfg.get("prompt_as_path", False)

    # == shard prompts, mask strategies, and references across ranks ==
    if is_distributed():
        prompts = prompts[coordinator.rank :: coordinator.world_size]
        mask_strategy = mask_strategy[coordinator.rank :: coordinator.world_size]
        reference_path = reference_path[coordinator.rank :: coordinator.world_size]

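    # == batched sampling loop ==
    # iterate over this rank's shard, batch_size prompts at a time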
    for i in progress_wrap(range(0, len(prompts), batch_size)):
        # == prepare batch prompts ==
        batch_prompts = prompts[i : i + batch_size]
        ms = mask_strategy[i : i + batch_size]
        refs = reference_path[i : i + batch_size]

        # == extract json-embedded overrides from prompts ==
        batch_prompts, refs, ms = extract_json_from_prompts(batch_prompts, refs, ms)
        original_batch_prompts = batch_prompts

        # == encode references into VAE latents ==
        refs = collect_references_batch(refs, vae, image_size)

        # == multi-resolution conditioning info ==
        model_args = prepare_multi_resolution_info(
            multi_resolution, len(batch_prompts), image_size, num_frames, fps, device, dtype
        )

        # == iterate over the number of samples per prompt ==
        for k in range(num_sample):
            # == prepare save paths ==
            save_paths = [
                get_save_path_name(
                    save_dir,
                    sample_name=sample_name,
                    sample_idx=start_idx + idx,
                    prompt=original_batch_prompts[idx],
                    prompt_as_path=prompt_as_path,
                    num_sample=num_sample,
                    k=k,
                )
                for idx in range(len(batch_prompts))
            ]

            # skip if all samples of this batch already exist on disk
            if prompt_as_path and all_exists(save_paths):
                continue

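            # == process prompts for the current batch ==
            # prompts may contain loop-indexed segments (one text per loop
            # iteration); split them here, transform each segment, and merge
            # them back before sampling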
            batched_prompt_segment_list = []
            batched_loop_idx_list = []
            for prompt in batch_prompts:
                prompt_segment_list, loop_idx_list = split_prompt(prompt)
                batched_prompt_segment_list.append(prompt_segment_list)
                batched_loop_idx_list.append(loop_idx_list)

            # == refine prompts with an external LLM (optional) ==
            if cfg.get("llm_refine", False):
                # only one process should query the API; the refined prompts
                # are then broadcast to the other ranks
                if not enable_sequence_parallelism or is_main_process():
                    for idx, prompt_segment_list in enumerate(batched_prompt_segment_list):
                        batched_prompt_segment_list[idx] = refine_prompts_by_openai(prompt_segment_list)

                # sync the refined prompts across ranks
                if enable_sequence_parallelism:
                    coordinator.block_all()
                    prompt_segment_length = [
                        len(prompt_segment_list) for prompt_segment_list in batched_prompt_segment_list
                    ]

                    # flatten the nested segment lists for broadcasting
                    batched_prompt_segment_list = [
                        prompt_segment
                        for prompt_segment_list in batched_prompt_segment_list
                        for prompt_segment in prompt_segment_list
                    ]

                    # broadcast the flattened list from rank 0
                    broadcast_obj_list = [batched_prompt_segment_list] * coordinator.world_size
                    dist.broadcast_object_list(broadcast_obj_list, 0)

                    # restore the original nested structure
                    batched_prompt_segment_list = []
                    segment_start_idx = 0
                    all_prompts = broadcast_obj_list[0]
                    for num_segment in prompt_segment_length:
                        batched_prompt_segment_list.append(
                            all_prompts[segment_start_idx : segment_start_idx + num_segment]
                        )
                        segment_start_idx += num_segment

            # == append aesthetic / flow / camera-motion scores to prompts ==
            for idx, prompt_segment_list in enumerate(batched_prompt_segment_list):
                batched_prompt_segment_list[idx] = append_score_to_prompts(
                    prompt_segment_list,
                    aes=cfg.get("aes", None),
                    flow=cfg.get("flow", None),
                    camera_motion=cfg.get("camera_motion", None),
                )

            # == clean prompts for the text encoder ==
            for idx, prompt_segment_list in enumerate(batched_prompt_segment_list):
                batched_prompt_segment_list[idx] = [text_preprocessing(prompt) for prompt in prompt_segment_list]

            # == merge segments back into per-sample prompts ==
            batch_prompts = []
            for prompt_segment_list, loop_idx_list in zip(batched_prompt_segment_list, batched_loop_idx_list):
                batch_prompts.append(merge_prompt(prompt_segment_list, loop_idx_list))

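            # == generate video clips, loop by loop ==
            # each loop after the first is conditioned on the tail of the
            # previous clip (condition_frame_length frames), which is trimmed
            # off again when the clips are concatenated for saving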
            video_clips = []
            for loop_i in range(loop):
                batch_prompts_loop = extract_prompts_loop(batch_prompts, loop_i)

                # condition on the tail of the previously generated clip
                if loop_i > 0:
                    refs, ms = append_generated(
                        vae, video_clips[-1], refs, ms, loop_i, condition_frame_length, condition_frame_edit
                    )

                # == sample from noise ==
                z = torch.randn(len(batch_prompts), vae.out_channels, *latent_size, device=device, dtype=dtype)
                logger.info("sampling latents with shape %s", tuple(z.shape))
                if cfg.get("pa_vdm", False):
                    # PA-VDM path: seed the latent window with the encoded
                    # condition video, then roll the window forward
                    masks = None
                    z = initialize_condition_video(z, refs)
                    samples = scheduler.sample_pavdm(
                        model,
                        text_encoder,
                        z=z,
                        prompts=batch_prompts_loop,
                        device=device,
                        additional_args=model_args,
                        progress=verbose >= 2,
                        mask=masks,
                        num_frames=latent_size[0],
                        num_ar_latent_frames=num_ar_latent_frames,
                    )
                    samples = vae.decode(samples.to(dtype), num_frames=num_ar_frames)
                    if is_main_process():
                        logger.info("after PA-VDM sampling and decoding, samples shape: %s", tuple(samples.shape))
                    # trim trailing frames so the clip length is a multiple of
                    # save_fps; the guard avoids an empty `[:-0]` slice
                    num_extra_frames = num_ar_frames % save_fps
                    if num_extra_frames > 0:
                        samples = samples[:, :, :-num_extra_frames]
                    if is_main_process():
                        logger.info("after trimming extra frames, samples shape: %s", tuple(samples.shape))
                else:
                    masks = apply_mask_strategy(z, refs, ms, loop_i, align=align)
                    samples = scheduler.sample(
                        model,
                        text_encoder,
                        z=z,
                        prompts=batch_prompts_loop,
                        device=device,
                        additional_args=model_args,
                        progress=verbose >= 2,
                        mask=masks,
                    )
                    samples = vae.decode(samples.to(dtype), num_frames=num_frames)
                video_clips.append(samples)
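
            # == save samples ==
            # note: prompts were sharded across ranks above, but saving is
            # gated on is_main_process(), so in multi-rank runs only the main
            # rank's shard is written out by this block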
            if is_main_process():
                for idx, batch_prompt in enumerate(batch_prompts):
                    if verbose >= 2:
                        logger.info("Prompt: %s", batch_prompt)
                    save_path = save_paths[idx]
                    video = [video_clips[i][idx] for i in range(loop)]
                    # drop the conditioning frames from every loop after the first
                    for i in range(1, loop):
                        video[i] = video[i][:, dframe_to_frame(condition_frame_length) :]
                    video = torch.cat(video, dim=1)
                    save_path = save_sample(
                        video,
                        fps=save_fps,
                        save_path=save_path,
                        verbose=verbose >= 2,
                    )
                    if save_path.endswith(".mp4") and cfg.get("watermark", False):
                        time.sleep(1)  # brief pause so the file is fully written before watermarking
                        add_watermark(save_path)
        start_idx += len(batch_prompts)

    if is_main_process():
        logger.info("Inference finished.")
        logger.info("Saved %s samples to %s", start_idx, save_dir)


if __name__ == "__main__":
    main()