"""Precompute and cache conditioning tensors for Wan2.1 I2V / FramePack-style training.

For every video sample drawn from the dataset config, this script stores a single
``.pt`` file containing the Wan VAE video latent, the UMT5 prompt embedding, the CLIP
image embedding of the first frame, and the SigLIP image embedding of the first frame.
Work is sharded across ranks via torch.distributed (NCCL).
"""

import argparse
import html
import math
import os
import random
from typing import Any, Dict, List, Optional, Tuple, Union

import ftfy
import matplotlib.pyplot as plt
import numpy as np
import regex as re
import torch
import torch.distributed as dist
import torch.nn as nn
import torchvision.transforms as transforms
from IPython.display import HTML, clear_output, display
from matplotlib.animation import FuncAnimation
from omegaconf import OmegaConf
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, Subset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm

from accelerate import Accelerator, DistributedType
from accelerate.logging import get_logger
from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
from diffusers import AutoencoderKLWan
from diffusers.training_utils import free_memory
from diffusers.utils import export_to_video, load_image
from diffusers.video_processor import VideoProcessor
from transformers import (
    AutoTokenizer,
    CLIPImageProcessor,
    CLIPVisionModel,
    SiglipImageProcessor,
    SiglipVisionModel,
    UMT5EncoderModel,
)

from dataset_tool import CollectionDataset, collate_fn_map
from utils_framepack import encode_image


def encode_image_1(
    image_processor,
    image_encoder,
    image,
    device: Optional[torch.device] = "cuda",
):
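    """Encode PIL image(s) with the CLIP vision encoder.

    Returns the penultimate hidden states (``hidden_states[-2]``), matching the image
    conditioning used by the Wan I2V pipeline.
    """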
    image = image_processor(images=image, return_tensors="pt").to(device)
    image_embeds = image_encoder(**image, output_hidden_states=True)
    return image_embeds.hidden_states[-2]
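

# Prompt-cleaning helpers (ftfy fix, HTML unescape, whitespace collapse) applied to
# captions before tokenization.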
def basic_clean(text):
    text = ftfy.fix_text(text)
    text = html.unescape(html.unescape(text))
    return text.strip()


def whitespace_clean(text):
    text = re.sub(r"\s+", " ", text)
    text = text.strip()
    return text


def prompt_clean(text):
    text = whitespace_clean(basic_clean(text))
    return text


def _get_t5_prompt_embeds(
    tokenizer,
    text_encoder,
    prompt: Union[str, List[str]] = None,
    num_videos_per_prompt: int = 1,
    max_sequence_length: int = 512,
    caption_dropout_p: float = 0.0,
    device: Optional[torch.device] = "cuda",
    dtype: Optional[torch.dtype] = torch.bfloat16,
):
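    """Embed prompts with the UMT5 text encoder.

    Sequences are tokenized to ``max_sequence_length``; embedding positions beyond each
    prompt's true length are zeroed. With probability ``caption_dropout_p`` the whole
    embedding (and mask) is zeroed, i.e. caption dropout for classifier-free guidance.
    """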
    prompt = [prompt] if isinstance(prompt, str) else prompt
    prompt = [prompt_clean(u) for u in prompt]
    batch_size = len(prompt)

    text_inputs = tokenizer(
        prompt,
        padding="max_length",
        max_length=max_sequence_length,
        truncation=True,
        add_special_tokens=True,
        return_attention_mask=True,
        return_tensors="pt",
    )
    text_input_ids, mask = text_inputs.input_ids, text_inputs.attention_mask

    prompt_embeds = text_encoder(text_input_ids.to(device), mask.to(device)).last_hidden_state
    prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

    # Caption dropout: zero out both the embeddings and the attention mask.
    if random.random() < caption_dropout_p:
        prompt_embeds.fill_(0)
        mask.fill_(False)
    seq_lens = mask.gt(0).sum(dim=1).long()

    # Keep only the valid tokens of each prompt, then re-pad with zeros so padded
    # positions carry no signal.
    prompt_embeds = [u[:v] for u, v in zip(prompt_embeds, seq_lens)]
    prompt_embeds = torch.stack(
        [torch.cat([u, u.new_zeros(max_sequence_length - u.size(0), u.size(1))]) for u in prompt_embeds],
        dim=0,
    )

    # Duplicate embeddings for each video generated per prompt.
    _, seq_len, _ = prompt_embeds.shape
    prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
    prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)

    return prompt_embeds


def encode_prompt(
    tokenizer,
    text_encoder,
    prompt: Union[str, List[str]],
    num_videos_per_prompt: int = 1,
    prompt_embeds: Optional[torch.Tensor] = None,
    max_sequence_length: int = 512,
    caption_dropout_p: float = 0.0,
    device: Optional[torch.device] = "cuda",
    dtype: Optional[torch.dtype] = torch.bfloat16,
):
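    """Return T5 prompt embeddings, computing them unless ``prompt_embeds`` is supplied."""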
    prompt = [prompt] if isinstance(prompt, str) else prompt
    if prompt is not None:
        batch_size = len(prompt)
    else:
        batch_size = prompt_embeds.shape[0]

    if prompt_embeds is None:
        prompt_embeds = _get_t5_prompt_embeds(
            tokenizer,
            text_encoder,
            prompt=prompt,
            num_videos_per_prompt=num_videos_per_prompt,
            max_sequence_length=max_sequence_length,
            caption_dropout_p=caption_dropout_p,
            device=device,
            dtype=dtype,
        )

    return prompt_embeds


def setup_distributed_env():
    # Expects torchrun-style environment variables (LOCAL_RANK etc.) and NCCL.
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))


def cleanup_distributed_env():
    dist.destroy_process_group()


def main(
    rank,
    world_size,
    global_rank,
    batch_size,
    dataloader_num_workers,
    config_path,
    output_latent_folder,
    pretrained_model_name_or_path,
    siglip_model_name_or_path,
):
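    """Encode this rank's share of the dataset and write one ``.pt`` file per sample.

    ``batch_size`` and ``dataloader_num_workers`` are accepted for CLI compatibility but
    are not used by the streaming iteration below.
    """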
    weight_dtype = torch.bfloat16

    os.makedirs(output_latent_folder, exist_ok=True)

    # ``rank`` here is the local CUDA device index passed in from __main__.
    device = rank
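
    # Load the tokenizer and image processors: the CLIP processor comes from the Wan
    # checkpoint, the SigLIP feature extractor from ``siglip_model_name_or_path``.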
    tokenizer = AutoTokenizer.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="tokenizer",
    )
    clip_image_processor = CLIPImageProcessor.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="image_processor",
    )
    feature_extractor = SiglipImageProcessor.from_pretrained(
        siglip_model_name_or_path,
        subfolder="feature_extractor",
    )

    text_encoder = UMT5EncoderModel.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="text_encoder",
        torch_dtype=torch.float16,
    )
    clip_image_encoder = CLIPVisionModel.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="image_encoder",
        torch_dtype=torch.float16,
    )
    image_encoder = SiglipVisionModel.from_pretrained(
        siglip_model_name_or_path,
        subfolder="image_encoder",
        torch_dtype=weight_dtype,
    )
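
    # The Wan VAE is kept in float32; its config's latent mean/std are later used to
    # normalize the encoded latents before saving.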
    vae = AutoencoderKLWan.from_pretrained(
        pretrained_model_name_or_path,
        subfolder="vae",
        torch_dtype=torch.float32,
    )
    vae_scale_factor_spatial = vae.spatial_compression_ratio
    video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)

    # Freeze everything: this script only runs inference to cache features.
    vae.requires_grad_(False)
    text_encoder.requires_grad_(False)
    clip_image_encoder.requires_grad_(False)
    image_encoder.requires_grad_(False)
    vae.eval()
    text_encoder.eval()
    clip_image_encoder.eval()
    image_encoder.eval()

    vae = vae.to(device)
    text_encoder = text_encoder.to(device)
    image_encoder = image_encoder.to(device)
    clip_image_encoder = clip_image_encoder.to(device)

    dist.barrier()
    configs = OmegaConf.load(config_path)
    dataset = CollectionDataset.create_dataset_function(
        configs["train_data"],
        configs["train_data_weights"],
        **configs["data"]["params"],
    )
    print(len(dataset))

    # Only global rank 0 shows a progress bar; the total assumes samples are split
    # evenly across ranks.
    if global_rank == 0:
        pbar = tqdm(total=len(dataset) // world_size, desc="Processing")
    dist.barrier()

    def distributed_iterate_dataset(dataset, world_size, rank):
        # Round-robin sharding: every rank walks the full iterator but only yields the
        # samples whose running index maps to it.
        iterator = iter(dataset)
        sample_count = 0

        while True:
            try:
                batch = next(iterator)

                if sample_count % world_size == rank:
                    processed_batch = collate_fn_map(batch)
                    yield processed_batch

                sample_count += 1

            except StopIteration:
                break
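
    # Main loop: each rank encodes its share of samples and caches the results.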
    for idx, batch in enumerate(distributed_iterate_dataset(dataset, dist.get_world_size(), dist.get_rank())):
        # Skip samples whose latent file already exists so interrupted jobs can resume.
        valid_indices = []
        valid_uttids = []
        valid_num_frames = []
        valid_heights = []
        valid_widths = []
        valid_videos = []
        valid_prompts = []
        valid_first_frames_images = []

        for i, (uttid, num_frame, height, width) in enumerate(
            zip(
                batch["uttid"],
                batch["video_metadata"]["num_frames"],
                batch["video_metadata"]["height"],
                batch["video_metadata"]["width"],
            )
        ):
            output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
            if not os.path.exists(output_path):
                valid_indices.append(i)
                valid_uttids.append(uttid)
                valid_num_frames.append(num_frame)
                valid_heights.append(height)
                valid_widths.append(width)
                valid_videos.append(batch["videos"][i])
                valid_prompts.append(batch["prompts"][i])
                valid_first_frames_images.append(batch["first_frames_images"][i])
            else:
                print(f"skipping {uttid}")

        if not valid_indices:
            print("skipping entire batch!")
            continue

        batch = {
            "uttid": valid_uttids,
            "video_metadata": {
                "num_frames": valid_num_frames,
                "height": valid_heights,
                "width": valid_widths,
            },
            "videos": torch.stack(valid_videos),
            "prompts": valid_prompts,
            "first_frames_images": torch.stack(valid_first_frames_images),
        }

        if len(batch["uttid"]) == 0:
            print("All samples in this batch are already processed, skipping!")
            continue

        with torch.no_grad():
            # Encode the video with the Wan VAE and normalize the latents with the
            # per-channel statistics from the VAE config (``latents_std`` below holds
            # 1 / std, so normalization is a multiplication).
            latents_mean = (
                torch.tensor(vae.config.latents_mean)
                .view(1, vae.config.z_dim, 1, 1, 1)
                .to(vae.device, vae.dtype)
            )
            latents_std = 1.0 / torch.tensor(vae.config.latents_std).view(
                1, vae.config.z_dim, 1, 1, 1
            ).to(vae.device, vae.dtype)
            # Put the video in (B, C, F, H, W) layout for the VAE.
            pixel_values = batch["videos"].permute(0, 2, 1, 3, 4).to(dtype=vae.dtype, device=device)
            vae_latents = vae.encode(pixel_values).latent_dist.sample()
            vae_latents = (vae_latents - latents_mean) * latents_std
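
            # Text conditioning: UMT5 prompt embeddings (caption_dropout_p defaults to
            # 0.0 here, so no dropout is applied when caching).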
            prompts = batch["prompts"]
            prompt_embeds = encode_prompt(
                tokenizer=tokenizer,
                text_encoder=text_encoder,
                prompt=prompts,
                device=device,
            )
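
            # First-frame conditioning: CLIP image embeddings (Wan I2V image encoder)
            # and SigLIP embeddings via ``encode_image`` from utils_framepack.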
            image_tensor = batch["first_frames_images"]
            images = [transforms.ToPILImage()(x.to(torch.uint8)) for x in image_tensor]

            clip_image_embeds = encode_image_1(
                image_processor=clip_image_processor,
                image_encoder=clip_image_encoder,
                image=images,
                device=device,
            )

            image = video_processor.preprocess(image=images, height=batch["videos"].shape[-2], width=batch["videos"].shape[-1])
            image_embeds = encode_image(
                feature_extractor,
                image_encoder,
                image,
                device=device,
                dtype=weight_dtype,
            )
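
        # Write one ``.pt`` per sample with all cached features moved to CPU.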
        for uttid, num_frame, height, width, cur_vae_latent, cur_prompt_embed, cur_clip_image_embed, cur_image_embed in zip(
            batch["uttid"],
            batch["video_metadata"]["num_frames"],
            batch["video_metadata"]["height"],
            batch["video_metadata"]["width"],
            vae_latents,
            prompt_embeds,
            clip_image_embeds,
            image_embeds,
        ):
            output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
            torch.save(
                {
                    "vae_latent": cur_vae_latent.cpu().detach(),
                    "prompt_embed": cur_prompt_embed.cpu().detach(),
                    "clip_image_embeds": cur_clip_image_embed.cpu().detach(),
                    "image_embeds": cur_image_embed.cpu().detach(),
                },
                output_path,
            )
            print(f"save to: {output_path}")

        if global_rank == 0:
            pbar.update(1)
            pbar.set_postfix({"batch": idx})
        free_memory()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Precompute Wan2.1 VAE/text/image latents and save them as .pt files.")
    parser.add_argument("--batch_size", type=int, default=1, help="Batch size for processing")
    parser.add_argument("--dataloader_num_workers", type=int, default=8, help="Number of workers for data loading")
    parser.add_argument("--config_path", type=str, default="part1.yaml", help="Path to the config file")
    parser.add_argument("--output_latent_folder", type=str, default="/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents_wan", help="Folder to store output latents")
    parser.add_argument("--pretrained_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Wan-AI/Wan2.1-I2V-14B-720P-Diffusers/", help="Pretrained Wan2.1 I2V Diffusers model path")
    parser.add_argument("--siglip_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl", help="SigLIP model path (e.g. flux_redux_bfl)")
    args = parser.parse_args()
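
    # NOTE: assumes a torch.distributed launcher that sets LOCAL_RANK/RANK/WORLD_SIZE,
    # e.g. (the script name below is illustrative):
    #   torchrun --nproc_per_node=<num_gpus> precompute_latents.py --config_path part1.yaml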

    setup_distributed_env()

    global_rank = dist.get_rank()
    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.cuda.current_device()
    world_size = dist.get_world_size()

    main(
        world_size=world_size,
        rank=device,
        global_rank=global_rank,
        batch_size=args.batch_size,
        dataloader_num_workers=args.dataloader_num_workers,
        config_path=args.config_path,
        output_latent_folder=args.output_latent_folder,
        pretrained_model_name_or_path=args.pretrained_model_name_or_path,
        siglip_model_name_or_path=args.siglip_model_name_or_path,
    )

    # Tear down the process group once this rank has finished.
    cleanup_distributed_env()