import json
import argparse
import os

from tqdm import tqdm
from diffusers import AutoencoderKLHunyuanVideo
from transformers import (
    CLIPTextModel,
    CLIPTokenizer,
    LlamaModel,
    LlamaTokenizerFast,
    SiglipImageProcessor,
    SiglipVisionModel,
)
from diffusers.video_processor import VideoProcessor
from diffusers.utils import export_to_video, load_image
from dataset_tool import CollectionDataset, collate_fn_map
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, Subset
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from IPython.display import HTML, display, clear_output  # clear_output is used to clear stale notebook output
from accelerate import Accelerator, DistributedType
from accelerate.logging import get_logger
from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
from diffusers.training_utils import free_memory
from utils_framepack import encode_image, encode_prompt
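

# Distributed helpers: NCCL process-group setup/teardown. LOCAL_RANK is
# expected in the environment (set by torchrun or a similar launcher).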
def setup_distributed_env():
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))


def cleanup_distributed_env():
    dist.destroy_process_group()
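

# main() makes one pass over the dataset and, for every clip, writes a small
# JSON with its uttid and top-k average motion score, bucketed into
# high-motion / low-motion metadata folders.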
def main(rank, world_size, global_rank, batch_size, dataloader_num_workers,
         config_path, output_latent_folder, pretrained_model_name_or_path,
         siglip_model_name_or_path):
    weight_dtype = torch.bfloat16
    # batch_size = 2
    # dataloader_num_workers = 8
    # config_path = "512_collection_config_vae1011_aligned_full_dump.yaml"
    # output_latent_folder = "/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents"
    # pretrained_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo"
    # siglip_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl"
    base_folder = output_latent_folder
    device = rank
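
    # NOTE: all model loading below is disabled; this metadata-dump pass does
    # not need the tokenizers, VAE, text encoders, or image encoder.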
    # Load the tokenizers
    # tokenizer_one = LlamaTokenizerFast.from_pretrained(
    #     pretrained_model_name_or_path,
    #     subfolder="tokenizer",
    # )
    # tokenizer_two = CLIPTokenizer.from_pretrained(
    #     pretrained_model_name_or_path,
    #     subfolder="tokenizer_2",
    # )
    # feature_extractor = SiglipImageProcessor.from_pretrained(
    #     siglip_model_name_or_path,
    #     subfolder="feature_extractor",
    # )
    # vae = AutoencoderKLHunyuanVideo.from_pretrained(
    #     pretrained_model_name_or_path,
    #     subfolder="vae",
    #     torch_dtype=torch.float32,
    # )
    # vae_scale_factor_spatial = vae.spatial_compression_ratio
    # video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)
    # text_encoder_one = LlamaModel.from_pretrained(
    #     pretrained_model_name_or_path,
    #     subfolder="text_encoder",
    #     torch_dtype=weight_dtype,
    # )
    # text_encoder_two = CLIPTextModel.from_pretrained(
    #     pretrained_model_name_or_path,
    #     subfolder="text_encoder_2",
    #     torch_dtype=weight_dtype,
    # )
    # image_encoder = SiglipVisionModel.from_pretrained(
    #     siglip_model_name_or_path,
    #     subfolder="image_encoder",
    #     torch_dtype=weight_dtype,
    # )
    # vae.requires_grad_(False)
    # text_encoder_one.requires_grad_(False)
    # text_encoder_two.requires_grad_(False)
    # image_encoder.requires_grad_(False)
    # vae.eval()
    # text_encoder_one.eval()
    # text_encoder_two.eval()
    # image_encoder.eval()
    # vae = vae.to(device)
    # text_encoder_one = text_encoder_one.to(device)
    # text_encoder_two = text_encoder_two.to(device)
    # image_encoder = image_encoder.to(device)
    dist.barrier()
    configs = OmegaConf.load(config_path)
    dataset = CollectionDataset.create_dataset_function(
        configs["train_data"],
        configs["train_data_weights"],
        **configs["data"]["params"],
    )
    print(f"dataset size: {len(dataset)}")
    # Shard the dataset across ranks. Use the global rank so multi-node runs
    # shard correctly, and pass the sampler to the DataLoader; without it,
    # every rank would iterate over the full dataset.
    sampler = DistributedSampler(dataset, rank=global_rank, num_replicas=world_size)
    dataloader = DataLoader(
        dataset,
        sampler=sampler,
        batch_size=batch_size,
        collate_fn=collate_fn_map,
        num_workers=dataloader_num_workers,
        pin_memory=False,
        prefetch_factor=2 if dataloader_num_workers != 0 else None,
        persistent_workers=False,
    )
    sampler.set_epoch(0)
    if global_rank == 0:
        pbar = tqdm(total=len(dataloader), desc="Processing")
    dist.barrier()
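
    # Main loop: ranks move in lockstep via the per-iteration barrier, and
    # already-written JSON files are skipped so the job can be resumed.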
    for idx, batch in enumerate(dataloader):
        dist.barrier()
        free_memory()
        output_json = {
            "uttid": batch["uttid"][0],
            "topk_avg_motion_scores_t": batch["topk_avg_motion_scores_t"].item(),
        }
        # Clips whose top-k average motion score is >= 400 count as high motion.
        if batch["topk_avg_motion_scores_t"].item() >= 400:
            base_path = "/mnt/bn/yufan-dev-my/ysh/Datasets/sft_sftnews_videos/new_metadata/high_motion"
        else:
            base_path = "/mnt/bn/yufan-dev-my/ysh/Datasets/sft_sftnews_videos/new_metadata/low_motion"
        os.makedirs(base_path, exist_ok=True)
        output_path = os.path.join(base_path, f"{batch['uttid'][0]}.json")
        if os.path.exists(output_path):
            # if/else rather than `continue`, so the progress bar and the
            # cleanup below still run for skipped samples
            print(f"skipping: {output_path}")
        else:
            with open(output_path, "w") as f:
                json.dump(output_json, f, indent=2)
            print(f"saved json to {output_path}")
        del batch, output_json
        free_memory()
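
        # Original latent-extraction path, kept for reference: filter out
        # already-processed samples, re-assemble the batch, encode VAE / text /
        # image features, and save one .pt file per sample.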
        # valid_indices = []
        # valid_uttids = []
        # valid_num_frames = []
        # valid_heights = []
        # valid_widths = []
        # valid_videos = []
        # valid_prompts = []
        # valid_first_frames_images = []
        # valid_stride_videos = []
        # for i, (uttid, num_frame, height, width, topk_avg_motion_scores_t) in enumerate(
        #     zip(
        #         batch["uttid"],
        #         batch["video_metadata"]["num_frames"],
        #         batch["video_metadata"]["height"],
        #         batch["video_metadata"]["width"],
        #         batch["topk_avg_motion_scores_t"],
        #     )
        # ):
        #     if topk_avg_motion_scores_t != -1:
        #         output_latent_folder = os.path.join(base_folder, "latents/high_motion")
        #     else:
        #         output_latent_folder = os.path.join(base_folder, "latents/low_motion")
        #     os.makedirs(output_latent_folder, exist_ok=True)
        #     output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
        #     if not os.path.exists(output_path):
        #         valid_indices.append(i)
        #         valid_uttids.append(uttid)
        #         valid_num_frames.append(num_frame)
        #         valid_heights.append(height)
        #         valid_widths.append(width)
        #         valid_videos.append(batch["videos"][i])
        #         valid_prompts.append(batch["prompts"][i])
        #         valid_first_frames_images.append(batch["first_frames_images"][i])
        #         valid_stride_videos.append(batch["stride_videos"][i])
        #     else:
        #         print(f"skipping {uttid}")
        # if not valid_indices:
        #     print("skipping entire batch!")
        #     continue
        # batch = None
        # del batch
        # free_memory()
        # batch = {
        #     "uttid": valid_uttids,
        #     "video_metadata": {
        #         "num_frames": valid_num_frames,
        #         "height": valid_heights,
        #         "width": valid_widths,
        #     },
        #     "videos": torch.stack(valid_videos),
        #     "prompts": valid_prompts,
        #     "first_frames_images": torch.stack(valid_first_frames_images),
        #     "stride_videos": torch.stack(valid_stride_videos),
        # }
        # if len(batch["uttid"]) == 0:
        #     print("All samples in this batch are already processed, skipping!")
        #     continue
        # with torch.no_grad():
        #     # VAE latents for the full videos
        #     pixel_values = batch["videos"].permute(0, 2, 1, 3, 4).to(dtype=vae.dtype, device=device)
        #     vae_latents = vae.encode(pixel_values).latent_dist.sample()
        #     vae_latents = vae_latents * vae.config.scaling_factor
        #     # VAE latents for the stride videos
        #     pixel_values_2 = batch["stride_videos"].permute(0, 2, 1, 3, 4).to(dtype=vae.dtype, device=device)
        #     vae_latents_2 = vae.encode(pixel_values_2).latent_dist.sample()
        #     vae_latents_2 = vae_latents_2 * vae.config.scaling_factor
        #     # Encode prompts
        #     prompts = batch["prompts"]
        #     prompt_embeds, pooled_prompt_embeds, prompt_attention_mask = encode_prompt(
        #         tokenizer=tokenizer_one,
        #         text_encoder=text_encoder_one,
        #         tokenizer_2=tokenizer_two,
        #         text_encoder_2=text_encoder_two,
        #         prompt=prompts,
        #         device=device,
        #     )
        #     # Prepare first-frame images for the image encoder
        #     image_tensor = batch["first_frames_images"]
        #     images = [transforms.ToPILImage()(x.to(torch.uint8)) for x in image_tensor]
        #     image = video_processor.preprocess(image=images, height=batch["videos"].shape[-2], width=batch["videos"].shape[-1])
        #     image_embeds = encode_image(
        #         feature_extractor,
        #         image_encoder,
        #         image,
        #         device=device,
        #         dtype=weight_dtype,
        #     )
        #     for uttid, num_frame, height, width, cur_vae_latent, cur_prompt_embed, cur_pooled_prompt_embed, cur_prompt_attention_mask, cur_image_embed, cur_vae_latents_2 in zip(
        #         batch["uttid"],
        #         batch["video_metadata"]["num_frames"],
        #         batch["video_metadata"]["height"],
        #         batch["video_metadata"]["width"],
        #         vae_latents,
        #         prompt_embeds,
        #         pooled_prompt_embeds,
        #         prompt_attention_mask,
        #         image_embeds,
        #         vae_latents_2,
        #     ):
        #         output_path = os.path.join(output_latent_folder, f"{uttid}_{num_frame}_{height}_{width}.pt")
        #         temp_to_save = {
        #             "vae_latent": cur_vae_latent.cpu().detach(),
        #             "prompt_embed": cur_prompt_embed.cpu().detach(),
        #             "pooled_prompt_embeds": cur_pooled_prompt_embed.cpu().detach(),
        #             "prompt_attention_mask": cur_prompt_attention_mask.cpu().detach(),
        #             "image_embeds": cur_image_embed.cpu().detach(),
        #             "vae_latents_2": cur_vae_latents_2.cpu().detach(),
        #         }
        #         torch.save(temp_to_save, output_path)
        #         print(f"save latent to: {output_path}")
        if global_rank == 0:
            pbar.update(1)
            pbar.set_postfix({"batch": idx})
        # Drop references created by the (currently disabled) encoding path.
        # Binding each name to None first keeps the `del` safe even while that
        # block stays commented out.
        pixel_values = pixel_values_2 = prompts = image_tensor = images = None
        vae_latents = vae_latents_2 = image_embeds = None
        prompt_embeds = pooled_prompt_embeds = prompt_attention_mask = None
        batch = valid_indices = valid_uttids = valid_num_frames = None
        valid_heights = valid_widths = valid_videos = valid_prompts = None
        valid_first_frames_images = valid_stride_videos = temp_to_save = None
        del (pixel_values, pixel_values_2, prompts, image_tensor, images,
             vae_latents, vae_latents_2, image_embeds, batch, valid_indices,
             valid_uttids, valid_num_frames, valid_heights, valid_widths,
             valid_videos, valid_prompts, valid_first_frames_images,
             valid_stride_videos, temp_to_save)
        free_memory()
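

# Entry point: launch with torchrun (or an equivalent launcher) so that
# LOCAL_RANK, RANK, WORLD_SIZE, and the master address/port are set before
# dist.init_process_group("nccl") runs.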
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Distributed dump of per-clip motion-score metadata.")
    parser.add_argument("--batch_size", type=int, default=1, help="Batch size for processing")
    parser.add_argument("--dataloader_num_workers", type=int, default=0, help="Number of workers for data loading")
    parser.add_argument("--config_path", type=str, default="part1.yaml", help="Path to the config file")
    parser.add_argument("--output_latent_folder", type=str, default="/mnt/bn/yufan-dev-my/ysh/Datasets/sft_sftnews_videos", help="Folder to store output latents")
    parser.add_argument("--pretrained_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/hunyuanvideo-community/HunyuanVideo", help="Pretrained model path")
    parser.add_argument("--siglip_model_name_or_path", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/lllyasviel/flux_redux_bfl", help="SigLIP model path")
    args = parser.parse_args()
    setup_distributed_env()
    global_rank = dist.get_rank()
    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.cuda.current_device()
    world_size = dist.get_world_size()
    main(
        world_size=world_size,
        rank=device,
        global_rank=global_rank,
        batch_size=args.batch_size,
        dataloader_num_workers=args.dataloader_num_workers,
        config_path=args.config_path,
        output_latent_folder=args.output_latent_folder,
        pretrained_model_name_or_path=args.pretrained_model_name_or_path,
        siglip_model_name_or_path=args.siglip_model_name_or_path,
    )
    cleanup_distributed_env()  # tear down the NCCL process group