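# Training entry point for VTimeLLM-style staged video-LLM training. The script below
# supports Vicuna/LLaMA, ChatGLM, and VideoLLaMA2 backbones, multi-stage LoRA training
# (stage 2, stage 3, and an optional fine-tuning pass over merged stage-3 weights),
# optional 4-/8-bit (QLoRA-style) loading, and DeepSpeed ZeRO-3 checkpointing.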
import os
root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..")
from dataclasses import dataclass, field
import logging
import pathlib
from typing import Dict, Optional, Sequence, List

import torch
import transformers
import sys
sys.path.append(root_dir)
from vtimellm import conversation as conversation_lib
from vtimellm.train.vtimellm_trainer import VTimeLLMTrainer
from vtimellm.train.dataset import make_supervised_data_module, DataArguments
from vtimellm.model import VTimeLLMLlamaForCausalLM, VTimeLLMChatGLMForCausalLM
from vtimellm.model.builder import load_lora
from vtimellm.mm_utils import print_trainable_parameters


local_rank = None


def rank0_print(*args):
    if local_rank == 0:
        print(*args)


@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(default="checkpoints/vtimellm/vicuna-7b-v1.5")
    stage2_path: Optional[str] = field(default='checkpoints/vtimellm/vtimellm-vicuna-v1-5-7b-stage2')
    stage3_path: Optional[str] = field(default='checkpoints/vtimellm/vtimellm-vicuna-v1-5-7b-stage3')
    version: Optional[str] = field(default="v0")
    tune_mm_mlp_adapter: bool = field(default=False)
    pretrain_mm_mlp_adapter: Optional[str] = field(default=None)

    mm_projector_type: Optional[str] = field(default='stc_connector')

    vision_tower: Optional[str] = field(default=None)
    mm_vision_select_layer: Optional[int] = field(default=-2)
    mm_vision_select_feature: Optional[str] = field(default="patch")
    mm_use_im_start_end: bool = field(default=False)
    mm_use_im_patch_token: bool = field(default=False)
    pretrain_model_name_or_path: Optional[str] = field(
        default=None,
        metadata={"help": "To train from previously trained checkpoints. E.g., further fine-tuning based on the finetuned version of the whole model."},
    )


@dataclass
class TrainingArguments(transformers.TrainingArguments):
    training_stage: int = field(default=2)
    finetuning: bool = field(default=False)
    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default="adamw_torch")
    remove_unused_columns: bool = field(default=False)
    freeze_mm_mlp_adapter: bool = field(default=False)
    model_max_length: int = field(
        default=512,
        metadata={
            "help":
                "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
        },
    )
    double_quant: bool = field(
        default=True,
        metadata={"help": "Compress the quantization statistics through double quantization."}
    )
    quant_type: str = field(
        default="nf4",
        metadata={"help": "Quantization data type to use. Should be one of `fp4` or `nf4`."}
    )
    bits: int = field(
        default=16,
        metadata={"help": "How many bits to use."}
    )
    lora_enable: bool = False
    lora_r: int = 64
    lora_alpha: int = 16
    lora_dropout: float = 0.05
    lora_weight_path: str = ""
    lora_bias: str = "none"


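# Under DeepSpeed ZeRO-3, parameters are partitioned across ranks, so a local tensor may be
# empty. maybe_zero_3() gathers the full parameter (when it carries a `ds_id`) and returns a
# detached CPU copy that is safe to serialize.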
def maybe_zero_3(param, ignore_status=False, name=None):
    from deepspeed import zero
    from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
    if hasattr(param, "ds_id"):
        if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
            if not ignore_status:
                logging.warning(f"{name}: param.ds_status is ZeroParamStatus.NOT_AVAILABLE: {param.ds_status}")
        with zero.GatheredParameters([param]):
            param = param.data.detach().cpu().clone()
    else:
        param = param.detach().cpu().clone()
    return param


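# Filters named_parameters down to the LoRA weights (plus, depending on the `bias` setting,
# the matching bias terms) so that an adapter-only checkpoint can be saved.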
def get_peft_state_maybe_zero_3(named_params, bias):
    if bias == "none":
        to_return = {k: t for k, t in named_params if "lora_" in k}
    elif bias == "all":
        to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
    elif bias == "lora_only":
        to_return = {}
        maybe_lora_bias = {}
        lora_bias_names = set()
        for k, t in named_params:
            if "lora_" in k:
                to_return[k] = t
                bias_name = k.split("lora_")[0] + "bias"
                lora_bias_names.add(bias_name)
            elif "bias" in k:
                maybe_lora_bias[k] = t
        for k, t in maybe_lora_bias.items():
            if k in lora_bias_names:
                to_return[k] = t
    else:
        raise NotImplementedError
    to_return = {k: maybe_zero_3(v, name=k) for k, v in to_return.items()}
    return to_return


def get_peft_state_non_lora_maybe_zero_3(named_params, require_grad_only=True):
    to_return = {k: t for k, t in named_params if "lora_" not in k}
    if require_grad_only:
        to_return = {k: t for k, t in to_return.items() if t.requires_grad}
    to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()}
    return to_return


def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):
    to_return = {k: t for k, t in named_params if any(key_match in k for key_match in keys_to_match)}
    to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()}
    return to_return


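# Collects the names of all nn.Linear modules to use as LoRA target modules; lm_head is
# excluded so the output projection is not wrapped with an adapter.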
def find_all_linear_names(model):
    cls = torch.nn.Linear
    lora_module_names = set()
    for name, module in model.named_modules():
        if isinstance(module, cls):
            names = name.split('.')
            lora_module_names.add(names[0] if len(names) == 1 else names[-1])

    if 'lm_head' in lora_module_names:
        lora_module_names.remove('lm_head')
    return list(lora_module_names)


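# Saving takes one of three paths: projector-only weights when just the mm adapter is tuned,
# DeepSpeed's own save path when a DeepSpeed engine is attached, and otherwise a plain CPU
# state dict written through the HF Trainer.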
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer,
                                   output_dir: str):
    """Collects the state dict and dumps it to disk."""

    if getattr(trainer.args, "tune_mm_mlp_adapter", False):
        keys_to_match = ['mm_projector']
        if getattr(trainer.args, "use_im_start_end", False):
            keys_to_match.extend(['embed_tokens', 'embed_in'])

        weight_to_save = get_mm_adapter_state_maybe_zero_3(trainer.model.named_parameters(), keys_to_match)
        trainer.model.config.save_pretrained(output_dir)

        current_folder = output_dir.split('/')[-1]
        parent_folder = os.path.dirname(output_dir)
        if trainer.args.local_rank == 0 or trainer.args.local_rank == -1:
            if current_folder.startswith('checkpoint-'):
                mm_projector_folder = os.path.join(parent_folder, "mm_projector")
                os.makedirs(mm_projector_folder, exist_ok=True)
                torch.save(weight_to_save, os.path.join(mm_projector_folder, f'{current_folder}.bin'))
            else:
                torch.save(weight_to_save, os.path.join(output_dir, 'mm_projector.bin'))
        return

    if trainer.deepspeed:
        torch.cuda.synchronize()
        trainer.save_model(output_dir)
        return

    state_dict = trainer.model.state_dict()
    if trainer.args.should_save:
        cpu_state_dict = {
            key: value.cpu()
            for key, value in state_dict.items()
        }
        del state_dict
        trainer._save(output_dir, state_dict=cpu_state_dict)


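# Embeddings for newly added special tokens are initialized to the mean of the existing
# embeddings, a common heuristic that avoids starting from random vectors.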
def smart_tokenizer_and_embedding_resize(
    special_tokens_dict: Dict,
    tokenizer: transformers.PreTrainedTokenizer,
    model: transformers.PreTrainedModel,
):
    """Resize tokenizer and embedding.

    Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
    """
    num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
    model.resize_token_embeddings(len(tokenizer))

    if num_new_tokens > 0:
        input_embeddings = model.get_input_embeddings().weight.data
        output_embeddings = model.get_output_embeddings().weight.data

        input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
            dim=0, keepdim=True)
        output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
            dim=0, keepdim=True)

        input_embeddings[-num_new_tokens:] = input_embeddings_avg
        output_embeddings[-num_new_tokens:] = output_embeddings_avg


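# HfArgumentParser exposes every dataclass field above (plus the standard
# transformers.TrainingArguments fields) as a command-line flag. A stage-2 LoRA run might be
# launched roughly like the sketch below; the script path, checkpoint locations, and launcher
# are illustrative placeholders, not the repo's actual launch scripts:
#
#   deepspeed <path/to/this_script>.py \
#       --model_name_or_path checkpoints/vtimellm/vicuna-7b-v1.5 \
#       --training_stage 2 --lora_enable True --bf16 True \
#       --model_max_length 2048 --output_dir ./checkpoints/stage2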
def train():
    global local_rank

    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    local_rank = training_args.local_rank
    compute_dtype = (torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))

    bnb_model_from_pretrained_args = {}
    if training_args.bits in [4, 8]:
        from transformers import BitsAndBytesConfig
        bnb_model_from_pretrained_args.update(dict(
            device_map={"": training_args.device},
            load_in_4bit=training_args.bits == 4,
            load_in_8bit=training_args.bits == 8,
            quantization_config=BitsAndBytesConfig(
                load_in_4bit=training_args.bits == 4,
                load_in_8bit=training_args.bits == 8,
                llm_int8_threshold=6.0,
                llm_int8_has_fp16_weight=False,
                bnb_4bit_compute_dtype=compute_dtype,
                bnb_4bit_use_double_quant=training_args.double_quant,
                bnb_4bit_quant_type=training_args.quant_type
            )
        ))

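    # The backbone is picked from the checkpoint path: ChatGLM and VideoLLaMA2 checkpoints get
    # their dedicated wrappers, everything else is loaded as a LLaMA/Vicuna-based VTimeLLM model.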
    if 'chatglm' in model_args.model_name_or_path:
        model = VTimeLLMChatGLMForCausalLM.from_pretrained(
            model_args.model_name_or_path, empty_init=False, device='cuda'
        )
    elif 'VideoLLaMA2' in model_args.model_name_or_path:
        # NOTE: assumed import path; Videollama2MistralForCausalLM is not imported at module
        # level, so the VideoLLaMA2 package must be available on PYTHONPATH for this branch.
        from videollama2.model import Videollama2MistralForCausalLM
        config = transformers.AutoConfig.from_pretrained(model_args.model_name_or_path, trust_remote_code=True)
        config._attn_implementation = 'flash_attention_2'
        model = Videollama2MistralForCausalLM.from_pretrained(
            model_args.model_name_or_path,
            config=config,
            cache_dir=training_args.cache_dir,
            torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
            do_sample=True,
            **bnb_model_from_pretrained_args
        )
    else:
        model = VTimeLLMLlamaForCausalLM.from_pretrained(
            model_args.model_name_or_path,
            cache_dir=training_args.cache_dir,
            **bnb_model_from_pretrained_args
        )
    model.config.use_cache = False

    if training_args.bits in [4, 8]:
        from peft import prepare_model_for_kbit_training
        model.config.torch_dtype = (torch.float32 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))
        model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=training_args.gradient_checkpointing)

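    # Gradient checkpointing needs gradients to flow into the (possibly frozen) input embeddings;
    # enable_input_require_grads(), or the forward-hook fallback, makes sure that happens.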
    if training_args.gradient_checkpointing:
        if hasattr(model, "enable_input_require_grads"):
            model.enable_input_require_grads()
        else:
            def make_inputs_require_grad(module, input, output):
                output.requires_grad_(True)
            model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

    if 'chatglm' in model_args.model_name_or_path:
        tokenizer = transformers.AutoTokenizer.from_pretrained(
            model_args.model_name_or_path,
            trust_remote_code=True
        )
    else:
        tokenizer = transformers.AutoTokenizer.from_pretrained(
            model_args.model_name_or_path,
            cache_dir=training_args.cache_dir,
            model_max_length=training_args.model_max_length,
            padding_side="right",
            use_fast=False,
        )
        tokenizer.pad_token = tokenizer.unk_token

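    # LoRA setup: for stage 3 (and the optional fine-tuning stage) the adapters trained in the
    # previous stage are loaded and merged into the base weights first, and only then is a fresh
    # adapter attached for the current stage.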
    if training_args.lora_enable:
        from peft import LoraConfig, get_peft_model
        lora_config = LoraConfig(
            r=training_args.lora_r,
            lora_alpha=training_args.lora_alpha,
            target_modules=find_all_linear_names(model),
            lora_dropout=training_args.lora_dropout,
            bias=training_args.lora_bias,
            task_type="CAUSAL_LM",
        )
        if training_args.bits == 16:
            if training_args.bf16:
                model.to(torch.bfloat16)
            if training_args.fp16:
                model.to(torch.float16)

        model = model.cuda()

        if training_args.training_stage == 3:
            model.get_model().initialize_vision_modules(model_args)

            model = load_lora(model, model_args.stage2_path)
            rank0_print('Merging stage 2 LoRA weights...')
            model = model.merge_and_unload()

            if training_args.finetuning:
                rank0_print("*" * 90)
                rank0_print("Preparing for stage 4 (finetuning)")

                model = load_lora(model, model_args.stage3_path)
                rank0_print('Merging stage 3 LoRA weights...')
                model = model.merge_and_unload()
                rank0_print("*" * 90)

            rank0_print("Adding LoRA adapters...")
            model = get_peft_model(model, lora_config)
        else:
            rank0_print("Adding LoRA adapters...")
            model = get_peft_model(model, lora_config)

    print_trainable_parameters(model)

    if model_args.version in conversation_lib.conv_templates:
        conversation_lib.default_conversation = conversation_lib.conv_templates[model_args.version]
    else:
        conversation_lib.default_conversation = conversation_lib.conv_templates["vicuna_v1"]

    model.config.tune_mm_mlp_adapter = training_args.tune_mm_mlp_adapter = model_args.tune_mm_mlp_adapter
    model.config.freeze_mm_mlp_adapter = training_args.freeze_mm_mlp_adapter

    if training_args.training_stage != 3:
        model.get_model().initialize_vision_modules(model_args=model_args)

    if model_args.tune_mm_mlp_adapter:
        model.requires_grad_(False)
        for p in model.get_model().mm_projector.parameters():
            p.requires_grad = True

    if training_args.freeze_mm_mlp_adapter:
        for p in model.get_model().mm_projector.parameters():
            p.requires_grad = False

    if training_args.bits in [4, 8]:
        model.get_model().mm_projector.to(dtype=compute_dtype, device=training_args.device)

    if training_args.bits in [4, 8]:
        from peft.tuners.lora import LoraLayer
        for name, module in model.named_modules():
            if isinstance(module, LoraLayer):
                if training_args.bf16:
                    module = module.to(torch.bfloat16)
            if 'norm' in name:
                module = module.to(torch.float32)
            if 'lm_head' in name or 'embed_tokens' in name:
                if hasattr(module, 'weight'):
                    if training_args.bf16 and module.weight.dtype == torch.float32:
                        module = module.to(torch.bfloat16)

    data_module = make_supervised_data_module(tokenizer=tokenizer,
                                              data_args=data_args)
    trainer = VTimeLLMTrainer(model=model,
                              tokenizer=tokenizer,
                              args=training_args,
                              **data_module)

    if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
        trainer.train(resume_from_checkpoint=True)
    else:
        trainer.train()
    trainer.save_state()

    model.config.use_cache = True

    if training_args.lora_enable:
        state_dict = get_peft_state_maybe_zero_3(
            model.named_parameters(), training_args.lora_bias
        )
        non_lora_state_dict = get_peft_state_non_lora_maybe_zero_3(
            model.named_parameters()
        )
        if training_args.local_rank == 0 or training_args.local_rank == -1:
            model.config.save_pretrained(training_args.output_dir)
            model.save_pretrained(training_args.output_dir, state_dict=state_dict)
            torch.save(non_lora_state_dict, os.path.join(training_args.output_dir, 'non_lora_trainables.bin'))
    else:
        safe_save_model_for_hf_trainer(trainer=trainer,
                                       output_dir=training_args.output_dir)


if __name__ == "__main__":
    train()