| | |
| | from transformers import AutoModelForCausalLM |
| | from peft import LoraConfig, get_peft_model |
| | from safetensors.torch import load_file |
| | import glob |
| | import torch |
| | import torch.nn as nn |
| | from huggingface_hub import hf_hub_download |
| |
|
def load_safetensor_from_hf(repo_id, filename, repo_type="dataset"):
    """Load a safetensors file from the local Hugging Face cache.

    Resolves `filename` inside `repo_id` using only the local cache
    (`local_files_only=True`, so no network access is attempted) and
    deserializes it into a dict of tensors.

    Args:
        repo_id: Hugging Face repository id.
        filename: name of the .safetensors file inside the repo.
        repo_type: HF repo type, defaults to "dataset".

    Returns:
        dict mapping tensor names to tensors.
    """
    local_path = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        repo_type=repo_type,
        local_files_only=True,
    )
    return load_file(local_path)
| |
|
def load_pretrain(model, pretrain_ckpt_path):
    """Load pretrained weights from sharded .safetensors checkpoints.

    Merges every file matching "model*.safetensors" under
    `pretrain_ckpt_path` into one flat state dict and loads it into
    `model` non-strictly (the checkpoint may cover only a subset of the
    model's parameters). Prints a summary of loaded / missing /
    unexpected keys.

    Args:
        model: torch.nn.Module to load weights into.
        pretrain_ckpt_path: str path to the checkpoint directory.

    Raises:
        FileNotFoundError: if no matching .safetensors file is found.
    """
    print(f"Loading pretrained weights from: {pretrain_ckpt_path}")

    model_weight_paths = glob.glob(pretrain_ckpt_path + "/model*.safetensors")
    if not model_weight_paths:
        raise FileNotFoundError(
            f"Cannot find any .safetensors file in {pretrain_ckpt_path}"
        )

    # Merge all shards on CPU; later shards overwrite duplicate keys.
    weights = {}
    for model_weight_path in model_weight_paths:
        print(f"Loading weights from: {model_weight_path}")
        weights.update(load_file(model_weight_path, device="cpu"))

    # strict=False: tolerate keys present in only one of checkpoint/model.
    result = model.load_state_dict(weights, strict=False)

    model_keys = set(model.state_dict().keys())
    loaded_keys = model_keys.intersection(weights.keys())
    print(f"Loaded keys: {len(loaded_keys)} / {len(model_keys)}")
    print(f"Missing keys: {len(result.missing_keys)}")
    print(f"Unexpected keys: {len(result.unexpected_keys)}")
| | |
| | |
class RepModel(nn.Module):
    """LoRA-adapted wrapper around a pretrained 'fg-clip-base' backbone.

    Loads the backbone with AutoModelForCausalLM (trust_remote_code, so
    the actual class comes from the checkpoint's code) and wraps it with
    PEFT LoRA adapters on the attention projections and MLP layers.
    Used here as an image-feature extractor via `get_image_features`.
    """

    def __init__(self):
        super().__init__()

        # NOTE(review): hard-coded local model path — confirm it resolves
        # relative to the working directory used in training runs.
        model_root = 'fg-clip-base'

        # LoRA on q/k/v projections plus the two MLP linear layers.
        lora_config = LoraConfig(
            r=32,
            lora_alpha=64,
            target_modules=["q_proj", "v_proj", "k_proj", "fc1", "fc2"],
            lora_dropout=0.05,
            bias="none",
            task_type="FEATURE_EXTRACTION"
        )

        target_model = AutoModelForCausalLM.from_pretrained(
            model_root,
            trust_remote_code=True
        )
        self.target_model = get_peft_model(target_model, lora_config)

        # Log how many parameters the LoRA wrapping left trainable.
        self.target_model.print_trainable_parameters()

    def get_image_feature(self, point_map):
        """Return image features for `point_map` from the wrapped model."""
        return self.target_model.get_image_features(point_map)

    def forward(self, data_dict):
        """Extract image features for the batch.

        Args:
            data_dict: dict containing at least the key 'point_map'.

        Returns:
            The backbone's image features for data_dict['point_map'].
            (The original implementation computed these but dropped the
            result, so forward() always returned None — fixed to return.)
        """
        point_map = data_dict['point_map']
        return self.target_model.get_image_features(point_map)
| | |
| | |
| | |
if __name__ == "__main__":
    # Guarded so importing this module does not trigger a model build and
    # checkpoint load as a side effect; behavior when run as a script is
    # unchanged.
    # NOTE(review): hard-coded absolute path — consider a CLI argument.
    ckpt_path = '/home/m50048399/transfered/ye_project/checkpoints/sceneverse_scannet_exp1_b64_Pretrain_all_scannet_training_run1/poma/ckpt'
    model = RepModel()
    load_pretrain(model, ckpt_path)