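"""Configuration classes for the Imp multimodal model.

This module defines `PhiConfig` (the Phi language-model backbone),
`SiglipVisionConfig` (the SigLIP vision tower), and `ImpConfig`, which extends
`PhiConfig` with the vision-tower settings. (Descriptive docstring added for
clarity; the original file header was not recoverable.)
"""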
import os
import math
from typing import Optional, Union

from transformers import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)

class PhiConfig(PretrainedConfig):
    """Phi configuration."""

    model_type = "phi-msft"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size: int = 50304,
        n_positions: int = 2048,
        n_embd: int = 1024,
        n_layer: int = 20,
        n_inner: Optional[int] = None,
        n_head: int = 16,
        n_head_kv: Optional[int] = None,
        rotary_dim: Optional[int] = 32,
        activation_function: Optional[str] = "gelu_new",
        flash_attn: bool = False,
        flash_rotary: bool = False,
        fused_dense: bool = False,
        attn_pdrop: float = 0.0,
        embd_pdrop: float = 0.0,
        resid_pdrop: float = 0.0,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        tie_word_embeddings: bool = False,
        pad_vocab_size_multiple: int = 64,
        **kwargs
    ) -> None:
        # Pad the vocabulary size up to the nearest multiple of `pad_vocab_size_multiple`.
        self.vocab_size = int(math.ceil(vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple)
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_inner = n_inner
        self.n_head = n_head
        self.n_head_kv = n_head_kv
        # The rotary embedding dimension cannot exceed the per-head dimension.
        self.rotary_dim = min(rotary_dim, n_embd // n_head)
        self.activation_function = activation_function
        self.flash_attn = flash_attn
        self.flash_rotary = flash_rotary
        self.fused_dense = fused_dense
        self.attn_pdrop = attn_pdrop
        self.embd_pdrop = embd_pdrop
        self.resid_pdrop = resid_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)

class SiglipVisionConfig(PretrainedConfig):
    """SigLIP vision encoder configuration."""

    model_type = "siglip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # When loading from a full SigLIP config, keep only the vision sub-config.
        if config_dict.get("model_type") == "siglip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

class ImpConfig(PhiConfig):
    """Imp configuration: a Phi language model paired with a SigLIP vision tower."""

    model_type = "imp"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.image_token_index = getattr(self, "image_token_index", 50296)
        self.image_token = getattr(self, "image_token", "<image>")

        # If only the vision tower name is given, resolve its config once and cache it.
        if not hasattr(self, "vision_tower_config") and hasattr(self, "mm_vision_tower"):
            vision_tower_config = SiglipVisionConfig.from_pretrained(self.mm_vision_tower)
            self.vision_tower_config = vision_tower_config.to_diff_dict()

    @property
    def vision_tower_cfg(self):
        cfg = SiglipVisionConfig.from_dict(self.vision_tower_config)
        # Propagate multimodal settings stored on the top-level config.
        cfg.mm_vision_select_layer = self.mm_vision_select_layer
        cfg.mm_vision_tower = self.mm_vision_tower
        return cfg
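

# ------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original configuration API).
# It shows how the classes compose: `ImpConfig` extends `PhiConfig` and carries
# a serialized `SiglipVisionConfig` for the vision tower. The `mm_vision_tower`
# path and `mm_vision_select_layer` value below are hypothetical placeholders.
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    vision_cfg = SiglipVisionConfig(image_size=384, patch_size=14)

    config = ImpConfig(
        n_embd=2560,
        n_head=32,
        vision_tower_config=vision_cfg.to_diff_dict(),
        mm_vision_tower="path/to/siglip-vision-tower",  # hypothetical path
        mm_vision_select_layer=-2,  # hypothetical value
    )

    # vocab_size is padded up to a multiple of `pad_vocab_size_multiple` (64 by default).
    print(config.vocab_size)
    # The property rebuilds a SiglipVisionConfig and attaches the mm_* settings.
    print(config.vision_tower_cfg)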