from typing import Callable, Optional, Tuple

import torch
import torch.nn.functional as F
from torch import nn

from transformers.cache_utils import Cache
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from transformers.processing_utils import Unpack
from transformers.utils import logging
from .configuration_smallthinker import SmallThinkerConfig


logger = logging.get_logger(__name__)


@torch.jit.script
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
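# Illustrative shape check for repeat_kv (a minimal sketch, not part of the model and not
# executed at import time): with 4 KV heads repeated 3 times, a (2, 4, 128, 64) key/value
# tensor is expanded to (2, 12, 128, 64) so it lines up with 12 query heads.
#
#     kv = torch.randn(2, 4, 128, 64)
#     assert repeat_kv(kv, 3).shape == (2, 12, 128, 64)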
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
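# Minimal usage sketch for apply_rotary_pos_emb (illustrative only; the shapes below are
# assumptions, not taken from a specific checkpoint). With q/k laid out as
# [batch, heads, seq_len, head_dim], cos/sin of shape [batch, seq_len, head_dim] are
# unsqueezed at dim=1 so they broadcast over the head dimension:
#
#     q = torch.randn(1, 8, 16, 64)
#     k = torch.randn(1, 2, 16, 64)
#     cos = torch.randn(1, 16, 64)
#     sin = torch.randn(1, 16, 64)
#     q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)  # same shapes as q and k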
def check_is_swa_layer(config, layer_idx):
    """
    Check if the current layer is a sliding window attention layer.
    """
    if not hasattr(config, "sliding_window_layout"):
        return False
    elif config.sliding_window_layout is None:
        return False
    else:
        return config.sliding_window_layout[layer_idx] == 1
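# Illustrative example (the layout below is made up, not a real SmallThinker configuration):
# `sliding_window_layout` is a per-layer sequence where 1 marks a sliding-window-attention
# layer and 0 a full-attention layer, so with layout [0, 1, 0, 1] layers 1 and 3 use SWA:
#
#     class _DummyConfig:
#         sliding_window_layout = [0, 1, 0, 1]
#
#     assert check_is_swa_layer(_DummyConfig(), 1) is True
#     assert check_is_swa_layer(_DummyConfig(), 2) is False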
class SmallThinkerRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        SmallThinkerRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
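# Hand-checkable sketch of what the normalization computes (values chosen for illustration):
# for x = [3.0, 4.0], mean(x^2) = 12.5, so y = x / sqrt(12.5 + eps) * weight, i.e. roughly
# [0.8485, 1.1314] with unit weights. Equivalent one-liner:
#
#     x = torch.tensor([[3.0, 4.0]])
#     norm = SmallThinkerRMSNorm(2)
#     torch.testing.assert_close(norm(x), x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6))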
class SmallThinkerRotaryEmbedding(nn.Module):
    def __init__(self, config: SmallThinkerConfig, device=None):
        super().__init__()
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
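# Usage sketch (assumes `config` is a SmallThinkerConfig; shapes are illustrative). The forward
# only reads the device/dtype of `x` and the batch size of `position_ids`, and returns cos/sin of
# shape [batch, seq_len, head_dim], ready to be passed to apply_rotary_pos_emb:
#
#     rope = SmallThinkerRotaryEmbedding(config)
#     position_ids = torch.arange(16)[None, :]               # [1, seq_len]
#     cos, sin = rope(torch.empty(1, 16, 1), position_ids)   # each [1, 16, head_dim]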
class SmallThinkerExpert(nn.Module):
    def __init__(self, config: SmallThinkerConfig):
        super().__init__()
        self.hidden_dim = config.hidden_size
        self.ffn_dim = config.moe_ffn_hidden_size

        self.up = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
        self.gate = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
        self.down = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)

    def forward(self, hidden_states: torch.Tensor):
        current_hidden_states = self.up(hidden_states) * F.relu(self.gate(hidden_states))
        current_hidden_states = self.down(current_hidden_states)
        return current_hidden_states
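# The expert is a ReLU-gated MLP: down(up(x) * relu(gate(x))). A quick shape sketch, assuming
# hidden_size 8 and moe_ffn_hidden_size 32 (illustrative numbers, not a real configuration):
#
#     expert = SmallThinkerExpert(config)   # config.hidden_size == 8, config.moe_ffn_hidden_size == 32
#     tokens = torch.randn(5, 8)            # (num_tokens, hidden_size), as routed by the MoE block
#     assert expert(tokens).shape == (5, 8)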
class SmallThinkerMoeBlock(nn.Module):
    def __init__(self, config: SmallThinkerConfig):
        super().__init__()
        self.hidden_dim = config.hidden_size
        self.num_primary_experts = config.moe_num_primary_experts
        self.moe_primary_router_apply_softmax = config.moe_primary_router_apply_softmax
        self.num_active_primary_experts = config.moe_num_active_primary_experts
        self.primary_router = nn.Linear(self.hidden_dim, self.num_primary_experts, bias=False)
        self.experts = nn.ModuleList([SmallThinkerExpert(config) for _ in range(self.num_primary_experts)])

    def forward(self, router_input: torch.Tensor, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        batch_size, sequence_length, hidden_dim = hidden_states.shape

        hidden_states = hidden_states.view(-1, hidden_dim)
        router_input = router_input.view(-1, hidden_dim)

        router_logits = self.primary_router(router_input)

        # Keep only the top-k experts per token; `router_logits` now holds the selected scores.
        router_logits, selected_experts = torch.topk(router_logits, self.num_active_primary_experts, dim=-1)

        if self.moe_primary_router_apply_softmax:
            routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
        else:
            routing_weights = F.sigmoid(router_logits)
            routing_weights /= routing_weights.sum(dim=-1, keepdim=True)

        # Cast the routing weights back to the input dtype.
        routing_weights = routing_weights.to(hidden_states.dtype)

        final_hidden_states = torch.zeros(
            (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
        )

        # One-hot encode the selected experts to build an expert mask of shape
        # (num_experts, top_k, num_tokens), then find the experts that received at least one token.
        expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_primary_experts).permute(2, 1, 0)
        expert_hitted = (expert_mask.sum(dim=(-1, -2)) > 0).nonzero(as_tuple=True)[0].tolist()

        # Only loop over the experts that were actually selected for this batch.
        for expert_idx in expert_hitted:
            expert_layer = self.experts[expert_idx]
            idx, top_x = torch.where(expert_mask[expert_idx])

            # Gather the tokens routed to this expert, run the expert, and scale its output by the
            # corresponding routing weight.
            current_state = hidden_states[top_x].reshape(-1, hidden_dim)
            current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]

            # Scatter-add the weighted expert output back to the positions of its tokens.
            final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
        final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
        return final_hidden_states, router_logits
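# Routing sketch (illustrative, assuming 4 primary experts with 2 active per token and softmax
# routing): each token's router logits are reduced to its top-2 experts, the kept scores become
# the mixture weights, and only the selected experts run on that token.
#
#     block = SmallThinkerMoeBlock(config)       # config.moe_num_primary_experts == 4,
#                                                # config.moe_num_active_primary_experts == 2
#     x = torch.randn(1, 6, config.hidden_size)  # (batch, seq_len, hidden_size)
#     out, topk_logits = block(x, x)             # router_input and hidden_states share a shape
#     assert out.shape == x.shape and topk_logits.shape == (6, 2)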
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
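# Shape sketch for the eager path (illustrative tensors; `module` only needs the two attributes
# read above, so a simple namespace stands in for a real attention module here):
#
#     from types import SimpleNamespace
#
#     mod = SimpleNamespace(num_key_value_groups=4, training=False)
#     q = torch.randn(1, 8, 16, 64)   # (batch, num_heads, q_len, head_dim)
#     k = torch.randn(1, 2, 16, 64)   # (batch, num_kv_heads, kv_len, head_dim)
#     v = torch.randn(1, 2, 16, 64)
#     out, w = eager_attention_forward(mod, q, k, v, None, scaling=64 ** -0.5)
#     assert out.shape == (1, 16, 8, 64) and w.shape == (1, 8, 16, 16)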
class SmallThinkerAttention(nn.Module):
    def __init__(self, config: SmallThinkerConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = config.head_dim
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
        self.sliding_window = config.sliding_window_size if check_is_swa_layer(config, layer_idx) else None

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        if position_embeddings:
            cos, sin = position_embeddings
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
        else:
            cos, sin = None, None

        if past_key_value is not None:
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "cache_position": cache_position,
                "sliding_window": self.sliding_window,
            }
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. "
                    "Falling back to eager attention. This warning can be removed by using the argument "
                    '`attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0,
            scaling=self.scaling,
            sliding_window=self.sliding_window,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
class SmallThinkerDecoderLayer(nn.Module):
    def __init__(self, config: SmallThinkerConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = SmallThinkerAttention(config, layer_idx)
        self.block_sparse_moe = SmallThinkerMoeBlock(config)
        self.input_layernorm = SmallThinkerRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = SmallThinkerRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.is_swa = check_is_swa_layer(config, layer_idx)

        if self.is_swa and config._attn_implementation == "sdpa":
            logger.warning_once(
                f"Sliding Window Attention is enabled but not optimized for `{config._attn_implementation}`; "
                "unexpected results may be encountered."
            )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        output_router_logits: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, sequence_length)` where padding elements are indicated by 0.
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_router_logits (`bool`, *optional*):
                Whether or not to return the logits of all the routers. They are useful for computing the router
                loss and should not be returned during inference.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up
                decoding (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
                into the model.
        """
        # Self-attention with a residual connection.
        residual = hidden_states
        router_input = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Sparse MoE block; the router sees the pre-layernorm hidden states (`router_input`).
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states, router_logits = self.block_sparse_moe(router_input, hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)
        if output_router_logits:
            outputs += (router_logits,)
        return outputs
class SmallThinkerPreTrainedModel(PreTrainedModel):
    config_class = SmallThinkerConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = False
    _no_split_modules = ["SmallThinkerDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = False
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = False
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, SmallThinkerRMSNorm):
            module.weight.data.fill_(1.0)