| """TinyMind model - HuggingFace compatible wrapper.""" |
| import math |
| import torch |
| import torch.nn as nn |
| from transformers import PreTrainedModel, GenerationMixin |
| from transformers.modeling_outputs import CausalLMOutputWithPast |
from configuration_tinymind import TinyMindConfig


class TinyMindAttention(nn.Module):
    """Multi-head causal self-attention with a fused QKV projection."""

    def __init__(self, config):
| super().__init__() |
        assert config.n_embd % config.n_heads == 0, "n_embd must be divisible by n_heads"
        self.n_heads = config.n_heads
        self.head_dim = config.n_embd // config.n_heads
        # One fused projection yields queries, keys, and values in a single matmul.
        self.qkv = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)
        self.proj = nn.Linear(config.n_embd, config.n_embd)
        self.attn_drop = nn.Dropout(config.dropout)

    def forward(self, x, attention_mask=None):
        B, T, C = x.shape
        # Project once, then split into per-head queries, keys, and values.
        q, k, v = self.qkv(x).split(C, dim=2)
        q = q.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)  # (B, nh, T, hd)
        k = k.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        v = v.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        # Scaled dot-product attention with a causal (lower-triangular) mask.
        scale = math.sqrt(self.head_dim)
        scores = torch.matmul(q, k.transpose(-2, -1)) / scale
        causal = torch.tril(torch.ones(T, T, device=x.device, dtype=torch.bool))
        scores = scores.masked_fill(~causal.view(1, 1, T, T), float("-inf"))
        if attention_mask is not None:
            # HF convention: 1 = attend, 0 = padding. Push padded key positions
            # to the dtype minimum so they vanish after the softmax.
            attn_mask = (1.0 - attention_mask[:, None, None, :].float()) * torch.finfo(scores.dtype).min
            scores = scores + attn_mask
        weights = self.attn_drop(torch.softmax(scores, dim=-1))
        out = torch.matmul(weights, v)
        # Merge heads back into the embedding dimension.
        out = out.transpose(1, 2).contiguous().view(B, T, C)
        return self.proj(out)
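
# NOTE: on PyTorch >= 2.0 the explicit mask/softmax above can be replaced by
# the fused scaled_dot_product_attention kernel. A minimal sketch (not wired
# in; q, k, v shaped (B, n_heads, T, head_dim) as above):
#
#     out = torch.nn.functional.scaled_dot_product_attention(
#         q, k, v,
#         dropout_p=self.attn_drop.p if self.training else 0.0,
#         is_causal=True,  # applies the lower-triangular mask internally
#     )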


class TinyMindFF(nn.Module):
    """Position-wise feed-forward block: Linear -> GELU -> Linear, with dropout."""

    def __init__(self, config):
| super().__init__() |
| self.net = nn.Sequential( |
| nn.Linear(config.n_embd, 4 * config.n_embd), |
| nn.GELU(), |
| nn.Dropout(config.dropout), |
| nn.Linear(4 * config.n_embd, config.n_embd), |
| nn.Dropout(config.dropout), |
| ) |

    def forward(self, x):
| return self.net(x) |


class TinyMindBlock(nn.Module):
    """Pre-norm transformer block: residual attention followed by a residual MLP."""

    def __init__(self, config):
| super().__init__() |
| self.ln1 = nn.LayerNorm(config.n_embd) |
| self.attn = TinyMindAttention(config) |
| self.ln2 = nn.LayerNorm(config.n_embd) |
| self.ff = TinyMindFF(config) |

    def forward(self, x, attention_mask=None):
| x = x + self.attn(self.ln1(x), attention_mask=attention_mask) |
| x = x + self.ff(self.ln2(x)) |
| return x |


class TinyMindModel(nn.Module):
    """Backbone: token + learned positional embeddings, N blocks, final LayerNorm."""

    def __init__(self, config):
| super().__init__() |
| self.token_embedding = nn.Embedding(config.vocab_size, config.n_embd) |
| self.position_embedding = nn.Embedding(config.max_seq_len, config.n_embd) |
| self.drop = nn.Dropout(config.dropout) |
| self.blocks = nn.ModuleList([TinyMindBlock(config) for _ in range(config.n_layers)]) |
| self.ln_f = nn.LayerNorm(config.n_embd) |
        # LM head: hidden states -> vocabulary logits; its weight is tied to the
        # token embedding by TinyMindForCausalLM below.
        self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)


class TinyMindForCausalLM(PreTrainedModel, GenerationMixin):
    """HF-compatible causal LM wrapper around TinyMindModel."""

| config_class = TinyMindConfig |
| base_model_prefix = "model" |
| _tied_weights_keys = {"model.head.weight": "model.token_embedding.weight"} |

    def __init__(self, config):
| super().__init__(config) |
        self.model = TinyMindModel(config)
        # Tie the output projection to the input embedding matrix.
        self.model.head.weight = self.model.token_embedding.weight
        self.post_init()

    def _tie_weights(self):
| self.model.head.weight = self.model.token_embedding.weight |

    def get_input_embeddings(self):
| return self.model.token_embedding |

    def set_input_embeddings(self, value):
| self.model.token_embedding = value |

    def get_output_embeddings(self):
| return self.model.head |

    def set_output_embeddings(self, new_embeddings):
| self.model.head = new_embeddings |

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **kwargs):
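        # NOTE: forward() keeps no KV cache, so the whole prefix is re-encoded
        # at every decoding step. A cache-aware variant would slice here, e.g.
        # (a sketch; past_key_values is not implemented in this model):
        #
        #     if kwargs.get("past_key_values") is not None:
        #         input_ids = input_ids[:, -1:]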
| return {"input_ids": input_ids, "attention_mask": attention_mask} |

    def forward(self, input_ids=None, attention_mask=None, labels=None, **kwargs):
        _, T = input_ids.shape
        pos = torch.arange(T, device=input_ids.device).unsqueeze(0)
        # Sum token and learned positional embeddings, then run the blocks.
        x = self.model.drop(self.model.token_embedding(input_ids) + self.model.position_embedding(pos))
        for block in self.model.blocks:
            x = block(x, attention_mask=attention_mask)
        x = self.model.ln_f(x)
        logits = self.model.head(x)
        loss = None
        if labels is not None:
            # Next-token prediction: logits at position t are scored against
            # the label at position t + 1; -100 labels are ignored.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss = nn.functional.cross_entropy(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
                ignore_index=-100,
            )
        return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=None, hidden_states=None, attentions=None)
|
|
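if __name__ == "__main__":
    # Minimal smoke test (a sketch: assumes TinyMindConfig accepts the field
    # names used throughout this file as keyword arguments).
    config = TinyMindConfig(vocab_size=1000, n_embd=64, n_heads=4, n_layers=2,
                            max_seq_len=128, dropout=0.1)
    model = TinyMindForCausalLM(config)
    model.eval()
    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    out = model(input_ids=input_ids, labels=input_ids)
    print("logits:", tuple(out.logits.shape), "loss:", float(out.loss))
    # Greedy decoding via the inherited GenerationMixin.
    generated = model.generate(input_ids[:, :4], max_new_tokens=8, do_sample=False)
    print("generated:", tuple(generated.shape))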