"""TinyMind model - HuggingFace compatible wrapper.
Matches the original pytorch_model.bin parameter names exactly:
model.token_embedding.weight, model.position_embedding.weight,
model.blocks.{i}.ln1.weight/bias, model.blocks.{i}.attn.qkv.weight,
model.blocks.{i}.attn.proj.weight/bias, model.blocks.{i}.ln2.weight/bias,
model.blocks.{i}.ff.net.0.weight/bias, model.blocks.{i}.ff.net.3.weight/bias,
model.ln_f.weight/bias, model.head.weight
"""
import math
import torch
import torch.nn as nn
from transformers import PreTrainedModel, GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithPast
from configuration_tinymind import TinyMindConfig


class TinyMindAttention(nn.Module):
    def __init__(self, config: TinyMindConfig):
        super().__init__()
        self.n_heads = config.n_heads
        self.head_dim = config.n_embd // config.n_heads
        # Original: qkv is bias=False (768, 256), proj has bias (256, 256)
        self.qkv = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)
        self.proj = nn.Linear(config.n_embd, config.n_embd)
        self.attn_drop = nn.Dropout(config.dropout)

    def forward(self, x, attention_mask=None):
        B, T, C = x.shape
        q, k, v = self.qkv(x).split(C, dim=2)
        q = q.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        k = k.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        v = v.view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        scale = math.sqrt(self.head_dim)
        scores = torch.matmul(q, k.transpose(-2, -1)) / scale
        # Causal mask: position t attends only to positions <= t
        causal = torch.tril(torch.ones(T, T, device=x.device, dtype=torch.bool))
        scores = scores.masked_fill(~causal.view(1, 1, T, T), float('-inf'))
        if attention_mask is not None:
            # HF convention: 1 = attend, 0 = masked.
            # Convert to an additive mask: 1-positions -> 0, 0-positions -> large negative.
            attn_mask = (1.0 - attention_mask[:, None, None, :].float()) * torch.finfo(scores.dtype).min
            scores = scores + attn_mask
        weights = self.attn_drop(torch.softmax(scores, dim=-1))
        out = torch.matmul(weights, v)
        out = out.transpose(1, 2).contiguous().view(B, T, C)
        return self.proj(out)
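
# The explicit mask-and-softmax above keeps attention weights materialized. A minimal
# fused equivalent (a sketch, assuming PyTorch >= 2.0; `combined_mask` is a hypothetical
# bool tensor ANDing the causal mask with the padding mask, since SDPA does not accept
# attn_mask together with is_causal=True):
#
#   out = torch.nn.functional.scaled_dot_product_attention(
#       q, k, v, attn_mask=combined_mask, dropout_p=config.dropout if training else 0.0)
#
# SDPA's default scale of 1/sqrt(head_dim) matches the manual scaling above.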


class TinyMindFF(nn.Module):
    """Matches original: ff.net.0 and ff.net.3 are the Linear layers; the GELU/Dropout
    layers in between (and the trailing Dropout) carry no parameters, so the state-dict
    names line up."""

    def __init__(self, config: TinyMindConfig):
        super().__init__()
        # Original uses nn.Sequential with indices 0(Linear), 1(GELU), 2(Dropout), 3(Linear), 4(Dropout)
        self.net = nn.Sequential(
            nn.Linear(config.n_embd, 4 * config.n_embd),  # net.0
            nn.GELU(),                                    # net.1
            nn.Dropout(config.dropout),                   # net.2
            nn.Linear(4 * config.n_embd, config.n_embd),  # net.3
            nn.Dropout(config.dropout),                   # net.4
        )

    def forward(self, x):
        return self.net(x)


class TinyMindBlock(nn.Module):
    def __init__(self, config: TinyMindConfig):
        super().__init__()
        self.ln1 = nn.LayerNorm(config.n_embd)
        self.attn = TinyMindAttention(config)
        self.ln2 = nn.LayerNorm(config.n_embd)
        self.ff = TinyMindFF(config)

    def forward(self, x, attention_mask=None):
        x = x + self.attn(self.ln1(x), attention_mask=attention_mask)
        x = x + self.ff(self.ln2(x))
        return x


class TinyMindModel(nn.Module):
    """Inner model matching original 'model.*' weight prefix."""

    def __init__(self, config: TinyMindConfig):
        super().__init__()
        self.token_embedding = nn.Embedding(config.vocab_size, config.n_embd)
        self.position_embedding = nn.Embedding(config.max_seq_len, config.n_embd)
        self.drop = nn.Dropout(config.dropout)
        self.blocks = nn.ModuleList([TinyMindBlock(config) for _ in range(config.n_layers)])
        self.ln_f = nn.LayerNorm(config.n_embd)
        # Placeholder head (n_embd -> vocab_size); replaced and weight-tied in TinyMindForCausalLM
        self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

    def forward(self, input_ids, attention_mask=None):
        B, T = input_ids.shape
        pos = torch.arange(T, device=input_ids.device).unsqueeze(0)
        x = self.drop(self.token_embedding(input_ids) + self.position_embedding(pos))
        for block in self.blocks:
            x = block(x, attention_mask=attention_mask)
        x = self.ln_f(x)
        return x


class TinyMindForCausalLM(PreTrainedModel, GenerationMixin):
    config_class = TinyMindConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _tied_weights_keys = {"model.head.weight": "model.token_embedding.weight"}

    def __init__(self, config: TinyMindConfig):
        super().__init__(config)
        # Architecture matches original weight names under 'model.*'
        self.model = TinyMindModel(config)
        # LM head - weight-tied with the token embedding
        self.model.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # Weight tying
        self.model.head.weight = self.model.token_embedding.weight
        self.post_init()

    def _tie_weights(self):
        self.model.head.weight = self.model.token_embedding.weight

    def get_input_embeddings(self):
        return self.model.token_embedding

    def set_input_embeddings(self, value):
        self.model.token_embedding = value

    def get_output_embeddings(self):
        return self.model.head

    def set_output_embeddings(self, new_embeddings):
        self.model.head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **kwargs):
        # No KV cache is kept, so generation re-feeds the full sequence at every step.
        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
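
    # Generation sketch (hedged): with GenerationMixin wired up, sampling can be run as
    #   output_ids = model.generate(input_ids, max_new_tokens=32, do_sample=True)
    # with the full prefix re-run each step, since no past_key_values are cached.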

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        labels=None,
        **kwargs,
    ):
        # Inner model: embeddings + transformer blocks + final LayerNorm
        x = self.model(input_ids, attention_mask=attention_mask)
        logits = self.model.head(x)
        loss = None
        if labels is not None:
            # Shift so the token at position t predicts the token at position t + 1
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss = nn.functional.cross_entropy(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
                ignore_index=-100,
            )
        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )
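

# Minimal smoke test (a sketch): the config fields below (vocab_size, n_embd, n_heads,
# n_layers, max_seq_len, dropout) are exactly the attributes the modules above read, but
# TinyMindConfig accepting them as keyword arguments, and the sizes themselves, are
# assumptions for illustration only.
if __name__ == "__main__":
    config = TinyMindConfig(
        vocab_size=8192,
        n_embd=256,
        n_heads=8,
        n_layers=4,
        max_seq_len=512,
        dropout=0.1,
    )
    model = TinyMindForCausalLM(config)
    ids = torch.randint(0, config.vocab_size, (2, 16))
    out = model(input_ids=ids, labels=ids)
    print(out.logits.shape)  # torch.Size([2, 16, 8192])
    print(out.loss)          # scalar cross-entropy loss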