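//! Sequence-to-sequence transformer model: a shared token embedding, encoder
//! and decoder stacks, an output projection to vocabulary logits, and
//! sampling-based generation helpers used by the SPF writer and researcher
//! configurations.
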
use crate::tensor::Tensor;
use crate::tokenizer::{self, Tokenizer, BOS_ID, EOS_ID, PAD_ID};
use crate::encoder::{Encoder, EncoderConfig};
use crate::decoder::{Decoder, DecoderConfig, DecoderLayerCache};

/// Intermediate activations captured during a causal forward pass, kept so a
/// later step (e.g. gradient computation) can reuse them instead of
/// recomputing the pass.
pub struct ForwardCache {
    /// Token IDs that were fed to the decoder.
    pub token_indices: Vec<u32>,
    /// Embedded input, shape `[batch, seq, d_model]`.
    pub embedded: Tensor,
    /// Per-layer caches produced by the decoder.
    pub decoder_caches: Vec<DecoderLayerCache>,
    /// Final decoder hidden states, before the output projection.
    pub decoder_output: Tensor,
}

/// Hyperparameters shared by the embedding, encoder, and decoder layers.
#[derive(Debug, Clone)]
pub struct TransformerModelConfig {
    /// Width of the embeddings and hidden states.
    pub d_model: usize,
    /// Number of attention heads per layer.
    pub n_heads: usize,
    /// Number of layers in the encoder and in the decoder.
    pub n_layers: usize,
    /// Hidden width of the position-wise feed-forward blocks.
    pub d_ff: usize,
    /// Number of entries in the token vocabulary.
    pub vocab_size: usize,
    /// Maximum sequence length the model accepts.
    pub max_seq_len: usize,
    /// Epsilon used in the layer-norm denominators.
    pub ln_eps: f32,
}

impl TransformerModelConfig {
    /// Configuration for the SPF writer model.
    pub fn spf_writer() -> Self {
        Self {
            d_model: 256,
            n_heads: 8,
            n_layers: 6,
            d_ff: 1024,
            vocab_size: 8192,
            max_seq_len: 2048,
            ln_eps: 1e-5,
        }
    }

    /// Configuration for the SPF researcher model (currently identical to the writer).
    pub fn spf_researcher() -> Self {
        Self::spf_writer()
    }

    /// Small configuration intended for unit tests.
    pub fn small() -> Self {
        Self {
            d_model: 64,
            n_heads: 4,
            n_layers: 2,
            d_ff: 256,
            vocab_size: 512,
            max_seq_len: 128,
            ln_eps: 1e-5,
        }
    }

    /// Estimate the total parameter count implied by this configuration.
    pub fn estimate_params(&self) -> usize {
        let d = self.d_model;
        let ff = self.d_ff;
        let v = self.vocab_size;
        let n = self.n_layers;

        // Token embedding table: one d-dimensional row per vocabulary entry.
        let embed_params = v * d;

        // Encoder layer: Q/K/V/O projections (weights + biases), feed-forward
        // block (two matrices + biases), and two layer norms (gamma + beta each).
        let enc_layer = 4 * d * d + 4 * d + 2 * d * ff + ff + d + 4 * d;

        // Decoder layer: self-attention plus cross-attention, feed-forward
        // block, and three layer norms.
        let dec_layer = 2 * (4 * d * d + 4 * d) + 2 * d * ff + ff + d + 6 * d;

        // Output projection back to vocabulary logits.
        let output_params = d * v;

        // Final layer-norm parameters not counted per layer (4 * d in total).
        let final_ln = 4 * d;

        embed_params + n * enc_layer + n * dec_layer + output_params + final_ln
    }
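
    // For the `spf_writer` configuration (d_model = 256, d_ff = 1024,
    // vocab_size = 8192, n_layers = 6) this estimate works out to roughly:
    //   embedding            8192 * 256      ~ 2.10M
    //   encoder layers       6 * 789,760     ~ 4.74M
    //   decoder layers       6 * 1,053,440   ~ 6.32M
    //   output projection    256 * 8192      ~ 2.10M
    //   final layer norms    4 * 256         ~ 1K
    //   total                                ~ 15.25M parameters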
}


/// Encoder-decoder transformer: a token embedding shared by the encoder and
/// decoder inputs, an encoder stack, a decoder stack with cross-attention,
/// and a linear projection from decoder states back to vocabulary logits.
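///
/// A sketch of typical usage (token IDs here are illustrative):
///
/// ```ignore
/// let model = SPFTransformer::new(TransformerModelConfig::small(), 42);
/// let logits = model.forward_causal(&[1, 10, 20], 1, 3)?;   // [1, 3, vocab_size]
/// let ids = model.generate(&[1, 10, 20], 8, 0.8, 7)?;       // prompt (BOS-prefixed) + sampled tokens
/// ```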
pub struct SPFTransformer {
    pub config: TransformerModelConfig,
    /// Token embedding table, shape `[vocab_size, d_model]`.
    pub token_embedding: Tensor,
    /// Encoder stack.
    pub encoder: Encoder,
    /// Decoder stack.
    pub decoder: Decoder,
    /// Output projection, shape `[vocab_size, d_model]` (applied transposed).
    pub output_projection: Tensor,
    /// Per-vocabulary-entry bias added to the logits.
    pub output_bias: Tensor,
}

impl SPFTransformer {
    /// Build a model with freshly initialized weights derived from `seed`.
    pub fn new(config: TransformerModelConfig, seed: u64) -> Self {
        let enc_config = EncoderConfig {
            n_layers: config.n_layers,
            d_model: config.d_model,
            n_heads: config.n_heads,
            d_ff: config.d_ff,
            max_seq_len: config.max_seq_len,
            ln_eps: config.ln_eps,
        };
        let dec_config = DecoderConfig {
            n_layers: config.n_layers,
            d_model: config.d_model,
            n_heads: config.n_heads,
            d_ff: config.d_ff,
            max_seq_len: config.max_seq_len,
            ln_eps: config.ln_eps,
        };

        // Xavier-style scaling for the embedding and output projection.
        let embed_scale = (6.0 / (config.vocab_size + config.d_model) as f32).sqrt();
        let proj_scale = (6.0 / (config.d_model + config.vocab_size) as f32).sqrt();

        Self {
            token_embedding: Tensor::randn(
                &[config.vocab_size, config.d_model], seed
            ).scale(embed_scale),
            encoder: Encoder::new(enc_config, seed + 10000),
            decoder: Decoder::new(dec_config, seed + 20000),
            output_projection: Tensor::randn(
                &[config.vocab_size, config.d_model], seed + 30000
            ).scale(proj_scale),
            output_bias: Tensor::zeros(&[config.vocab_size]),
            config,
        }
    }

    /// Look up the embedding row for each token ID and pack the result into a
    /// `[batch, seq_len, d_model]` tensor.
    fn embed_tokens(&self, input_ids: &[u32], batch: usize, seq_len: usize) -> Result<Tensor, String> {
        let d = self.config.d_model;
        let v = self.config.vocab_size;
        if input_ids.len() != batch * seq_len {
            return Err(format!(
                "Expected {} token IDs ({} x {}), got {}",
                batch * seq_len, batch, seq_len, input_ids.len()
            ));
        }
        let mut data = Vec::with_capacity(batch * seq_len * d);

        for &id in input_ids {
            if (id as usize) >= v {
                return Err(format!("Token ID {} exceeds vocab size {}", id, v));
            }
            let offset = (id as usize) * d;
            data.extend_from_slice(&self.token_embedding.data[offset..offset + d]);
        }

        Tensor::from_data(data, vec![batch, seq_len, d])
    }

    /// Full encoder-decoder forward pass.
    ///
    /// Returns logits of shape `[batch, dec_seq, vocab_size]`.
    pub fn forward(
        &self,
        enc_ids: &[u32], enc_batch: usize, enc_seq: usize,
        dec_ids: &[u32], dec_batch: usize, dec_seq: usize,
    ) -> Result<Tensor, String> {
        if enc_batch != dec_batch {
            return Err("Encoder and decoder batch sizes must match".to_string());
        }

        // Embed both input streams.
        let enc_emb = self.embed_tokens(enc_ids, enc_batch, enc_seq)?;
        let dec_emb = self.embed_tokens(dec_ids, dec_batch, dec_seq)?;

        // Encode the source sequence.
        let enc_out = self.encoder.forward(&enc_emb)?;

        // Decode with cross-attention over the encoder output.
        let dec_out = self.decoder.forward(&dec_emb, &enc_out)?;

        // Project decoder states to vocabulary logits.
        self.project_to_logits(&dec_out)
    }

    /// Decoder-only causal forward pass (no encoder input).
    ///
    /// Returns logits of shape `[batch, seq, vocab_size]`.
    pub fn forward_causal(
        &self,
        ids: &[u32], batch: usize, seq: usize,
    ) -> Result<Tensor, String> {
        let emb = self.embed_tokens(ids, batch, seq)?;
        let dec_out = self.decoder.forward_causal(&emb)?;
        self.project_to_logits(&dec_out)
    }

    /// Causal forward pass that also returns the intermediate activations
    /// needed to revisit this step later (see [`ForwardCache`]).
    pub fn forward_causal_with_cache(
        &self,
        ids: &[u32], batch: usize, seq: usize,
    ) -> Result<(Tensor, ForwardCache), String> {
        let token_indices = ids.to_vec();
        let emb = self.embed_tokens(ids, batch, seq)?;
        let embedded = emb.clone();
        let (dec_out, decoder_caches) = self.decoder.forward_causal_with_cache(&emb)?;
        let decoder_output = dec_out.clone();
        let logits = self.project_to_logits(&dec_out)?;

        let cache = ForwardCache {
            token_indices,
            embedded,
            decoder_caches,
            decoder_output,
        };

        Ok((logits, cache))
    }

    /// Project decoder hidden states `[batch, seq, d_model]` onto the
    /// vocabulary, returning logits `[batch, seq, vocab_size]`.
    fn project_to_logits(&self, dec_out: &Tensor) -> Result<Tensor, String> {
        let batch = dec_out.shape[0];
        let seq = dec_out.shape[1];
        let d = dec_out.shape[2];
        let v = self.config.vocab_size;

        // Flatten to a 2-D matrix of row vectors.
        let flat = dec_out.reshape(&[batch * seq, d])?;

        // (batch * seq, d) x (d, vocab) -> (batch * seq, vocab).
        let logits = flat.matmul(&self.output_projection.transpose_2d()?)?;

        // Add the per-vocabulary bias to every row, then restore the batch dimension.
        let biased = logits.add(&self.expand_bias(&self.output_bias, batch * seq))?;
        biased.reshape(&[batch, seq, v])
    }

    /// Autoregressively generate up to `max_tokens` new tokens after `prompt_ids`.
    ///
    /// With `temperature <= 0.0` decoding is greedy; otherwise tokens are sampled
    /// from the temperature-scaled softmax. Returns the full sequence
    /// (BOS-prefixed prompt plus generated tokens), with PAD tokens removed.
    pub fn generate(
        &self,
        prompt_ids: &[u32],
        max_tokens: usize,
        temperature: f32,
        seed: u64,
    ) -> Result<Vec<u32>, String> {
        let mut ids = prompt_ids.to_vec();
        // xorshift64 has a fixed point at zero, so remap a zero seed to a nonzero state.
        let mut rng_state = if seed == 0 { 0x9E37_79B9_7F4A_7C15 } else { seed };

        // Ensure the sequence starts with BOS.
        if ids.is_empty() || ids[0] != BOS_ID {
            ids.insert(0, BOS_ID);
        }

        for _ in 0..max_tokens {
            let seq_len = ids.len();
            if seq_len >= self.config.max_seq_len {
                break;
            }

            // Full causal pass over the sequence so far.
            let logits = self.forward_causal(&ids, 1, seq_len)?;

            // Only the logits of the final position matter for the next token.
            let last_offset = (seq_len - 1) * self.config.vocab_size;
            let last_logits = &logits.data[last_offset..last_offset + self.config.vocab_size];

            // Temperature scaling.
            let scaled: Vec<f32> = if temperature > 0.0 {
                last_logits.iter().map(|&l| l / temperature).collect()
            } else {
                last_logits.to_vec()
            };

            let logit_tensor = Tensor::from_data(scaled, vec![self.config.vocab_size])?;
            let probs = logit_tensor.softmax()?;

            let next_id = if temperature <= 0.0 {
                // Greedy decoding.
                probs.argmax()[0] as u32
            } else {
                // Sample from the categorical distribution via inverse CDF.
                rng_state = xorshift64(rng_state);
                let r = (rng_state as f32) / (u64::MAX as f32);
                let mut cumsum = 0.0;
                // Fall back to the last token if rounding keeps the cumulative sum below r.
                let mut sampled = (self.config.vocab_size - 1) as u32;
                for (i, &p) in probs.data.iter().enumerate() {
                    cumsum += p;
                    if cumsum >= r {
                        sampled = i as u32;
                        break;
                    }
                }
                sampled
            };

            ids.push(next_id);

            // Stop once the model emits end-of-sequence.
            if next_id == EOS_ID {
                break;
            }
        }

        // Drop any padding tokens before returning.
        ids.retain(|&id| id != PAD_ID);

        Ok(ids)
    }

    /// Encode `prompt`, generate a continuation, and decode only the newly
    /// generated tokens back to text.
    pub fn generate_text(
        &self,
        prompt: &str,
        max_tokens: usize,
        temperature: f32,
        seed: u64,
        tokenizer: &Tokenizer,
    ) -> Result<String, String> {
        let input_ids = tokenizer.encode(prompt);
        let output_ids = self.generate(&input_ids, max_tokens, temperature, seed)?;
        // `generate` prepends BOS when the prompt does not already start with it,
        // so account for that before slicing off the prompt.
        let prompt_len = if input_ids.first() == Some(&BOS_ID) {
            input_ids.len()
        } else {
            input_ids.len() + 1
        };
        let generated = output_ids.get(prompt_len..).unwrap_or(&[]);
        Ok(tokenizer.decode(generated))
    }

    /// Run the gate-decision prompt format: the context is prefixed with the
    /// GATE and TOOL control tokens, and the generated output is scanned for
    /// the ALLOWED / BLOCKED markers.
    ///
    /// Returns the generated token IDs and whether the action is allowed.
    pub fn generate_gate_decision(
        &self,
        context_ids: &[u32],
        max_tokens: usize,
        seed: u64,
    ) -> Result<(Vec<u32>, bool), String> {
        // BOS, then the gate/tool control tokens, then the caller's context.
        let mut ids = vec![BOS_ID, tokenizer::GATE_ID, tokenizer::TOOL_ID];
        ids.extend_from_slice(context_ids);

        // Low temperature keeps the decision mostly deterministic.
        let output = self.generate(&ids, max_tokens, 0.3, seed)?;

        // Look for the explicit decision markers in the output.
        let has_allowed = output.iter().any(|&id| id == tokenizer::ALLOWED_ID);
        let has_blocked = output.iter().any(|&id| id == tokenizer::BLOCKED_ID);

        // Fail closed: allow only when ALLOWED appears and BLOCKED does not.
        let allowed = has_allowed && !has_blocked;

        Ok((output, allowed))
    }

    /// Tile the bias vector across `n_rows` rows so it can be added to a
    /// `[n_rows, vocab_size]` logits matrix.
    fn expand_bias(&self, bias: &Tensor, n_rows: usize) -> Tensor {
        let d = bias.numel();
        let mut data = Vec::with_capacity(n_rows * d);
        for _ in 0..n_rows {
            data.extend_from_slice(&bias.data);
        }
        Tensor { data, shape: vec![n_rows, d] }
    }

    /// Total number of parameters actually held by the model.
    pub fn num_params(&self) -> usize {
        let embed = self.token_embedding.numel();
        let enc = self.encoder.num_params();
        let dec = self.decoder.num_params();
        let proj = self.output_projection.numel() + self.output_bias.numel();
        embed + enc + dec + proj
    }

    /// Immutable references to every weight tensor, in a stable order.
    pub fn weights(&self) -> Vec<&Tensor> {
        let mut w: Vec<&Tensor> = vec![&self.token_embedding];
        w.extend(self.encoder.weights());
        w.extend(self.decoder.weights());
        w.push(&self.output_projection);
        w.push(&self.output_bias);
        w
    }

    /// Mutable references to every weight tensor, in the same order as `weights`.
    pub fn weights_mut(&mut self) -> Vec<&mut Tensor> {
        let mut w: Vec<&mut Tensor> = vec![&mut self.token_embedding];
        w.extend(self.encoder.weights_mut());
        w.extend(self.decoder.weights_mut());
        w.push(&mut self.output_projection);
        w.push(&mut self.output_bias);
        w
    }
}

/// One step of a minimal xorshift64 PRNG. Zero is a fixed point, so callers
/// must supply a nonzero state.
fn xorshift64(mut state: u64) -> u64 {
    state ^= state << 13;
    state ^= state >> 7;
    state ^= state << 17;
    state
}

#[cfg(test)]
mod tests {
    use super::*;

    fn small_config() -> TransformerModelConfig {
        TransformerModelConfig::small()
    }

    #[test]
    fn test_forward_causal_shape() {
        let config = small_config();
        let model = SPFTransformer::new(config.clone(), 42);
        let ids: Vec<u32> = vec![1, 10, 20, 30];
        let logits = model.forward_causal(&ids, 1, 4).unwrap();
        assert_eq!(logits.shape, vec![1, 4, config.vocab_size]);
    }

    #[test]
    fn test_forward_encoder_decoder_shape() {
        let config = small_config();
        let model = SPFTransformer::new(config.clone(), 42);
        let enc_ids: Vec<u32> = vec![1, 10, 20, 30, 2];
        let dec_ids: Vec<u32> = vec![1, 40, 50];
        let logits = model.forward(
            &enc_ids, 1, 5,
            &dec_ids, 1, 3,
        ).unwrap();
        assert_eq!(logits.shape, vec![1, 3, config.vocab_size]);
    }

    #[test]
    fn test_logits_finite() {
        let model = SPFTransformer::new(small_config(), 42);
        let ids: Vec<u32> = vec![1, 5, 10];
        let logits = model.forward_causal(&ids, 1, 3).unwrap();
        assert!(logits.data.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_generate_produces_tokens() {
        let model = SPFTransformer::new(small_config(), 42);
        let prompt = vec![BOS_ID, 10, 20];
        let generated = model.generate(&prompt, 10, 1.0, 42).unwrap();
        assert!(generated.len() >= prompt.len());
        assert!(generated.len() <= prompt.len() + 10 + 1);
    }

    #[test]
    fn test_generate_greedy() {
        let model = SPFTransformer::new(small_config(), 42);
        let prompt = vec![BOS_ID, 10];
        // Greedy decoding (temperature 0) must be deterministic.
        let gen1 = model.generate(&prompt, 5, 0.0, 0).unwrap();
        let gen2 = model.generate(&prompt, 5, 0.0, 0).unwrap();
        assert_eq!(gen1, gen2);
    }

    #[test]
    fn test_num_params_small() {
        let config = small_config();
        let model = SPFTransformer::new(config.clone(), 42);
        let actual = model.num_params();
        let estimated = config.estimate_params();
        // The closed-form estimate should be within 5% of the real parameter count.
        let diff = (actual as f64 - estimated as f64).abs() / actual as f64;
        assert!(diff < 0.05, "Param count mismatch: actual={}, estimated={}", actual, estimated);
    }

    #[test]
    fn test_num_params_writer() {
        let config = TransformerModelConfig::spf_writer();
        let estimated = config.estimate_params();
        // The writer configuration works out to roughly 15.25M parameters
        // (see the breakdown next to estimate_params).
        assert!(estimated > 10_000_000, "Writer should have >10M params, got {}", estimated);
        assert!(estimated < 20_000_000, "Writer should have <20M params, got {}", estimated);
    }

    #[test]
    fn test_invalid_token_id() {
        let config = small_config();
        let model = SPFTransformer::new(config, 42);
        let ids: Vec<u32> = vec![999];
        assert!(model.forward_causal(&ids, 1, 1).is_err());
    }

    #[test]
    fn test_batch_mismatch() {
        let model = SPFTransformer::new(small_config(), 42);
        let enc = vec![1, 2, 3];
        let dec = vec![1, 2];
        assert!(model.forward(&enc, 1, 3, &dec, 2, 1).is_err());
    }

    #[test]
    fn test_weights_collection() {
        let model = SPFTransformer::new(small_config(), 42);
        let weights = model.weights();
        assert!(weights.len() > 30, "Expected many weights, got {}", weights.len());
    }
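
    // Smoke test for the gate-decision path: it should surface a clean Result
    // without panicking, even though the small test vocabulary may not contain
    // the gate control tokens (in which case embedding them returns an Err).
    #[test]
    fn test_gate_decision_returns_result() {
        let model = SPFTransformer::new(small_config(), 42);
        let result = model.generate_gate_decision(&[10, 20], 4, 7);
        if let Ok((output, _allowed)) = result {
            assert!(!output.is_empty());
        }
    }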
}