//! Transformer encoder: a stack of pre-norm encoder layers with sinusoidal
//! positional encoding and a final layer norm.
//!
//! Each layer computes `x + Sublayer(LayerNorm(x))` for self-attention and a
//! feed-forward network; the `forward_with_cache` variants additionally record
//! the intermediate activations a backward pass needs.

use crate::tensor::Tensor;
use crate::attention::{AttentionCache, AttentionConfig, MultiHeadAttention};
use crate::ffn::{FfnCache, FfnConfig, FeedForward};

/// Activations captured by `EncoderLayer::forward_with_cache`, holding the
/// inputs to each sublayer so a backward pass can be run later.
pub struct EncoderLayerCache {
    /// Input to the first layer norm (the layer input, pre-attention).
    pub ln1_input: Tensor,
    /// Cache produced by the self-attention sublayer.
    pub attn_cache: AttentionCache,
    /// Input to the second layer norm (after the attention residual).
    pub ln2_input: Tensor,
    /// Cache produced by the feed-forward sublayer.
    pub ffn_cache: FfnCache,
    /// The layer's original input, kept for the residual path.
    pub residual_input: Tensor,
}
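
// Sketch of intended use (an assumption based on the field names; the actual
// backward pass lives elsewhere): a training step would capture the cache and
// later feed it to the corresponding backward routine.
//
//     let (out, cache) = layer.forward_with_cache(&x)?;
//     // ... compute the loss gradient w.r.t. `out`, then consume `cache`.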

/// Hyperparameters for the encoder stack.
#[derive(Debug, Clone)]
pub struct EncoderConfig {
    /// Number of stacked encoder layers.
    pub n_layers: usize,
    /// Model (embedding) dimension.
    pub d_model: usize,
    /// Number of attention heads; expected to divide `d_model` evenly.
    pub n_heads: usize,
    /// Hidden width of the feed-forward sublayer.
    pub d_ff: usize,
    /// Maximum sequence length (rows in the positional encoding table).
    pub max_seq_len: usize,
    /// Epsilon added to the variance in layer norm for numerical stability.
    pub ln_eps: f32,
}

impl EncoderConfig {
    /// Preset for the SPF-writer model: 6 layers, d_model 256, 8 heads,
    /// FFN width 1024.
    pub fn spf_writer() -> Self {
        Self {
            n_layers: 6,
            d_model: 256,
            n_heads: 8,
            d_ff: 1024,
            max_seq_len: 2048,
            ln_eps: 1e-5,
        }
    }

    /// A small configuration, useful for tests and quick experiments.
    pub fn small() -> Self {
        Self {
            n_layers: 2,
            d_model: 64,
            n_heads: 4,
            d_ff: 256,
            max_seq_len: 512,
            ln_eps: 1e-5,
        }
    }
}
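
// For reference (a sketch of the arithmetic, assuming the attention module
// splits heads evenly as is conventional): the per-head dimension is
// d_model / n_heads, so `spf_writer` uses 256 / 8 = 32 and `small` uses
// 64 / 4 = 16.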

/// A single transformer encoder layer.
///
/// Pre-norm arrangement: each sublayer computes `x + Sublayer(LayerNorm(x))`,
/// which generally trains more stably than post-norm but requires a final
/// layer norm at the top of the stack.
pub struct EncoderLayer {
    /// Bidirectional (non-causal) multi-head self-attention.
    pub self_attn: MultiHeadAttention,
    /// Position-wise feed-forward network.
    pub ffn: FeedForward,
    /// Scale and shift for the first layer norm (before attention).
    pub ln1_gamma: Tensor,
    pub ln1_beta: Tensor,
    /// Scale and shift for the second layer norm (before the FFN).
    pub ln2_gamma: Tensor,
    pub ln2_beta: Tensor,
    /// Layer norm epsilon.
    ln_eps: f32,
}

impl EncoderLayer {
    /// Builds a layer with freshly initialized weights. The FFN receives an
    /// offset seed so its initialization differs from the attention module's.
    pub fn new(d_model: usize, n_heads: usize, d_ff: usize, ln_eps: f32, seed: u64) -> Self {
        let attn_config = AttentionConfig {
            d_model,
            n_heads,
            causal: false,
        };
        let ffn_config = FfnConfig { d_model, d_ff };

        Self {
            self_attn: MultiHeadAttention::new(attn_config, seed),
            ffn: FeedForward::new(ffn_config, seed + 100),
            // Layer norms start as the identity map: gamma = 1, beta = 0.
            ln1_gamma: Tensor::ones(&[d_model]),
            ln1_beta: Tensor::zeros(&[d_model]),
            ln2_gamma: Tensor::ones(&[d_model]),
            ln2_beta: Tensor::zeros(&[d_model]),
            ln_eps,
        }
    }

    /// Runs the layer on `x`: pre-norm self-attention, then a pre-norm
    /// feed-forward network, each wrapped in a residual connection.
    pub fn forward(&self, x: &Tensor) -> Result<Tensor, String> {
        // Attention sublayer: x + Attn(LN(x)).
        let normed = x.layer_norm(&self.ln1_gamma, &self.ln1_beta, self.ln_eps)?;
        let attn_out = self.self_attn.forward(&normed)?;
        let x = x.add(&attn_out)?;

        // Feed-forward sublayer: x + FFN(LN(x)).
        let normed = x.layer_norm(&self.ln2_gamma, &self.ln2_beta, self.ln_eps)?;
        let ffn_out = self.ffn.forward(&normed)?;
        x.add(&ffn_out)
    }
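
    // Shape note for `forward` (a sketch, assuming `[batch, seq, d_model]`
    // inputs as the tests below use): layer norm, attention, the FFN, and the
    // residual adds are all shape-preserving, so every intermediate and the
    // output share the input's shape.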

    /// Identical computation to `forward`, but also records the sublayer
    /// inputs and caches needed for a backward pass.
    pub fn forward_with_cache(&self, x: &Tensor) -> Result<(Tensor, EncoderLayerCache), String> {
        let residual_input = x.clone();

        // Attention sublayer: x + Attn(LN(x)).
        let ln1_input = x.clone();
        let normed = x.layer_norm(&self.ln1_gamma, &self.ln1_beta, self.ln_eps)?;
        let (attn_out, attn_cache) = self.self_attn.forward_with_cache(&normed)?;
        let x = x.add(&attn_out)?;

        // Feed-forward sublayer: x + FFN(LN(x)).
        let ln2_input = x.clone();
        let normed = x.layer_norm(&self.ln2_gamma, &self.ln2_beta, self.ln_eps)?;
        let (ffn_out, ffn_cache) = self.ffn.forward_with_cache(&normed)?;
        let output = x.add(&ffn_out)?;

        let cache = EncoderLayerCache {
            ln1_input,
            attn_cache,
            ln2_input,
            ffn_cache,
            residual_input,
        };

        Ok((output, cache))
    }

    /// Parameter count: attention + FFN + four layer-norm vectors of
    /// length `d_model` (two gammas, two betas).
    pub fn num_params(&self) -> usize {
        let d = self.ln1_gamma.numel();
        self.self_attn.num_params() + self.ffn.num_params() + 4 * d
    }

    /// All weight tensors in a stable order: attention, FFN, then layer norms.
    pub fn weights(&self) -> Vec<&Tensor> {
        let mut w = self.self_attn.weights();
        w.extend(self.ffn.weights());
        w.extend([&self.ln1_gamma, &self.ln1_beta, &self.ln2_gamma, &self.ln2_beta]);
        w
    }

    /// Mutable views of the same tensors, in the same order as `weights`.
    pub fn weights_mut(&mut self) -> Vec<&mut Tensor> {
        let mut w = self.self_attn.weights_mut();
        w.extend(self.ffn.weights_mut());
        w.extend([&mut self.ln1_gamma, &mut self.ln1_beta, &mut self.ln2_gamma, &mut self.ln2_beta]);
        w
    }
}

/// Builds the classic sinusoidal positional encoding table, shape
/// `[max_seq_len, d_model]`:
///
///     PE[pos, 2i]   = sin(pos / 10000^(2i / d_model))
///     PE[pos, 2i+1] = cos(pos / 10000^(2i / d_model))
///
/// Low dimensions oscillate quickly with position, high dimensions slowly,
/// giving each position a unique, smoothly varying signature.
pub fn sinusoidal_positional_encoding(max_seq_len: usize, d_model: usize) -> Tensor {
    let mut data = vec![0.0f32; max_seq_len * d_model];
    for pos in 0..max_seq_len {
        for i in 0..d_model / 2 {
            let angle = pos as f32 / (10000.0_f32).powf(2.0 * i as f32 / d_model as f32);
            data[pos * d_model + 2 * i] = angle.sin();
            data[pos * d_model + 2 * i + 1] = angle.cos();
        }
        // Odd d_model: the paired loop above leaves the last column empty,
        // so fill it with one extra sine term.
        if d_model % 2 == 1 {
            let angle = pos as f32 / (10000.0_f32).powf((d_model - 1) as f32 / d_model as f32);
            data[pos * d_model + d_model - 1] = angle.sin();
        }
    }
    Tensor { data, shape: vec![max_seq_len, d_model] }
}
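
// Worked illustration (hypothetical values, not part of the API): with
// d_model = 4, position `pos` maps to
//     [sin(pos), cos(pos), sin(pos / 10000^0.5), cos(pos / 10000^0.5)],
// so the first pair cycles every ~6.28 positions while the second pair's
// period is 100x longer.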

/// The full encoder: positional encoding plus a stack of `EncoderLayer`s and
/// a final layer norm (required because the layers are pre-norm).
pub struct Encoder {
    pub config: EncoderConfig,
    /// Precomputed sinusoidal table of shape `[max_seq_len, d_model]`.
    pub pos_encoding: Tensor,
    /// The encoder layers, applied in order.
    pub layers: Vec<EncoderLayer>,
    /// Parameters of the final layer norm.
    pub final_ln_gamma: Tensor,
    pub final_ln_beta: Tensor,
}

impl Encoder {
    /// Builds an encoder from `config`. Each layer receives a distinct seed
    /// so the layers are not initialized identically.
    pub fn new(config: EncoderConfig, seed: u64) -> Self {
        let pos_encoding = sinusoidal_positional_encoding(config.max_seq_len, config.d_model);

        let layers: Vec<EncoderLayer> = (0..config.n_layers)
            .map(|i| {
                EncoderLayer::new(
                    config.d_model,
                    config.n_heads,
                    config.d_ff,
                    config.ln_eps,
                    seed + (i as u64) * 1000,
                )
            })
            .collect();

        Self {
            final_ln_gamma: Tensor::ones(&[config.d_model]),
            final_ln_beta: Tensor::zeros(&[config.d_model]),
            pos_encoding,
            layers,
            config,
        }
    }

    /// Runs the full encoder stack.
    ///
    /// `embeddings` must have shape `[batch, seq_len, d_model]` with
    /// `seq_len <= max_seq_len`. Positional encodings are added to the
    /// embeddings, the result is passed through every layer, and a final
    /// layer norm is applied.
    pub fn forward(&self, embeddings: &Tensor) -> Result<Tensor, String> {
        let seq_len = embeddings.shape[1];
        if seq_len > self.config.max_seq_len {
            return Err(format!(
                "Sequence length {} exceeds max {}",
                seq_len, self.config.max_seq_len
            ));
        }

        // Take the first `seq_len` rows of the positional table.
        let pos_enc = self.pos_encoding.slice(0, seq_len)?;

        // Add the positional encoding to every batch element.
        let batch = embeddings.shape[0];
        let d_model = embeddings.shape[2];
        let mut x_data = embeddings.data.clone();
        for b in 0..batch {
            for s in 0..seq_len {
                for d in 0..d_model {
                    x_data[(b * seq_len + s) * d_model + d] += pos_enc.data[s * d_model + d];
                }
            }
        }
        let mut x = Tensor::from_data(x_data, embeddings.shape.clone())?;

        // Run the layer stack.
        for layer in &self.layers {
            x = layer.forward(&x)?;
        }

        // Final layer norm (needed because the layers are pre-norm).
        x.layer_norm(&self.final_ln_gamma, &self.final_ln_beta, self.config.ln_eps)
    }
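
    // Indexing note for the loop in `forward` above: with row-major
    // `[batch, seq, d_model]` layout, element (b, s, d) lives at flat index
    // ((b * seq_len + s) * d_model + d), while the `[seq, d_model]` positional
    // table keeps (s, d) at (s * d_model + d); the same positional row is
    // reused for every batch element.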

    /// Total parameter count: every layer plus the final layer norm's
    /// gamma and beta (2 * d_model).
    pub fn num_params(&self) -> usize {
        let layer_params: usize = self.layers.iter().map(|l| l.num_params()).sum();
        layer_params + 2 * self.config.d_model
    }

    /// All weight tensors: each layer's in order, then the final layer norm.
    pub fn weights(&self) -> Vec<&Tensor> {
        let mut w: Vec<&Tensor> = Vec::new();
        for layer in &self.layers {
            w.extend(layer.weights());
        }
        w.push(&self.final_ln_gamma);
        w.push(&self.final_ln_beta);
        w
    }

    /// Mutable views of the same tensors, in the same order as `weights`.
    pub fn weights_mut(&mut self) -> Vec<&mut Tensor> {
        let mut w: Vec<&mut Tensor> = Vec::new();
        for layer in &mut self.layers {
            w.extend(layer.weights_mut());
        }
        w.push(&mut self.final_ln_gamma);
        w.push(&mut self.final_ln_beta);
        w
    }
}
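
// Illustrative usage (hypothetical calling code; assumes an upstream embedding
// lookup has produced a `[batch, seq, d_model]` tensor):
//
//     let encoder = Encoder::new(EncoderConfig::small(), 42);
//     let embeddings = Tensor::randn(&[1, 16, 64], 7);
//     let hidden = encoder.forward(&embeddings)?; // shape [1, 16, 64]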

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_positional_encoding_shape() {
        let pe = sinusoidal_positional_encoding(100, 64);
        assert_eq!(pe.shape, vec![100, 64]);
    }

    #[test]
    fn test_positional_encoding_values() {
        let pe = sinusoidal_positional_encoding(10, 8);
        // At pos 0: sin(0) = 0 and cos(0) = 1.
        assert!((pe.data[0] - 0.0).abs() < 1e-5);
        assert!((pe.data[1] - 1.0).abs() < 1e-5);
        // Every entry is a sine or cosine, so all values lie in [-1, 1].
        assert!(pe.data.iter().all(|&v| v >= -1.0 && v <= 1.0));
    }
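
    // An extra property sketch (uses only the function above): distinct
    // positions should receive distinct encoding rows, which is what lets the
    // model tell positions apart.
    #[test]
    fn test_positional_encoding_rows_differ() {
        let pe = sinusoidal_positional_encoding(4, 8);
        let row = |p: usize| &pe.data[p * 8..(p + 1) * 8];
        assert_ne!(row(0), row(1));
        assert_ne!(row(1), row(2));
    }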

    #[test]
    fn test_encoder_layer_shape() {
        let layer = EncoderLayer::new(64, 4, 256, 1e-5, 42);
        let x = Tensor::randn(&[2, 8, 64], 99);
        let out = layer.forward(&x).unwrap();
        assert_eq!(out.shape, vec![2, 8, 64]);
    }

    #[test]
    fn test_encoder_layer_residual() {
        let layer = EncoderLayer::new(64, 4, 256, 1e-5, 42);
        let x = Tensor::randn(&[1, 4, 64], 99);
        let out = layer.forward(&x).unwrap();
        // With residual connections and identity-initialized layer norms, the
        // output should stay in the same ballpark as the input.
        let diff: f32 = x.data.iter().zip(&out.data)
            .map(|(a, b)| (a - b).abs())
            .sum::<f32>() / x.numel() as f32;
        assert!(diff < 10.0, "Output diverged too far from input: {}", diff);
    }
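
    // A consistency sketch (assumes `forward` and `forward_with_cache` share
    // the same math, as their implementations above suggest): the cached
    // variant should reproduce `forward`'s output.
    #[test]
    fn test_forward_with_cache_matches_forward() {
        let layer = EncoderLayer::new(64, 4, 256, 1e-5, 42);
        let x = Tensor::randn(&[1, 4, 64], 7);
        let plain = layer.forward(&x).unwrap();
        let (cached, _cache) = layer.forward_with_cache(&x).unwrap();
        assert_eq!(plain.shape, cached.shape);
        for (a, b) in plain.data.iter().zip(&cached.data) {
            assert!((a - b).abs() < 1e-6);
        }
    }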

    #[test]
    fn test_encoder_full_forward() {
        let config = EncoderConfig::small();
        let encoder = Encoder::new(config, 42);
        let x = Tensor::randn(&[1, 8, 64], 99);
        let out = encoder.forward(&x).unwrap();
        assert_eq!(out.shape, vec![1, 8, 64]);
        assert!(out.data.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_encoder_seq_exceeds_max() {
        let config = EncoderConfig { max_seq_len: 10, ..EncoderConfig::small() };
        let encoder = Encoder::new(config, 42);
        let x = Tensor::randn(&[1, 20, 64], 99);
        assert!(encoder.forward(&x).is_err());
    }

    #[test]
    fn test_encoder_num_params() {
        let config = EncoderConfig::small();
        let encoder = Encoder::new(config, 42);
        let params = encoder.num_params();
        // Expected total for the `small` config: two layers' parameters plus
        // the final layer norm's 2 * 64.
        assert_eq!(params, 100096);
    }

    #[test]
    fn test_encoder_weights_collection() {
        let config = EncoderConfig::small();
        let encoder = Encoder::new(config, 42);
        let weights = encoder.weights();
        // All tensors from both layers plus the final layer norm's gamma/beta.
        assert_eq!(weights.len(), 34);
    }
}