//! Position-wise feed-forward network (FFN) for a transformer block.
//!
//! Computes `FFN(x) = GELU(x * W1^T + b1) * W2^T + b2`, applied
//! independently at every sequence position.
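//!
//! A minimal usage sketch; the `crate::ffn` module path is an assumption
//! about the crate layout, so the doctest is marked `ignore`:
//!
//! ```ignore
//! use crate::ffn::{FeedForward, FfnConfig};
//! use crate::tensor::Tensor;
//!
//! let ffn = FeedForward::new(FfnConfig::default_for(64), 42);
//! let x = Tensor::randn(&[2, 8, 64], 7);
//! let y = ffn.forward(&x).unwrap();
//! assert_eq!(y.shape, vec![2, 8, 64]);
//! ```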
use crate::tensor::Tensor;

/// Values saved during the forward pass for reuse in backpropagation.
pub struct FfnCache {
    /// FFN input, flattened to `[batch * seq_len, d_model]`.
    pub input: Tensor,
    /// Hidden pre-activations (before GELU), `[batch * seq_len, d_ff]`.
    pub hidden_pre_gelu: Tensor,
}

/// Hyperparameters for the feed-forward block.
#[derive(Debug, Clone)]
pub struct FfnConfig {
    /// Model (embedding) dimension.
    pub d_model: usize,
    /// Inner hidden dimension of the FFN.
    pub d_ff: usize,
}

impl FfnConfig {
    /// Standard transformer convention: `d_ff = 4 * d_model`.
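    ///
    /// For example (doctest ignored; paths depend on the crate layout):
    ///
    /// ```ignore
    /// let cfg = FfnConfig::default_for(256);
    /// assert_eq!(cfg.d_model, 256);
    /// assert_eq!(cfg.d_ff, 1024);
    /// ```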
    pub fn default_for(d_model: usize) -> Self {
        Self {
            d_model,
            d_ff: d_model * 4,
        }
    }
}

/// A two-layer position-wise feed-forward network:
///
/// `FFN(x) = GELU(x * W1^T + b1) * W2^T + b2`
///
/// Weights are stored as `[out_features, in_features]`, so each linear
/// layer multiplies by the transposed weight matrix.
pub struct FeedForward {
    pub config: FfnConfig,
    /// First projection weight, `[d_ff, d_model]`.
    pub w1: Tensor,
    /// First projection bias, `[d_ff]`.
    pub b1: Tensor,
    /// Second projection weight, `[d_model, d_ff]`.
    pub w2: Tensor,
    /// Second projection bias, `[d_model]`.
    pub b2: Tensor,
}

impl FeedForward {
    /// Create an FFN with randomly initialized weights and zero biases.
    /// Weights are scaled by `sqrt(6 / (fan_in + fan_out))`, the
    /// Xavier/Glorot uniform bound.
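    ///
    /// A usage sketch (doctest ignored; shapes follow the stored
    /// `[out_features, in_features]` layout):
    ///
    /// ```ignore
    /// let ffn = FeedForward::new(FfnConfig { d_model: 64, d_ff: 256 }, 42);
    /// assert_eq!(ffn.w1.shape, vec![256, 64]);
    /// assert_eq!(ffn.b1.shape, vec![256]);
    /// ```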
    pub fn new(config: FfnConfig, seed: u64) -> Self {
        let d = config.d_model;
        let ff = config.d_ff;

        // Xavier/Glorot-style scale: sqrt(6 / (fan_in + fan_out)).
        let scale1 = (6.0 / (d + ff) as f32).sqrt();
        let scale2 = (6.0 / (ff + d) as f32).sqrt();

        Self {
            w1: Tensor::randn(&[ff, d], seed).scale(scale1),
            b1: Tensor::zeros(&[ff]),
            w2: Tensor::randn(&[d, ff], seed + 1).scale(scale2),
            b2: Tensor::zeros(&[d]),
            config,
        }
    }

    /// Run the feed-forward transformation.
    ///
    /// Expects input of shape `[batch, seq_len, d_model]` and returns a
    /// tensor of the same shape. Internally the input is flattened to
    /// `[batch * seq_len, d_model]` so both linear layers reduce to plain
    /// 2-D matrix multiplies.
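    ///
    /// A usage sketch (doctest ignored; paths depend on the crate layout):
    ///
    /// ```ignore
    /// let ffn = FeedForward::new(FfnConfig::default_for(64), 42);
    /// let x = Tensor::randn(&[2, 8, 64], 99);
    /// let out = ffn.forward(&x).unwrap();
    /// assert_eq!(out.shape, vec![2, 8, 64]);
    /// ```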
    pub fn forward(&self, x: &Tensor) -> Result<Tensor, String> {
        if x.ndim() != 3 {
            return Err(format!("FFN expects 3D input [batch, seq, d_model], got {}D", x.ndim()));
        }
        let batch = x.shape[0];
        let seq_len = x.shape[1];
        let d_model = x.shape[2];

        if d_model != self.config.d_model {
            return Err(format!(
                "Input d_model {} doesn't match config {}",
                d_model, self.config.d_model
            ));
        }

        // Flatten to 2-D so the linear layers are plain matrix multiplies.
        let x_2d = x.reshape(&[batch * seq_len, d_model])?;

        // First linear layer: expand d_model -> d_ff.
        let hidden = x_2d.matmul(&self.w1.transpose_2d()?)?
            .add(&self.expand_bias(&self.b1, batch * seq_len))?;

        // GELU nonlinearity.
        let activated = hidden.gelu();

        // Second linear layer: project d_ff -> d_model.
        let output = activated.matmul(&self.w2.transpose_2d()?)?
            .add(&self.expand_bias(&self.b2, batch * seq_len))?;

        // Restore the original 3-D shape.
        output.reshape(&[batch, seq_len, d_model])
    }

    /// Like [`forward`](Self::forward), but also returns the intermediate
    /// values the backward pass needs.
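    ///
    /// A usage sketch (doctest ignored; the cache shapes assume the 2-D
    /// flattening described in [`forward`](Self::forward)):
    ///
    /// ```ignore
    /// let ffn = FeedForward::new(FfnConfig::default_for(64), 42);
    /// let x = Tensor::randn(&[1, 4, 64], 99);
    /// let (out, cache) = ffn.forward_with_cache(&x).unwrap();
    /// assert_eq!(out.shape, vec![1, 4, 64]);
    /// assert_eq!(cache.input.shape, vec![4, 64]);            // batch * seq_len rows
    /// assert_eq!(cache.hidden_pre_gelu.shape, vec![4, 256]); // d_ff = 4 * d_model
    /// ```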
    pub fn forward_with_cache(&self, x: &Tensor) -> Result<(Tensor, FfnCache), String> {
        if x.ndim() != 3 {
            return Err(format!("FFN expects 3D input [batch, seq, d_model], got {}D", x.ndim()));
        }
        let batch = x.shape[0];
        let seq_len = x.shape[1];
        let d_model = x.shape[2];

        if d_model != self.config.d_model {
            return Err(format!("Input d_model {} doesn't match config {}", d_model, self.config.d_model));
        }

        let x_2d = x.reshape(&[batch * seq_len, d_model])?;
        let input_cache = x_2d.clone();

        // First linear layer; keep the pre-activation values for backprop.
        let hidden = x_2d.matmul(&self.w1.transpose_2d()?)?
            .add(&self.expand_bias(&self.b1, batch * seq_len))?;
        let hidden_pre_gelu = hidden.clone();

        // GELU nonlinearity.
        let activated = hidden.gelu();

        // Second linear layer: project d_ff -> d_model.
        let output = activated.matmul(&self.w2.transpose_2d()?)?
            .add(&self.expand_bias(&self.b2, batch * seq_len))?;

        let output = output.reshape(&[batch, seq_len, d_model])?;

        let cache = FfnCache {
            input: input_cache,
            hidden_pre_gelu,
        };

        Ok((output, cache))
    }

    /// Broadcast a 1-D bias to `[n_rows, d]` by repeating it row-wise.
    fn expand_bias(&self, bias: &Tensor, n_rows: usize) -> Tensor {
        let d = bias.numel();
        let mut data = Vec::with_capacity(n_rows * d);
        for _ in 0..n_rows {
            data.extend_from_slice(&bias.data);
        }
        Tensor { data, shape: vec![n_rows, d] }
    }

    /// Total number of trainable parameters:
    /// `d_ff * d_model + d_ff + d_model * d_ff + d_model`.
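    ///
    /// For example, with `d_model = 64` and `d_ff = 256`:
    /// `256 * 64 + 256 + 64 * 256 + 64 = 33_088`.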
    pub fn num_params(&self) -> usize {
        let d = self.config.d_model;
        let ff = self.config.d_ff;
        // w1 + b1 + w2 + b2
        ff * d + ff + d * ff + d
    }

    /// Immutable references to all trainable tensors, in `(w1, b1, w2, b2)` order.
    pub fn weights(&self) -> Vec<&Tensor> {
        vec![&self.w1, &self.b1, &self.w2, &self.b2]
    }

    /// Mutable references to all trainable tensors, in the same order as
    /// [`weights`](Self::weights).
    pub fn weights_mut(&mut self) -> Vec<&mut Tensor> {
        vec![&mut self.w1, &mut self.b1, &mut self.w2, &mut self.b2]
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    fn test_config() -> FfnConfig {
        FfnConfig { d_model: 64, d_ff: 256 }
    }

    #[test]
    fn test_ffn_output_shape() {
        let ffn = FeedForward::new(test_config(), 42);
        let x = Tensor::randn(&[2, 8, 64], 99);
        let out = ffn.forward(&x).unwrap();
        assert_eq!(out.shape, vec![2, 8, 64]);
    }

    #[test]
    fn test_ffn_values_finite() {
        let ffn = FeedForward::new(test_config(), 42);
        let x = Tensor::randn(&[1, 4, 64], 99);
        let out = ffn.forward(&x).unwrap();
        assert!(out.data.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_ffn_num_params() {
        let ffn = FeedForward::new(test_config(), 42);
        // 256*64 + 256 + 64*256 + 64 = 33088
        assert_eq!(ffn.num_params(), 33088);
    }

    #[test]
    fn test_ffn_default_config() {
        let config = FfnConfig::default_for(256);
        assert_eq!(config.d_model, 256);
        assert_eq!(config.d_ff, 1024);
    }

    #[test]
    fn test_ffn_dimension_mismatch() {
        let ffn = FeedForward::new(test_config(), 42);
        let x = Tensor::randn(&[1, 4, 32], 99);
        assert!(ffn.forward(&x).is_err());
    }

    #[test]
    fn test_ffn_weights_count() {
        let ffn = FeedForward::new(test_config(), 42);
        assert_eq!(ffn.weights().len(), 4);
    }

    #[test]
    fn test_ffn_single_position() {
        // A sequence of length 1 (e.g. a single decoding step) should work.
        let ffn = FeedForward::new(test_config(), 42);
        let x = Tensor::randn(&[1, 1, 64], 99);
        let out = ffn.forward(&x).unwrap();
        assert_eq!(out.shape, vec![1, 1, 64]);
    }

    #[test]
    fn test_ffn_weight_shapes() {
        let ffn = FeedForward::new(test_config(), 42);
        // Weights are stored [out_features, in_features].
        assert_eq!(ffn.w1.shape, vec![256, 64]);
        assert_eq!(ffn.w2.shape, vec![64, 256]);
        assert_eq!(ffn.b1.shape, vec![256]);
        assert_eq!(ffn.b2.shape, vec![64]);
    }
}