| |
| |
| |
| |
| |
| |
| |
| |
|
|
| use crate::tensor::Tensor; |
|
|
| |
| |
| |
|
|
| |
/// Intermediate activations captured during a forward pass, retained for
/// use in backpropagation.
///
/// Q/K/V are stored in head-major layout `[batch, n_heads, seq, d_head]`
/// (the layout produced by `transpose_heads`), and `input` is the
/// projection input flattened to `[batch * seq, d_model]`.
pub struct AttentionCache {
    /// Projected queries, head-major layout.
    pub q: Tensor,
    /// Projected keys, head-major layout.
    pub k: Tensor,
    /// Projected values, head-major layout.
    pub v: Tensor,
    /// Post-softmax attention weights, shape [batch, n_heads, seq, seq].
    pub attn_weights: Tensor,
    /// Flattened input to the Q/K/V projections, shape [batch * seq, d_model].
    pub input: Tensor,
    /// Score scaling factor, 1 / sqrt(d_head).
    pub scale: f32,
}
|
|
| |
| |
| |
|
|
| |
/// Hyperparameters for a multi-head attention layer.
#[derive(Debug, Clone)]
pub struct AttentionConfig {
    /// Model (embedding) dimension; also the width of all projection matrices.
    pub d_model: usize,
    /// Number of attention heads; `d_model` is split evenly across them.
    pub n_heads: usize,
    /// When true, position i may only attend to positions j <= i
    /// (decoder-style masking).
    pub causal: bool,
}
|
|
impl AttentionConfig {
    /// Per-head dimensionality: `d_model / n_heads`.
    ///
    /// NOTE(review): integer division — if `d_model` is not a multiple of
    /// `n_heads`, the remainder is silently dropped and downstream reshapes
    /// would disagree with `d_model`. Confirm configs are always divisible
    /// (all tests in this file use divisible values).
    pub fn d_head(&self) -> usize {
        self.d_model / self.n_heads
    }
}
|
|
| |
| |
| |
|
|
| |
| |
| |
| |
/// Multi-head scaled dot-product attention layer with learned Q/K/V/output
/// projections and biases.
///
/// All weight matrices are square [d_model, d_model]; projections are applied
/// as `x · Wᵀ + b` (see `forward`).
pub struct MultiHeadAttention {
    /// Layer hyperparameters.
    pub config: AttentionConfig,
    /// Query projection weight, [d_model, d_model].
    pub w_q: Tensor,
    /// Key projection weight, [d_model, d_model].
    pub w_k: Tensor,
    /// Value projection weight, [d_model, d_model].
    pub w_v: Tensor,
    /// Output projection weight, [d_model, d_model].
    pub w_o: Tensor,
    /// Query projection bias, [d_model].
    pub b_q: Tensor,
    /// Key projection bias, [d_model].
    pub b_k: Tensor,
    /// Value projection bias, [d_model].
    pub b_v: Tensor,
    /// Output projection bias, [d_model].
    pub b_o: Tensor,
}
|
|
| impl MultiHeadAttention { |
| |
| |
| |
| pub fn new(config: AttentionConfig, seed: u64) -> Self { |
| let d = config.d_model; |
| let scale = (6.0 / (d + d) as f32).sqrt(); |
|
|
| Self { |
| w_q: Tensor::randn(&[d, d], seed).scale(scale), |
| w_k: Tensor::randn(&[d, d], seed + 1).scale(scale), |
| w_v: Tensor::randn(&[d, d], seed + 2).scale(scale), |
| w_o: Tensor::randn(&[d, d], seed + 3).scale(scale), |
| b_q: Tensor::zeros(&[d]), |
| b_k: Tensor::zeros(&[d]), |
| b_v: Tensor::zeros(&[d]), |
| b_o: Tensor::zeros(&[d]), |
| config, |
| } |
| } |
|
|
| |
| |
| |
| |
| |
| |
| |
    /// Self-attention forward pass.
    ///
    /// `x` has shape `[batch, seq, d_model]`; the output has the same shape.
    /// Applies causal masking when `config.causal` is set.
    ///
    /// # Errors
    /// Returns `Err` if `x` is not 3D or its last dimension differs from
    /// the configured `d_model`.
    pub fn forward(&self, x: &Tensor) -> Result<Tensor, String> {
        if x.ndim() != 3 {
            return Err(format!("Attention expects 3D input [batch, seq, d_model], got {}D", x.ndim()));
        }
        let batch = x.shape[0];
        let seq_len = x.shape[1];
        let d_model = x.shape[2];

        if d_model != self.config.d_model {
            return Err(format!(
                "Input d_model {} doesn't match config {}",
                d_model, self.config.d_model
            ));
        }

        let n_heads = self.config.n_heads;
        let d_head = self.config.d_head();

        // Flatten to [batch * seq, d_model] so projections are plain 2D matmuls.
        let x_2d = x.reshape(&[batch * seq_len, d_model])?;

        // Linear projections: x · Wᵀ + b (bias broadcast by row replication).
        let q = x_2d.matmul(&self.w_q.transpose_2d()?)?.add(&self.expand_bias(&self.b_q, batch * seq_len))?;
        let k = x_2d.matmul(&self.w_k.transpose_2d()?)?.add(&self.expand_bias(&self.b_k, batch * seq_len))?;
        let v = x_2d.matmul(&self.w_v.transpose_2d()?)?.add(&self.expand_bias(&self.b_v, batch * seq_len))?;

        // Split the model dimension into heads: [batch, seq, n_heads, d_head].
        let q = q.reshape(&[batch, seq_len, n_heads, d_head])?;
        let k = k.reshape(&[batch, seq_len, n_heads, d_head])?;
        let v = v.reshape(&[batch, seq_len, n_heads, d_head])?;

        // Move heads outward: [batch, n_heads, seq, d_head], so each
        // (batch, head) pair is a contiguous [seq, d_head] slice.
        let q = self.transpose_heads(&q, batch, seq_len, n_heads, d_head)?;
        let k = self.transpose_heads(&k, batch, seq_len, n_heads, d_head)?;
        let v = self.transpose_heads(&v, batch, seq_len, n_heads, d_head)?;

        // Scaled dot-product attention, computed head by head.
        let scale = 1.0 / (d_head as f32).sqrt();
        let mut attn_output = Vec::with_capacity(batch * n_heads * seq_len * d_head);

        for b in 0..batch {
            for h in 0..n_heads {
                let bh_offset = (b * n_heads + h) * seq_len * d_head;

                // View this (batch, head) pair as 2D [seq, d_head] tensors.
                let q_slice = Tensor::from_data(
                    q.data[bh_offset..bh_offset + seq_len * d_head].to_vec(),
                    vec![seq_len, d_head],
                )?;
                let k_slice = Tensor::from_data(
                    k.data[bh_offset..bh_offset + seq_len * d_head].to_vec(),
                    vec![seq_len, d_head],
                )?;
                let v_slice = Tensor::from_data(
                    v.data[bh_offset..bh_offset + seq_len * d_head].to_vec(),
                    vec![seq_len, d_head],
                )?;

                // scores = (Q · Kᵀ) / sqrt(d_head), shape [seq, seq].
                let k_t = k_slice.transpose_2d()?;
                let scores = q_slice.matmul(&k_t)?.scale(scale);

                // Mask future positions before softmax when causal.
                let scores = if self.config.causal {
                    self.apply_causal_mask(&scores, seq_len)?
                } else {
                    scores
                };

                // Row-wise softmax over key positions.
                let weights = scores.softmax()?;

                // Head output = weights · V, shape [seq, d_head].
                let head_out = weights.matmul(&v_slice)?;
                attn_output.extend_from_slice(&head_out.data);
            }
        }

        // Reassemble all heads: [batch, n_heads, seq, d_head].
        let attn_out = Tensor::from_data(attn_output, vec![batch, n_heads, seq_len, d_head])?;

        // Back to [batch, seq, n_heads * d_head] (heads re-interleaved per position).
        let attn_out = self.transpose_heads_back(&attn_out, batch, seq_len, n_heads, d_head)?;

        // Concatenated heads as 2D for the output projection.
        let concat = attn_out.reshape(&[batch * seq_len, d_model])?;

        // Final output projection: concat · W_oᵀ + b_o.
        let output = concat.matmul(&self.w_o.transpose_2d()?)?.add(&self.expand_bias(&self.b_o, batch * seq_len))?;

        // Restore the 3D input shape.
        output.reshape(&[batch, seq_len, d_model])
    }
|
|
| |
| |
| pub fn forward_with_cache(&self, x: &Tensor) -> Result<(Tensor, AttentionCache), String> { |
| if x.ndim() != 3 { |
| return Err(format!("Attention expects 3D input [batch, seq, d_model], got {}D", x.ndim())); |
| } |
| let batch = x.shape[0]; |
| let seq_len = x.shape[1]; |
| let d_model = x.shape[2]; |
|
|
| if d_model != self.config.d_model { |
| return Err(format!("Input d_model {} doesn't match config {}", d_model, self.config.d_model)); |
| } |
|
|
| let n_heads = self.config.n_heads; |
| let d_head = self.config.d_head(); |
|
|
| let x_2d = x.reshape(&[batch * seq_len, d_model])?; |
| let input_cache = x_2d.clone(); |
|
|
| let q = x_2d.matmul(&self.w_q.transpose_2d()?)?.add(&self.expand_bias(&self.b_q, batch * seq_len))?; |
| let k = x_2d.matmul(&self.w_k.transpose_2d()?)?.add(&self.expand_bias(&self.b_k, batch * seq_len))?; |
| let v = x_2d.matmul(&self.w_v.transpose_2d()?)?.add(&self.expand_bias(&self.b_v, batch * seq_len))?; |
|
|
| let q = q.reshape(&[batch, seq_len, n_heads, d_head])?; |
| let k = k.reshape(&[batch, seq_len, n_heads, d_head])?; |
| let v = v.reshape(&[batch, seq_len, n_heads, d_head])?; |
|
|
| let q = self.transpose_heads(&q, batch, seq_len, n_heads, d_head)?; |
| let k = self.transpose_heads(&k, batch, seq_len, n_heads, d_head)?; |
| let v = self.transpose_heads(&v, batch, seq_len, n_heads, d_head)?; |
|
|
| let scale = 1.0 / (d_head as f32).sqrt(); |
| let mut attn_output = Vec::with_capacity(batch * n_heads * seq_len * d_head); |
| let mut all_weights = Vec::with_capacity(batch * n_heads * seq_len * seq_len); |
|
|
| for b in 0..batch { |
| for h in 0..n_heads { |
| let bh_offset = (b * n_heads + h) * seq_len * d_head; |
| let q_slice = Tensor::from_data( |
| q.data[bh_offset..bh_offset + seq_len * d_head].to_vec(), |
| vec![seq_len, d_head], |
| )?; |
| let k_slice = Tensor::from_data( |
| k.data[bh_offset..bh_offset + seq_len * d_head].to_vec(), |
| vec![seq_len, d_head], |
| )?; |
| let v_slice = Tensor::from_data( |
| v.data[bh_offset..bh_offset + seq_len * d_head].to_vec(), |
| vec![seq_len, d_head], |
| )?; |
|
|
| let scores = q_slice.matmul(&k_slice.transpose_2d()?)?.scale(scale); |
| let scores = if self.config.causal { |
| self.apply_causal_mask(&scores, seq_len)? |
| } else { |
| scores |
| }; |
| let weights = scores.softmax()?; |
| let head_out = weights.matmul(&v_slice)?; |
| all_weights.extend_from_slice(&weights.data); |
| attn_output.extend_from_slice(&head_out.data); |
| } |
| } |
|
|
| let attn_out = Tensor::from_data(attn_output, vec![batch, n_heads, seq_len, d_head])?; |
| let attn_weights = Tensor::from_data(all_weights, vec![batch, n_heads, seq_len, seq_len])?; |
| let attn_out = self.transpose_heads_back(&attn_out, batch, seq_len, n_heads, d_head)?; |
| let concat = attn_out.reshape(&[batch * seq_len, d_model])?; |
| let output = concat.matmul(&self.w_o.transpose_2d()?)?.add(&self.expand_bias(&self.b_o, batch * seq_len))?; |
| let output = output.reshape(&[batch, seq_len, d_model])?; |
|
|
| let cache = AttentionCache { |
| q: q, |
| k: k, |
| v: v, |
| attn_weights, |
| input: input_cache, |
| scale, |
| }; |
|
|
| Ok((output, cache)) |
| } |
|
|
| |
| |
| |
| pub fn forward_cross( |
| &self, |
| q_input: &Tensor, |
| kv_input: &Tensor, |
| ) -> Result<Tensor, String> { |
| let batch = q_input.shape[0]; |
| let dec_seq = q_input.shape[1]; |
| let enc_seq = kv_input.shape[1]; |
| let d_model = q_input.shape[2]; |
| let n_heads = self.config.n_heads; |
| let d_head = self.config.d_head(); |
|
|
| |
| let q_2d = q_input.reshape(&[batch * dec_seq, d_model])?; |
| let kv_2d = kv_input.reshape(&[batch * enc_seq, d_model])?; |
|
|
| let q = q_2d.matmul(&self.w_q.transpose_2d()?)?.add(&self.expand_bias(&self.b_q, batch * dec_seq))?; |
| let k = kv_2d.matmul(&self.w_k.transpose_2d()?)?.add(&self.expand_bias(&self.b_k, batch * enc_seq))?; |
| let v = kv_2d.matmul(&self.w_v.transpose_2d()?)?.add(&self.expand_bias(&self.b_v, batch * enc_seq))?; |
|
|
| let q = q.reshape(&[batch, dec_seq, n_heads, d_head])?; |
| let k = k.reshape(&[batch, enc_seq, n_heads, d_head])?; |
| let v = v.reshape(&[batch, enc_seq, n_heads, d_head])?; |
|
|
| let q = self.transpose_heads(&q, batch, dec_seq, n_heads, d_head)?; |
| let k = self.transpose_heads(&k, batch, enc_seq, n_heads, d_head)?; |
| let v = self.transpose_heads(&v, batch, enc_seq, n_heads, d_head)?; |
|
|
| let scale = 1.0 / (d_head as f32).sqrt(); |
| let mut attn_output = Vec::with_capacity(batch * n_heads * dec_seq * d_head); |
|
|
| for b in 0..batch { |
| for h in 0..n_heads { |
| let q_off = (b * n_heads + h) * dec_seq * d_head; |
| let k_off = (b * n_heads + h) * enc_seq * d_head; |
|
|
| let q_slice = Tensor::from_data( |
| q.data[q_off..q_off + dec_seq * d_head].to_vec(), |
| vec![dec_seq, d_head], |
| )?; |
| let k_slice = Tensor::from_data( |
| k.data[k_off..k_off + enc_seq * d_head].to_vec(), |
| vec![enc_seq, d_head], |
| )?; |
| let v_slice = Tensor::from_data( |
| v.data[k_off..k_off + enc_seq * d_head].to_vec(), |
| vec![enc_seq, d_head], |
| )?; |
|
|
| |
| let scores = q_slice.matmul(&k_slice.transpose_2d()?)?.scale(scale); |
| |
| let weights = scores.softmax()?; |
| let head_out = weights.matmul(&v_slice)?; |
| attn_output.extend_from_slice(&head_out.data); |
| } |
| } |
|
|
| let attn_out = Tensor::from_data(attn_output, vec![batch, n_heads, dec_seq, d_head])?; |
| let attn_out = self.transpose_heads_back(&attn_out, batch, dec_seq, n_heads, d_head)?; |
| let concat = attn_out.reshape(&[batch * dec_seq, d_model])?; |
| let output = concat.matmul(&self.w_o.transpose_2d()?)?.add(&self.expand_bias(&self.b_o, batch * dec_seq))?; |
| output.reshape(&[batch, dec_seq, d_model]) |
| } |
|
|
| |
| |
| |
|
|
| |
| fn transpose_heads( |
| &self, t: &Tensor, batch: usize, seq: usize, n_heads: usize, d_head: usize, |
| ) -> Result<Tensor, String> { |
| let mut out = vec![0.0f32; batch * n_heads * seq * d_head]; |
| for b in 0..batch { |
| for s in 0..seq { |
| for h in 0..n_heads { |
| for d in 0..d_head { |
| let src_idx = ((b * seq + s) * n_heads + h) * d_head + d; |
| let dst_idx = ((b * n_heads + h) * seq + s) * d_head + d; |
| out[dst_idx] = t.data[src_idx]; |
| } |
| } |
| } |
| } |
| Tensor::from_data(out, vec![batch, n_heads, seq, d_head]) |
| } |
|
|
| |
| fn transpose_heads_back( |
| &self, t: &Tensor, batch: usize, seq: usize, n_heads: usize, d_head: usize, |
| ) -> Result<Tensor, String> { |
| let mut out = vec![0.0f32; batch * seq * n_heads * d_head]; |
| for b in 0..batch { |
| for h in 0..n_heads { |
| for s in 0..seq { |
| for d in 0..d_head { |
| let src_idx = ((b * n_heads + h) * seq + s) * d_head + d; |
| let dst_idx = ((b * seq + s) * n_heads + h) * d_head + d; |
| out[dst_idx] = t.data[src_idx]; |
| } |
| } |
| } |
| } |
| Tensor::from_data(out, vec![batch, seq, n_heads * d_head]) |
| } |
|
|
| |
| fn apply_causal_mask(&self, scores: &Tensor, seq_len: usize) -> Result<Tensor, String> { |
| let mut data = scores.data.clone(); |
| for i in 0..seq_len { |
| for j in (i + 1)..seq_len { |
| data[i * seq_len + j] = f32::NEG_INFINITY; |
| } |
| } |
| Tensor::from_data(data, scores.shape.clone()) |
| } |
|
|
| |
| fn expand_bias(&self, bias: &Tensor, n_rows: usize) -> Tensor { |
| let d = bias.numel(); |
| let mut data = Vec::with_capacity(n_rows * d); |
| for _ in 0..n_rows { |
| data.extend_from_slice(&bias.data); |
| } |
| Tensor { data, shape: vec![n_rows, d] } |
| } |
|
|
| |
| pub fn num_params(&self) -> usize { |
| let d = self.config.d_model; |
| |
| 4 * d * d + 4 * d |
| } |
|
|
| |
| pub fn weights(&self) -> Vec<&Tensor> { |
| vec![&self.w_q, &self.w_k, &self.w_v, &self.w_o, |
| &self.b_q, &self.b_k, &self.b_v, &self.b_o] |
| } |
|
|
| |
| pub fn weights_mut(&mut self) -> Vec<&mut Tensor> { |
| vec![&mut self.w_q, &mut self.w_k, &mut self.w_v, &mut self.w_o, |
| &mut self.b_q, &mut self.b_k, &mut self.b_v, &mut self.b_o] |
| } |
| } |
|
|
| |
| |
| |
|
|
#[cfg(test)]
mod tests {
    use super::*;

    // Shared non-causal config: 64-dim model split across 4 heads (d_head = 16).
    fn test_config() -> AttentionConfig {
        AttentionConfig {
            d_model: 64,
            n_heads: 4,
            causal: false,
        }
    }

    // Self-attention preserves the [batch, seq, d_model] input shape.
    #[test]
    fn test_attention_output_shape() {
        let attn = MultiHeadAttention::new(test_config(), 42);
        let x = Tensor::randn(&[2, 8, 64], 99);
        let out = attn.forward(&x).unwrap();
        assert_eq!(out.shape, vec![2, 8, 64]);
    }

    // Causal masking must not introduce NaN/inf into the output (the -inf
    // scores should be absorbed by softmax).
    #[test]
    fn test_attention_causal_mask() {
        let config = AttentionConfig {
            d_model: 64,
            n_heads: 4,
            causal: true,
        };
        let attn = MultiHeadAttention::new(config, 42);
        let x = Tensor::randn(&[1, 4, 64], 99);
        let out = attn.forward(&x).unwrap();
        assert_eq!(out.shape, vec![1, 4, 64]);
        // Every output value must be finite despite the -inf masked scores.
        assert!(out.data.iter().all(|v| v.is_finite()));
    }

    // Cross-attention output takes the decoder's sequence length, not the
    // encoder's.
    #[test]
    fn test_cross_attention_shape() {
        let config = AttentionConfig {
            d_model: 64,
            n_heads: 4,
            causal: false,
        };
        let attn = MultiHeadAttention::new(config, 42);
        let dec = Tensor::randn(&[1, 4, 64], 99);
        let enc = Tensor::randn(&[1, 8, 64], 100);
        let out = attn.forward_cross(&dec, &enc).unwrap();
        assert_eq!(out.shape, vec![1, 4, 64]);
    }

    // 4 * 64 * 64 weights + 4 * 64 biases = 16384 + 256 = 16640.
    #[test]
    fn test_num_params() {
        let attn = MultiHeadAttention::new(test_config(), 42);
        assert_eq!(attn.num_params(), 16640);
    }

    // d_head is the even split of d_model across heads.
    #[test]
    fn test_d_head() {
        let config = AttentionConfig { d_model: 256, n_heads: 8, causal: false };
        assert_eq!(config.d_head(), 32);
    }

    // A d_model mismatch must surface as Err, not a panic.
    #[test]
    fn test_dimension_mismatch() {
        let attn = MultiHeadAttention::new(test_config(), 42);
        let x = Tensor::randn(&[1, 4, 32], 99);
        assert!(attn.forward(&x).is_err());
    }

    // weights() exposes all 8 parameter tensors (4 weights + 4 biases).
    #[test]
    fn test_weights_count() {
        let attn = MultiHeadAttention::new(test_config(), 42);
        assert_eq!(attn.weights().len(), 8);
    }
}
|
|