// SPF Smart Gateway - Multi-Head Self-Attention
// Copyright 2026 Joseph Stone - All Rights Reserved
//
// Scaled dot-product attention with multi-head projection.
// Supports both causal (decoder) and bidirectional (encoder) masking.
// Pure Rust — builds on tensor.rs only.
//
// Depends on: tensor.rs (Layer 0)
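//
// In formula form (the steps implemented in forward() below):
//   Attention(Q, K, V) = softmax(Q · K^T / sqrt(d_head)) · V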
use crate::tensor::Tensor;
// ============================================================================
// ACTIVATION CACHE (for backward pass — P2-C)
// ============================================================================
/// Cached activations from attention forward pass (for backward)
pub struct AttentionCache {
/// Q projections [batch, n_heads, seq, d_head]
pub q: Tensor,
/// K projections [batch, n_heads, seq, d_head]
pub k: Tensor,
/// V projections [batch, n_heads, seq, d_head]
pub v: Tensor,
/// Attention weights [batch, n_heads, seq, seq]
pub attn_weights: Tensor,
/// Input to projections [batch*seq, d_model]
pub input: Tensor,
/// Scale factor (1/sqrt(d_head))
pub scale: f32,
}
// ============================================================================
// ATTENTION CONFIGURATION
// ============================================================================
/// Configuration for multi-head attention
#[derive(Debug, Clone)]
pub struct AttentionConfig {
/// Model dimension (total across all heads)
pub d_model: usize,
/// Number of attention heads
pub n_heads: usize,
/// Whether to apply causal masking (decoder-style)
pub causal: bool,
}
impl AttentionConfig {
/// Dimension per head: d_model / n_heads
pub fn d_head(&self) -> usize {
self.d_model / self.n_heads
}
}
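// Example for d_head() (illustrative, mirroring test_d_head below):
//
//     let cfg = AttentionConfig { d_model: 256, n_heads: 8, causal: false };
//     assert_eq!(cfg.d_head(), 32); // 256 / 8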
// ============================================================================
// MULTI-HEAD ATTENTION
// ============================================================================
/// Multi-head self-attention layer.
///
/// Contains Q, K, V projection weights and output projection.
/// All weights stored as 2D tensors [d_model, d_model].
pub struct MultiHeadAttention {
pub config: AttentionConfig,
/// Query projection: [d_model, d_model]
pub w_q: Tensor,
/// Key projection: [d_model, d_model]
pub w_k: Tensor,
/// Value projection: [d_model, d_model]
pub w_v: Tensor,
/// Output projection: [d_model, d_model]
pub w_o: Tensor,
/// Query bias: [d_model]
pub b_q: Tensor,
/// Key bias: [d_model]
pub b_k: Tensor,
/// Value bias: [d_model]
pub b_v: Tensor,
/// Output bias: [d_model]
pub b_o: Tensor,
}
impl MultiHeadAttention {
    /// Initialize weight matrices with Xavier/Glorot scaling.
    /// The samples from Tensor::randn are scaled by sqrt(6 / (fan_in + fan_out)),
    /// i.e. the Glorot-uniform bound. For d_model = 256 this gives scale ≈ 0.108.
pub fn new(config: AttentionConfig, seed: u64) -> Self {
let d = config.d_model;
let scale = (6.0 / (d + d) as f32).sqrt();
Self {
w_q: Tensor::randn(&[d, d], seed).scale(scale),
w_k: Tensor::randn(&[d, d], seed + 1).scale(scale),
w_v: Tensor::randn(&[d, d], seed + 2).scale(scale),
w_o: Tensor::randn(&[d, d], seed + 3).scale(scale),
b_q: Tensor::zeros(&[d]),
b_k: Tensor::zeros(&[d]),
b_v: Tensor::zeros(&[d]),
b_o: Tensor::zeros(&[d]),
config,
}
}
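    // Example for new() (illustrative, matching the test configuration below):
    //
    //     let attn = MultiHeadAttention::new(
    //         AttentionConfig { d_model: 64, n_heads: 4, causal: false },
    //         42, // RNG seed
    //     );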
/// Forward pass: input [batch, seq_len, d_model] → output [batch, seq_len, d_model]
///
/// Steps:
/// 1. Project input to Q, K, V using learned weights
/// 2. Split into multiple heads
/// 3. Compute scaled dot-product attention per head
/// 4. Concatenate heads and project output
pub fn forward(&self, x: &Tensor) -> Result<Tensor, String> {
if x.ndim() != 3 {
return Err(format!("Attention expects 3D input [batch, seq, d_model], got {}D", x.ndim()));
}
let batch = x.shape[0];
let seq_len = x.shape[1];
let d_model = x.shape[2];
if d_model != self.config.d_model {
return Err(format!(
"Input d_model {} doesn't match config {}",
d_model, self.config.d_model
));
}
let n_heads = self.config.n_heads;
let d_head = self.config.d_head();
// Reshape input to [batch * seq_len, d_model] for matmul
let x_2d = x.reshape(&[batch * seq_len, d_model])?;
// Project Q, K, V: [batch*seq, d_model] × [d_model, d_model] = [batch*seq, d_model]
let q = x_2d.matmul(&self.w_q.transpose_2d()?)?.add(&self.expand_bias(&self.b_q, batch * seq_len))?;
let k = x_2d.matmul(&self.w_k.transpose_2d()?)?.add(&self.expand_bias(&self.b_k, batch * seq_len))?;
let v = x_2d.matmul(&self.w_v.transpose_2d()?)?.add(&self.expand_bias(&self.b_v, batch * seq_len))?;
// Reshape to [batch, seq_len, n_heads, d_head]
let q = q.reshape(&[batch, seq_len, n_heads, d_head])?;
let k = k.reshape(&[batch, seq_len, n_heads, d_head])?;
let v = v.reshape(&[batch, seq_len, n_heads, d_head])?;
// Transpose to [batch, n_heads, seq_len, d_head] for attention computation
let q = self.transpose_heads(&q, batch, seq_len, n_heads, d_head)?;
let k = self.transpose_heads(&k, batch, seq_len, n_heads, d_head)?;
let v = self.transpose_heads(&v, batch, seq_len, n_heads, d_head)?;
// Compute attention for each batch × head combination
let scale = 1.0 / (d_head as f32).sqrt();
let mut attn_output = Vec::with_capacity(batch * n_heads * seq_len * d_head);
for b in 0..batch {
for h in 0..n_heads {
let bh_offset = (b * n_heads + h) * seq_len * d_head;
// Extract Q, K, V slices for this batch/head: [seq_len, d_head]
let q_slice = Tensor::from_data(
q.data[bh_offset..bh_offset + seq_len * d_head].to_vec(),
vec![seq_len, d_head],
)?;
let k_slice = Tensor::from_data(
k.data[bh_offset..bh_offset + seq_len * d_head].to_vec(),
vec![seq_len, d_head],
)?;
let v_slice = Tensor::from_data(
v.data[bh_offset..bh_offset + seq_len * d_head].to_vec(),
vec![seq_len, d_head],
)?;
// Attention scores: Q × K^T / sqrt(d_head) → [seq_len, seq_len]
let k_t = k_slice.transpose_2d()?;
let scores = q_slice.matmul(&k_t)?.scale(scale);
// Apply causal mask if needed (set future positions to -inf)
let scores = if self.config.causal {
self.apply_causal_mask(&scores, seq_len)?
} else {
scores
};
// Softmax over last dimension → attention weights
let weights = scores.softmax()?;
// Weighted sum of values: weights × V → [seq_len, d_head]
let head_out = weights.matmul(&v_slice)?;
attn_output.extend_from_slice(&head_out.data);
}
}
// Output is [batch, n_heads, seq_len, d_head]
let attn_out = Tensor::from_data(attn_output, vec![batch, n_heads, seq_len, d_head])?;
// Transpose back to [batch, seq_len, n_heads, d_head]
let attn_out = self.transpose_heads_back(&attn_out, batch, seq_len, n_heads, d_head)?;
// Reshape to [batch * seq_len, d_model] for output projection
let concat = attn_out.reshape(&[batch * seq_len, d_model])?;
// Output projection
let output = concat.matmul(&self.w_o.transpose_2d()?)?.add(&self.expand_bias(&self.b_o, batch * seq_len))?;
// Reshape back to [batch, seq_len, d_model]
output.reshape(&[batch, seq_len, d_model])
}
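    // Usage sketch for forward() (shapes as in test_attention_output_shape):
    //
    //     let x = Tensor::randn(&[2, 8, 64], 99); // [batch=2, seq=8, d_model=64]
    //     let y = attn.forward(&x)?;              // [2, 8, 64]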
/// Forward pass that also returns cached activations for backward.
/// Output is IDENTICAL to forward(). Cache is additional data only.
pub fn forward_with_cache(&self, x: &Tensor) -> Result<(Tensor, AttentionCache), String> {
if x.ndim() != 3 {
return Err(format!("Attention expects 3D input [batch, seq, d_model], got {}D", x.ndim()));
}
let batch = x.shape[0];
let seq_len = x.shape[1];
let d_model = x.shape[2];
if d_model != self.config.d_model {
return Err(format!("Input d_model {} doesn't match config {}", d_model, self.config.d_model));
}
let n_heads = self.config.n_heads;
let d_head = self.config.d_head();
let x_2d = x.reshape(&[batch * seq_len, d_model])?;
let input_cache = x_2d.clone();
let q = x_2d.matmul(&self.w_q.transpose_2d()?)?.add(&self.expand_bias(&self.b_q, batch * seq_len))?;
let k = x_2d.matmul(&self.w_k.transpose_2d()?)?.add(&self.expand_bias(&self.b_k, batch * seq_len))?;
let v = x_2d.matmul(&self.w_v.transpose_2d()?)?.add(&self.expand_bias(&self.b_v, batch * seq_len))?;
let q = q.reshape(&[batch, seq_len, n_heads, d_head])?;
let k = k.reshape(&[batch, seq_len, n_heads, d_head])?;
let v = v.reshape(&[batch, seq_len, n_heads, d_head])?;
let q = self.transpose_heads(&q, batch, seq_len, n_heads, d_head)?;
let k = self.transpose_heads(&k, batch, seq_len, n_heads, d_head)?;
let v = self.transpose_heads(&v, batch, seq_len, n_heads, d_head)?;
let scale = 1.0 / (d_head as f32).sqrt();
let mut attn_output = Vec::with_capacity(batch * n_heads * seq_len * d_head);
let mut all_weights = Vec::with_capacity(batch * n_heads * seq_len * seq_len);
for b in 0..batch {
for h in 0..n_heads {
let bh_offset = (b * n_heads + h) * seq_len * d_head;
let q_slice = Tensor::from_data(
q.data[bh_offset..bh_offset + seq_len * d_head].to_vec(),
vec![seq_len, d_head],
)?;
let k_slice = Tensor::from_data(
k.data[bh_offset..bh_offset + seq_len * d_head].to_vec(),
vec![seq_len, d_head],
)?;
let v_slice = Tensor::from_data(
v.data[bh_offset..bh_offset + seq_len * d_head].to_vec(),
vec![seq_len, d_head],
)?;
let scores = q_slice.matmul(&k_slice.transpose_2d()?)?.scale(scale);
let scores = if self.config.causal {
self.apply_causal_mask(&scores, seq_len)?
} else {
scores
};
let weights = scores.softmax()?;
let head_out = weights.matmul(&v_slice)?;
all_weights.extend_from_slice(&weights.data);
attn_output.extend_from_slice(&head_out.data);
}
}
let attn_out = Tensor::from_data(attn_output, vec![batch, n_heads, seq_len, d_head])?;
let attn_weights = Tensor::from_data(all_weights, vec![batch, n_heads, seq_len, seq_len])?;
let attn_out = self.transpose_heads_back(&attn_out, batch, seq_len, n_heads, d_head)?;
let concat = attn_out.reshape(&[batch * seq_len, d_model])?;
let output = concat.matmul(&self.w_o.transpose_2d()?)?.add(&self.expand_bias(&self.b_o, batch * seq_len))?;
let output = output.reshape(&[batch, seq_len, d_model])?;
let cache = AttentionCache {
            q,
            k,
            v,
attn_weights,
input: input_cache,
scale,
};
Ok((output, cache))
}
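    // Usage sketch for forward_with_cache(): the cache carries what a backward
    // pass needs (Q, K, V, attention weights, the 2D input, and the 1/sqrt(d_head) scale).
    //
    //     let (y, cache) = attn.forward_with_cache(&x)?;
    //     // y is identical to attn.forward(&x)?
    //     // cache.attn_weights has shape [batch, n_heads, seq, seq]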
/// Forward pass with cross-attention (for decoder attending to encoder output)
/// q_input: decoder state [batch, dec_seq, d_model]
/// kv_input: encoder output [batch, enc_seq, d_model]
pub fn forward_cross(
&self,
q_input: &Tensor,
kv_input: &Tensor,
) -> Result<Tensor, String> {
        let batch = q_input.shape[0];
        let dec_seq = q_input.shape[1];
        let enc_seq = kv_input.shape[1];
        let d_model = q_input.shape[2];
        if d_model != self.config.d_model || kv_input.shape[2] != d_model {
            return Err(format!(
                "Cross-attention d_model mismatch: q {} / kv {} vs config {}",
                d_model, kv_input.shape[2], self.config.d_model
            ));
        }
        let n_heads = self.config.n_heads;
        let d_head = self.config.d_head();
// Project Q from decoder, K/V from encoder
let q_2d = q_input.reshape(&[batch * dec_seq, d_model])?;
let kv_2d = kv_input.reshape(&[batch * enc_seq, d_model])?;
let q = q_2d.matmul(&self.w_q.transpose_2d()?)?.add(&self.expand_bias(&self.b_q, batch * dec_seq))?;
let k = kv_2d.matmul(&self.w_k.transpose_2d()?)?.add(&self.expand_bias(&self.b_k, batch * enc_seq))?;
let v = kv_2d.matmul(&self.w_v.transpose_2d()?)?.add(&self.expand_bias(&self.b_v, batch * enc_seq))?;
let q = q.reshape(&[batch, dec_seq, n_heads, d_head])?;
let k = k.reshape(&[batch, enc_seq, n_heads, d_head])?;
let v = v.reshape(&[batch, enc_seq, n_heads, d_head])?;
let q = self.transpose_heads(&q, batch, dec_seq, n_heads, d_head)?;
let k = self.transpose_heads(&k, batch, enc_seq, n_heads, d_head)?;
let v = self.transpose_heads(&v, batch, enc_seq, n_heads, d_head)?;
let scale = 1.0 / (d_head as f32).sqrt();
let mut attn_output = Vec::with_capacity(batch * n_heads * dec_seq * d_head);
for b in 0..batch {
for h in 0..n_heads {
let q_off = (b * n_heads + h) * dec_seq * d_head;
let k_off = (b * n_heads + h) * enc_seq * d_head;
let q_slice = Tensor::from_data(
q.data[q_off..q_off + dec_seq * d_head].to_vec(),
vec![dec_seq, d_head],
)?;
let k_slice = Tensor::from_data(
k.data[k_off..k_off + enc_seq * d_head].to_vec(),
vec![enc_seq, d_head],
)?;
let v_slice = Tensor::from_data(
v.data[k_off..k_off + enc_seq * d_head].to_vec(),
vec![enc_seq, d_head],
)?;
// Q[dec_seq, d_head] × K^T[d_head, enc_seq] → [dec_seq, enc_seq]
let scores = q_slice.matmul(&k_slice.transpose_2d()?)?.scale(scale);
// No causal mask for cross-attention — decoder can attend to all encoder positions
let weights = scores.softmax()?;
let head_out = weights.matmul(&v_slice)?;
attn_output.extend_from_slice(&head_out.data);
}
}
let attn_out = Tensor::from_data(attn_output, vec![batch, n_heads, dec_seq, d_head])?;
let attn_out = self.transpose_heads_back(&attn_out, batch, dec_seq, n_heads, d_head)?;
let concat = attn_out.reshape(&[batch * dec_seq, d_model])?;
let output = concat.matmul(&self.w_o.transpose_2d()?)?.add(&self.expand_bias(&self.b_o, batch * dec_seq))?;
output.reshape(&[batch, dec_seq, d_model])
}
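    // Usage sketch for forward_cross() (shapes as in test_cross_attention_shape):
    //
    //     let dec = Tensor::randn(&[1, 4, 64], 99);  // decoder states [batch, dec_seq, d_model]
    //     let enc = Tensor::randn(&[1, 8, 64], 100); // encoder output [batch, enc_seq, d_model]
    //     let y = attn.forward_cross(&dec, &enc)?;   // [1, 4, 64], follows dec_seq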
// ========================================================================
// INTERNAL HELPERS
// ========================================================================
/// Transpose [batch, seq, n_heads, d_head] → [batch, n_heads, seq, d_head]
fn transpose_heads(
&self, t: &Tensor, batch: usize, seq: usize, n_heads: usize, d_head: usize,
) -> Result<Tensor, String> {
let mut out = vec![0.0f32; batch * n_heads * seq * d_head];
for b in 0..batch {
for s in 0..seq {
for h in 0..n_heads {
for d in 0..d_head {
let src_idx = ((b * seq + s) * n_heads + h) * d_head + d;
let dst_idx = ((b * n_heads + h) * seq + s) * d_head + d;
out[dst_idx] = t.data[src_idx];
}
}
}
}
Tensor::from_data(out, vec![batch, n_heads, seq, d_head])
}
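    // Index sketch for transpose_heads(): with batch = 1, seq = 2, n_heads = 2,
    // d_head = 1, the element at (s = 1, h = 0) sits at flat index 2 in the
    // [b, s, h, d] layout and moves to flat index 1 in the [b, h, s, d] layout.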
/// Transpose [batch, n_heads, seq, d_head] → [batch, seq, n_heads, d_head]
fn transpose_heads_back(
&self, t: &Tensor, batch: usize, seq: usize, n_heads: usize, d_head: usize,
) -> Result<Tensor, String> {
let mut out = vec![0.0f32; batch * seq * n_heads * d_head];
for b in 0..batch {
for h in 0..n_heads {
for s in 0..seq {
for d in 0..d_head {
let src_idx = ((b * n_heads + h) * seq + s) * d_head + d;
let dst_idx = ((b * seq + s) * n_heads + h) * d_head + d;
out[dst_idx] = t.data[src_idx];
}
}
}
}
        Tensor::from_data(out, vec![batch, seq, n_heads, d_head])
}
/// Apply causal mask: set attention scores for future positions to -inf
fn apply_causal_mask(&self, scores: &Tensor, seq_len: usize) -> Result<Tensor, String> {
let mut data = scores.data.clone();
for i in 0..seq_len {
for j in (i + 1)..seq_len {
data[i * seq_len + j] = f32::NEG_INFINITY;
}
}
Tensor::from_data(data, scores.shape.clone())
}
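    // Mask sketch for apply_causal_mask() with seq_len = 3
    // (rows: query position, cols: key position):
    //
    //     [ s00   -inf  -inf ]
    //     [ s10   s11   -inf ]
    //     [ s20   s21   s22  ]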
/// Expand bias [d_model] to [n_rows, d_model] for addition after matmul
fn expand_bias(&self, bias: &Tensor, n_rows: usize) -> Tensor {
let d = bias.numel();
let mut data = Vec::with_capacity(n_rows * d);
for _ in 0..n_rows {
data.extend_from_slice(&bias.data);
}
Tensor { data, shape: vec![n_rows, d] }
}
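    // Example for expand_bias(): a bias [b0, b1, b2] with n_rows = 2 becomes the
    // 2x3 matrix [[b0, b1, b2], [b0, b1, b2]], so it can be added row-wise after a matmul.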
/// Total number of parameters in this attention layer
pub fn num_params(&self) -> usize {
let d = self.config.d_model;
// 4 weight matrices [d,d] + 4 bias vectors [d]
4 * d * d + 4 * d
}
/// Collect all weight tensors (for serialization / gradient updates)
pub fn weights(&self) -> Vec<&Tensor> {
vec![&self.w_q, &self.w_k, &self.w_v, &self.w_o,
&self.b_q, &self.b_k, &self.b_v, &self.b_o]
}
/// Collect all weight tensors mutably (for optimizer updates)
pub fn weights_mut(&mut self) -> Vec<&mut Tensor> {
vec![&mut self.w_q, &mut self.w_k, &mut self.w_v, &mut self.w_o,
&mut self.b_q, &mut self.b_k, &mut self.b_v, &mut self.b_o]
}
}
// ============================================================================
// TESTS
// ============================================================================
#[cfg(test)]
mod tests {
use super::*;
fn test_config() -> AttentionConfig {
AttentionConfig {
d_model: 64,
n_heads: 4,
causal: false,
}
}
#[test]
fn test_attention_output_shape() {
let attn = MultiHeadAttention::new(test_config(), 42);
let x = Tensor::randn(&[2, 8, 64], 99); // batch=2, seq=8, d=64
let out = attn.forward(&x).unwrap();
assert_eq!(out.shape, vec![2, 8, 64]);
}
#[test]
fn test_attention_causal_mask() {
let config = AttentionConfig {
d_model: 64,
n_heads: 4,
causal: true,
};
let attn = MultiHeadAttention::new(config, 42);
let x = Tensor::randn(&[1, 4, 64], 99);
let out = attn.forward(&x).unwrap();
assert_eq!(out.shape, vec![1, 4, 64]);
// Values should be finite (causal mask doesn't break softmax)
assert!(out.data.iter().all(|v| v.is_finite()));
}
#[test]
fn test_cross_attention_shape() {
let config = AttentionConfig {
d_model: 64,
n_heads: 4,
causal: false,
};
let attn = MultiHeadAttention::new(config, 42);
let dec = Tensor::randn(&[1, 4, 64], 99); // decoder: seq=4
let enc = Tensor::randn(&[1, 8, 64], 100); // encoder: seq=8
let out = attn.forward_cross(&dec, &enc).unwrap();
assert_eq!(out.shape, vec![1, 4, 64]); // output follows decoder seq_len
}
#[test]
fn test_num_params() {
let attn = MultiHeadAttention::new(test_config(), 42);
// d=64: 4×64×64 + 4×64 = 16384 + 256 = 16640
assert_eq!(attn.num_params(), 16640);
}
#[test]
fn test_d_head() {
let config = AttentionConfig { d_model: 256, n_heads: 8, causal: false };
assert_eq!(config.d_head(), 32);
}
#[test]
fn test_dimension_mismatch() {
let attn = MultiHeadAttention::new(test_config(), 42);
let x = Tensor::randn(&[1, 4, 32], 99); // wrong d_model
assert!(attn.forward(&x).is_err());
}
#[test]
fn test_weights_count() {
let attn = MultiHeadAttention::new(test_config(), 42);
assert_eq!(attn.weights().len(), 8); // 4 weight matrices + 4 biases
}
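    // Sketch test (an addition, not part of the original suite): forward_with_cache()
    // documents that its output is identical to forward(); both paths run the same
    // operations in the same order, so the data should match exactly.
    #[test]
    fn test_forward_with_cache_matches_forward() {
        let attn = MultiHeadAttention::new(test_config(), 42);
        let x = Tensor::randn(&[2, 8, 64], 99);
        let plain = attn.forward(&x).unwrap();
        let (cached, cache) = attn.forward_with_cache(&x).unwrap();
        assert_eq!(plain.shape, cached.shape);
        assert_eq!(plain.data, cached.data);
        // Attention weights cached as [batch, n_heads, seq, seq]
        assert_eq!(cache.attn_weights.shape, vec![2, 4, 8, 8]);
    }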
}