// SPFsmartGATE / src/encoder.rs
// SPF Smart Gateway - Transformer Encoder
// Copyright 2026 Joseph Stone - All Rights Reserved
//
// N stacked encoder layers: self-attention → layer_norm → FFN → layer_norm
// Sinusoidal positional encoding. Bidirectional attention (no causal mask).
// Input: token embeddings → Output: contextualized representations.
//
// Depends on: tensor.rs, attention.rs, ffn.rs (Layers 0-1)
use crate::tensor::Tensor;
use crate::attention::{AttentionCache, AttentionConfig, MultiHeadAttention};
use crate::ffn::{FfnCache, FfnConfig, FeedForward};
// ============================================================================
// ACTIVATION CACHE (for backward pass — P2-C)
// ============================================================================
/// Cached activations from one encoder layer forward pass.
///
/// Produced by `EncoderLayer::forward_with_cache`; holds every intermediate
/// the backward pass needs so nothing has to be recomputed (P2-C).
pub struct EncoderLayerCache {
    /// Input before LN1 (for layer_norm_backward)
    pub ln1_input: Tensor,
    /// Attention cache (Q, K, V, attn_weights, input, scale)
    pub attn_cache: AttentionCache,
    /// Input before LN2 (after attention residual)
    pub ln2_input: Tensor,
    /// FFN cache (input, hidden_pre_gelu)
    pub ffn_cache: FfnCache,
    /// Original layer input (for residual backward)
    pub residual_input: Tensor,
}
// ============================================================================
// ENCODER CONFIGURATION
// ============================================================================
/// Configuration for the encoder stack.
///
/// All fields are plain sizes/hyperparameters; see `EncoderConfig::spf_writer`
/// and `EncoderConfig::small` for the two presets used in this crate.
#[derive(Debug, Clone)]
pub struct EncoderConfig {
    /// Number of encoder layers
    pub n_layers: usize,
    /// Model dimension
    pub d_model: usize,
    /// Number of attention heads
    pub n_heads: usize,
    /// Feed-forward hidden dimension (default: 4× d_model)
    pub d_ff: usize,
    /// Maximum sequence length (for positional encoding)
    pub max_seq_len: usize,
    /// Layer norm epsilon
    pub ln_eps: f32,
}
impl EncoderConfig {
/// SPF Writer default: 6 layers, 256 dim, 8 heads
pub fn spf_writer() -> Self {
Self {
n_layers: 6,
d_model: 256,
n_heads: 8,
d_ff: 1024,
max_seq_len: 2048,
ln_eps: 1e-5,
}
}
/// Smaller config for testing
pub fn small() -> Self {
Self {
n_layers: 2,
d_model: 64,
n_heads: 4,
d_ff: 256,
max_seq_len: 512,
ln_eps: 1e-5,
}
}
}
// ============================================================================
// SINGLE ENCODER LAYER
// ============================================================================
/// One encoder layer: self-attention + FFN, each with residual + layer norm.
///
/// Pre-norm architecture (norm before sublayer, used by modern transformers):
/// x → LayerNorm → SelfAttention → + residual → LayerNorm → FFN → + residual
pub struct EncoderLayer {
    /// Multi-head self-attention (bidirectional)
    pub self_attn: MultiHeadAttention,
    /// Feed-forward network
    pub ffn: FeedForward,
    /// Layer norm before attention (scale)
    pub ln1_gamma: Tensor,
    /// Layer norm before attention (shift)
    pub ln1_beta: Tensor,
    /// Layer norm before FFN (scale)
    pub ln2_gamma: Tensor,
    /// Layer norm before FFN (shift)
    pub ln2_beta: Tensor,
    /// Epsilon for layer norm (private: fixed at construction)
    ln_eps: f32,
}
impl EncoderLayer {
/// Initialize a single encoder layer
pub fn new(d_model: usize, n_heads: usize, d_ff: usize, ln_eps: f32, seed: u64) -> Self {
let attn_config = AttentionConfig {
d_model,
n_heads,
causal: false, // Encoder uses bidirectional attention
};
let ffn_config = FfnConfig { d_model, d_ff };
Self {
self_attn: MultiHeadAttention::new(attn_config, seed),
ffn: FeedForward::new(ffn_config, seed + 100),
ln1_gamma: Tensor::ones(&[d_model]),
ln1_beta: Tensor::zeros(&[d_model]),
ln2_gamma: Tensor::ones(&[d_model]),
ln2_beta: Tensor::zeros(&[d_model]),
ln_eps,
}
}
/// Forward pass: [batch, seq, d_model] → [batch, seq, d_model]
pub fn forward(&self, x: &Tensor) -> Result<Tensor, String> {
// Pre-norm self-attention with residual
let normed = x.layer_norm(&self.ln1_gamma, &self.ln1_beta, self.ln_eps)?;
let attn_out = self.self_attn.forward(&normed)?;
let x = x.add(&attn_out)?;
// Pre-norm FFN with residual
let normed = x.layer_norm(&self.ln2_gamma, &self.ln2_beta, self.ln_eps)?;
let ffn_out = self.ffn.forward(&normed)?;
x.add(&ffn_out)
}
/// Forward pass with cached activations for backward.
/// Output is IDENTICAL to forward(). Cache is additional data only.
pub fn forward_with_cache(&self, x: &Tensor) -> Result<(Tensor, EncoderLayerCache), String> {
let residual_input = x.clone();
// Pre-norm self-attention with residual
let ln1_input = x.clone();
let normed = x.layer_norm(&self.ln1_gamma, &self.ln1_beta, self.ln_eps)?;
let (attn_out, attn_cache) = self.self_attn.forward_with_cache(&normed)?;
let x = x.add(&attn_out)?;
// Pre-norm FFN with residual
let ln2_input = x.clone();
let normed = x.layer_norm(&self.ln2_gamma, &self.ln2_beta, self.ln_eps)?;
let (ffn_out, ffn_cache) = self.ffn.forward_with_cache(&normed)?;
let output = x.add(&ffn_out)?;
let cache = EncoderLayerCache {
ln1_input,
attn_cache,
ln2_input,
ffn_cache,
residual_input,
};
Ok((output, cache))
}
/// Total parameters in this layer
pub fn num_params(&self) -> usize {
let d = self.ln1_gamma.numel();
self.self_attn.num_params() + self.ffn.num_params() + 4 * d // 2 LN × (gamma + beta)
}
/// Collect all weight tensors for serialization
pub fn weights(&self) -> Vec<&Tensor> {
let mut w = self.self_attn.weights();
w.extend(self.ffn.weights());
w.extend([&self.ln1_gamma, &self.ln1_beta, &self.ln2_gamma, &self.ln2_beta]);
w
}
/// Collect all weight tensors mutably for optimizer updates
pub fn weights_mut(&mut self) -> Vec<&mut Tensor> {
let mut w = self.self_attn.weights_mut();
w.extend(self.ffn.weights_mut());
w.extend([&mut self.ln1_gamma, &mut self.ln1_beta, &mut self.ln2_gamma, &mut self.ln2_beta]);
w
}
}
// ============================================================================
// POSITIONAL ENCODING
// ============================================================================
/// Generate sinusoidal positional encoding table.
/// PE(pos, 2i)   = sin(pos / 10000^(2i/d_model))
/// PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model))
///
/// For odd `d_model`, the final unpaired dimension gets the sin component
/// at exponent (d_model-1)/d_model.
///
/// Returns: [max_seq_len, d_model] tensor
pub fn sinusoidal_positional_encoding(max_seq_len: usize, d_model: usize) -> Tensor {
    let mut data = vec![0.0f32; max_seq_len * d_model];
    // Hoist the frequency denominators out of the position loop: powf is
    // expensive and depends only on the dimension index, so compute each
    // denominator once (O(d_model) powf calls instead of O(seq × d_model)).
    let freqs: Vec<f32> = (0..d_model / 2)
        .map(|i| (10000.0_f32).powf(2.0 * i as f32 / d_model as f32))
        .collect();
    // Denominator for the unpaired last dimension when d_model is odd.
    let odd_freq = if d_model % 2 == 1 {
        Some((10000.0_f32).powf((d_model - 1) as f32 / d_model as f32))
    } else {
        None
    };
    for pos in 0..max_seq_len {
        let row = pos * d_model;
        for (i, &freq) in freqs.iter().enumerate() {
            let angle = pos as f32 / freq;
            data[row + 2 * i] = angle.sin();
            data[row + 2 * i + 1] = angle.cos();
        }
        if let Some(freq) = odd_freq {
            data[row + d_model - 1] = (pos as f32 / freq).sin();
        }
    }
    Tensor { data, shape: vec![max_seq_len, d_model] }
}
// ============================================================================
// ENCODER STACK
// ============================================================================
/// Full encoder: positional encoding + N encoder layers + final layer norm.
pub struct Encoder {
    /// Hyperparameters this encoder was built with
    pub config: EncoderConfig,
    /// Positional encoding table [max_seq_len, d_model], precomputed at init
    pub pos_encoding: Tensor,
    /// Stack of encoder layers
    pub layers: Vec<EncoderLayer>,
    /// Final layer norm (applied after all layers) — scale
    pub final_ln_gamma: Tensor,
    /// Final layer norm (applied after all layers) — shift
    pub final_ln_beta: Tensor,
}
impl Encoder {
    /// Initialize encoder with given config.
    ///
    /// Each layer receives a distinct seed (`seed + i × 1000`) so layers do
    /// not start with identical weights. The final layer norm starts at
    /// identity (gamma = 1, beta = 0).
    pub fn new(config: EncoderConfig, seed: u64) -> Self {
        let pos_encoding = sinusoidal_positional_encoding(config.max_seq_len, config.d_model);
        let layers: Vec<EncoderLayer> = (0..config.n_layers)
            .map(|i| {
                EncoderLayer::new(
                    config.d_model,
                    config.n_heads,
                    config.d_ff,
                    config.ln_eps,
                    seed + (i as u64) * 1000,
                )
            })
            .collect();
        Self {
            final_ln_gamma: Tensor::ones(&[config.d_model]),
            final_ln_beta: Tensor::zeros(&[config.d_model]),
            pos_encoding,
            layers,
            config,
        }
    }

    /// Forward pass: embeddings [batch, seq_len, d_model] → encoded [batch, seq_len, d_model]
    ///
    /// 1. Add positional encoding to input embeddings
    /// 2. Pass through N encoder layers
    /// 3. Apply final layer norm
    ///
    /// # Errors
    /// Returns `Err` if the input is not rank-3, its last dimension does not
    /// match `config.d_model`, or its sequence length exceeds
    /// `config.max_seq_len`.
    pub fn forward(&self, embeddings: &Tensor) -> Result<Tensor, String> {
        // Validate shape up front: indexing shape[1]/shape[2] below would
        // panic on non-3D input, and a d_model mismatch would read the
        // positional table with the wrong stride (silent corruption).
        if embeddings.shape.len() != 3 {
            return Err(format!(
                "Encoder expects [batch, seq, d_model] input, got shape {:?}",
                embeddings.shape
            ));
        }
        let batch = embeddings.shape[0];
        let seq_len = embeddings.shape[1];
        let d_model = embeddings.shape[2];
        if d_model != self.config.d_model {
            return Err(format!(
                "Input d_model {} does not match encoder d_model {}",
                d_model, self.config.d_model
            ));
        }
        if seq_len > self.config.max_seq_len {
            return Err(format!(
                "Sequence length {} exceeds max {}",
                seq_len, self.config.max_seq_len
            ));
        }
        // Slice positional encoding to actual sequence length.
        let pos_enc = self.pos_encoding.slice(0, seq_len)?;
        // Add positional encoding: the same [seq, d_model] table is broadcast
        // across the batch dimension. Rows are contiguous, so one flat pass
        // over the sliced table per batch element suffices.
        let mut x_data = embeddings.data.clone();
        for b in 0..batch {
            let base = b * seq_len * d_model;
            for (offset, &pe) in pos_enc.data[..seq_len * d_model].iter().enumerate() {
                x_data[base + offset] += pe;
            }
        }
        let mut x = Tensor::from_data(x_data, embeddings.shape.clone())?;
        // Pass through encoder layers.
        for layer in &self.layers {
            x = layer.forward(&x)?;
        }
        // Final layer norm.
        x.layer_norm(&self.final_ln_gamma, &self.final_ln_beta, self.config.ln_eps)
    }

    /// Total parameters in the encoder.
    pub fn num_params(&self) -> usize {
        let layer_params: usize = self.layers.iter().map(|l| l.num_params()).sum();
        layer_params + 2 * self.config.d_model // final LN gamma + beta
    }

    /// Collect all weight tensors (layer order, then final LN gamma/beta).
    pub fn weights(&self) -> Vec<&Tensor> {
        let mut w: Vec<&Tensor> = Vec::new();
        for layer in &self.layers {
            w.extend(layer.weights());
        }
        w.push(&self.final_ln_gamma);
        w.push(&self.final_ln_beta);
        w
    }

    /// Collect all weight tensors mutably (same ordering as `weights()`).
    pub fn weights_mut(&mut self) -> Vec<&mut Tensor> {
        let mut w: Vec<&mut Tensor> = Vec::new();
        for layer in &mut self.layers {
            w.extend(layer.weights_mut());
        }
        w.push(&mut self.final_ln_gamma);
        w.push(&mut self.final_ln_beta);
        w
    }
}
// ============================================================================
// TESTS
// ============================================================================
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_positional_encoding_shape() {
        assert_eq!(sinusoidal_positional_encoding(100, 64).shape, vec![100, 64]);
    }

    #[test]
    fn test_positional_encoding_values() {
        let table = sinusoidal_positional_encoding(10, 8);
        // First pair at position 0: sin(0) = 0, cos(0) = 1.
        assert!(table.data[0].abs() < 1e-5);
        assert!((table.data[1] - 1.0).abs() < 1e-5);
        // Sinusoids are bounded in [-1, 1].
        for &v in &table.data {
            assert!((-1.0..=1.0).contains(&v));
        }
    }

    #[test]
    fn test_encoder_layer_shape() {
        let layer = EncoderLayer::new(64, 4, 256, 1e-5, 42);
        let input = Tensor::randn(&[2, 8, 64], 99);
        assert_eq!(layer.forward(&input).unwrap().shape, vec![2, 8, 64]);
    }

    #[test]
    fn test_encoder_layer_residual() {
        let layer = EncoderLayer::new(64, 4, 256, 1e-5, 42);
        let input = Tensor::randn(&[1, 4, 64], 99);
        let out = layer.forward(&input).unwrap();
        // With residual connections the output should drift from the input,
        // but stay close in magnitude (not explode).
        let total: f32 = input
            .data
            .iter()
            .zip(out.data.iter())
            .map(|(a, b)| (a - b).abs())
            .sum();
        let mean_diff = total / input.numel() as f32;
        assert!(mean_diff < 10.0, "Output diverged too far from input: {}", mean_diff);
    }

    #[test]
    fn test_encoder_full_forward() {
        let encoder = Encoder::new(EncoderConfig::small(), 42);
        let input = Tensor::randn(&[1, 8, 64], 99);
        let out = encoder.forward(&input).unwrap();
        assert_eq!(out.shape, vec![1, 8, 64]);
        assert!(out.data.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_encoder_seq_exceeds_max() {
        let config = EncoderConfig { max_seq_len: 10, ..EncoderConfig::small() };
        let encoder = Encoder::new(config, 42);
        // seq = 20 > max = 10 → must be rejected.
        let too_long = Tensor::randn(&[1, 20, 64], 99);
        assert!(encoder.forward(&too_long).is_err());
    }

    #[test]
    fn test_encoder_num_params() {
        // small(): 2 layers, d = 64, ff = 256.
        // Each layer: attn(16640) + ffn(33088) + 4×64(LN) = 49984.
        // 2 layers + final LN = 2×49984 + 128 = 100096.
        let encoder = Encoder::new(EncoderConfig::small(), 42);
        assert_eq!(encoder.num_params(), 100096);
    }

    #[test]
    fn test_encoder_weights_collection() {
        // Each layer: 8(attn) + 4(ffn) + 4(LN) = 16. ×2 layers + 2 final LN = 34.
        let encoder = Encoder::new(EncoderConfig::small(), 42);
        assert_eq!(encoder.weights().len(), 34);
    }
}