// SPF Smart Gateway - Transformer Checkpoint System // Copyright 2026 Joseph Stone - All Rights Reserved // // Save/load transformer weights to/from binary format. // Versioned checkpoints for rollback safety. // Delta format for mesh weight sharing (send only changed weights). // // Storage: LMDB via agent_state.rs or flat binary files in DEPLOY. // Binary format: [magic:4][version:4][num_tensors:4][tensor_headers][tensor_data] // // Depends on: tensor.rs, transformer.rs use crate::tensor::Tensor; use std::io::{self, Read, Write, Cursor}; // ============================================================================ // CHECKPOINT FORMAT // ============================================================================ /// Magic bytes identifying SPF checkpoint files const CHECKPOINT_MAGIC: &[u8; 4] = b"SPFC"; /// Current checkpoint format version const CHECKPOINT_VERSION: u32 = 1; /// Metadata for a saved checkpoint #[derive(Debug, Clone)] pub struct CheckpointMeta { /// Version of the checkpoint format pub format_version: u32, /// Number of tensor parameters pub num_tensors: u32, /// Total bytes of weight data pub total_bytes: u64, /// Checkpoint creation timestamp (RFC3339) pub timestamp: String, /// Model config identifier (e.g., "spf_writer_v1") pub model_id: String, /// Training step at which checkpoint was saved pub step: u64, } // ============================================================================ // SERIALIZE — Weights to bytes // ============================================================================ /// Serialize a list of tensors to binary checkpoint format. 
///
/// Format:
/// ```text
/// [SPFC]         4 bytes magic
/// [version]      4 bytes u32 BE
/// [num_tensors]  4 bytes u32 BE
/// [model_id_len] 2 bytes u16 BE
/// [model_id]     N bytes UTF-8
/// [step]         8 bytes u64 BE
/// For each tensor:
///   [ndim]       4 bytes u32 BE
///   [shape_dims] ndim × 4 bytes u32 BE each
///   [data]       numel × 4 bytes f32 LE (native float layout)
/// ```
///
/// # Errors
/// Returns `io::ErrorKind::InvalidInput` if `model_id` is longer than
/// `u16::MAX` bytes (the length field is a u16). Writes to a `Vec` cannot
/// otherwise fail.
pub fn serialize_weights(
    weights: &[&Tensor],
    model_id: &str,
    step: u64,
) -> Result<Vec<u8>, io::Error> {
    // Validate the model id before writing anything.
    let id_bytes = model_id.as_bytes();
    if id_bytes.len() > u16::MAX as usize {
        return Err(io::Error::new(io::ErrorKind::InvalidInput, "Model ID too long"));
    }

    // Exact output size is known up front — preallocate to avoid regrows:
    // fixed header (magic + version + count + id_len + step) + id + per-tensor
    // (ndim word + one u32 per dim + 4 bytes per f32 element).
    let capacity = 4 + 4 + 4 + 2 + id_bytes.len() + 8
        + weights
            .iter()
            .map(|t| 4 + 4 * t.shape.len() + 4 * t.data.len())
            .sum::<usize>();
    let mut buf: Vec<u8> = Vec::with_capacity(capacity);

    // Header
    buf.write_all(CHECKPOINT_MAGIC)?;
    buf.write_all(&CHECKPOINT_VERSION.to_be_bytes())?;
    buf.write_all(&(weights.len() as u32).to_be_bytes())?;

    // Model ID (length-prefixed UTF-8)
    buf.write_all(&(id_bytes.len() as u16).to_be_bytes())?;
    buf.write_all(id_bytes)?;

    // Step
    buf.write_all(&step.to_be_bytes())?;

    // Tensors: header integers are big-endian, float payload is little-endian
    // (native on ARM).
    for tensor in weights {
        buf.write_all(&(tensor.ndim() as u32).to_be_bytes())?;
        for &dim in &tensor.shape {
            buf.write_all(&(dim as u32).to_be_bytes())?;
        }
        for &val in &tensor.data {
            buf.write_all(&val.to_le_bytes())?;
        }
    }

    Ok(buf)
}

/// Deserialize weights from binary checkpoint format.
/// Returns (tensors, metadata).
pub fn deserialize_weights(data: &[u8]) -> Result<(Vec, CheckpointMeta), io::Error> { let mut cursor = Cursor::new(data); // Magic let mut magic = [0u8; 4]; cursor.read_exact(&mut magic)?; if &magic != CHECKPOINT_MAGIC { return Err(io::Error::new( io::ErrorKind::InvalidData, format!("Invalid checkpoint magic: {:?}", magic), )); } // Version let mut ver_buf = [0u8; 4]; cursor.read_exact(&mut ver_buf)?; let version = u32::from_be_bytes(ver_buf); if version != CHECKPOINT_VERSION { return Err(io::Error::new( io::ErrorKind::InvalidData, format!("Unsupported checkpoint version: {} (expected {})", version, CHECKPOINT_VERSION), )); } // Num tensors let mut nt_buf = [0u8; 4]; cursor.read_exact(&mut nt_buf)?; let num_tensors = u32::from_be_bytes(nt_buf); // Model ID let mut id_len_buf = [0u8; 2]; cursor.read_exact(&mut id_len_buf)?; let id_len = u16::from_be_bytes(id_len_buf) as usize; let mut id_buf = vec![0u8; id_len]; cursor.read_exact(&mut id_buf)?; let model_id = String::from_utf8(id_buf) .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; // Step let mut step_buf = [0u8; 8]; cursor.read_exact(&mut step_buf)?; let step = u64::from_be_bytes(step_buf); // Read tensors let mut tensors = Vec::with_capacity(num_tensors as usize); let mut total_bytes: u64 = 0; for _ in 0..num_tensors { // ndim let mut ndim_buf = [0u8; 4]; cursor.read_exact(&mut ndim_buf)?; let ndim = u32::from_be_bytes(ndim_buf) as usize; // Shape let mut shape = Vec::with_capacity(ndim); for _ in 0..ndim { let mut dim_buf = [0u8; 4]; cursor.read_exact(&mut dim_buf)?; shape.push(u32::from_be_bytes(dim_buf) as usize); } // Data let numel: usize = shape.iter().product(); let mut data = Vec::with_capacity(numel); for _ in 0..numel { let mut f_buf = [0u8; 4]; cursor.read_exact(&mut f_buf)?; data.push(f32::from_le_bytes(f_buf)); } total_bytes += (numel * 4) as u64; tensors.push(Tensor { data, shape }); } let meta = CheckpointMeta { format_version: version, num_tensors, total_bytes, timestamp: 
String::new(), // Not stored in binary — caller fills from filesystem model_id, step, }; Ok((tensors, meta)) } // ============================================================================ // APPLY WEIGHTS — Load deserialized tensors into a model // ============================================================================ /// Apply deserialized weights to a model's weight tensors. /// Verifies shape compatibility before applying. pub fn apply_weights( model_weights: &mut [&mut Tensor], checkpoint_weights: &[Tensor], ) -> Result<(), String> { if model_weights.len() != checkpoint_weights.len() { return Err(format!( "Weight count mismatch: model has {}, checkpoint has {}", model_weights.len(), checkpoint_weights.len() )); } for (i, (model_w, ckpt_w)) in model_weights.iter_mut().zip(checkpoint_weights.iter()).enumerate() { if model_w.shape != ckpt_w.shape { return Err(format!( "Shape mismatch at weight {}: model {:?}, checkpoint {:?}", i, model_w.shape, ckpt_w.shape )); } model_w.data.copy_from_slice(&ckpt_w.data); } Ok(()) } // ============================================================================ // DELTA CHECKPOINTS — For mesh weight sharing // ============================================================================ /// Compute weight delta: new_weights - old_weights /// Only non-zero deltas are included (sparse representation) /// Returns: (tensor_index, delta_tensor) pairs pub fn compute_delta( old_weights: &[&Tensor], new_weights: &[&Tensor], threshold: f32, ) -> Vec<(usize, Tensor)> { let mut deltas = Vec::new(); for (i, (old, new)) in old_weights.iter().zip(new_weights.iter()).enumerate() { if old.shape != new.shape { continue; // Shape mismatch — skip (shouldn't happen) } let diff: Vec = old.data.iter() .zip(&new.data) .map(|(&a, &b)| b - a) .collect(); // Check if any element exceeds threshold let has_change = diff.iter().any(|&d| d.abs() > threshold); if has_change { deltas.push((i, Tensor { data: diff, shape: old.shape.clone(), })); } } 
deltas } /// Apply weight deltas to model weights /// delta: (tensor_index, delta_values) pairs from compute_delta pub fn apply_delta( weights: &mut [&mut Tensor], deltas: &[(usize, Tensor)], ) -> Result<(), String> { for (idx, delta) in deltas { if *idx >= weights.len() { return Err(format!("Delta index {} exceeds weight count {}", idx, weights.len())); } if weights[*idx].shape != delta.shape { return Err(format!( "Delta shape mismatch at {}: weight {:?}, delta {:?}", idx, weights[*idx].shape, delta.shape )); } for (w, &d) in weights[*idx].data.iter_mut().zip(&delta.data) { *w += d; } } Ok(()) } // ============================================================================ // MESH STREAM HANDLER — WeightSync // ============================================================================ /// Handle an incoming WeightSync mesh frame. /// Receives transformer weight deltas from peer nodes for federated learning. /// Validates checkpoint format (SPFC magic bytes), returns acknowledgment. /// Zero silent drops. 
///
/// Called from: mesh.rs stream_router() for StreamType::WeightSync (0x07)
//
// NOTE(review): the generic arguments on this signature appear stripped by an
// extraction step — `&Option>>`, `-> Option`, and `Option = None` below are
// not valid Rust. The body's use of `.write()` suggests the parameter was
// presumably `&Option<Arc<RwLock<...transformer state...>>>` and the return
// `Option<crate::framing::Frame>` — TODO confirm against mesh.rs and restore.
pub fn handle_weight_sync(
    frame: &crate::framing::Frame,
    peer_key: &str,
    transformer: &Option>>,
) -> Option {
    // Cheap format gate: a valid checkpoint payload starts with the 4-byte
    // SPFC magic; anything shorter or mismatched is "rejected" in the ack.
    let payload_len = frame.payload.len();
    let valid_format = payload_len >= 4 && &frame.payload[..4] == CHECKPOINT_MAGIC;

    // Log every received frame (the "zero silent drops" contract); the peer
    // key is truncated to its first 8 chars (or fewer) for readability.
    eprintln!("[SPF-WEIGHT-SYNC] Received from {}: {} bytes, valid_format={}",
        &peer_key[..8.min(peer_key.len())], payload_len, valid_format);

    // Apply weights if valid format and transformer loaded
    let mut applied = false;
    let mut apply_error: Option = None;
    if valid_format {
        if let Some(ref t) = transformer {
            match deserialize_weights(&frame.payload) {
                Ok((checkpoint_weights, meta)) => {
                    // Take the write lock; a poisoned lock is reported in the
                    // ack rather than panicking.
                    match t.write() {
                        Ok(mut state) => {
                            let mut model_weights = state.model.weights_mut();
                            match apply_weights(&mut model_weights, &checkpoint_weights) {
                                Ok(()) => {
                                    applied = true;
                                    eprintln!("[SPF-WEIGHT-SYNC] Applied from {}: model={}, step={}",
                                        &peer_key[..8.min(peer_key.len())], meta.model_id, meta.step);
                                }
                                Err(e) => {
                                    eprintln!("[SPF-WEIGHT-SYNC] Apply failed: {}", e);
                                    apply_error = Some(e);
                                }
                            }
                        }
                        Err(e) => {
                            apply_error = Some(format!("Lock: {}", e));
                        }
                    }
                }
                Err(e) => {
                    apply_error = Some(format!("Deserialize: {}", e));
                }
            }
        }
    }

    // Always acknowledge. Status is three-valued:
    //   "applied"  — weights deserialized and loaded into the model
    //   "accepted" — valid SPFC payload but not applied (no transformer, or
    //                an apply/lock/deserialize error — see "error" field)
    //   "rejected" — payload failed the magic-bytes check
    let ack = serde_json::json!({
        "type": "weight_sync_ack",
        "bytes_received": payload_len,
        "valid_format": valid_format,
        "applied": applied,
        "error": apply_error,
        "from": peer_key,
        "status": if applied { "applied" } else if valid_format { "accepted" } else { "rejected" }
    });

    Some(crate::framing::Frame::new(
        crate::framing::StreamType::WeightSync,
        ack.to_string().into_bytes(),
    ))
}

// ============================================================================
// TESTS
// ============================================================================

#[cfg(test)]
mod tests {
    use super::*;

    // Three small fixtures covering 2-D, 1-D, and seeded-random tensors.
    // NOTE(review): return type reads `Vec` — the generic parameter
    // (presumably `Vec<Tensor>`) appears stripped; TODO restore.
    fn make_test_weights() -> Vec {
        vec![
            Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0], vec![2, 2]).unwrap(),
            Tensor::from_data(vec![0.5, -0.5, 1.5], vec![3]).unwrap(),
Tensor::randn(&[4, 8], 42), ] } #[test] fn test_serialize_deserialize_roundtrip() { let weights = make_test_weights(); let refs: Vec<&Tensor> = weights.iter().collect(); let bytes = serialize_weights(&refs, "test_model", 100).unwrap(); let (loaded, meta) = deserialize_weights(&bytes).unwrap(); assert_eq!(meta.format_version, CHECKPOINT_VERSION); assert_eq!(meta.num_tensors, 3); assert_eq!(meta.model_id, "test_model"); assert_eq!(meta.step, 100); assert_eq!(loaded.len(), weights.len()); for (orig, load) in weights.iter().zip(&loaded) { assert_eq!(orig.shape, load.shape); for (a, b) in orig.data.iter().zip(&load.data) { assert!((a - b).abs() < 1e-7, "Data mismatch: {} vs {}", a, b); } } } #[test] fn test_invalid_magic() { let data = b"XXXX\x00\x00\x00\x01\x00\x00\x00\x00"; let result = deserialize_weights(data); assert!(result.is_err()); } #[test] fn test_apply_weights() { let weights = make_test_weights(); let refs: Vec<&Tensor> = weights.iter().collect(); let bytes = serialize_weights(&refs, "test", 0).unwrap(); let (loaded, _) = deserialize_weights(&bytes).unwrap(); let mut target = vec![ Tensor::zeros(&[2, 2]), Tensor::zeros(&[3]), Tensor::zeros(&[4, 8]), ]; let mut target_refs: Vec<&mut Tensor> = target.iter_mut().collect(); apply_weights(&mut target_refs, &loaded).unwrap(); assert_eq!(target[0].data, weights[0].data); assert_eq!(target[1].data, weights[1].data); } #[test] fn test_apply_weights_shape_mismatch() { let loaded = vec![Tensor::zeros(&[3, 3])]; // wrong shape let mut target = vec![Tensor::zeros(&[2, 2])]; let mut target_refs: Vec<&mut Tensor> = target.iter_mut().collect(); assert!(apply_weights(&mut target_refs, &loaded).is_err()); } #[test] fn test_compute_delta() { let old = vec![ Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3]).unwrap(), Tensor::from_data(vec![0.0, 0.0], vec![2]).unwrap(), ]; let new = vec![ Tensor::from_data(vec![1.1, 2.0, 3.2], vec![3]).unwrap(), Tensor::from_data(vec![0.0, 0.0], vec![2]).unwrap(), // no change ]; let old_refs: 
Vec<&Tensor> = old.iter().collect(); let new_refs: Vec<&Tensor> = new.iter().collect(); let deltas = compute_delta(&old_refs, &new_refs, 0.05); assert_eq!(deltas.len(), 1); // Only first tensor changed assert_eq!(deltas[0].0, 0); // Index 0 assert!((deltas[0].1.data[0] - 0.1).abs() < 1e-5); assert!((deltas[0].1.data[2] - 0.2).abs() < 1e-5); } #[test] fn test_apply_delta() { let mut weights = vec![ Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3]).unwrap(), Tensor::from_data(vec![10.0, 20.0], vec![2]).unwrap(), ]; let delta = vec![ (0, Tensor::from_data(vec![0.1, 0.2, 0.3], vec![3]).unwrap()), ]; let mut refs: Vec<&mut Tensor> = weights.iter_mut().collect(); apply_delta(&mut refs, &delta).unwrap(); assert!((weights[0].data[0] - 1.1).abs() < 1e-5); assert!((weights[0].data[1] - 2.2).abs() < 1e-5); assert!((weights[0].data[2] - 3.3).abs() < 1e-5); // Second tensor unchanged assert_eq!(weights[1].data[0], 10.0); } #[test] fn test_checkpoint_size() { let weights = make_test_weights(); let refs: Vec<&Tensor> = weights.iter().collect(); let bytes = serialize_weights(&refs, "test", 0).unwrap(); // Header: 4(magic) + 4(ver) + 4(num) + 2(id_len) + 4(id) + 8(step) = 26 // Tensor 0: 4(ndim) + 8(shape) + 16(data) = 28 // Tensor 1: 4(ndim) + 4(shape) + 12(data) = 20 // Tensor 2: 4(ndim) + 8(shape) + 128(data) = 140 // Total: 26 + 28 + 20 + 140 = 214 assert_eq!(bytes.len(), 214); } #[test] fn test_large_model_id() { let weights = vec![Tensor::zeros(&[1])]; let refs: Vec<&Tensor> = weights.iter().collect(); let long_id = "x".repeat(70000); // Should fail — model ID > u16::MAX assert!(serialize_weights(&refs, &long_id, 0).is_err()); } }