| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| use crate::tensor::Tensor; |
| use std::io::{self, Read, Write, Cursor}; |
|
|
| |
| |
| |
|
|
| |
/// Magic bytes identifying a serialized checkpoint ("SPFC").
const CHECKPOINT_MAGIC: &[u8; 4] = b"SPFC";

/// Current on-wire checkpoint format version; bump on incompatible layout changes.
const CHECKPOINT_VERSION: u32 = 1;

/// Metadata describing a serialized checkpoint, as recovered by `deserialize_weights`.
#[derive(Debug, Clone)]
pub struct CheckpointMeta {
    // Format version read from the header (currently always 1).
    pub format_version: u32,
    // Number of tensors in the checkpoint body.
    pub num_tensors: u32,
    // Total size of the raw tensor data in bytes (4 bytes per f32 element).
    pub total_bytes: u64,
    // Not part of the wire format: `deserialize_weights` always leaves this empty.
    pub timestamp: String,
    // Identifier of the model the checkpoint belongs to.
    pub model_id: String,
    // Training step at which the checkpoint was taken.
    pub step: u64,
}
|
|
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| pub fn serialize_weights( |
| weights: &[&Tensor], |
| model_id: &str, |
| step: u64, |
| ) -> Result<Vec<u8>, io::Error> { |
| let mut buf: Vec<u8> = Vec::new(); |
|
|
| |
| buf.write_all(CHECKPOINT_MAGIC)?; |
| buf.write_all(&CHECKPOINT_VERSION.to_be_bytes())?; |
| buf.write_all(&(weights.len() as u32).to_be_bytes())?; |
|
|
| |
| let id_bytes = model_id.as_bytes(); |
| if id_bytes.len() > u16::MAX as usize { |
| return Err(io::Error::new(io::ErrorKind::InvalidInput, "Model ID too long")); |
| } |
| buf.write_all(&(id_bytes.len() as u16).to_be_bytes())?; |
| buf.write_all(id_bytes)?; |
|
|
| |
| buf.write_all(&step.to_be_bytes())?; |
|
|
| |
| for tensor in weights { |
| |
| buf.write_all(&(tensor.ndim() as u32).to_be_bytes())?; |
|
|
| |
| for &dim in &tensor.shape { |
| buf.write_all(&(dim as u32).to_be_bytes())?; |
| } |
|
|
| |
| for &val in &tensor.data { |
| buf.write_all(&val.to_le_bytes())?; |
| } |
| } |
|
|
| Ok(buf) |
| } |
|
|
| |
| |
| pub fn deserialize_weights(data: &[u8]) -> Result<(Vec<Tensor>, CheckpointMeta), io::Error> { |
| let mut cursor = Cursor::new(data); |
|
|
| |
| let mut magic = [0u8; 4]; |
| cursor.read_exact(&mut magic)?; |
| if &magic != CHECKPOINT_MAGIC { |
| return Err(io::Error::new( |
| io::ErrorKind::InvalidData, |
| format!("Invalid checkpoint magic: {:?}", magic), |
| )); |
| } |
|
|
| |
| let mut ver_buf = [0u8; 4]; |
| cursor.read_exact(&mut ver_buf)?; |
| let version = u32::from_be_bytes(ver_buf); |
| if version != CHECKPOINT_VERSION { |
| return Err(io::Error::new( |
| io::ErrorKind::InvalidData, |
| format!("Unsupported checkpoint version: {} (expected {})", version, CHECKPOINT_VERSION), |
| )); |
| } |
|
|
| |
| let mut nt_buf = [0u8; 4]; |
| cursor.read_exact(&mut nt_buf)?; |
| let num_tensors = u32::from_be_bytes(nt_buf); |
|
|
| |
| let mut id_len_buf = [0u8; 2]; |
| cursor.read_exact(&mut id_len_buf)?; |
| let id_len = u16::from_be_bytes(id_len_buf) as usize; |
| let mut id_buf = vec![0u8; id_len]; |
| cursor.read_exact(&mut id_buf)?; |
| let model_id = String::from_utf8(id_buf) |
| .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; |
|
|
| |
| let mut step_buf = [0u8; 8]; |
| cursor.read_exact(&mut step_buf)?; |
| let step = u64::from_be_bytes(step_buf); |
|
|
| |
| let mut tensors = Vec::with_capacity(num_tensors as usize); |
| let mut total_bytes: u64 = 0; |
|
|
| for _ in 0..num_tensors { |
| |
| let mut ndim_buf = [0u8; 4]; |
| cursor.read_exact(&mut ndim_buf)?; |
| let ndim = u32::from_be_bytes(ndim_buf) as usize; |
|
|
| |
| let mut shape = Vec::with_capacity(ndim); |
| for _ in 0..ndim { |
| let mut dim_buf = [0u8; 4]; |
| cursor.read_exact(&mut dim_buf)?; |
| shape.push(u32::from_be_bytes(dim_buf) as usize); |
| } |
|
|
| |
| let numel: usize = shape.iter().product(); |
| let mut data = Vec::with_capacity(numel); |
| for _ in 0..numel { |
| let mut f_buf = [0u8; 4]; |
| cursor.read_exact(&mut f_buf)?; |
| data.push(f32::from_le_bytes(f_buf)); |
| } |
|
|
| total_bytes += (numel * 4) as u64; |
| tensors.push(Tensor { data, shape }); |
| } |
|
|
| let meta = CheckpointMeta { |
| format_version: version, |
| num_tensors, |
| total_bytes, |
| timestamp: String::new(), |
| model_id, |
| step, |
| }; |
|
|
| Ok((tensors, meta)) |
| } |
|
|
| |
| |
| |
|
|
| |
| |
| pub fn apply_weights( |
| model_weights: &mut [&mut Tensor], |
| checkpoint_weights: &[Tensor], |
| ) -> Result<(), String> { |
| if model_weights.len() != checkpoint_weights.len() { |
| return Err(format!( |
| "Weight count mismatch: model has {}, checkpoint has {}", |
| model_weights.len(), checkpoint_weights.len() |
| )); |
| } |
|
|
| for (i, (model_w, ckpt_w)) in model_weights.iter_mut().zip(checkpoint_weights.iter()).enumerate() { |
| if model_w.shape != ckpt_w.shape { |
| return Err(format!( |
| "Shape mismatch at weight {}: model {:?}, checkpoint {:?}", |
| i, model_w.shape, ckpt_w.shape |
| )); |
| } |
| model_w.data.copy_from_slice(&ckpt_w.data); |
| } |
|
|
| Ok(()) |
| } |
|
|
| |
| |
| |
|
|
| |
| |
| |
| pub fn compute_delta( |
| old_weights: &[&Tensor], |
| new_weights: &[&Tensor], |
| threshold: f32, |
| ) -> Vec<(usize, Tensor)> { |
| let mut deltas = Vec::new(); |
|
|
| for (i, (old, new)) in old_weights.iter().zip(new_weights.iter()).enumerate() { |
| if old.shape != new.shape { |
| continue; |
| } |
|
|
| let diff: Vec<f32> = old.data.iter() |
| .zip(&new.data) |
| .map(|(&a, &b)| b - a) |
| .collect(); |
|
|
| |
| let has_change = diff.iter().any(|&d| d.abs() > threshold); |
| if has_change { |
| deltas.push((i, Tensor { |
| data: diff, |
| shape: old.shape.clone(), |
| })); |
| } |
| } |
|
|
| deltas |
| } |
|
|
| |
| |
| pub fn apply_delta( |
| weights: &mut [&mut Tensor], |
| deltas: &[(usize, Tensor)], |
| ) -> Result<(), String> { |
| for (idx, delta) in deltas { |
| if *idx >= weights.len() { |
| return Err(format!("Delta index {} exceeds weight count {}", idx, weights.len())); |
| } |
| if weights[*idx].shape != delta.shape { |
| return Err(format!( |
| "Delta shape mismatch at {}: weight {:?}, delta {:?}", |
| idx, weights[*idx].shape, delta.shape |
| )); |
| } |
| for (w, &d) in weights[*idx].data.iter_mut().zip(&delta.data) { |
| *w += d; |
| } |
| } |
| Ok(()) |
| } |
|
|
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
/// Handles an incoming weight-sync frame from a peer and returns a JSON ack frame.
///
/// A payload is considered well-formed when it is at least 4 bytes long and
/// starts with `CHECKPOINT_MAGIC`. When a transformer state is available, the
/// payload is deserialized and applied to the model's weights under a write
/// lock; otherwise the frame is only acknowledged.
///
/// The ack's `status` is "applied" (weights updated), "accepted" (valid format
/// but not applied), or "rejected" (bad format); `error` carries the first
/// failure (deserialize, lock, or apply) when one occurred.
pub fn handle_weight_sync(
    frame: &crate::framing::Frame,
    peer_key: &str,
    transformer: &Option<std::sync::Arc<std::sync::RwLock<crate::transformer_tools::TransformerState>>>,
) -> Option<crate::framing::Frame> {
    let payload_len = frame.payload.len();
    // Cheap pre-check before attempting a full parse.
    let valid_format = payload_len >= 4 && &frame.payload[..4] == CHECKPOINT_MAGIC;

    // Peer key is truncated to 8 bytes for log readability.
    // NOTE(review): `&peer_key[..8]` slices by byte offset and panics if the
    // key contains a multi-byte UTF-8 char spanning index 8 — confirm keys are ASCII.
    eprintln!("[SPF-WEIGHT-SYNC] Received from {}: {} bytes, valid_format={}",
        &peer_key[..8.min(peer_key.len())], payload_len, valid_format);

    // Attempt to apply the checkpoint; any failure is captured for the ack
    // rather than propagated.
    let mut applied = false;
    let mut apply_error: Option<String> = None;
    if valid_format {
        if let Some(ref t) = transformer {
            match deserialize_weights(&frame.payload) {
                Ok((checkpoint_weights, meta)) => {
                    // Write lock held for the duration of the weight copy.
                    match t.write() {
                        Ok(mut state) => {
                            let mut model_weights = state.model.weights_mut();
                            match apply_weights(&mut model_weights, &checkpoint_weights) {
                                Ok(()) => {
                                    applied = true;
                                    eprintln!("[SPF-WEIGHT-SYNC] Applied from {}: model={}, step={}",
                                        &peer_key[..8.min(peer_key.len())],
                                        meta.model_id, meta.step);
                                }
                                Err(e) => {
                                    eprintln!("[SPF-WEIGHT-SYNC] Apply failed: {}", e);
                                    apply_error = Some(e);
                                }
                            }
                        }
                        // A poisoned lock is reported in the ack, not unwrapped.
                        Err(e) => { apply_error = Some(format!("Lock: {}", e)); }
                    }
                }
                Err(e) => { apply_error = Some(format!("Deserialize: {}", e)); }
            }
        }
    }

    // Always respond with an ack describing the outcome.
    let ack = serde_json::json!({
        "type": "weight_sync_ack",
        "bytes_received": payload_len,
        "valid_format": valid_format,
        "applied": applied,
        "error": apply_error,
        "from": peer_key,
        "status": if applied { "applied" } else if valid_format { "accepted" } else { "rejected" }
    });
    Some(crate::framing::Frame::new(
        crate::framing::StreamType::WeightSync,
        ack.to_string().into_bytes(),
    ))
}
|
|
| |
| |
| |
|
|
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds a small mixed set of tensors (2x2, length-3 vector, seeded 4x8
    /// random) shared by the round-trip and sizing tests.
    fn make_test_weights() -> Vec<Tensor> {
        vec![
            Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0], vec![2, 2]).unwrap(),
            Tensor::from_data(vec![0.5, -0.5, 1.5], vec![3]).unwrap(),
            Tensor::randn(&[4, 8], 42),
        ]
    }

    // Serialize then deserialize must preserve metadata, shapes, and values.
    #[test]
    fn test_serialize_deserialize_roundtrip() {
        let weights = make_test_weights();
        let refs: Vec<&Tensor> = weights.iter().collect();

        let bytes = serialize_weights(&refs, "test_model", 100).unwrap();
        let (loaded, meta) = deserialize_weights(&bytes).unwrap();

        assert_eq!(meta.format_version, CHECKPOINT_VERSION);
        assert_eq!(meta.num_tensors, 3);
        assert_eq!(meta.model_id, "test_model");
        assert_eq!(meta.step, 100);

        // f32 values survive a byte-level round trip exactly; the tolerance
        // is only defensive.
        assert_eq!(loaded.len(), weights.len());
        for (orig, load) in weights.iter().zip(&loaded) {
            assert_eq!(orig.shape, load.shape);
            for (a, b) in orig.data.iter().zip(&load.data) {
                assert!((a - b).abs() < 1e-7, "Data mismatch: {} vs {}", a, b);
            }
        }
    }

    // A payload with the wrong magic must be rejected up front.
    #[test]
    fn test_invalid_magic() {
        let data = b"XXXX\x00\x00\x00\x01\x00\x00\x00\x00";
        let result = deserialize_weights(data);
        assert!(result.is_err());
    }

    // Deserialized weights copied into zeroed targets must match the source.
    #[test]
    fn test_apply_weights() {
        let weights = make_test_weights();
        let refs: Vec<&Tensor> = weights.iter().collect();
        let bytes = serialize_weights(&refs, "test", 0).unwrap();
        let (loaded, _) = deserialize_weights(&bytes).unwrap();

        // Targets mirror the shapes produced by make_test_weights().
        let mut target = vec![
            Tensor::zeros(&[2, 2]),
            Tensor::zeros(&[3]),
            Tensor::zeros(&[4, 8]),
        ];
        let mut target_refs: Vec<&mut Tensor> = target.iter_mut().collect();
        apply_weights(&mut target_refs, &loaded).unwrap();

        assert_eq!(target[0].data, weights[0].data);
        assert_eq!(target[1].data, weights[1].data);
    }

    // Mismatched shapes must produce an error, not a partial/incorrect copy.
    #[test]
    fn test_apply_weights_shape_mismatch() {
        let loaded = vec![Tensor::zeros(&[3, 3])];
        let mut target = vec![Tensor::zeros(&[2, 2])];
        let mut target_refs: Vec<&mut Tensor> = target.iter_mut().collect();
        assert!(apply_weights(&mut target_refs, &loaded).is_err());
    }

    // Only the first tensor changes beyond the threshold, so exactly one
    // delta (at index 0) should be emitted.
    #[test]
    fn test_compute_delta() {
        let old = vec![
            Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3]).unwrap(),
            Tensor::from_data(vec![0.0, 0.0], vec![2]).unwrap(),
        ];
        let new = vec![
            Tensor::from_data(vec![1.1, 2.0, 3.2], vec![3]).unwrap(),
            Tensor::from_data(vec![0.0, 0.0], vec![2]).unwrap(),
        ];
        let old_refs: Vec<&Tensor> = old.iter().collect();
        let new_refs: Vec<&Tensor> = new.iter().collect();

        let deltas = compute_delta(&old_refs, &new_refs, 0.05);
        assert_eq!(deltas.len(), 1);
        assert_eq!(deltas[0].0, 0);
        assert!((deltas[0].1.data[0] - 0.1).abs() < 1e-5);
        assert!((deltas[0].1.data[2] - 0.2).abs() < 1e-5);
    }

    // Applying a delta at index 0 must update that tensor element-wise and
    // leave the untouched tensor (index 1) unchanged.
    #[test]
    fn test_apply_delta() {
        let mut weights = vec![
            Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3]).unwrap(),
            Tensor::from_data(vec![10.0, 20.0], vec![2]).unwrap(),
        ];
        let delta = vec![
            (0, Tensor::from_data(vec![0.1, 0.2, 0.3], vec![3]).unwrap()),
        ];

        let mut refs: Vec<&mut Tensor> = weights.iter_mut().collect();
        apply_delta(&mut refs, &delta).unwrap();

        assert!((weights[0].data[0] - 1.1).abs() < 1e-5);
        assert!((weights[0].data[1] - 2.2).abs() < 1e-5);
        assert!((weights[0].data[2] - 3.3).abs() < 1e-5);
        // Second tensor had no delta and must be untouched.
        assert_eq!(weights[1].data[0], 10.0);
    }

    // Pins the exact wire size so format changes are caught:
    //   header: 4 magic + 4 version + 4 count + 2 id_len + 4 id("test") + 8 step = 26
    //   tensor [2,2]: 4 ndim + 8 dims + 16 data = 28
    //   tensor [3]:   4 ndim + 4 dims + 12 data = 20
    //   tensor [4,8]: 4 ndim + 8 dims + 128 data = 140
    //   total: 26 + 28 + 20 + 140 = 214
    #[test]
    fn test_checkpoint_size() {
        let weights = make_test_weights();
        let refs: Vec<&Tensor> = weights.iter().collect();
        let bytes = serialize_weights(&refs, "test", 0).unwrap();

        assert_eq!(bytes.len(), 214);
    }

    // A model ID longer than the u16 length prefix can express must error.
    #[test]
    fn test_large_model_id() {
        let weights = vec![Tensor::zeros(&[1])];
        let refs: Vec<&Tensor> = weights.iter().collect();
        let long_id = "x".repeat(70000);
        assert!(serialize_weights(&refs, &long_id, 0).is_err());
    }
}
|
|