// SPF Smart Gateway - Integration Test Suite (Block R)
// Copyright 2026 Joseph Stone - All Rights Reserved
//
// End-to-end tests validating cross-module interactions across ALL blocks.
// Place in: tests/integration.rs (Rust integration test convention)
//
// Tests cover:
// 1-5: Foundation + Model (Blocks A-E)
// 6-7: Mesh Streaming (Blocks F, G)
// 8-11: SPF Integration (Blocks I, J, K)
// 12: Learning Core (Blocks L, M); test 13 removed (see note below)
// 14-15: Pipeline + Worker (Blocks N, O)
// 16-17: Communication (Blocks P, Q)
// 18-20: Cross-block regression (framing + pipeline, framing + chat, config + model)
//
// Depends on: ALL previous blocks (A through Q)
// NOTE: As an external integration test (tests/integration.rs), imports would be:
// use spf_smart_gate::{tensor, tokenizer, attention, ffn, ...};
// For DEPLOY review this file keeps `crate::` paths, so it compiles as an
// in-crate `#[cfg(test)]` module instead.
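// With the tests/integration.rs layout, the suite runs via `cargo test --test integration`;
// as an in-crate module it runs with a plain `cargo test`.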
#[cfg(test)]
mod integration {
// ================================================================
// TEST 1: Tensor → Attention → FFN pipeline (Blocks A + C)
// Verifies tensor ops feed correctly through attention + FFN
// ================================================================
#[test]
fn test_tensor_attention_ffn_pipeline() {
use crate::tensor::Tensor;
use crate::attention::MultiHeadAttention;
use crate::ffn::FeedForwardNetwork;
let d_model = 64;
let n_heads = 4;
let seq_len = 8;
let _batch_size = 1; // [1, seq_len, d_model] reshaped as [seq_len, d_model]; underscore avoids an unused-variable warning
// Create input tensor [seq_len, d_model]
let input = Tensor::randn(&[seq_len, d_model]);
// Pass through attention
let attn = MultiHeadAttention::new(d_model, n_heads);
let attn_out = attn.forward(&input, &input, &input, None)
.expect("Attention forward failed");
assert_eq!(attn_out.shape(), &[seq_len, d_model]);
// Pass through FFN
let ffn = FeedForwardNetwork::new(d_model, d_model * 4);
let ffn_out = ffn.forward(&attn_out)
.expect("FFN forward failed");
assert_eq!(ffn_out.shape(), &[seq_len, d_model]);
// Output should contain finite values
let data = ffn_out.data();
assert!(data.iter().all(|v| v.is_finite()), "Output contains NaN/Inf");
}
// ================================================================
// TEST 2: Tokenizer round-trip with transformer vocab (Blocks B + E)
// Verifies tokenizer IDs are within transformer vocab range
// ================================================================
#[test]
fn test_tokenizer_vocab_transformer_compat() {
use crate::tokenizer::Tokenizer;
use crate::transformer::TransformerModelConfig;
let config = TransformerModelConfig {
vocab_size: 256, // Byte-level minimum
d_model: 32,
n_heads: 2,
n_layers: 1,
d_ff: 128,
max_seq_len: 64,
};
// Create minimal tokenizer (byte-level, no merges)
let tokenizer = Tokenizer::new();
let text = "Hello SPF";
let ids = tokenizer.encode(text);
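// Assuming the default Tokenizer is byte-level with no merges (as the 256-entry
// vocab above suggests), each UTF-8 byte of the input maps to one ID in 0..=255.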
// All IDs must be within transformer vocab
for id in &ids {
assert!(*id < config.vocab_size,
"Token ID {} exceeds vocab_size {}", id, config.vocab_size);
}
// Round-trip
let decoded = tokenizer.decode(&ids);
assert_eq!(decoded.trim(), text.trim(),
"Tokenizer round-trip failed: '{}' → {:?} → '{}'", text, ids, decoded);
}
// ================================================================
// TEST 3: Full transformer forward pass (Blocks A-E)
// End-to-end: input IDs → embeddings → encoder → decoder → logits
// ================================================================
#[test]
fn test_full_transformer_forward() {
use crate::transformer::{SPFTransformer, TransformerModelConfig};
let config = TransformerModelConfig {
vocab_size: 128,
d_model: 32,
n_heads: 2,
n_layers: 1,
d_ff: 128,
max_seq_len: 16,
};
let model = SPFTransformer::new(config.clone(), 42);
let input_ids = vec![1, 5, 10, 15, 20];
// Causal (decoder-only) forward
let logits = model.forward_causal(&input_ids);
// Should return logits of shape [seq_len, vocab_size]
assert_eq!(logits.shape()[0], input_ids.len());
assert_eq!(logits.shape()[1], config.vocab_size);
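// Standard decoder-only semantics: row i of the logits scores the token at position i + 1.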
// All logits should be finite
assert!(logits.data().iter().all(|v| v.is_finite()),
"Logits contain NaN/Inf");
}
// ================================================================
// TEST 4: Checkpoint save → load → verify identical output (Block E)
// ================================================================
#[test]
fn test_checkpoint_roundtrip_preserves_output() {
use crate::transformer::{SPFTransformer, TransformerModelConfig};
use crate::checkpoint;
let config = TransformerModelConfig {
vocab_size: 64,
d_model: 16,
n_heads: 2,
n_layers: 1,
d_ff: 64,
max_seq_len: 8,
};
let model = SPFTransformer::new(config.clone(), 42);
let input_ids = vec![1, 2, 3, 4];
// Forward pass before save
let logits_before = model.forward_causal(&input_ids);
// Serialize weights
let weights_refs = model.weights();
let data = checkpoint::serialize_weights(&weights_refs, "test", 0)
.expect("Serialize should succeed");
assert!(!data.is_empty(), "Checkpoint data should not be empty");
// Create fresh model and load weights
let mut model2 = SPFTransformer::new(config.clone(), 7); // different seed than model 1, so matching outputs prove the checkpoint actually loaded
let (checkpoint_weights, _meta) = checkpoint::deserialize_weights(&data)
.expect("Deserialize should succeed");
let mut model2_weights = model2.weights_mut();
checkpoint::apply_weights(&mut model2_weights, &checkpoint_weights)
.expect("Apply weights should succeed");
// Forward pass after load
let logits_after = model2.forward_causal(&input_ids);
// Outputs should be identical
let before_data = logits_before.data();
let after_data = logits_after.data();
assert_eq!(before_data.len(), after_data.len());
for (a, b) in before_data.iter().zip(after_data.iter()) {
assert!((a - b).abs() < 1e-6,
"Checkpoint load changed output: {} vs {}", a, b);
}
}
// ================================================================
// TEST 5: Autoregressive generation (Block E)
// ================================================================
#[test]
fn test_autoregressive_generation() {
use crate::transformer::{SPFTransformer, TransformerModelConfig};
let config = TransformerModelConfig {
vocab_size: 64,
d_model: 16,
n_heads: 2,
n_layers: 1,
d_ff: 64,
max_seq_len: 32,
};
let model = SPFTransformer::new(config.clone(), 42);
let prompt = vec![1, 2, 3];
let max_new = 10;
let output = model.generate(&prompt, max_new, 0.8, 42)
.expect("Generate should succeed");
// Should generate at least some tokens
assert!(output.len() > prompt.len(),
"Generate should produce more tokens than prompt");
assert!(output.len() <= prompt.len() + max_new,
"Generate should not exceed max_new");
// All generated IDs should be within vocab
for id in &output {
assert!(*id < config.vocab_size,
"Generated ID {} exceeds vocab {}", id, config.vocab_size);
}
}
// ================================================================
// TEST 6: Framing protocol round-trip (Block F)
// 100 messages encoded → decoded with correct types
// ================================================================
#[test]
fn test_framing_100_messages_roundtrip() {
use crate::framing::{Frame, StreamType};
let types = [
StreamType::ToolRpc,
StreamType::ChatText,
StreamType::VoiceAudio,
StreamType::PipelineTask,
StreamType::PipelineResult,
StreamType::BrainSync,
StreamType::WeightSync,
StreamType::Control,
];
for i in 0..100 {
let stream_type = types[i % types.len()];
let payload = format!("message_{:03}", i);
let frame = Frame::new(stream_type, payload.as_bytes().to_vec());
// Encode
let bytes = frame.to_bytes();
assert!(bytes.len() >= 5, "Frame must have at least a 5-byte header");
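// Assumed header layout (not confirmed here beyond the 5-byte minimum and the
// leading stream-type byte checked in TEST 7): 1-byte stream type + 4-byte
// payload length, followed by the payload.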
// Decode
let (parsed, consumed) = crate::framing::parse_frame(&bytes)
.expect("Should parse successfully")
.expect("Should have complete frame");
assert_eq!(consumed, bytes.len());
assert_eq!(parsed.stream_type, stream_type);
assert_eq!(parsed.payload_str().unwrap(), payload);
}
}
// ================================================================
// TEST 7: Stream type detection — legacy vs framed (Blocks F + G)
// ================================================================
#[test]
fn test_protocol_detection_bytes() {
// Legacy JSON-RPC starts with '{'
let legacy_first_byte = b'{';
assert_eq!(legacy_first_byte, 0x7B);
// Must NOT overlap with any StreamType
assert!(legacy_first_byte > 0x08,
"Legacy byte must not overlap with stream type range 0x01-0x08");
// Framed protocol stream types
use crate::framing::StreamType;
let framed_types = [
(StreamType::ToolRpc, 0x01u8),
(StreamType::ChatText, 0x02),
(StreamType::VoiceAudio, 0x03),
(StreamType::PipelineTask, 0x04),
(StreamType::PipelineResult, 0x05),
(StreamType::BrainSync, 0x06),
(StreamType::WeightSync, 0x07),
(StreamType::Control, 0x08),
];
for (st, expected_byte) in &framed_types {
let frame = crate::framing::Frame::new(*st, vec![0x00]);
let bytes = frame.to_bytes();
assert_eq!(bytes[0], *expected_byte,
"StreamType {:?} should encode as 0x{:02X}", st, expected_byte);
}
}
// ================================================================
// TEST 8: Source enum completeness (Block I)
// All 5 Source variants serialize/deserialize correctly
// ================================================================
#[test]
fn test_source_variants_complete() {
use crate::dispatch::Source;
let variants: Vec<Source> = vec![
Source::Stdio,
Source::Http,
Source::Mesh { peer_key: "abc123".into() },
Source::Transformer { role: "writer".into(), model_id: "v1".into() },
Source::Pipeline { stream_id: "s1".into(), peer_key: "pk1".into() },
];
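// Round-tripping every variant through JSON guards against accidental tag
// renames or dropped fields in the Source enum.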
for src in &variants {
let json = serde_json::to_string(src)
.expect("Source should serialize");
let _back: Source = serde_json::from_str(&json)
.expect("Source should deserialize");
}
// Verify we have exactly 5 variants
assert_eq!(variants.len(), 5, "Should have 5 Source variants");
}
// ================================================================
// TEST 9: TransformerConfig → TransformerState (Blocks I + K)
// Config creates valid state with correct dimensions
// ================================================================
#[test]
fn test_config_to_state_integration() {
use crate::config::TransformerConfig;
use crate::transformer_tools::TransformerState;
let config = TransformerConfig {
enabled: true,
d_model: 64,
n_heads: 4,
n_layers: 2,
vocab_size: 256,
max_seq_len: 32,
d_ff: 256,
..TransformerConfig::default()
};
let state = TransformerState::from_config(&config, "writer");
assert_eq!(state.role, "writer");
assert_eq!(state.training_step, 0);
assert!(!state.is_training);
// Verify model can run forward pass
let input_ids = vec![1, 2, 3];
let logits = state.model.forward_causal(&input_ids);
assert_eq!(logits.shape()[1], 256); // vocab_size
}
// ================================================================
// TEST 10: Transformer tools status reporting (Block K)
// ================================================================
#[test]
fn test_transformer_tools_status() {
use crate::config::TransformerConfig;
use crate::transformer_tools;
let config = TransformerConfig::default();
// Not loaded
let result = transformer_tools::handle_status(&None, &config);
let text = result["text"].as_str().unwrap();
assert!(text.contains("NOT LOADED"));
// Loaded
let state = transformer_tools::TransformerState::from_config(&config, "researcher");
let locked = std::sync::Arc::new(std::sync::RwLock::new(state));
let result = transformer_tools::handle_status(&Some(locked), &config);
let text = result["text"].as_str().unwrap();
assert!(text.contains("LOADED"));
assert!(text.contains("researcher"));
}
// ================================================================
// TEST 11: Gate training signal format (Block J)
// Verify TrainingSignal serializes correctly for LMDB storage
// ================================================================
#[test]
fn test_gate_training_signal_format() {
use crate::gate_training::{TrainingSignal, SignalBuffer};
let signal = TrainingSignal {
tool: "spf_read".to_string(),
allowed: true,
label: 1.0,
weight: 1.0,
complexity: 15,
duration_ms: 42,
timestamp: "2026-02-28T12:00:00Z".to_string(),
source_type: "stdio".to_string(),
};
// Should serialize for LMDB
let json = serde_json::to_string(&signal).unwrap();
let back: TrainingSignal = serde_json::from_str(&json).unwrap();
assert_eq!(back.tool, "spf_read");
assert!(back.allowed);
assert_eq!(back.complexity, 15);
// Signal buffer should accept it
let mut buffer = SignalBuffer::new(100);
buffer.add(signal);
assert_eq!(buffer.len(), 1);
}
// ================================================================
// TEST 12: Training batch → loss computation (Blocks J + L)
// Gate signals → training examples → loss value
// ================================================================
#[test]
fn test_training_batch_loss() {
use crate::tensor::Tensor;
use crate::train;
// Simulate logits and targets for binary gate decision
let batch_size = 4;
let logits = Tensor::randn(&[batch_size, 2]); // binary: allow/block
let targets = vec![1, 0, 1, 1]; // ground truth labels
// Create target tensor (one-hot)
let mut target_data = vec![0.0f32; batch_size * 2];
for (i, &t) in targets.iter().enumerate() {
target_data[i * 2 + t] = 1.0;
}
let target_tensor = Tensor::from_data(&[batch_size, 2], target_data);
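// With one-hot targets, cross-entropy reduces to -log(softmax(logits)[true_class]),
// which is finite and non-negative for finite logits: exactly what the asserts below check.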
let loss = train::cross_entropy_loss(&logits, &target_tensor);
assert!(loss.is_finite(), "Loss should be finite, got {}", loss);
assert!(loss >= 0.0, "Cross-entropy loss should be non-negative");
}
// FL-9: TEST 13 (LR scheduling) removed — LRScheduler deleted.
// ================================================================
// TEST 14: Pipeline batch → result collection (Block N)
// Submit batch → dispatch → collect results
// ================================================================
#[test]
fn test_pipeline_batch_lifecycle() {
use crate::pipeline::*;
let mut state = PipelineState::new();
// Submit a parallel batch of 3 tasks
let batch = PipelineBatch {
stream_id: "integration_test_stream".to_string(),
tasks: vec![
PipelineTask {
task_id: "it_1".into(),
stream_id: "integration_test_stream".into(),
tool: "spf_read".into(),
args: serde_json::json!({"file_path": "/test"}),
chain_next: None,
priority: 0,
timeout_ms: 5000,
pipe_field: None,
},
PipelineTask {
task_id: "it_2".into(),
stream_id: "integration_test_stream".into(),
tool: "spf_glob".into(),
args: serde_json::json!({"pattern": "*.rs"}),
chain_next: None,
priority: 0,
timeout_ms: 5000,
pipe_field: None,
},
PipelineTask {
task_id: "it_3".into(),
stream_id: "integration_test_stream".into(),
tool: "spf_grep".into(),
args: serde_json::json!({"pattern": "test"}),
chain_next: None,
priority: 0,
timeout_ms: 5000,
pipe_field: None,
},
],
mode: BatchMode::Parallel,
submitted_by: "integration_test".into(),
submitted_at: "2026-02-28T12:00:00Z".into(),
};
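// BatchMode::Parallel is assumed to make every task dispatchable immediately;
// a chained mode would presumably gate dispatch on chain_next instead.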
let sid = state.submit_batch(batch).expect("Submit should succeed");
assert_eq!(sid, "integration_test_stream");
assert!(!state.is_stream_complete(&sid));
// Dispatch all tasks
let mut dispatched = Vec::new();
while let Some(task) = state.next_task(&sid) {
dispatched.push(task);
}
assert_eq!(dispatched.len(), 3);
// Record results
for task in &dispatched {
state.record_result(PipelineResult {
task_id: task.task_id.clone(),
stream_id: sid.clone(),
status: PipelineStatus::Ok,
result: serde_json::json!({"output": "done"}),
error: None,
duration_ms: 10,
executed_by: "test_worker".into(),
});
}
assert!(state.is_stream_complete(&sid));
assert_eq!(state.total_completed, 3);
assert_eq!(state.total_failed, 0);
}
// ================================================================
// TEST 15: Worker init with config (Block O)
// ================================================================
#[test]
fn test_worker_init_from_config() {
use crate::config::TransformerConfig;
use crate::worker::{WorkerConfig, WorkerState, init_transformer};
let tc = TransformerConfig {
enabled: true,
d_model: 32,
n_heads: 2,
n_layers: 1,
vocab_size: 64,
max_seq_len: 16,
d_ff: 128,
..TransformerConfig::default()
};
let wc = WorkerConfig {
role: "writer".into(),
..WorkerConfig::default()
};
// Should succeed (no checkpoint = random weights)
let state = init_transformer(&tc, &wc).expect("Init should succeed");
assert_eq!(state.role, "writer");
// Worker state should track pipeline
let ws = WorkerState::new(wc, tc);
let status = ws.status_json();
assert_eq!(status["mode"], "worker");
assert_eq!(status["role"], "writer");
}
// ================================================================
// TEST 16: Chat engine multi-turn with context (Block P)
// ================================================================
#[test]
fn test_chat_multi_turn_context() {
use crate::chat::{ChatEngine, ChatMessage, MessageType};
let mut engine = ChatEngine::new("agent_001".into());
// Simulate multi-turn conversation
let messages = vec![
("user", "What tools does SPF have?"),
("agent_001", "SPF has 55+ tools across 6 categories."),
("user", "Tell me about the gate."),
("agent_001", "The gate enforces security with default-deny."),
("user", "How does training work?"),
];
for (i, (from, text)) in messages.iter().enumerate() {
let msg = ChatMessage {
id: format!("msg_{}", i), // index-based IDs stay unique (two prompts above share the same length)
from: from.to_string(),
to: if *from == "user" { "agent_001" } else { "user" }.to_string(),
text: text.to_string(),
timestamp: "2026-02-28T12:00:00Z".into(),
conversation_id: "conv_integration".into(),
msg_type: if *from == "user" { MessageType::UserText } else { MessageType::AgentResponse },
};
engine.receive_message(msg);
}
assert_eq!(engine.conversation_count(), 1);
assert_eq!(engine.total_messages(), 5);
// Context should contain recent messages
let ctx = engine.get_context("conv_integration").unwrap();
assert!(ctx.contains("How does training work?"));
assert!(ctx.contains("default-deny"));
// History limit
let last_2 = engine.get_history("conv_integration", 2);
assert_eq!(last_2.len(), 2);
assert_eq!(last_2[1].text, "How does training work?");
}
// ================================================================
// TEST 17: Voice stubs all return not-available (Block Q)
// ================================================================
#[test]
fn test_voice_stubs_all_unavailable() {
use crate::voice::*;
let mut input = StubAudioInput::new();
assert!(!input.is_available());
assert!(input.read_frame().is_err());
let mut output = StubAudioOutput::new();
assert!(!output.is_available());
assert!(output.write_frame(&[0; 640]).is_err());
let mut stt = StubSTT;
assert!(!stt.is_available());
assert!(stt.transcribe(&[]).is_err());
assert!(stt.supported_languages().is_empty());
let mut tts = StubTTS;
assert!(!tts.is_available());
assert!(tts.synthesize("hello").is_err());
assert!(tts.available_voices().is_empty());
// Voice status should report all unavailable
let status = VoiceStatus::from_stubs();
assert!(!status.audio_input_available);
assert!(!status.stt_available);
assert!(!status.tts_available);
assert_eq!(status.codec, "opus");
}
// ================================================================
// TEST 18: Pipeline frame encoding ↔ mesh framing (Blocks F + N)
// Pipeline tasks survive frame encode → mesh transport → decode
// ================================================================
#[test]
fn test_pipeline_mesh_frame_integration() {
use crate::pipeline::{PipelineTask, PipelineResult, PipelineStatus};
use crate::framing::StreamType;
// Encode task as pipeline frame
let task = PipelineTask {
task_id: "mesh_t1".into(),
stream_id: "mesh_stream".into(),
tool: "spf_brain_search".into(),
args: serde_json::json!({"query": "transformer architecture"}),
chain_next: None,
priority: 1,
timeout_ms: 10000,
pipe_field: None,
};
let frame = crate::pipeline::task_to_frame(&task);
assert_eq!(frame.stream_type, StreamType::PipelineTask);
// Simulate mesh transport: encode → bytes → decode
let wire_bytes = frame.to_bytes();
let (received, _) = crate::framing::parse_frame(&wire_bytes)
.unwrap().unwrap();
assert_eq!(received.stream_type, StreamType::PipelineTask);
// Decode task from received frame
let decoded_task = crate::pipeline::task_from_frame(&received).unwrap();
assert_eq!(decoded_task.task_id, "mesh_t1");
assert_eq!(decoded_task.tool, "spf_brain_search");
// Same for results
let result = PipelineResult {
task_id: "mesh_t1".into(),
stream_id: "mesh_stream".into(),
status: PipelineStatus::Ok,
result: serde_json::json!({"matches": 5}),
error: None,
duration_ms: 42,
executed_by: "worker_1".into(),
};
let result_frame = crate::pipeline::result_to_frame(&result);
assert_eq!(result_frame.stream_type, StreamType::PipelineResult);
let result_bytes = result_frame.to_bytes();
let (recv_result, _) = crate::framing::parse_frame(&result_bytes)
.unwrap().unwrap();
let decoded_result = crate::pipeline::result_from_frame(&recv_result).unwrap();
assert_eq!(decoded_result.status, PipelineStatus::Ok);
assert_eq!(decoded_result.executed_by, "worker_1");
}
// ================================================================
// TEST 19: Chat ↔ mesh frame integration (Blocks F + P)
// Chat messages survive frame encode → transport → decode
// ================================================================
#[test]
fn test_chat_mesh_frame_integration() {
use crate::chat::{ChatMessage, MessageType};
use crate::framing::{Frame, StreamType};
let msg = ChatMessage {
id: "chat_mesh_1".into(),
from: "peer_a".into(),
to: "peer_b".into(),
text: "Hello through the mesh!".into(),
timestamp: "2026-02-28T12:00:00Z".into(),
conversation_id: "mesh_conv_1".into(),
msg_type: MessageType::UserText,
};
// Encode as chat frame
let payload = crate::chat::message_to_bytes(&msg).unwrap();
let frame = Frame::new(StreamType::ChatText, payload);
assert_eq!(frame.stream_type, StreamType::ChatText);
// Wire transport simulation
let bytes = frame.to_bytes();
let (received, _) = crate::framing::parse_frame(&bytes)
.unwrap().unwrap();
// Decode
let decoded = crate::chat::message_from_bytes(&received.payload).unwrap();
assert_eq!(decoded.from, "peer_a");
assert_eq!(decoded.text, "Hello through the mesh!");
assert_eq!(decoded.conversation_id, "mesh_conv_1");
}
// ================================================================
// TEST 20: TransformerConfig param estimates match model (Blocks I + E)
// Config estimated_params() should roughly match actual model
// ================================================================
#[test]
fn test_config_param_estimate_reasonable() {
use crate::config::TransformerConfig;
// Default SPF config
let config = TransformerConfig::default();
let estimated = config.estimated_params();
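// Rough transformer parameter count, for orientation (an assumption about what
// estimated_params() approximates): vocab_size * d_model for the embedding table,
// plus per layer ~4 * d_model^2 for attention projections and ~2 * d_model * d_ff for the FFN.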
// Should be in reasonable range for d_model=256, n_layers=6
assert!(estimated > 1_000_000, "Should be >1M params, got {}", estimated);
assert!(estimated < 20_000_000, "Should be <20M params, got {}", estimated);
// Memory should be manageable on mobile
let mem_mb = config.estimated_memory_bytes() / 1_000_000;
assert!(mem_mb < 500, "Memory should be <500MB, got {}MB", mem_mb);
// Small config should have fewer params
let small = TransformerConfig {
d_model: 32,
n_heads: 2,
n_layers: 1,
vocab_size: 64,
d_ff: 128,
..TransformerConfig::default()
};
let small_params = small.estimated_params();
assert!(small_params < estimated,
"Small config ({}) should have fewer params than default ({})",
small_params, estimated);
}
}