// SPF Smart Gateway - Worker Mode
// Copyright 2026 Joseph Stone - All Rights Reserved
//
// BLOCK O — Headless worker node that runs transformer inference
// without a Claude API session. Accepts tasks via mesh pipeline,
// executes them locally, streams results back.
//
// Worker boot:
//   1. Load config (same as normal serve mode)
//   2. Load transformer checkpoint
//   3. Start mesh (accept pipeline streams)
//   4. Start HTTP (monitoring endpoints only)
//   5. Loop: receive task → transformer decides tool → execute → return result
//   6. Training signals captured by GateTrainingCollector listener
//
// CLI: spf-smart-gate worker [--role writer|researcher] [--checkpoint path]
//
// Depends on: Block E (transformer), Block I (config), Block J (gate training),
//             Block K (transformer tools), Block N (pipeline)

use serde_json::json;
use std::sync::{Arc, Mutex, RwLock};

use crate::config::TransformerConfig;
use crate::pipeline::PipelineState;
use crate::transformer_tools::TransformerState;

// ============================================================================
// WORKER CONFIGURATION
// ============================================================================

/// Worker-specific configuration (supplements TransformerConfig)
#[derive(Debug, Clone)]
pub struct WorkerConfig {
    /// Model role: "writer" or "researcher"
    pub role: String,
    /// Override checkpoint path (if different from config default)
    pub checkpoint_override: Option<String>,
    /// Max concurrent pipeline tasks
    pub max_concurrent: usize,
    /// Heartbeat interval in seconds (report status to orchestrator)
    pub heartbeat_interval_secs: u64,
    /// Auto-checkpoint interval in training steps
    pub auto_checkpoint_steps: u64,
    /// Enable training while serving (online learning)
    pub train_while_serving: bool,
}

impl Default for WorkerConfig {
    fn default() -> Self {
        Self {
            role: "writer".to_string(),
            checkpoint_override: None,
            max_concurrent: 4,
            heartbeat_interval_secs: 30,
            auto_checkpoint_steps: 1000,
            train_while_serving: true,
        }
    }
}

// ============================================================================
// WORKER STATE
// ============================================================================

/// Runtime state for the worker process
pub struct WorkerState {
    /// Worker configuration
    pub config: WorkerConfig,
    /// Transformer configuration
    pub transformer_config: TransformerConfig,
    /// Pipeline state for task management
    pub pipeline: Arc<Mutex<PipelineState>>,
    /// Tasks processed since boot
    pub tasks_processed: u64,
    /// Tasks failed since boot
    pub tasks_failed: u64,
    /// Worker uptime start
    pub started_at: std::time::Instant,
    /// Whether worker is accepting tasks
    pub accepting: bool,
}

impl WorkerState {
    pub fn new(config: WorkerConfig, transformer_config: TransformerConfig) -> Self {
        Self {
            config,
            transformer_config,
            pipeline: Arc::new(Mutex::new(PipelineState::new())),
            tasks_processed: 0,
            tasks_failed: 0,
            started_at: std::time::Instant::now(),
            accepting: true,
        }
    }

    /// Get worker status as JSON for monitoring/heartbeat
    pub fn status_json(&self) -> serde_json::Value {
        let uptime = self.started_at.elapsed().as_secs();
        let pipeline_status = self.pipeline.lock().unwrap().status_summary();
        json!({
            "mode": "worker",
            "role": self.config.role,
            "accepting": self.accepting,
            "uptime_secs": uptime,
            "tasks_processed": self.tasks_processed,
            "tasks_failed": self.tasks_failed,
            "max_concurrent": self.config.max_concurrent,
            "train_while_serving": self.config.train_while_serving,
            "pipeline": pipeline_status,
        })
    }
}
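// Example (sketch): one heartbeat tick built only on status_json(). The
// orchestrator transport is not defined in this block, so this hypothetical
// helper just logs to stderr; a real worker would ship the same JSON over the
// mesh every `heartbeat_interval_secs`.
#[allow(dead_code)]
fn emit_heartbeat_sketch(state: &WorkerState) {
    eprintln!("[SPF-WORKER] heartbeat ({}s interval): {}",
        state.config.heartbeat_interval_secs, state.status_json());
}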
// ============================================================================
// WORKER BOOT SEQUENCE
// ============================================================================

/// Initialize transformer state for worker mode.
/// Loads checkpoint if available, otherwise creates fresh model.
pub fn init_transformer(
    transformer_config: &TransformerConfig,
    worker_config: &WorkerConfig,
) -> Result<TransformerState, String> {
    if !transformer_config.enabled {
        return Err("Transformer not enabled in config. Set enabled=true in transformer.json".to_string());
    }

    // Determine checkpoint path: explicit override wins, otherwise the
    // role-specific checkpoint name from config, resolved under LIVE/MODELS
    let checkpoint_path = worker_config.checkpoint_override.clone()
        .unwrap_or_else(|| {
            let checkpoint_name = match worker_config.role.as_str() {
                "researcher" => &transformer_config.researcher_checkpoint,
                _ => &transformer_config.writer_checkpoint,
            };
            let models_dir = crate::paths::spf_root().join("LIVE/MODELS");
            models_dir.join(checkpoint_name).to_string_lossy().to_string()
        });

    // Create transformer state
    let mut state = TransformerState::from_config(transformer_config, &worker_config.role);

    // Try to load checkpoint
    let checkpoint_file = std::path::Path::new(&checkpoint_path);
    if checkpoint_file.exists() {
        match std::fs::read(checkpoint_file) {
            Ok(data) => {
                match crate::checkpoint::deserialize_weights(&data) {
                    Ok((checkpoint_weights, meta)) => {
                        let mut model_weights = state.model.weights_mut();
                        if let Err(e) = crate::checkpoint::apply_weights(&mut model_weights, &checkpoint_weights) {
                            eprintln!("[SPF-WORKER] WARNING: Failed to apply checkpoint weights: {}. Starting fresh.", e);
                        } else {
                            eprintln!("[SPF-WORKER] Checkpoint meta: model={}, step={}", meta.model_id, meta.step);
                        }
                        state.last_checkpoint = checkpoint_path.clone();
                        eprintln!("[SPF-WORKER] Loaded checkpoint: {} ({} bytes)", checkpoint_path, data.len());
                    }
                    Err(e) => {
                        eprintln!("[SPF-WORKER] WARNING: Failed to deserialize checkpoint: {}. Starting fresh.", e);
                    }
                }
            }
            Err(e) => {
                eprintln!("[SPF-WORKER] WARNING: Failed to read checkpoint {}: {}. Starting fresh.", checkpoint_path, e);
            }
        }
    } else {
        eprintln!("[SPF-WORKER] No checkpoint at {}. Starting with random weights.", checkpoint_path);
    }

    Ok(state)
}

/// Save a checkpoint of current transformer weights.
pub fn save_checkpoint(
    state: &TransformerState,
    transformer_config: &TransformerConfig,
    worker_config: &WorkerConfig,
) -> Result<String, String> {
    let models_dir = crate::paths::spf_root().join("LIVE/MODELS");
    std::fs::create_dir_all(&models_dir)
        .map_err(|e| format!("Failed to create MODELS dir: {}", e))?;

    let checkpoint_name = match worker_config.role.as_str() {
        "researcher" => &transformer_config.researcher_checkpoint,
        _ => &transformer_config.writer_checkpoint,
    };
    let path = models_dir.join(checkpoint_name);

    let weights_refs = state.model.weights();
    let data = crate::checkpoint::serialize_weights(
        &weights_refs,
        &worker_config.role,
        state.training_step,
    ).map_err(|e| format!("Serialize failed: {}", e))?;

    std::fs::write(&path, &data)
        .map_err(|e| format!("Failed to write checkpoint: {}", e))?;

    let path_str = path.to_string_lossy().to_string();
    eprintln!("[SPF-WORKER] Checkpoint saved: {} ({} bytes, step {})",
        path_str, data.len(), state.training_step);
    Ok(path_str)
}
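// Example (sketch): checkpoint round-trip wiring. Assumes a TransformerConfig
// loaded from a real transformer.json with enabled=true; the role here is
// illustrative and `checkpoint_roundtrip_sketch` is a hypothetical helper,
// not part of the worker API.
#[allow(dead_code)]
fn checkpoint_roundtrip_sketch(tc: &TransformerConfig) -> Result<String, String> {
    let wc = WorkerConfig { role: "researcher".to_string(), ..Default::default() };
    // Load the existing researcher checkpoint, or start with random weights
    let state = init_transformer(tc, &wc)?;
    // Persist current weights back to LIVE/MODELS under the configured name
    save_checkpoint(&state, tc, &wc)
}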
// ============================================================================
// WORKER LOOP — process pipeline tasks
// ============================================================================

/// Process a single pipeline task using the transformer.
/// The transformer decides which tool to call based on the task,
/// then dispatch::call() executes it through the normal gate pipeline.
pub fn process_task(
    task: &crate::pipeline::PipelineTask,
    _transformer: &Arc<RwLock<TransformerState>>,
    state: &Arc<crate::ServerState>,
) -> crate::pipeline::PipelineResult {
    let start = std::time::Instant::now();

    // Execute the task's tool through normal dispatch
    let source = crate::dispatch::Source::Pipeline {
        stream_id: task.stream_id.clone(),
        peer_key: "self".to_string(),
    };
    let response = crate::dispatch::call(state, source, &task.tool, &task.args);
    let duration_ms = start.elapsed().as_millis() as u64;

    // FL-10: Store pipeline execution as training signal in LMDB.
    // handle_train() reads tlog:* keys — same path for all signal sources
    // (gate, mesh brain_sync, pipeline, evil/good labels).
    {
        let signal = crate::gate_training::TrainingSignal {
            tool: task.tool.clone(),
            source: "pipeline".to_string(),
            allowed: response.status == "ok",
            status: response.status.clone(),
            duration_ms,
            timestamp: chrono::Utc::now().to_rfc3339(),
            user_override: false,
            false_positive: false,
            recent_call_count: 0,
            preceding_tools: vec![],
            evil_score: 0.0,
        };
        let db_path = crate::paths::spf_root().join("LIVE/LMDB5/LMDB5.DB");
        if let Ok(db) = crate::agent_state::AgentStateDb::open(&db_path) {
            if let Ok(json) = serde_json::to_string(&signal) {
                let tlog_key = format!("tlog:{}", signal.timestamp);
                let _ = db.set_state(&tlog_key, &json);
            }
        }
    }

    let status = if response.status == "ok" {
        crate::pipeline::PipelineStatus::Ok
    } else {
        crate::pipeline::PipelineStatus::Blocked
    };

    crate::pipeline::PipelineResult {
        task_id: task.task_id.clone(),
        stream_id: task.stream_id.clone(),
        status,
        result: response.result,
        error: if response.status != "ok" { Some(response.status) } else { None },
        duration_ms,
        executed_by: "worker".to_string(),
    }
}
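// Example (sketch): the shape of one stored training signal. Field values are
// illustrative; the key matches the `tlog:{timestamp}` namespace that
// handle_train() scans. `example_tlog_entry` is a hypothetical helper.
#[allow(dead_code)]
fn example_tlog_entry() -> (String, String) {
    let signal = crate::gate_training::TrainingSignal {
        tool: "fs_read".to_string(),
        source: "pipeline".to_string(),
        allowed: true,
        status: "ok".to_string(),
        duration_ms: 12,
        timestamp: "2026-01-01T00:00:00Z".to_string(),
        user_override: false,
        false_positive: false,
        recent_call_count: 0,
        preceding_tools: vec![],
        evil_score: 0.0,
    };
    let json = serde_json::to_string(&signal).expect("TrainingSignal serializes");
    (format!("tlog:{}", signal.timestamp), json)
}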
// ============================================================================
// WORKER MAIN LOOP — headless task processing (no stdio)
// ============================================================================

/// Run the worker main loop. Polls pipeline for tasks, processes them,
/// records results, and auto-checkpoints at configured intervals.
/// Blocks until shutdown signal (SIGTERM/SIGINT).
pub fn run_worker(
    worker_state: &mut WorkerState,
    transformer: &Arc<RwLock<TransformerState>>,
    server_state: &Arc<crate::ServerState>,
) {
    eprintln!("[SPF-WORKER] Worker started: role={}, max_concurrent={}",
        worker_state.config.role, worker_state.config.max_concurrent);

    let poll_interval = std::time::Duration::from_millis(100);
    let max_concurrent = worker_state.config.max_concurrent;
    let auto_checkpoint_steps = worker_state.config.auto_checkpoint_steps;

    loop {
        // Poll pipeline for available tasks
        let tasks = {
            let mut pipeline = worker_state.pipeline.lock().unwrap();
            pipeline.next_tasks(max_concurrent)
        };

        if tasks.is_empty() {
            // No work — sleep briefly before polling again
            std::thread::sleep(poll_interval);
            continue;
        }

        // Process each task through the normal dispatch pipeline
        for task in &tasks {
            let result = process_task(task, transformer, server_state);
            let succeeded = result.status == crate::pipeline::PipelineStatus::Ok;
            if succeeded {
                worker_state.tasks_processed += 1;
            } else {
                worker_state.tasks_failed += 1;
            }

            // Record result (may trigger chained tasks)
            let mut pipeline = worker_state.pipeline.lock().unwrap();
            if let Some(chained_task) = pipeline.record_result(result) {
                // Chained task returned — release the lock and process it
                // immediately, then record its result
                drop(pipeline);
                let chain_result = process_task(&chained_task, transformer, server_state);
                let mut pipeline = worker_state.pipeline.lock().unwrap();
                pipeline.record_result(chain_result);
            }
        }

        // Auto-checkpoint at configured interval
        let current_step = {
            let state = transformer.read().unwrap();
            state.training_step
        };
        if auto_checkpoint_steps > 0 && current_step > 0 && current_step % auto_checkpoint_steps == 0 {
            let state = transformer.read().unwrap();
            if let Err(e) = save_checkpoint(&state, &worker_state.transformer_config, &worker_state.config) {
                eprintln!("[SPF-WORKER] Auto-checkpoint failed: {}", e);
            }
        }
    }
}

// ============================================================================
// TESTS
// ============================================================================

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_worker_config_default() {
        let config = WorkerConfig::default();
        assert_eq!(config.role, "writer");
        assert!(config.checkpoint_override.is_none());
        assert_eq!(config.max_concurrent, 4);
        assert_eq!(config.heartbeat_interval_secs, 30);
        assert!(config.train_while_serving);
    }

    #[test]
    fn test_worker_state_new() {
        let wc = WorkerConfig::default();
        let tc = TransformerConfig::default();
        let state = WorkerState::new(wc, tc);
        assert_eq!(state.tasks_processed, 0);
        assert_eq!(state.tasks_failed, 0);
        assert!(state.accepting);
    }

    #[test]
    fn test_worker_status_json() {
        let wc = WorkerConfig { role: "researcher".to_string(), ..Default::default() };
        let tc = TransformerConfig::default();
        let mut state = WorkerState::new(wc, tc);
        state.tasks_processed = 42;
        state.tasks_failed = 3;
        let status = state.status_json();
        assert_eq!(status["mode"], "worker");
        assert_eq!(status["role"], "researcher");
        assert_eq!(status["tasks_processed"], 42);
        assert_eq!(status["tasks_failed"], 3);
        assert_eq!(status["accepting"], true);
    }

    #[test]
    fn test_worker_config_custom() {
        let config = WorkerConfig {
            role: "researcher".to_string(),
            checkpoint_override: Some("/custom/path.spfc".to_string()),
            max_concurrent: 8,
            heartbeat_interval_secs: 60,
            auto_checkpoint_steps: 500,
            train_while_serving: false,
        };
        assert_eq!(config.role, "researcher");
        assert_eq!(config.checkpoint_override.unwrap(), "/custom/path.spfc");
        assert_eq!(config.max_concurrent, 8);
        assert!(!config.train_while_serving);
    }
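    // Added sketch test: pins down the auto-checkpoint cadence used in
    // run_worker (checkpoint only when interval > 0, step > 0, and
    // step % interval == 0). The closure mirrors that predicate.
    #[test]
    fn test_auto_checkpoint_cadence_sketch() {
        let interval = WorkerConfig::default().auto_checkpoint_steps; // 1000
        let should_checkpoint = |step: u64| interval > 0 && step > 0 && step % interval == 0;
        assert!(!should_checkpoint(0));
        assert!(should_checkpoint(1000));
        assert!(!should_checkpoint(1001));
        assert!(should_checkpoint(2000));
    }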
    #[test]
    fn test_worker_state_uptime() {
        let wc = WorkerConfig::default();
        let tc = TransformerConfig::default();
        let state = WorkerState::new(wc, tc);
        // Uptime should be very small (just created)
        let status = state.status_json();
        let uptime = status["uptime_secs"].as_u64().unwrap();
        assert!(uptime < 2, "Uptime should be near zero, got {}", uptime);
    }

    #[test]
    fn test_worker_state_pipeline_integration() {
        let wc = WorkerConfig::default();
        let tc = TransformerConfig::default();
        let state = WorkerState::new(wc, tc);
        // Pipeline should be accessible and empty
        let pipeline = state.pipeline.lock().unwrap();
        let summary = pipeline.status_summary();
        assert_eq!(summary["active_streams"], 0);
        assert_eq!(summary["total_submitted"], 0);
    }

    #[test]
    fn test_init_transformer_disabled() {
        let tc = TransformerConfig { enabled: false, ..TransformerConfig::default() };
        let wc = WorkerConfig::default();
        let result = init_transformer(&tc, &wc);
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("not enabled"));
    }

    #[test]
    fn test_init_transformer_no_checkpoint() {
        let tc = TransformerConfig {
            enabled: true,
            writer_checkpoint: "nonexistent_checkpoint.spfc".to_string(),
            d_model: 32,
            n_heads: 2,
            n_layers: 1,
            vocab_size: 64,
            max_seq_len: 16,
            d_ff: 128,
            ..TransformerConfig::default()
        };
        let wc = WorkerConfig::default();
        // Should succeed with fresh random weights (no checkpoint file)
        let result = init_transformer(&tc, &wc);
        assert!(result.is_ok());
        let state = result.unwrap();
        assert_eq!(state.role, "writer");
        assert!(state.last_checkpoint.is_empty());
    }

    #[test]
    fn test_init_transformer_researcher_role() {
        let tc = TransformerConfig {
            enabled: true,
            d_model: 32,
            n_heads: 2,
            n_layers: 1,
            vocab_size: 64,
            max_seq_len: 16,
            d_ff: 128,
            ..TransformerConfig::default()
        };
        let wc = WorkerConfig { role: "researcher".to_string(), ..Default::default() };
        let result = init_transformer(&tc, &wc);
        assert!(result.is_ok());
        assert_eq!(result.unwrap().role, "researcher");
    }
}