| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| use serde_json::json; |
| use std::sync::{Arc, Mutex, RwLock}; |
|
|
| use crate::config::TransformerConfig; |
| use crate::pipeline::PipelineState; |
| use crate::transformer_tools::TransformerState; |
|
|
| |
| |
| |
|
|
| |
/// Runtime configuration for a single worker process.
#[derive(Debug, Clone)]
pub struct WorkerConfig {
    /// Worker role; "researcher" selects the researcher checkpoint,
    /// anything else falls back to the writer checkpoint (see `init_transformer`).
    pub role: String,
    /// Explicit checkpoint file path; when `None`, the path is derived from
    /// the role and the transformer config under LIVE/MODELS.
    pub checkpoint_override: Option<String>,
    /// Maximum number of pipeline tasks pulled per poll cycle in `run_worker`.
    pub max_concurrent: usize,
    /// Heartbeat period in seconds.
    /// NOTE(review): not consumed anywhere in this file — presumably read by a
    /// supervisor/heartbeat loop elsewhere; confirm before relying on it.
    pub heartbeat_interval_secs: u64,
    /// Auto-save a checkpoint whenever the training step reaches a multiple of
    /// this value; 0 disables auto-checkpointing (see `run_worker`).
    pub auto_checkpoint_steps: u64,
    /// Whether training continues while serving tasks.
    /// NOTE(review): exposed in `status_json` but not consulted by the loop in
    /// this file — verify where it takes effect.
    pub train_while_serving: bool,
}
|
|
| impl Default for WorkerConfig { |
| fn default() -> Self { |
| Self { |
| role: "writer".to_string(), |
| checkpoint_override: None, |
| max_concurrent: 4, |
| heartbeat_interval_secs: 30, |
| auto_checkpoint_steps: 1000, |
| train_while_serving: true, |
| } |
| } |
| } |
|
|
| |
| |
| |
|
|
| |
/// Mutable runtime state for a running worker: configuration, the shared
/// task pipeline, and simple success/failure counters.
pub struct WorkerState {
    /// Worker-level settings (role, concurrency, checkpoint cadence).
    pub config: WorkerConfig,
    /// Model/transformer settings used for checkpoint naming and init.
    pub transformer_config: TransformerConfig,
    /// Shared task pipeline; locked briefly to pull tasks and record results.
    pub pipeline: Arc<Mutex<PipelineState>>,
    /// Count of tasks that completed with status "ok".
    pub tasks_processed: u64,
    /// Count of tasks that completed with any non-"ok" status.
    pub tasks_failed: u64,
    /// Start instant used to compute uptime in `status_json`.
    pub started_at: std::time::Instant,
    /// Whether the worker is accepting new work.
    /// NOTE(review): reported in `status_json` but never flipped or checked in
    /// this file — confirm who toggles it.
    pub accepting: bool,
}
|
|
| impl WorkerState { |
| pub fn new(config: WorkerConfig, transformer_config: TransformerConfig) -> Self { |
| Self { |
| config, |
| transformer_config, |
| pipeline: Arc::new(Mutex::new(PipelineState::new())), |
| tasks_processed: 0, |
| tasks_failed: 0, |
| started_at: std::time::Instant::now(), |
| accepting: true, |
| } |
| } |
|
|
| |
| pub fn status_json(&self) -> serde_json::Value { |
| let uptime = self.started_at.elapsed().as_secs(); |
| let pipeline_status = self.pipeline.lock().unwrap().status_summary(); |
|
|
| json!({ |
| "mode": "worker", |
| "role": self.config.role, |
| "accepting": self.accepting, |
| "uptime_secs": uptime, |
| "tasks_processed": self.tasks_processed, |
| "tasks_failed": self.tasks_failed, |
| "max_concurrent": self.config.max_concurrent, |
| "train_while_serving": self.config.train_while_serving, |
| "pipeline": pipeline_status, |
| }) |
| } |
| } |
|
|
| |
| |
| |
|
|
| |
| |
| pub fn init_transformer( |
| transformer_config: &TransformerConfig, |
| worker_config: &WorkerConfig, |
| ) -> Result<TransformerState, String> { |
| if !transformer_config.enabled { |
| return Err("Transformer not enabled in config. Set enabled=true in transformer.json".to_string()); |
| } |
|
|
| |
| let checkpoint_path = worker_config.checkpoint_override.clone() |
| .unwrap_or_else(|| { |
| let checkpoint_name = match worker_config.role.as_str() { |
| "researcher" => &transformer_config.researcher_checkpoint, |
| _ => &transformer_config.writer_checkpoint, |
| }; |
| let models_dir = crate::paths::spf_root().join("LIVE/MODELS"); |
| models_dir.join(checkpoint_name).to_string_lossy().to_string() |
| }); |
|
|
| |
| let mut state = TransformerState::from_config(transformer_config, &worker_config.role); |
|
|
| |
| let checkpoint_file = std::path::Path::new(&checkpoint_path); |
| if checkpoint_file.exists() { |
| match std::fs::read(checkpoint_file) { |
| Ok(data) => { |
| match crate::checkpoint::deserialize_weights(&data) { |
| Ok((checkpoint_weights, meta)) => { |
| let mut model_weights = state.model.weights_mut(); |
| if let Err(e) = crate::checkpoint::apply_weights(&mut model_weights, &checkpoint_weights) { |
| eprintln!("[SPF-WORKER] WARNING: Failed to apply checkpoint weights: {}. Starting fresh.", e); |
| } else { |
| eprintln!("[SPF-WORKER] Checkpoint meta: model={}, step={}", meta.model_id, meta.step); |
| } |
| state.last_checkpoint = checkpoint_path.clone(); |
| eprintln!("[SPF-WORKER] Loaded checkpoint: {} ({} bytes)", |
| checkpoint_path, data.len()); |
| } |
| Err(e) => { |
| eprintln!("[SPF-WORKER] WARNING: Failed to deserialize checkpoint: {}. Starting fresh.", e); |
| } |
| } |
| } |
| Err(e) => { |
| eprintln!("[SPF-WORKER] WARNING: Failed to read checkpoint {}: {}. Starting fresh.", |
| checkpoint_path, e); |
| } |
| } |
| } else { |
| eprintln!("[SPF-WORKER] No checkpoint at {}. Starting with random weights.", checkpoint_path); |
| } |
|
|
| Ok(state) |
| } |
|
|
| |
| pub fn save_checkpoint( |
| state: &TransformerState, |
| transformer_config: &TransformerConfig, |
| worker_config: &WorkerConfig, |
| ) -> Result<String, String> { |
| let models_dir = crate::paths::spf_root().join("LIVE/MODELS"); |
| std::fs::create_dir_all(&models_dir) |
| .map_err(|e| format!("Failed to create MODELS dir: {}", e))?; |
|
|
| let checkpoint_name = match worker_config.role.as_str() { |
| "researcher" => &transformer_config.researcher_checkpoint, |
| _ => &transformer_config.writer_checkpoint, |
| }; |
|
|
| let path = models_dir.join(checkpoint_name); |
| let weights_refs = state.model.weights(); |
| let data = crate::checkpoint::serialize_weights( |
| &weights_refs, |
| &worker_config.role, |
| state.training_step, |
| ).map_err(|e| format!("Serialize failed: {}", e))?; |
| std::fs::write(&path, &data) |
| .map_err(|e| format!("Failed to write checkpoint: {}", e))?; |
|
|
| let path_str = path.to_string_lossy().to_string(); |
| eprintln!("[SPF-WORKER] Checkpoint saved: {} ({} bytes, step {})", |
| path_str, data.len(), state.training_step); |
|
|
| Ok(path_str) |
| } |
|
|
| |
| |
| |
|
|
| |
| |
| |
| pub fn process_task( |
| task: &crate::pipeline::PipelineTask, |
| _transformer: &Arc<RwLock<TransformerState>>, |
| state: &Arc<crate::http::ServerState>, |
| ) -> crate::pipeline::PipelineResult { |
| let start = std::time::Instant::now(); |
|
|
| |
| let source = crate::dispatch::Source::Pipeline { |
| stream_id: task.stream_id.clone(), |
| peer_key: "self".to_string(), |
| }; |
|
|
| let response = crate::dispatch::call(state, source, &task.tool, &task.args); |
|
|
| let duration_ms = start.elapsed().as_millis() as u64; |
|
|
| |
| |
| |
| { |
| let signal = crate::gate_training::TrainingSignal { |
| tool: task.tool.clone(), |
| source: "pipeline".to_string(), |
| allowed: response.status == "ok", |
| status: response.status.clone(), |
| duration_ms, |
| timestamp: chrono::Utc::now().to_rfc3339(), |
| user_override: false, |
| false_positive: false, |
| recent_call_count: 0, |
| preceding_tools: vec![], |
| evil_score: 0.0, |
| }; |
| let db_path = crate::paths::spf_root().join("LIVE/LMDB5/LMDB5.DB"); |
| if let Ok(db) = crate::agent_state::AgentStateDb::open(&db_path) { |
| if let Ok(json) = serde_json::to_string(&signal) { |
| let tlog_key = format!("tlog:{}", signal.timestamp); |
| let _ = db.set_state(&tlog_key, &json); |
| } |
| } |
| } |
|
|
| let status = if response.status == "ok" { |
| crate::pipeline::PipelineStatus::Ok |
| } else { |
| crate::pipeline::PipelineStatus::Blocked |
| }; |
|
|
| crate::pipeline::PipelineResult { |
| task_id: task.task_id.clone(), |
| stream_id: task.stream_id.clone(), |
| status, |
| result: response.result, |
| error: if response.status != "ok" { |
| Some(response.status) |
| } else { |
| None |
| }, |
| duration_ms, |
| executed_by: "worker".to_string(), |
| } |
| } |
|
|
| |
| |
| |
|
|
| |
| |
| |
| pub fn run_worker( |
| worker_state: &mut WorkerState, |
| transformer: &Arc<RwLock<TransformerState>>, |
| server_state: &Arc<crate::http::ServerState>, |
| ) { |
| eprintln!("[SPF-WORKER] Worker started: role={}, max_concurrent={}", |
| worker_state.config.role, worker_state.config.max_concurrent); |
|
|
| let poll_interval = std::time::Duration::from_millis(100); |
| let max_concurrent = worker_state.config.max_concurrent; |
| let auto_checkpoint_steps = worker_state.config.auto_checkpoint_steps; |
|
|
| loop { |
| |
| let tasks = { |
| let mut pipeline = worker_state.pipeline.lock().unwrap(); |
| pipeline.next_tasks(max_concurrent) |
| }; |
|
|
| if tasks.is_empty() { |
| |
| std::thread::sleep(poll_interval); |
| continue; |
| } |
|
|
| |
| for task in &tasks { |
| let result = process_task(task, transformer, server_state); |
|
|
| let succeeded = result.status == crate::pipeline::PipelineStatus::Ok; |
| if succeeded { |
| worker_state.tasks_processed += 1; |
| } else { |
| worker_state.tasks_failed += 1; |
| } |
|
|
| |
| let mut pipeline = worker_state.pipeline.lock().unwrap(); |
| if let Some(chained_task) = pipeline.record_result(result) { |
| |
| drop(pipeline); |
| let chain_result = process_task(&chained_task, transformer, server_state); |
| let mut pipeline = worker_state.pipeline.lock().unwrap(); |
| pipeline.record_result(chain_result); |
| } |
| } |
|
|
| |
| let current_step = { |
| let state = transformer.read().unwrap(); |
| state.training_step |
| }; |
| if auto_checkpoint_steps > 0 && current_step > 0 && current_step % auto_checkpoint_steps == 0 { |
| let state = transformer.read().unwrap(); |
| if let Err(e) = save_checkpoint(&state, &worker_state.transformer_config, &worker_state.config) { |
| eprintln!("[SPF-WORKER] Auto-checkpoint failed: {}", e); |
| } |
| } |
| } |
| } |
|
|
| |
| |
| |
|
|
#[cfg(test)]
mod tests {
    use super::*;

    // Default config matches the documented defaults exactly.
    #[test]
    fn test_worker_config_default() {
        let config = WorkerConfig::default();
        assert_eq!(config.role, "writer");
        assert!(config.checkpoint_override.is_none());
        assert_eq!(config.max_concurrent, 4);
        assert_eq!(config.heartbeat_interval_secs, 30);
        assert!(config.train_while_serving);
    }

    // Fresh worker state starts with zeroed counters and accepting=true.
    #[test]
    fn test_worker_state_new() {
        let wc = WorkerConfig::default();
        let tc = TransformerConfig::default();
        let state = WorkerState::new(wc, tc);
        assert_eq!(state.tasks_processed, 0);
        assert_eq!(state.tasks_failed, 0);
        assert!(state.accepting);
    }

    // status_json reflects role, counters, and accepting flag.
    #[test]
    fn test_worker_status_json() {
        let wc = WorkerConfig {
            role: "researcher".to_string(),
            ..Default::default()
        };
        let tc = TransformerConfig::default();
        let mut state = WorkerState::new(wc, tc);
        state.tasks_processed = 42;
        state.tasks_failed = 3;

        let status = state.status_json();
        assert_eq!(status["mode"], "worker");
        assert_eq!(status["role"], "researcher");
        assert_eq!(status["tasks_processed"], 42);
        assert_eq!(status["tasks_failed"], 3);
        assert_eq!(status["accepting"], true);
    }

    // All fields are settable via a fully-specified literal.
    #[test]
    fn test_worker_config_custom() {
        let config = WorkerConfig {
            role: "researcher".to_string(),
            checkpoint_override: Some("/custom/path.spfc".to_string()),
            max_concurrent: 8,
            heartbeat_interval_secs: 60,
            auto_checkpoint_steps: 500,
            train_while_serving: false,
        };
        assert_eq!(config.role, "researcher");
        assert_eq!(config.checkpoint_override.unwrap(), "/custom/path.spfc");
        assert_eq!(config.max_concurrent, 8);
        assert!(!config.train_while_serving);
    }

    // Uptime right after construction should be (near) zero seconds.
    #[test]
    fn test_worker_state_uptime() {
        let wc = WorkerConfig::default();
        let tc = TransformerConfig::default();
        let state = WorkerState::new(wc, tc);

        let status = state.status_json();
        let uptime = status["uptime_secs"].as_u64().unwrap();
        assert!(uptime < 2, "Uptime should be near zero, got {}", uptime);
    }

    // A fresh pipeline reports zero streams and zero submissions.
    #[test]
    fn test_worker_state_pipeline_integration() {
        let wc = WorkerConfig::default();
        let tc = TransformerConfig::default();
        let state = WorkerState::new(wc, tc);

        let pipeline = state.pipeline.lock().unwrap();
        let summary = pipeline.status_summary();
        assert_eq!(summary["active_streams"], 0);
        assert_eq!(summary["total_submitted"], 0);
    }

    // init_transformer refuses to run when the transformer is disabled.
    #[test]
    fn test_init_transformer_disabled() {
        let tc = TransformerConfig {
            enabled: false,
            ..TransformerConfig::default()
        };
        let wc = WorkerConfig::default();

        let result = init_transformer(&tc, &wc);
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("not enabled"));
    }

    // Missing checkpoint file is non-fatal: fresh state, empty last_checkpoint.
    // Small model dims keep this test fast.
    #[test]
    fn test_init_transformer_no_checkpoint() {
        let tc = TransformerConfig {
            enabled: true,
            writer_checkpoint: "nonexistent_checkpoint.spfc".to_string(),
            d_model: 32,
            n_heads: 2,
            n_layers: 1,
            vocab_size: 64,
            max_seq_len: 16,
            d_ff: 128,
            ..TransformerConfig::default()
        };
        let wc = WorkerConfig::default();

        let result = init_transformer(&tc, &wc);
        assert!(result.is_ok());
        let state = result.unwrap();
        assert_eq!(state.role, "writer");
        assert!(state.last_checkpoint.is_empty());
    }

    // Role is propagated into the transformer state.
    #[test]
    fn test_init_transformer_researcher_role() {
        let tc = TransformerConfig {
            enabled: true,
            d_model: 32,
            n_heads: 2,
            n_layers: 1,
            vocab_size: 64,
            max_seq_len: 16,
            d_ff: 128,
            ..TransformerConfig::default()
        };
        let wc = WorkerConfig {
            role: "researcher".to_string(),
            ..Default::default()
        };

        let result = init_transformer(&tc, &wc);
        assert!(result.is_ok());
        assert_eq!(result.unwrap().role, "researcher");
    }
}
|
|