| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| use chrono::{DateTime, Utc}; |
| use serde::{Deserialize, Serialize}; |
| use serde_json::Value; |
| use sha2::{Digest, Sha256}; |
| use std::sync::{Arc, Mutex}; |
| use std::time::Instant; |
|
|
| use crate::config::PoolPeer; |
|
|
| |
| |
| |
|
|
| |
/// Run-state of a single pool worker: either free, or pinned to a task.
#[derive(Debug, Clone)]
pub enum WorkerStatus {
    /// Worker has no task assigned and may be borrowed.
    Idle,
    /// Worker is currently executing a task.
    Busy {
        // Id of the task this worker is running.
        task_id: String,
        // Monotonic instant of assignment; used for elapsed-time reporting.
        since: Instant,
    },
}
|
|
| impl WorkerStatus { |
| |
| pub fn is_idle(&self) -> bool { |
| matches!(self, WorkerStatus::Idle) |
| } |
|
|
| |
| pub fn task_id(&self) -> Option<&str> { |
| match self { |
| WorkerStatus::Busy { task_id, .. } => Some(task_id.as_str()), |
| WorkerStatus::Idle => None, |
| } |
| } |
|
|
| |
| pub fn elapsed_secs(&self) -> u64 { |
| match self { |
| WorkerStatus::Busy { since, .. } => since.elapsed().as_secs(), |
| WorkerStatus::Idle => 0, |
| } |
| } |
| } |
|
|
| |
| |
| |
|
|
| |
/// One worker slot in the pool: a configured peer plus its live status.
pub struct PoolEntry {
    /// Static peer configuration (name, key_hex, port, capabilities).
    pub peer: PoolPeer,
    /// Current idle/busy state of this worker.
    pub status: WorkerStatus,
}
|
|
| |
| |
| |
|
|
| |
| |
/// Thread-safe registry of pool workers with a concurrency cap.
pub struct PoolState {
    /// All worker slots, guarded by a mutex for cross-thread access.
    entries: Mutex<Vec<PoolEntry>>,
    /// Maximum number of simultaneously busy workers (clamped to 8 in `new`).
    pool_size: u8,
}
|
|
| impl PoolState { |
| |
| |
| pub fn new(peers: &[PoolPeer], pool_size: u8) -> Arc<Self> { |
| let entries = peers |
| .iter() |
| .map(|p| PoolEntry { |
| peer: p.clone(), |
| status: WorkerStatus::Idle, |
| }) |
| .collect(); |
| Arc::new(Self { |
| entries: Mutex::new(entries), |
| pool_size: pool_size.min(8), |
| }) |
| } |
|
|
| |
| |
| pub fn status_snapshot(&self) -> Vec<(String, String, Option<String>, u64)> { |
| let entries = self.entries.lock().unwrap(); |
| entries |
| .iter() |
| .map(|e| { |
| let status_str = if e.status.is_idle() { |
| "idle".to_string() |
| } else { |
| "busy".to_string() |
| }; |
| let task_id = e.status.task_id().map(|s| s.to_string()); |
| let elapsed = e.status.elapsed_secs(); |
| (e.peer.name.clone(), status_str, task_id, elapsed) |
| }) |
| .collect() |
| } |
|
|
| |
| |
| pub fn find_idle(&self) -> Option<String> { |
| let entries = self.entries.lock().unwrap(); |
| let busy_count = entries.iter().filter(|e| !e.status.is_idle()).count(); |
| if busy_count >= self.pool_size as usize { |
| return None; |
| } |
| entries |
| .iter() |
| .find(|e| e.status.is_idle()) |
| .map(|e| e.peer.key_hex.clone()) |
| } |
|
|
| |
| pub fn get_peer(&self, key_hex: &str) -> Option<PoolPeer> { |
| let entries = self.entries.lock().unwrap(); |
| entries |
| .iter() |
| .find(|e| e.peer.key_hex == key_hex) |
| .map(|e| e.peer.clone()) |
| } |
|
|
| |
| pub fn find_by_name(&self, name: &str) -> Option<String> { |
| let entries = self.entries.lock().unwrap(); |
| entries |
| .iter() |
| .find(|e| e.peer.name.eq_ignore_ascii_case(name)) |
| .map(|e| e.peer.key_hex.clone()) |
| } |
|
|
| |
| |
| pub fn borrow(&self, key_hex: &str, task_id: &str) -> Result<(), String> { |
| let mut entries = self.entries.lock().unwrap(); |
| let busy_count = entries.iter().filter(|e| !e.status.is_idle()).count(); |
| if busy_count >= self.pool_size as usize { |
| return Err(format!( |
| "Pool at capacity ({}/{}). No workers available.", |
| busy_count, self.pool_size |
| )); |
| } |
| match entries.iter_mut().find(|e| e.peer.key_hex == key_hex) { |
| None => Err(format!("Worker not found: {}", key_hex)), |
| Some(entry) => { |
| if !entry.status.is_idle() { |
| Err(format!( |
| "Worker {} is already busy (task: {})", |
| entry.peer.name, |
| entry.status.task_id().unwrap_or("?") |
| )) |
| } else { |
| entry.status = WorkerStatus::Busy { |
| task_id: task_id.to_string(), |
| since: Instant::now(), |
| }; |
| Ok(()) |
| } |
| } |
| } |
| } |
|
|
| |
| |
| pub fn release(&self, key_hex: &str) -> Result<String, String> { |
| let mut entries = self.entries.lock().unwrap(); |
| match entries.iter_mut().find(|e| e.peer.key_hex == key_hex) { |
| None => Err(format!("Worker not found: {}", key_hex)), |
| Some(entry) => { |
| let name = entry.peer.name.clone(); |
| entry.status = WorkerStatus::Idle; |
| Ok(name) |
| } |
| } |
| } |
|
|
| |
| pub fn active_count(&self) -> usize { |
| let entries = self.entries.lock().unwrap(); |
| entries.iter().filter(|e| !e.status.is_idle()).count() |
| } |
|
|
| |
| pub fn capacity(&self) -> u8 { |
| self.pool_size |
| } |
|
|
| |
| pub fn idle_count(&self) -> usize { |
| let entries = self.entries.lock().unwrap(); |
| entries.iter().filter(|e| e.status.is_idle()).count() |
| } |
| } |
|
|
| |
| |
| |
|
|
| |
| |
| |
/// Serializable record attesting that a worker completed a task.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProofOfWork {
    /// Id of the completed task.
    pub task_id: String,
    /// Display name of the worker that ran the task.
    pub worker_name: String,
    /// Hex key identifying the worker.
    pub worker_key: String,
    /// Name of the tool that was executed.
    pub tool: String,
    /// When the task was assigned.
    pub assigned_at: DateTime<Utc>,
    /// When the task finished (stamped in `ProofOfWork::new`).
    pub completed_at: DateTime<Utc>,
    /// Wall-clock duration in milliseconds (clamped to be non-negative).
    pub duration_ms: u64,
    /// SHA-256 hex digest of the JSON-serialized result (see `hash_result`).
    pub result_hash: String,
}
|
|
| impl ProofOfWork { |
| |
| pub fn new( |
| task_id: &str, |
| worker_name: &str, |
| worker_key: &str, |
| tool: &str, |
| assigned_at: DateTime<Utc>, |
| result: &Value, |
| ) -> Self { |
| let completed_at = Utc::now(); |
| let duration_ms = (completed_at - assigned_at) |
| .num_milliseconds() |
| .max(0) as u64; |
| Self { |
| task_id: task_id.to_string(), |
| worker_name: worker_name.to_string(), |
| worker_key: worker_key.to_string(), |
| tool: tool.to_string(), |
| assigned_at, |
| completed_at, |
| duration_ms, |
| result_hash: hash_result(result), |
| } |
| } |
|
|
| |
| |
| pub fn as_manifest_targets(&self) -> Vec<String> { |
| vec![ |
| self.worker_name.clone(), |
| self.tool.clone(), |
| format!("{}ms", self.duration_ms), |
| ] |
| } |
| } |
|
|
| |
| |
| pub fn hash_result(result: &Value) -> String { |
| let json_str = serde_json::to_string(result).unwrap_or_default(); |
| let mut hasher = Sha256::new(); |
| hasher.update(json_str.as_bytes()); |
| hex::encode(hasher.finalize()) |
| } |
|
|
| |
| |
| |
|
|
| |
| |
/// In-flight bookkeeping for a task dispatched to a pool worker; consumed
/// by `into_proof` when the task completes.
#[derive(Debug, Clone)]
pub struct TaskContext {
    /// Id of the dispatched task.
    pub task_id: String,
    /// Hex key of the worker running the task.
    pub worker_key: String,
    /// Display name of the worker running the task.
    pub worker_name: String,
    /// Name of the tool being executed.
    pub tool: String,
    /// When the task was assigned (stamped in `TaskContext::new`).
    pub assigned_at: DateTime<Utc>,
}
|
|
| impl TaskContext { |
| |
| pub fn new(task_id: &str, worker_key: &str, worker_name: &str, tool: &str) -> Self { |
| Self { |
| task_id: task_id.to_string(), |
| worker_key: worker_key.to_string(), |
| worker_name: worker_name.to_string(), |
| tool: tool.to_string(), |
| assigned_at: Utc::now(), |
| } |
| } |
|
|
| |
| pub fn into_proof(self, result: &Value) -> ProofOfWork { |
| ProofOfWork::new( |
| &self.task_id, |
| &self.worker_name, |
| &self.worker_key, |
| &self.tool, |
| self.assigned_at, |
| result, |
| ) |
| } |
| } |
|
|
| |
| |
| |
|
|
| |
| |
| |
| pub fn new_task_id() -> String { |
| use std::sync::atomic::{AtomicU64, Ordering}; |
| static COUNTER: AtomicU64 = AtomicU64::new(0); |
| let ts = Utc::now().timestamp_millis(); |
| let n = COUNTER.fetch_add(1, Ordering::Relaxed); |
| format!("task-{}-{}", ts, n) |
| } |
|
|
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| pub fn log_startup_status(peers: &[PoolPeer], role: &str) { |
| let green = "\x1b[32m●\x1b[0m"; |
| let red = "\x1b[31m●\x1b[0m"; |
| let bold = "\x1b[1m"; |
| let reset = "\x1b[0m"; |
|
|
| eprintln!( |
| "[SPFsmartGATE] MESH {} {}ROLE: {}{}", |
| green, bold, role.to_uppercase(), reset |
| ); |
|
|
| if peers.is_empty() { |
| eprintln!("[SPFsmartGATE] MESH {} NO PEERS CONFIGURED", red); |
| } else { |
| for peer in peers { |
| eprintln!( |
| "[SPFsmartGATE] PEER {} {} — port:{}", |
| green, peer.name, peer.port |
| ); |
| } |
| } |
| } |
|
|
| |
| |
| |
|
|
#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{NetworkConfig, PoolPeer};

    // Builds a minimal peer with a single "tools" capability.
    fn make_peer(name: &str, key: &str, port: u16) -> PoolPeer {
        PoolPeer {
            name: name.to_string(),
            key_hex: key.to_string(),
            port,
            capabilities: vec!["tools".to_string()],
        }
    }

    // Pool at the maximum capacity (8) so the cap never interferes with
    // tests that only exercise idle/busy transitions.
    fn make_pool(peers: &[PoolPeer]) -> Arc<PoolState> {
        PoolState::new(peers, 8)
    }

    // Every worker starts idle; nothing is active until borrowed.
    #[test]
    fn pool_starts_all_idle() {
        let peers = vec![make_peer("ALPHA", "aaa", 4901), make_peer("CHARLIE", "bbb", 4902)];
        let pool = make_pool(&peers);
        assert_eq!(pool.idle_count(), 2);
        assert_eq!(pool.active_count(), 0);
    }

    // Borrowing flips the worker's status from idle to busy.
    #[test]
    fn borrow_marks_busy() {
        let peers = vec![make_peer("ALPHA", "aaa", 4901)];
        let pool = make_pool(&peers);
        pool.borrow("aaa", "task-1").unwrap();
        assert_eq!(pool.active_count(), 1);
        assert_eq!(pool.idle_count(), 0);
    }

    // Double-borrowing the same worker fails with an "already busy" error.
    #[test]
    fn borrow_already_busy_fails() {
        let peers = vec![make_peer("ALPHA", "aaa", 4901)];
        let pool = make_pool(&peers);
        pool.borrow("aaa", "task-1").unwrap();
        let result = pool.borrow("aaa", "task-2");
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("already busy"));
    }

    // Release returns the worker to the idle state.
    #[test]
    fn release_marks_idle() {
        let peers = vec![make_peer("ALPHA", "aaa", 4901)];
        let pool = make_pool(&peers);
        pool.borrow("aaa", "task-1").unwrap();
        pool.release("aaa").unwrap();
        assert_eq!(pool.idle_count(), 1);
        assert_eq!(pool.active_count(), 0);
    }

    // find_idle skips busy workers and yields the key of the next idle one.
    #[test]
    fn find_idle_returns_first_idle() {
        let peers = vec![make_peer("ALPHA", "aaa", 4901), make_peer("CHARLIE", "bbb", 4902)];
        let pool = make_pool(&peers);
        pool.borrow("aaa", "task-1").unwrap();
        let idle = pool.find_idle().unwrap();
        assert_eq!(idle, "bbb");
    }

    // With every worker busy, find_idle reports no availability.
    #[test]
    fn find_idle_none_when_all_busy() {
        let peers = vec![make_peer("ALPHA", "aaa", 4901)];
        let pool = make_pool(&peers);
        pool.borrow("aaa", "task-1").unwrap();
        assert!(pool.find_idle().is_none());
    }

    // A pool_size smaller than the peer count blocks extra borrows even
    // when idle workers remain.
    #[test]
    fn pool_size_cap_enforced() {
        // Two peers but a capacity of one concurrent worker.
        let peers = vec![make_peer("ALPHA", "aaa", 4901), make_peer("CHARLIE", "bbb", 4902)];
        let pool = PoolState::new(&peers, 1);
        pool.borrow("aaa", "task-1").unwrap();
        // Second borrow must be rejected with a capacity error.
        let result = pool.borrow("bbb", "task-2");
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("capacity"));
    }

    // Name lookup ignores ASCII case in every combination.
    #[test]
    fn find_by_name_case_insensitive() {
        let peers = vec![make_peer("ALPHA", "aaa", 4901)];
        let pool = make_pool(&peers);
        assert_eq!(pool.find_by_name("alpha").unwrap(), "aaa");
        assert_eq!(pool.find_by_name("ALPHA").unwrap(), "aaa");
        assert_eq!(pool.find_by_name("Alpha").unwrap(), "aaa");
    }

    // Same JSON input always hashes to the same 64-char SHA-256 hex digest.
    #[test]
    fn hash_result_is_deterministic() {
        let v = serde_json::json!({"text": "hello", "count": 42});
        let h1 = hash_result(&v);
        let h2 = hash_result(&v);
        assert_eq!(h1, h2);
        assert_eq!(h1.len(), 64);
    }

    // into_proof carries every context field through to the proof record.
    #[test]
    fn proof_of_work_fields_correct() {
        let ctx = TaskContext::new("task-1", "aaa", "ALPHA", "spf_read");
        let result = serde_json::json!({"text": "file contents"});
        let pow = ctx.into_proof(&result);
        assert_eq!(pow.task_id, "task-1");
        assert_eq!(pow.worker_name, "ALPHA");
        assert_eq!(pow.worker_key, "aaa");
        assert_eq!(pow.tool, "spf_read");
        assert_eq!(pow.result_hash.len(), 64);
        assert!(pow.duration_ms < 1000);
    }

    // Consecutive ids differ (the counter advances) and share the prefix.
    #[test]
    fn new_task_id_is_unique() {
        let id1 = new_task_id();
        let id2 = new_task_id();
        assert_ne!(id1, id2);
        assert!(id1.starts_with("task-"));
        assert!(id2.starts_with("task-"));
    }

    // Smoke test: a default NetworkConfig wires into a pool (no peers,
    // capacity clamped to 8).
    #[test]
    fn network_config_default_integrates() {
        // assumes NetworkConfig::default() has no peers and an effective
        // pool size of 8 — defined in crate::config, not visible here.
        let config = NetworkConfig::default();
        let pool = PoolState::new(&config.peers, config.effective_pool_size());
        assert_eq!(pool.idle_count(), 0);
        assert_eq!(pool.capacity(), 8);
    }
}
|
|