// SPF Smart Gateway - FLINT Transformer MCP Tool Handlers
// Copyright 2026 Joseph Stone - All Rights Reserved
//
// FLINT — Focused Learning Intelligence for Network Threats
// Named: Stone + Flint = Fire
//
// BLOCK K — Separate module for transformer tool logic.
// mcp.rs gets minimal forwarding match arms (5 core tools; FL-8 adds 2 labeling tools).
// gate.rs gets the 5 core tools added to its allowlist.
//
// All handlers follow the mcp.rs pattern:
//     fn handle_*(args, state) -> Value
// Returns json!({"type": "text", "text": "..."}) matching the MCP protocol.
//
// Depends on: Block E (transformer.rs, checkpoint.rs), Block I (TransformerConfig)

use serde_json::{json, Value};
use std::sync::{Arc, RwLock};

use crate::config::TransformerConfig;
use crate::agent_state::AgentStateDb;
use crate::paths::spf_root;

// ============================================================================
// FLINT IDENTITY
// ============================================================================

/// FLINT's name — used in all tool responses
pub const FLINT_NAME: &str = "FLINT";
pub const FLINT_VERSION: &str = "0.1.0";

// ============================================================================
// TRANSFORMER STATE — held in ServerState behind RwLock
// ============================================================================

/// Runtime state for the transformer, wrapped in RwLock in ServerState.
/// Multiple readers (inference) can proceed simultaneously.
/// Single writer (training) acquires exclusive lock during weight update.
pub struct TransformerState {
    /// The transformer model (Block E)
    pub model: crate::transformer::SPFTransformer,
    /// Configuration
    pub config: TransformerConfig,
    /// Current training step
    pub training_step: u64,
    /// Last checkpoint path
    pub last_checkpoint: String,
    /// Whether model is currently training
    pub is_training: bool,
    /// Cumulative loss (for metrics)
    pub total_loss: f64,
    /// Number of training batches completed
    pub batches_completed: u64,
    /// Gate alignment score (0.0 - 1.0)
    pub gate_alignment: f64,
    /// Model role: "writer" or "researcher"
    pub role: String,
    /// Reference to GateTrainingCollector for draining training signals.
    /// Shared with ServerState.listeners via Arc — same buffer, same signals.
    pub collector: Option<Arc<crate::gate_training::GateTrainingCollector>>,
    /// AdamW optimizer
    pub optimizer: Option<crate::train::AdamW>,
    /// Block PP: Whether FLINT auto-responds to chat (default: off)
    pub chat_enabled: bool,
}

impl TransformerState {
    /// Create a new TransformerState from config
    pub fn from_config(config: &TransformerConfig, role: &str) -> Self {
        let model_config = crate::transformer::TransformerModelConfig {
            vocab_size: config.vocab_size,
            d_model: config.d_model,
            n_heads: config.n_heads,
            n_layers: config.n_layers,
            d_ff: config.d_ff,
            max_seq_len: config.max_seq_len,
            ln_eps: 1e-5,
        };
        let model = crate::transformer::SPFTransformer::new(model_config, 42);

        // FL-9: LearningController removed — training driven by handle_train() + LMDB.
        // Only the AdamW optimizer is needed (used directly in handle_train).
        let optimizer = if config.online_learning {
            let param_sizes: Vec<usize> = model.weights().iter().map(|w| w.numel()).collect();
            Some(crate::train::AdamW::new(
                crate::train::AdamWConfig {
                    lr: config.learning_rate as f32,
                    ..Default::default()
                },
                &param_sizes,
            ))
        } else {
            None
        };

        Self {
            model,
            config: config.clone(),
            training_step: 0,
            last_checkpoint: String::new(),
            is_training: false,
            total_loss: 0.0,
            batches_completed: 0,
            gate_alignment: 0.0,
            role: role.to_string(),
            collector: None,
            optimizer,
            chat_enabled: false, // Block PP: off by default
        }
    }
}
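// Illustrative sketch (not wired into the handlers below): the RwLock access
// pattern described on TransformerState. Inference paths take a read lock and
// may run concurrently; handle_train() takes the write lock for exclusive
// access. The helper name `snapshot_busy` is ours, for illustration only.
#[allow(dead_code)]
fn snapshot_busy(state_lock: &Arc<RwLock<TransformerState>>) -> bool {
    // Many readers can hold this simultaneously; it blocks only while a
    // writer (training) holds the lock.
    state_lock.read().map(|s| s.is_training).unwrap_or(false)
}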
// ============================================================================
// TOOL DEFINITIONS — called by mcp.rs tool_definitions()
// ============================================================================

/// Returns the transformer tool definitions for MCP registration
/// (5 core tools + 2 FL-8 labeling tools).
/// Called from mcp.rs tool_definitions() to avoid bloating that file.
pub fn tool_definitions() -> Vec<Value> {
    vec![
        json!({
            "name": "spf_transformer_status",
            "description": "Get FLINT transformer status (loaded, params, checkpoint, role)",
            "inputSchema": {
                "type": "object",
                "properties": {},
                "required": []
            }
        }),
        json!({
            "name": "spf_transformer_infer",
            "description": "Run FLINT inference: prompt → response. Returns generated tokens.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "prompt": {"type": "string", "description": "Input text prompt"},
                    "max_tokens": {"type": "integer", "description": "Max tokens to generate (default: 64)"},
                    "temperature": {"type": "number", "description": "Sampling temperature (default: from config)"}
                },
                "required": ["prompt"]
            }
        }),
        json!({
            "name": "spf_transformer_chat",
            "description": "Chat with FLINT (conversation tracking, multi-turn)",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "message": {"type": "string", "description": "User message"},
                    "conversation_id": {"type": "string", "description": "Conversation ID (optional, defaults to \"default\")"}
                },
                "required": ["message"]
            }
        }),
        json!({
            "name": "spf_transformer_train",
            "description": "Trigger FLINT manual training batch from accumulated gate signals",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "batch_size": {"type": "integer", "description": "Override batch size (optional)"}
                },
                "required": []
            }
        }),
        json!({
            "name": "spf_transformer_metrics",
            "description": "Get FLINT learning metrics (loss, accuracy, gate alignment, training step)",
            "inputSchema": {
                "type": "object",
                "properties": {},
                "required": []
            }
        }),
        // FL-8: Evil/Good training input tools
        json!({
            "name": "spf_flint_train_evil",
            "description": "Mark a tool call as evil/harmful. Creates negative training signal for FLINT.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "tool": {"type": "string", "description": "Tool name that was evil"},
                    "reason": {"type": "string", "description": "Why this call was evil (optional)"}
                },
                "required": ["tool"]
            }
        }),
        json!({
            "name": "spf_flint_train_good",
            "description": "Mark a tool call as good/safe. Creates positive training signal for FLINT.",
            "inputSchema": {
                "type": "object",
                "properties": {
                    "tool": {"type": "string", "description": "Tool name that was good"},
                    "reason": {"type": "string", "description": "Why this call was good (optional)"}
                },
                "required": ["tool"]
            }
        }),
    ]
}

// ============================================================================
// TOOL HANDLERS — called from mcp.rs match arms
// ============================================================================

/// Handle spf_transformer_status
/// Returns: model loaded status, param count, checkpoint info, role, config summary
pub fn handle_status(
    transformer: &Option<Arc<RwLock<TransformerState>>>,
    config: &TransformerConfig,
) -> Value {
    match transformer {
        None => {
            json!({"type": "text", "text": format!(
                "FLINT: NOT LOADED\nVersion: {}\nEnabled: {}\nConfig: d_model={}, n_heads={}, n_layers={}, vocab={}\nEstimated params: {}\nEstimated memory: {}MB",
                FLINT_VERSION,
                config.enabled,
                config.d_model, config.n_heads, config.n_layers, config.vocab_size,
                config.estimated_params(),
                config.estimated_memory_bytes() / 1_000_000
            )})
        }
        Some(state_lock) => {
            let state = state_lock.read().unwrap();
            let avg_loss = if state.batches_completed > 0 {
                state.total_loss / state.batches_completed as f64
            } else {
                0.0
            };
            json!({"type": "text", "text": format!(
                "FLINT: LOADED v{} ({})\n\
                 Role: {}\n\
                 Training step: {}\n\
                 Batches completed: {}\n\
                 Avg loss: {:.6}\n\
                 Gate alignment: {:.2}%\n\
                 Currently training: {}\n\
                 Last checkpoint: {}\n\
                 Config: d_model={}, n_heads={}, n_layers={}, vocab={}\n\
                 Online learning: {}\n\
                 EWC lambda: {}",
                FLINT_VERSION,
                if state.is_training { "training" } else { "idle" },
                state.role,
                state.training_step,
                state.batches_completed,
                avg_loss,
                state.gate_alignment * 100.0,
                state.is_training,
                if state.last_checkpoint.is_empty() { "none" } else { &state.last_checkpoint },
                config.d_model, config.n_heads, config.n_layers, config.vocab_size,
                config.online_learning,
                config.ewc_lambda,
            )})
        }
    }
}
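// Illustrative sketch: every handler in this module returns the same MCP text
// envelope described in the module header. A shared helper like this could
// replace the repeated json!({...}) literals; shown for clarity only, the
// handlers above and below inline the literal, and `text_response` is our
// name, not part of the module.
#[allow(dead_code)]
fn text_response(text: &str) -> Value {
    json!({"type": "text", "text": text})
}
// Usage: text_response("ERROR: ...") yields the same Value shape as the
// inline json!({"type": "text", "text": "ERROR: ..."}) calls.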
/// Handle spf_transformer_infer
/// Runs forward pass on prompt, generates tokens autoregressively.
pub fn handle_infer(
    transformer: &Option<Arc<RwLock<TransformerState>>>,
    args: &Value,
    config: &TransformerConfig,
    tokenizer_path: &str,
) -> Value {
    let state_lock = match transformer {
        Some(s) => s,
        None => return json!({"type": "text", "text": "ERROR: FLINT not loaded. Enable in transformer.json."}),
    };

    let prompt = match args.get("prompt").and_then(|v| v.as_str()) {
        Some(p) => p,
        None => return json!({"type": "text", "text": "ERROR: 'prompt' parameter required"}),
    };

    let max_tokens = args.get("max_tokens")
        .and_then(|v| v.as_u64())
        .unwrap_or(64) as usize;

    let temperature = args.get("temperature")
        .and_then(|v| v.as_f64())
        .unwrap_or(config.temperature);

    // Load tokenizer
    let tokenizer = match crate::tokenizer::Tokenizer::load(tokenizer_path) {
        Ok(t) => t,
        Err(e) => return json!({"type": "text", "text": format!("ERROR: Failed to load tokenizer: {}", e)}),
    };

    // Tokenize input
    let input_ids = tokenizer.encode(prompt);
    if input_ids.is_empty() {
        return json!({"type": "text", "text": "ERROR: Empty input after tokenization"});
    }

    // Read lock for inference (concurrent with other readers)
    let state = state_lock.read().unwrap();

    // Generate
    let output_ids = match state.model.generate(&input_ids, max_tokens, temperature as f32, 42) {
        Ok(ids) => ids,
        Err(e) => return json!({"type": "text", "text": format!("ERROR: Generation failed: {}", e)}),
    };

    // Decode
    let output_text = tokenizer.decode(&output_ids);

    json!({"type": "text", "text": format!(
        "[FLINT] Input: {} ({} tokens)\nOutput: {} ({} tokens)\nTemperature: {:.2}",
        prompt, input_ids.len(), output_text, output_ids.len(), temperature
    )})
}
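// Illustrative sketch of what the `temperature` parameter conventionally does
// inside generate(): logits are divided by T before the softmax, so T < 1
// sharpens the distribution and T > 1 flattens it. The real sampler lives in
// transformer.rs (Block E) and may differ; this standalone version only
// documents the knob that handle_infer exposes. `uniform_draw` stands in for
// the seeded RNG (the handlers above pass seed 42).
#[allow(dead_code)]
fn sample_with_temperature(logits: &[f32], temperature: f32, uniform_draw: f32) -> usize {
    if logits.is_empty() {
        return 0;
    }
    // Softmax over logits / T, numerically stabilized by subtracting the max.
    let t = temperature.max(1e-6);
    let max = logits.iter().cloned().fold(f32::NEG_INFINITY, f32::max);
    let exps: Vec<f32> = logits.iter().map(|&l| ((l - max) / t).exp()).collect();
    let sum: f32 = exps.iter().sum();
    // Walk the CDF with a uniform draw in [0, 1).
    let mut acc = 0.0f32;
    for (i, e) in exps.iter().enumerate() {
        acc += e / sum;
        if uniform_draw < acc {
            return i;
        }
    }
    logits.len() - 1
}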
/// Handle spf_transformer_chat
/// Multi-turn conversation with context window management.
pub fn handle_chat(
    transformer: &Option<Arc<RwLock<TransformerState>>>,
    args: &Value,
    config: &TransformerConfig,
    tokenizer_path: &str,
) -> Value {
    let state_lock = match transformer {
        Some(s) => s,
        None => return json!({"type": "text", "text": "ERROR: FLINT not loaded. Enable in transformer.json."}),
    };

    // Block PP: Handle chat toggle (enabled: true/false)
    if let Some(enabled) = args.get("enabled").and_then(|v| v.as_bool()) {
        let mut state = state_lock.write().unwrap();
        state.chat_enabled = enabled;
        return json!({"type": "text", "text": format!(
            "FLINT chat {}",
            if enabled { "ON — will respond to messages" } else { "OFF — silent mode" }
        )});
    }

    // Block PP: Check if chat is enabled before processing
    {
        let state = state_lock.read().unwrap();
        if !state.chat_enabled {
            return json!({"type": "text", "text":
                "FLINT chat is OFF. Use spf_transformer_chat with {\"enabled\": true} to activate."
            });
        }
    }

    let message = match args.get("message").and_then(|v| v.as_str()) {
        Some(m) => m,
        None => return json!({"type": "text", "text": "ERROR: 'message' parameter required"}),
    };

    let conversation_id = args.get("conversation_id")
        .and_then(|v| v.as_str())
        .unwrap_or("default");

    // Load tokenizer
    let tokenizer = match crate::tokenizer::Tokenizer::load(tokenizer_path) {
        Ok(t) => t,
        Err(e) => return json!({"type": "text", "text": format!("ERROR: Failed to load tokenizer: {}", e)}),
    };

    // SB-6: Prepend brain context before generation.
    // Pulls from episodic memory (past Q+A) and knowledge (moral framework, etc.)
    let episodic = crate::brain_local::brain_context(message, "flint_episodic", 1000);
    let knowledge = crate::brain_local::brain_context(message, "flint_knowledge", 500);
    let has_context = !episodic.trim().is_empty() || !knowledge.trim().is_empty();

    // Format as chat turn — context prepended when available
    let chat_prompt = if has_context {
        format!("{}\n{}\n {} ", knowledge, episodic, message)
    } else {
        format!(" {} ", message)
    };

    let input_ids = tokenizer.encode(&chat_prompt);

    // Read lock for inference
    let state = state_lock.read().unwrap();

    // Generate response (capped at reasonable chat length)
    let max_response = 128.min(config.max_seq_len.saturating_sub(input_ids.len()));
    let output_ids = match state.model.generate(&input_ids, max_response, config.temperature as f32, 42) {
        Ok(ids) => ids,
        Err(e) => return json!({"type": "text", "text": format!("ERROR: Chat generation failed: {}", e)}),
    };

    let response_text = tokenizer.decode(&output_ids);

    // SB-6: Index Q+A pair into flint_episodic for future context recall
    let qa_entry = format!("Q: {}\nA: {}", message, response_text);
    let _ = crate::brain_local::brain_store(&qa_entry, conversation_id, "flint_episodic");

    json!({"type": "text", "text": format!(
        "conversation: {}\nuser: {}\nFLINT: {}",
        conversation_id, message, response_text
    )})
}
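// Illustrative sketch of the SB-6 memory round trip used by handle_chat above:
// a Q+A pair is stored into the flint_episodic collection, and later turns
// retrieve it (budget-capped) as context. Signatures are inferred from the
// call sites above; `replay_turn` is our name, for illustration only.
#[allow(dead_code)]
fn replay_turn(question: &str, answer: &str, conversation_id: &str) -> String {
    // Store this turn so future prompts on the same topic can recall it.
    let qa_entry = format!("Q: {}\nA: {}", question, answer);
    let _ = crate::brain_local::brain_store(&qa_entry, conversation_id, "flint_episodic");
    // Retrieve up to ~1000 context units relevant to the question, as
    // handle_chat does before generation.
    crate::brain_local::brain_context(question, "flint_episodic", 1000)
}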
/// Handle spf_transformer_train
/// Triggers a manual training batch from accumulated gate signals.
pub fn handle_train(
    transformer: &Option<Arc<RwLock<TransformerState>>>,
    args: &Value,
    config: &TransformerConfig,
) -> Value {
    let state_lock = match transformer {
        Some(s) => s,
        None => return json!({"type": "text", "text": "ERROR: FLINT not loaded. Enable in transformer.json."}),
    };

    let batch_size = args.get("batch_size")
        .and_then(|v| v.as_u64())
        .unwrap_or(config.batch_size as u64) as usize;

    // Write lock for training (exclusive access)
    let mut state = state_lock.write().unwrap();

    if state.is_training {
        return json!({"type": "text", "text": "BUSY: FLINT training already in progress. Wait for completion."});
    }
    state.is_training = true;

    // Step 1: Read training signals from agent_state LMDB (FL-2).
    // FL-1 persists signals as tlog:{timestamp} keys in route_signals().
    // This eliminates the drain race: route_signals() drains the collector
    // for brain storage, then persists to LMDB. handle_train() reads from
    // LMDB instead of the (already-drained) collector.
    let db_path = spf_root().join("LIVE/LMDB5/LMDB5.DB");
    let mut signals: Vec<crate::gate_training::TrainingSignal> = Vec::new();
    let mut consumed_keys: Vec<String> = Vec::new();
    if let Ok(db) = AgentStateDb::open(&db_path) {
        if let Ok(keys) = db.list_state_keys() {
            let mut tlog_keys: Vec<String> = keys.into_iter()
                .filter(|k| k.starts_with("tlog:"))
                .collect();
            tlog_keys.sort();
            for key in &tlog_keys {
                if let Ok(Some(json)) = db.get_state(key) {
                    if let Ok(signal) = serde_json::from_str::<crate::gate_training::TrainingSignal>(&json) {
                        signals.push(signal);
                        consumed_keys.push(key.clone());
                    }
                }
            }
            // Delete consumed tlog entries after successful read
            for key in &consumed_keys {
                let _ = db.delete_state(key);
            }
        }
    }

    // Fallback: if LMDB yielded nothing, try collector drain (backward compat)
    if signals.is_empty() {
        signals = match &state.collector {
            Some(collector) => collector.drain_signals(),
            None => Vec::new(),
        };
    }

    if signals.is_empty() {
        state.is_training = false;
        return json!({"type": "text", "text": "No pending training signals. Gate decisions accumulate signals automatically."});
    }

    let signal_count = signals.len();

    // Step 2: Convert signals to training examples via Block M (learning.rs)
    let mut examples: Vec<crate::train::TrainingExample> = signals.iter().map(|signal| {
        crate::train::TrainingExample {
            input_tokens: crate::learning::signal_to_tokens(signal),
            target: crate::train::TrainingTarget::GateDecision(signal.label()),
            weight: signal.weight(),
        }
    }).collect();

    // Limit to requested batch_size
    examples.truncate(batch_size);

    // Step 3: Record previous loss for comparison
    let previous_avg_loss = if state.batches_completed > 0 {
        (state.total_loss / state.batches_completed as f64) as f32
    } else {
        f32::MAX
    };

    // Step 4: Clone weights for training (agentic observer — microsecond read)
    let cloned_weights: Vec<crate::tensor::Tensor> = state.model.weights().iter()
        .map(|w| crate::tensor::Tensor { data: w.data.clone(), shape: w.shape.clone() })
        .collect();
    // Release write lock intent — from here, train on clone only.
    // (In full agentic mode, the lock is released here. In MCP handler mode,
    // we still hold the write lock but training is on the clone, not the model.)

    // Step 5: Forward + loss on each example using the LIVE model (read-only)
    let mut total_loss = 0.0f32;
    let mut correct = 0u64;
    let mut processed = 0u64;
    let mut all_grads: Option<Vec<crate::tensor::Tensor>> = None;

    for example in &examples {
        let tokens: Vec<u32> = example.input_tokens.iter()
            .map(|&t| t as u32)
            .collect();
        let seq_len = tokens.len().min(config.max_seq_len);
        if seq_len == 0 {
            continue;
        }

        // Forward pass WITH CACHE for backward
        match state.model.forward_causal_with_cache(&tokens[..seq_len], 1, seq_len) {
            Ok((logits, cache)) => {
                // Gate decision score: sigmoid of last-position first logit
                let last_offset = (seq_len - 1) * config.vocab_size;
                let gate_logit = logits.data.get(last_offset).copied().unwrap_or(0.0);
                let prediction = 1.0 / (1.0 + (-gate_logit).exp());

                let label = match &example.target {
                    crate::train::TrainingTarget::GateDecision(l) => l.clamp(0.0, 1.0),
                    crate::train::TrainingTarget::NextToken(_) => continue,
                };

                let pred_t = crate::tensor::Tensor::from_data(vec![prediction], vec![1]).unwrap();
                if let Ok((loss, d_pred)) = crate::train::binary_ce_loss(
                    &pred_t, &[label], &[example.weight]
                ) {
                    total_loss += loss;
                    let predicted_allow = prediction > 0.5;
                    let actual_allow = label > 0.5;
                    if predicted_allow == actual_allow {
                        correct += 1;
                    }
                    processed += 1;

                    // Step 6: Composed backward pass.
                    // Build d_logits from d_pred (scatter to full logits shape)
                    let mut d_logits_data = vec![0.0f32; seq_len * config.vocab_size];
                    d_logits_data[last_offset] = d_pred.data[0];
                    if let Ok(d_logits) = crate::tensor::Tensor::from_data(
                        d_logits_data, vec![1, seq_len, config.vocab_size]
                    ) {
                        if let Ok(grads) = crate::train::model_backward_causal(
                            &d_logits, &cache, &state.model,
                        ) {
                            // Accumulate gradients across the batch
                            match &mut all_grads {
                                None => all_grads = Some(grads),
                                Some(acc) => {
                                    for (a, g) in acc.iter_mut().zip(grads.iter()) {
                                        for (av, gv) in a.data.iter_mut().zip(g.data.iter()) {
                                            *av += *gv;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
            Err(e) => {
                eprintln!("[FLINT-TRAIN] Forward pass error: {}", e);
                continue;
            }
        }
    }

    // Step 7: Apply optimizer on cloned weights if we have gradients
    let avg_loss = if processed > 0 { total_loss / processed as f32 } else { 0.0 };
    let alignment = if processed > 0 { correct as f64 / processed as f64 } else { 0.0 };
    let mut weights_adopted = false;
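    // EWC background (standard formulation; OnlineEWC's internals live in
    // learning.rs and are assumed to follow it): the penalty is
    //     L_ewc = (lambda / 2) * sum_i F_i * (theta_i - theta_star_i)^2
    // anchored at the last snapshot theta_star, with per-parameter gradient
    //     dL_ewc / dtheta_i = lambda * F_i * (theta_i - theta_star_i).
    // This is why penalty() below returns gradients that are simply added
    // onto the batch gradients, and why update_fisher() and snapshot_weights()
    // run after every adopted batch.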
    // FL-4: Load EWC state for penalty + Fisher update
    let ewc_path = spf_root().join("LIVE/MODELS/ewc_state.bin");
    let total_params: usize = cloned_weights.iter().map(|t| t.data.len()).sum();
    let mut ewc = if ewc_path.exists() {
        crate::learning::OnlineEWC::load_from_file(&ewc_path)
            .unwrap_or_else(|_| crate::learning::OnlineEWC::new(total_params, config.ewc_lambda as f32))
    } else {
        crate::learning::OnlineEWC::new(total_params, config.ewc_lambda as f32)
    };

    if let Some(grads) = all_grads {
        // Average gradients over processed examples
        let scale = 1.0 / processed.max(1) as f32;
        let mut grad_refs: Vec<crate::tensor::Tensor> = grads.iter().map(|g| {
            let scaled: Vec<f32> = g.data.iter().map(|&v| v * scale).collect();
            crate::tensor::Tensor { data: scaled, shape: g.shape.clone() }
        }).collect();

        // FL-4: Apply EWC penalty gradients to prevent catastrophic forgetting
        if ewc.active {
            let flat_weights: Vec<f32> = cloned_weights.iter()
                .flat_map(|t| t.data.iter().copied())
                .collect();
            let (_ewc_loss, ewc_grads) = ewc.penalty(&flat_weights);
            let mut offset = 0;
            for grad_tensor in grad_refs.iter_mut() {
                let n = grad_tensor.data.len();
                for i in 0..n {
                    if offset + i < ewc_grads.len() {
                        grad_tensor.data[i] += ewc_grads[offset + i];
                    }
                }
                offset += n;
            }
        }

        // Apply AdamW step on cloned weights
        let mut cloned = cloned_weights;
        if let Some(ref mut optimizer) = state.optimizer {
            let mut param_refs: Vec<&mut crate::tensor::Tensor> = cloned.iter_mut().collect();
            let grad_ref_slice: Vec<&crate::tensor::Tensor> = grad_refs.iter().collect();
            let lr = config.learning_rate as f32;
            optimizer.step(&mut param_refs, &grad_ref_slice, lr);

            // FL-3: Always adopt weights. Gate labels are ground truth —
            // loss gating blocked learning of difficult patterns by discarding
            // weight updates whenever loss temporarily increased.
            let mut model_weights = state.model.weights_mut();
            for (mw, cw) in model_weights.iter_mut().zip(cloned.iter()) {
                mw.data.copy_from_slice(&cw.data);
            }
            weights_adopted = true;
            eprintln!("[FLINT-TRAIN] Batch loss: {:.6} (prev: {:.6}). Weights adopted.",
                avg_loss, previous_avg_loss);

            // FL-4: Update Fisher matrix with this batch's gradients
            let flat_grads: Vec<f32> = grad_refs.iter()
                .flat_map(|t| t.data.iter().copied())
                .collect();
            ewc.update_fisher(&flat_grads);

            // Snapshot adopted weights as EWC reference point
            let flat_adopted: Vec<f32> = state.model.weights().iter()
                .flat_map(|t| t.data.iter().copied())
                .collect();
            ewc.snapshot_weights(&flat_adopted);
        }
    }

    // Step 8 (FL-4): Persist EWC state after training
    if let Err(e) = ewc.save_to_file(&ewc_path) {
        eprintln!("[FLINT-TRAIN] EWC save error: {}", e);
    } else if ewc.update_count > 0 {
        if let Ok(db) = AgentStateDb::open(&db_path) {
            let meta = format!(
                "{{\"update_count\":{},\"lambda\":{},\"params\":{},\"active\":{}}}",
                ewc.update_count, ewc.lambda, total_params, ewc.active
            );
            let _ = db.set_state("ewc:meta", &meta);
        }
    }

    // Step 9: Update state metrics
    state.training_step += 1;
    state.batches_completed += 1;
    state.total_loss += avg_loss as f64;
    state.gate_alignment = alignment;
    state.is_training = false;

    // CP-1: Persist checkpoint so RC-2 can restore weights + step on restart.
    // Only save when weights were actually adopted — skip no-op batches.
    if weights_adopted {
        let ckpt_dir = spf_root().join("LIVE/MODELS");
        let _ = std::fs::create_dir_all(&ckpt_dir);
        let ckpt_path = ckpt_dir.join(&config.writer_checkpoint);
        let weight_refs = state.model.weights();
        match crate::checkpoint::serialize_weights(&weight_refs, "flint_writer", state.training_step) {
            Ok(bytes) => match std::fs::write(&ckpt_path, &bytes) {
                Ok(()) => {
                    state.last_checkpoint = ckpt_path.to_string_lossy().to_string();
                    eprintln!("[FLINT-TRAIN] Checkpoint saved: step={}", state.training_step);
                }
                Err(e) => eprintln!("[FLINT-TRAIN] Checkpoint write failed: {}", e),
            },
            Err(e) => eprintln!("[FLINT-TRAIN] Checkpoint serialize failed: {}", e),
        }
    }

    json!({"type": "text", "text": format!(
        "FLINT training batch completed\n\
         Signals drained: {}\n\
         Examples processed: {}/{}\n\
         Average loss: {:.6}\n\
         Gate alignment: {:.1}%\n\
         Weights adopted: {}\n\
         Training step: {}\n\
         Total batches: {}",
        signal_count,
        processed, examples.len(),
        avg_loss,
        alignment * 100.0,
        weights_adopted,
        state.training_step,
        state.batches_completed,
    )})
}

/// Handle spf_transformer_metrics
/// Returns current learning metrics.
pub fn handle_metrics(
    transformer: &Option<Arc<RwLock<TransformerState>>>,
    config: &TransformerConfig,
) -> Value {
    match transformer {
        None => {
            json!({"type": "text", "text": "FLINT: NOT LOADED\nNo metrics available."})
        }
        Some(state_lock) => {
            let state = state_lock.read().unwrap();
            let avg_loss = if state.batches_completed > 0 {
                state.total_loss / state.batches_completed as f64
            } else {
                f64::NAN
            };
            json!({"type": "text", "text": format!(
                "=== FLINT Metrics ===\n\
                 Version: {}\n\
                 Role: {}\n\
                 Training step: {}\n\
                 Batches completed: {}\n\
                 Average loss: {:.6}\n\
                 Gate alignment: {:.2}%\n\
                 Learning rate: {:.2e}\n\
                 EWC lambda: {}\n\
                 Online learning: {}\n\
                 Replay buffer: {} slots\n\
                 Last checkpoint: {}",
                FLINT_VERSION,
                state.role,
                state.training_step,
                state.batches_completed,
                avg_loss,
                state.gate_alignment * 100.0,
                config.learning_rate,
                config.ewc_lambda,
                config.online_learning,
                config.replay_buffer_size,
                if state.last_checkpoint.is_empty() { "none" } else { &state.last_checkpoint },
            )})
        }
    }
}
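// Illustrative sketch of the tlog key scheme shared by handle_train() above
// and the FL-8 handlers below: each signal is stored under "tlog:" plus an
// RFC3339 UTC timestamp. Such timestamps sort lexicographically in
// (effectively) chronological order, so handle_train's tlog_keys.sort()
// replays signals oldest-first. `tlog_key_for` is our name, shown for clarity.
#[allow(dead_code)]
fn tlog_key_for(timestamp_rfc3339: &str) -> String {
    format!("tlog:{}", timestamp_rfc3339)
}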
// ============================================================================
// FL-8: Evil/Good Training Input Tools
// ============================================================================

/// Handle spf_flint_train_evil — user labels a tool call as evil/harmful.
/// Creates a negative training signal (false_positive=true, evil_score=1.0)
/// and stores it in LMDB for the next training batch.
pub fn handle_train_evil(args: &Value) -> Value {
    let tool = args.get("tool").and_then(|v| v.as_str()).unwrap_or("");
    let reason = args.get("reason").and_then(|v| v.as_str()).unwrap_or("user labeled evil");
    if tool.is_empty() {
        return json!({"type": "text", "text": "ERROR: 'tool' parameter required. Specify which tool call was evil."});
    }

    let timestamp = chrono::Utc::now().to_rfc3339();
    let signal = crate::gate_training::TrainingSignal {
        tool: tool.to_string(),
        source: "evil_label".to_string(),
        allowed: true,
        status: "evil".to_string(),
        duration_ms: 0,
        timestamp: timestamp.clone(),
        user_override: false,
        false_positive: true,
        recent_call_count: 0,
        preceding_tools: Vec::new(),
        evil_score: 1.0,
    };

    // Store in LMDB as tlog entry for handle_train() consumption
    let db_path = spf_root().join("LIVE/LMDB5/LMDB5.DB");
    if let Ok(db) = AgentStateDb::open(&db_path) {
        if let Ok(json) = serde_json::to_string(&signal) {
            let tlog_key = format!("tlog:{}", timestamp);
            let _ = db.set_state(&tlog_key, &json);
        }
    }

    json!({"type": "text", "text": format!(
        "FLINT evil label recorded for '{}': {}. Will be included in next training batch.",
        tool, reason
    )})
}
/// Handle spf_flint_train_good — user labels a tool call as good/safe.
/// Creates a positive training signal (allowed=true, evil_score=0.0)
/// and stores it in LMDB for the next training batch.
pub fn handle_train_good(args: &Value) -> Value {
    let tool = args.get("tool").and_then(|v| v.as_str()).unwrap_or("");
    let reason = args.get("reason").and_then(|v| v.as_str()).unwrap_or("user labeled good");
    if tool.is_empty() {
        return json!({"type": "text", "text": "ERROR: 'tool' parameter required. Specify which tool call was good."});
    }

    let timestamp = chrono::Utc::now().to_rfc3339();
    let signal = crate::gate_training::TrainingSignal {
        tool: tool.to_string(),
        source: "good_label".to_string(),
        allowed: true,
        status: "ok".to_string(),
        duration_ms: 0,
        timestamp: timestamp.clone(),
        user_override: false,
        false_positive: false,
        recent_call_count: 0,
        preceding_tools: Vec::new(),
        evil_score: 0.0,
    };

    let db_path = spf_root().join("LIVE/LMDB5/LMDB5.DB");
    if let Ok(db) = AgentStateDb::open(&db_path) {
        if let Ok(json) = serde_json::to_string(&signal) {
            let tlog_key = format!("tlog:{}", timestamp);
            let _ = db.set_state(&tlog_key, &json);
        }
    }

    json!({"type": "text", "text": format!(
        "FLINT good label recorded for '{}': {}. Will be included in next training batch.",
        tool, reason
    )})
}
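// Illustrative sketch: the MCP tools/call payload a client would send to label
// a call as evil. The tool name and argument schema match the FL-8 definition
// in tool_definitions() above; the request id and the "spf_exec" tool name are
// example values, not part of this module.
#[allow(dead_code)]
fn example_train_evil_request() -> Value {
    json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "tools/call",
        "params": {
            "name": "spf_flint_train_evil",
            "arguments": {
                "tool": "spf_exec",
                "reason": "attempted destructive command"
            }
        }
    })
}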
// ============================================================================
// GATE ALLOWLIST ADDITIONS (for gate.rs — document only)
// ============================================================================
//
// Add to gate.rs allowlist at line 197 (after "spf_mesh_call"):
//
//     "spf_mesh_status" | "spf_mesh_peers" | "spf_mesh_call" |
//     // Transformer tools — Block K
//     "spf_transformer_status" | "spf_transformer_infer" |
//     "spf_transformer_chat" | "spf_transformer_train" |
//     "spf_transformer_metrics"
//         => validate::ValidationResult::ok(),
//
// ============================================================================
// MCP.RS ADDITIONS (document only — minimal forwarding)
// ============================================================================
//
// In tool_definitions():
//     // ====== TRANSFORMER TOOLS (Block K) ======
//     // Tool defs are in transformer_tools::tool_definitions()
//     tools.extend(crate::transformer_tools::tool_definitions());
//
// In handle_tool_call() match:
//     "spf_transformer_status" => {
//         let gate_params = ToolParams { ..Default::default() };
//         let decision = gate::process("spf_transformer_status", &gate_params, config, session);
//         if !decision.allowed { /* blocked response */ }
//         crate::transformer_tools::handle_status(&state.transformer, &state.transformer_config)
//     }
//     "spf_transformer_infer"   => { /* gate check + */ handle_infer(&state.transformer, args, ...) }
//     "spf_transformer_chat"    => { /* gate check + */ handle_chat(&state.transformer, args, ...) }
//     "spf_transformer_train"   => { /* gate check + */ handle_train(&state.transformer, args, ...) }
//     "spf_transformer_metrics" => { /* gate check + */ handle_metrics(&state.transformer, ...) }

// ============================================================================
// TESTS
// ============================================================================

#[cfg(test)]
mod tests {
    use super::*;

    fn make_test_config() -> TransformerConfig {
        TransformerConfig {
            enabled: true,
            d_model: 64,
            n_heads: 4,
            n_layers: 2,
            vocab_size: 256,
            max_seq_len: 64,
            d_ff: 256,
            learning_rate: 1e-4,
            batch_size: 4,
            online_learning: true,
            ewc_lambda: 0.4,
            replay_buffer_size: 100,
            temperature: 0.7,
            writer_checkpoint: "test_writer.spfc".to_string(),
            researcher_checkpoint: "test_researcher.spfc".to_string(),
        }
    }

    #[test]
    fn test_flint_identity() {
        assert_eq!(FLINT_NAME, "FLINT");
        assert_eq!(FLINT_VERSION, "0.1.0");
    }

    #[test]
    fn test_tool_definitions_count() {
        let defs = tool_definitions();
        assert_eq!(defs.len(), 7, "Should have 5 transformer + 2 FL-8 tools");
    }

    #[test]
    fn test_tool_definitions_names() {
        let defs = tool_definitions();
        let names: Vec<&str> = defs.iter()
            .map(|d| d["name"].as_str().unwrap())
            .collect();
        assert!(names.contains(&"spf_transformer_status"));
        assert!(names.contains(&"spf_transformer_infer"));
        assert!(names.contains(&"spf_transformer_chat"));
        assert!(names.contains(&"spf_transformer_train"));
        assert!(names.contains(&"spf_transformer_metrics"));
    }

    #[test]
    fn test_tool_definitions_mention_flint() {
        let defs = tool_definitions();
        for def in &defs {
            let desc = def["description"].as_str().unwrap();
            assert!(desc.contains("FLINT"),
                "Tool {} description should mention FLINT", def["name"]);
        }
    }

    #[test]
    fn test_tool_definitions_have_schemas() {
        let defs = tool_definitions();
        for def in &defs {
            assert!(def.get("inputSchema").is_some(),
                "Tool {} missing inputSchema", def["name"]);
            assert_eq!(def["inputSchema"]["type"], "object",
                "Tool {} schema should be object", def["name"]);
        }
    }

    #[test]
    fn test_status_not_loaded() {
        let config = make_test_config();
        let result = handle_status(&None, &config);
        let text = result["text"].as_str().unwrap();
        assert!(text.contains("FLINT: NOT LOADED"));
        assert!(text.contains("d_model=64"));
    }

    #[test]
    fn test_status_loaded() {
        let config = make_test_config();
        let state = TransformerState::from_config(&config, "writer");
        let locked = Arc::new(RwLock::new(state));
        let result = handle_status(&Some(locked), &config);
        let text = result["text"].as_str().unwrap();
        assert!(text.contains("FLINT: LOADED"));
        assert!(text.contains("writer"));
        assert!(text.contains("Training step: 0"));
    }

    #[test]
    fn test_infer_not_loaded() {
        let config = make_test_config();
        let args = json!({"prompt": "hello"});
        let result = handle_infer(&None, &args, &config, "/nonexistent");
        let text = result["text"].as_str().unwrap();
        assert!(text.contains("FLINT not loaded"));
    }

    #[test]
    fn test_infer_missing_prompt() {
        let config = make_test_config();
        let state = TransformerState::from_config(&config, "writer");
        let locked = Arc::new(RwLock::new(state));
        let args = json!({});
        let result = handle_infer(&Some(locked), &args, &config, "/nonexistent");
        let text = result["text"].as_str().unwrap();
        assert!(text.contains("ERROR: 'prompt' parameter required"));
    }

    #[test]
    fn test_train_not_loaded() {
        let config = make_test_config();
        let args = json!({});
        let result = handle_train(&None, &args, &config);
        let text = result["text"].as_str().unwrap();
        assert!(text.contains("FLINT not loaded"));
    }
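    // Added test sketches: the FL-8 handlers reject a missing 'tool' parameter
    // on an early-return path (no LMDB write occurs), and handle_chat stays
    // silent while chat_enabled is false (Block PP default).
    #[test]
    fn test_train_evil_missing_tool() {
        let result = handle_train_evil(&json!({}));
        let text = result["text"].as_str().unwrap();
        assert!(text.contains("'tool' parameter required"));
    }

    #[test]
    fn test_train_good_missing_tool() {
        let result = handle_train_good(&json!({}));
        let text = result["text"].as_str().unwrap();
        assert!(text.contains("'tool' parameter required"));
    }

    #[test]
    fn test_chat_off_by_default() {
        let config = make_test_config();
        let state = TransformerState::from_config(&config, "writer");
        let locked = Arc::new(RwLock::new(state));
        let args = json!({"message": "hello"});
        // chat_enabled defaults to false, so the handler returns before
        // touching the tokenizer path.
        let result = handle_chat(&Some(locked), &args, &config, "/nonexistent");
        let text = result["text"].as_str().unwrap();
        assert!(text.contains("FLINT chat is OFF"));
    }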
    #[test]
    fn test_train_no_signals() {
        let config = make_test_config();
        let state = TransformerState::from_config(&config, "writer");
        let locked = Arc::new(RwLock::new(state));
        let args = json!({"batch_size": 8});
        let result = handle_train(&Some(locked.clone()), &args, &config);
        let text = result["text"].as_str().unwrap();
        // No collector connected and no tlog entries in LMDB → handle_train
        // reports that there is nothing to train on.
        assert!(text.contains("No pending training signals"));
    }

    #[test]
    fn test_metrics_not_loaded() {
        let config = make_test_config();
        let result = handle_metrics(&None, &config);
        let text = result["text"].as_str().unwrap();
        assert!(text.contains("FLINT: NOT LOADED"));
    }

    #[test]
    fn test_metrics_loaded() {
        let config = make_test_config();
        let mut state = TransformerState::from_config(&config, "researcher");
        state.batches_completed = 10;
        state.total_loss = 5.0;
        state.gate_alignment = 0.85;
        let locked = Arc::new(RwLock::new(state));
        let result = handle_metrics(&Some(locked), &config);
        let text = result["text"].as_str().unwrap();
        assert!(text.contains("FLINT Metrics"));
        assert!(text.contains("researcher"));
        assert!(text.contains("85.00%"));
        assert!(text.contains("Batches completed: 10"));
    }

    #[test]
    fn test_transformer_state_from_config() {
        let config = make_test_config();
        let state = TransformerState::from_config(&config, "writer");
        assert_eq!(state.role, "writer");
        assert_eq!(state.training_step, 0);
        assert!(!state.is_training);
        assert!(state.last_checkpoint.is_empty());
    }

    #[test]
    fn test_chat_not_loaded() {
        let config = make_test_config();
        let args = json!({"message": "hello"});
        let result = handle_chat(&None, &args, &config, "/nonexistent");
        let text = result["text"].as_str().unwrap();
        assert!(text.contains("FLINT not loaded"));
    }
}