// SPFsmartGATE / src/flint_memory.rs
// SPF Smart Gateway - FLINT Memory Router (MB-FR + MB-FT)
// Copyright 2026 Joseph Stone — All Rights Reserved
//
// MB-FR: Background thread — always running, no Claude session required.
// FLINT decides: WHAT gets stored, WHERE it goes, priority, TTL.
// MiniLM (via brain_store) does the embedding — not the decision.
// Watches knowledge/ drop folder for user-added files → auto-index.
//
// MB-FT: Tiered promotion loop — 24hr → 7day → pinned.
// FLINT scores Working memories by relevance * access_count.
// Top 20% promoted. >50% active in window → promote all + touches.
// Rest expired and data files cleaned up.
//
// Learning pipeline:
// PRE (startup): init_brain() + index_knowledge_docs() + index_spf_sources()
// → called in mcp.rs before this thread spawns — nothing to do here.
// DURING (this thread): drain signals → FLINT scores → route → write doc → brain_store()
// AFTER (this thread): tiered promotion every hour
//
// Architecture:
// Vectors (LMDB brain storage) = the MAP — points to data location
// Data files (LIVE/BRAIN/DOCS/) = SOURCE OF TRUTH — actual content
// FLINT = INTELLIGENCE LAYER — routing, scoring, promotion decisions
// MiniLM = EMBEDDING TOOL — converts text to vectors (never trained here)
use std::collections::HashMap;
use std::sync::{Arc, LazyLock, Mutex, RwLock};
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use crate::agent_state::{AgentStateDb, MemoryType};
use crate::gate_training::{GateTrainingCollector, TrainingSignal};
use crate::paths::spf_root;
use crate::transformer_tools::TransformerState;
use crate::http::ServerState;
use serde_json::Value;
// ============================================================================
// RE-READ TRACKER (R3-06)
//
// Tracks how many times each file has been read this session.
// First read: full passthrough. Subsequent reads: compressed to head/tail.
// Resets when SPF process restarts (static lifetime = session lifetime).
// ============================================================================
/// Per-file read counter for the current SPF process.
/// Keyed by the file path string passed to the Read tool; bumped in
/// flint_process_result() on every Read. Per the R3-06 note above, read-side
/// compression is disabled — the counts are retained only for build-anchor
/// hinting. Static lifetime means the map resets on process restart.
static READ_TRACKER: LazyLock<Mutex<HashMap<String, u32>>> =
    LazyLock::new(|| Mutex::new(HashMap::new()));
// ============================================================================
// INTERVALS
// ============================================================================
/// How often the router drains gate signals from the collector (main tick).
const SIGNAL_DRAIN_INTERVAL: Duration = Duration::from_secs(30);
/// How often the router checks the knowledge/ drop folder for new user files.
const DROP_CHECK_INTERVAL: Duration = Duration::from_secs(60);
/// How often the tiered promotion loop runs.
const TIER_LOOP_INTERVAL: Duration = Duration::from_secs(1800); // 30 min
/// Minimum relevance score to store a signal. Filters routine low-value calls.
/// Compared against score_signal() output in route_signals().
const MIN_RELEVANCE: f64 = 0.1;
/// Top N% of Working memories promoted to the next tier each cycle (MB-FT).
const PROMOTE_TOP_PERCENT: f64 = 0.20;
/// If more than this fraction of memories are active, promote all of them.
const PROMOTE_ALL_THRESHOLD: f64 = 0.50;
// ============================================================================
// PUBLIC ENTRY POINT — called from mcp.rs after build_listeners()
// ============================================================================
/// Spawn the FLINT Memory Router as a named background thread.
///
/// This is the DURING and AFTER learning pipeline:
/// DURING: drains GateTrainingCollector → FLINT scores → routes to brain
/// AFTER: tiered promotion loop (24hr Working → 7day Fact → Pinned)
///
/// PRE pipeline (startup brain indexing) is handled in mcp.rs before this call.
pub fn start_memory_router(
    collector: Arc<GateTrainingCollector>,
    transformer: Option<Arc<RwLock<TransformerState>>>,
    state: Arc<ServerState>,
) {
    let spawn_result = std::thread::Builder::new()
        .name("flint-memory-router".into())
        .spawn(move || run_router(collector, transformer, state));
    // A failed spawn means no learning pipeline at all — abort loudly.
    spawn_result.expect("[FLINT-MR] failed to spawn memory router thread");
}
// ============================================================================
// FL-1C — FLINT DISPATCH INTERCEPT
//
// Called from dispatch::call() — runs in the calling transport's thread.
// NOT part of the memory router background thread.
//
// flint_intercept(): pre-execution — queries brain for relevant context
// flint_process_result(): post-execution — stores high-value results
//
// Thread safety:
// Brain = LazyLock<Mutex<Option<Brain>>> — independent of session Mutex
// AgentStateDb = LMDB write transactions — independent of session Mutex
// Both functions run outside session lock — zero contention added.
// ============================================================================
/// Context passed from pre-execution intercept to post-execution processing.
/// Created by flint_intercept(), consumed by flint_process_result().
pub struct FlintContext {
    /// Brain search hits relevant to this tool call, merged with Build Anchor
    /// context for Write/Edit tools (empty if bypassed or nothing found)
    pub brain_hits: String,
    /// RFC3339 UTC timestamp of the intercept — used for result storage
    /// titles and the lang:{timestamp} training-pair keys
    pub timestamp: String,
    /// True for bypass tools (status/meta) — skip all post-processing
    pub skip_processing: bool,
    /// File path extracted from args — used by re-read detection (R3-06)
    pub target_file: Option<String>,
    /// FL-7: Brief "k=v, k=v" summary of tool args for language training pair storage
    pub args_summary: String,
}
/// Tools that produce no learning value — skip FLINT intercept entirely.
/// Status, metrics, and meta-inspection tools. Zero overhead bypass.
/// Checked by linear scan in flint_intercept(); the list is small enough
/// that a set structure would not pay for itself.
const BYPASS_TOOLS: &[&str] = &[
    "spf_status", "spf_calculate", "spf_session",
    "spf_config_paths", "spf_config_stats",
    "spf_tmp_list", "spf_tmp_stats", "spf_tmp_active", "spf_tmp_get",
    "spf_agent_stats", "spf_agent_session_info", "spf_agent_context",
    "spf_brain_status", "spf_brain_list", "spf_brain_list_docs",
    "spf_pool_status", "spf_mesh_status", "spf_mesh_peers",
    "spf_transformer_status", "spf_transformer_metrics",
    "spf_rag_status", "spf_rag_bandwidth_status", "spf_rag_list_feeds",
    "spf_rag_list_gathered", "spf_rag_pending_searches",
    "spf_chat_rooms", "spf_chat_history",
];
/// FLINT pre-execution intercept — called from dispatch::call() BEFORE session lock.
///
/// Looks up brain context relevant to the incoming tool call. Tools on the
/// bypass list return immediately with all post-processing disabled (zero
/// overhead for status/meta calls). FLINT does NOT gate — gate gates.
/// FLINT observes and enriches.
pub fn flint_intercept(tool: &str, args: &Value) -> FlintContext {
    let timestamp = chrono::Utc::now().to_rfc3339();

    // Zero-overhead exit for tools with no learning value.
    if BYPASS_TOOLS.contains(&tool) {
        return FlintContext {
            brain_hits: String::new(),
            timestamp,
            skip_processing: true,
            target_file: None,
            args_summary: String::new(),
        };
    }

    // Target file path, if any (feeds Read/Write/Edit tracking, R3-06).
    let target_file = args
        .get("file_path")
        .and_then(Value::as_str)
        .map(String::from);

    // Brain lookup keyed on tool name + most relevant argument.
    // Queries of <= 5 chars are too unspecific to be worth a search.
    let query = build_intercept_query(tool, args);
    let brain_hits = if query.len() <= 5 {
        String::new()
    } else {
        crate::brain_local::brain_search(&query, "default", 3)
    };

    // Anchor context for code-write tools (TR-B/TR-E): pre-load module
    // context from the spf_source collection (all src/*.rs indexed at boot).
    let is_code_write = matches!(tool, "Write" | "Edit" | "spf_write" | "spf_edit");
    let anchor_hits = match (is_code_write, &target_file) {
        (true, Some(fp)) => build_anchor_context(fp),
        _ => String::new(),
    };

    // Merge the two context sources; blank line between them when both exist.
    let combined_hits = if brain_hits.is_empty() {
        anchor_hits
    } else if anchor_hits.is_empty() {
        brain_hits
    } else {
        format!("{}\n\n{}", brain_hits, anchor_hits)
    };

    // FL-7: compact "k=v, k=v" rendering of the args object for language
    // training pairs; every value capped at 200 chars.
    let args_summary = args
        .as_object()
        .map(|obj| {
            let mut parts: Vec<String> = Vec::with_capacity(obj.len());
            for (k, v) in obj {
                let rendered: String = match v {
                    Value::String(s) => s.chars().take(200).collect(),
                    other => other.to_string().chars().take(200).collect(),
                };
                parts.push(format!("{}={}", k, rendered));
            }
            parts.join(", ")
        })
        .unwrap_or_default();

    FlintContext {
        brain_hits: combined_hits,
        timestamp,
        skip_processing: false,
        target_file,
        args_summary,
    }
}
/// FLINT post-execution processing — called from dispatch::call() AFTER session lock dropped.
///
/// 1. Scores result relevance → stores high-value as Working memories
/// 2. FL-2: Compresses large results for transport (original preserved in brain)
/// 3. FL-3: Injects brain context hits from pre-execution intercept
///
/// Returns a (possibly compressed / context-enriched) copy of `result`;
/// the input value is never mutated in place. All storage failures are
/// logged and swallowed — this path must never fail the tool call.
///
/// Thread safety: brain Mutex and AgentStateDb LMDB txns are independent of session lock.
pub fn flint_process_result(
    tool: &str,
    result: &Value,
    ctx: &FlintContext,
    agent_db: &Option<AgentStateDb>,
) -> Value {
    // Bypass tools were flagged by flint_intercept() — return untouched.
    if ctx.skip_processing {
        return result.clone();
    }
    // Gate-blocked calls never executed — there is no result to learn from.
    if result.get("_blocked").and_then(|v| v.as_bool()).unwrap_or(false) {
        return result.clone();
    }
    // Text payload of the result ("" when the field is absent or non-string).
    let text = result.get("text").and_then(|v| v.as_str()).unwrap_or("");
    // Relevance in [0, 1]: length factor × per-tool weight (see score_result).
    let relevance = score_result(tool, text);
    // Store high-value results (> 0.5) as Working-tier memories — these feed
    // the tiered promotion loop. Summary capped at 500 chars.
    if relevance > 0.5 {
        if let Some(ref db) = agent_db {
            let summary: String = text.chars().take(500).collect();
            let tags = vec![
                format!("tool:{}", tool),
                "source:flint_dispatch".to_string(),
                format!("relevance:{:.2}", relevance),
            ];
            if let Err(e) = db.create_memory(&summary, MemoryType::Working, tags, "flint_dispatch") {
                eprintln!("[FLINT] result memory store error: {}", e);
            }
        }
    }
    // FL-7: Store tool call context as language training pair in agent_state.
    // Key: lang:{timestamp} — consumed by future NextToken training passes.
    // Format: "tool|args_summary|result_summary" — compact context→response pair.
    if !ctx.args_summary.is_empty() && !text.is_empty() {
        if let Some(ref db) = agent_db {
            let result_summary: String = text.chars().take(500).collect();
            let pair = format!("{}|{}|{}", tool, ctx.args_summary, result_summary);
            let lang_key = format!("lang:{}", ctx.timestamp);
            // Best-effort: a failed write only loses one training pair.
            let _ = db.set_state(&lang_key, &pair);
        }
    }
    // ── FL-2: Preserve original in brain BEFORE compression ──────────────────
    // Results > 2000 chars stored as source of truth — retrievable via brain_recall.
    // Enables DIGEST compression without data loss. Ordering matters: the
    // store must happen before compress_result() below.
    if text.len() > 2000 {
        // First 10 bytes of the RFC3339 timestamp = the YYYY-MM-DD date.
        // Safe to byte-slice: RFC3339 output is pure ASCII; .min() guards
        // against a malformed short timestamp.
        let ts_date = &ctx.timestamp[..ctx.timestamp.len().min(10)];
        let title = format!("result:{}:{}", tool, ts_date);
        crate::brain_local::brain_store(text, &title, "flint_results");
    }
    // ── R3-06: Track reads for build anchor context (compression removed)
    // Re-read compression caused Read failures for non-Claude LLMs (Qwen, Gemini) —
    // they received truncated head/tail instead of file content and had no way to
    // recover it. Tracker retained for build anchor hinting; compression disabled.
    if tool == "Read" {
        if let Some(ref fp) = ctx.target_file {
            if let Ok(mut tracker) = READ_TRACKER.lock() {
                let count = tracker.entry(fp.clone()).or_insert(0);
                *count += 1;
            }
        }
    }
    // ── Compress result for transport ────────────────────────────────────────
    // No-op for passthrough tools and inside the 24h grace window (see compress_result).
    let compressed = compress_result(tool, text);
    // ── FL-3: Build output with compression + context injection ─────────────
    let mut output = result.clone();
    if let Some(obj) = output.as_object_mut() {
        // Only swap in the compressed text when it is actually smaller.
        if compressed.len() < text.len() {
            obj.insert("text".to_string(), Value::String(compressed));
        }
        // "_flint_context" is additive — MCP clients that don't know the
        // field simply ignore it.
        if let Some(ctx_val) = build_context_injection(&ctx.brain_hits) {
            obj.insert("_flint_context".to_string(), ctx_val);
        }
    }
    output
}
/// Build a search query from tool name and arguments for brain context lookup.
fn build_intercept_query(tool: &str, args: &Value) -> String {
// Extract the most relevant argument for context search
let key_arg = if let Some(fp) = args.get("file_path").and_then(|v| v.as_str()) {
fp.to_string()
} else if let Some(q) = args.get("query").and_then(|v| v.as_str()) {
q.to_string()
} else if let Some(p) = args.get("pattern").and_then(|v| v.as_str()) {
p.to_string()
} else if let Some(cmd) = args.get("command").and_then(|v| v.as_str()) {
cmd.chars().take(100).collect()
} else if let Some(text) = args.get("text").and_then(|v| v.as_str()) {
text.chars().take(100).collect()
} else if let Some(url) = args.get("url").and_then(|v| v.as_str()) {
url.to_string()
} else {
String::new()
};
if key_arg.is_empty() {
tool.to_string()
} else {
format!("{} {}", tool, key_arg)
}
}
/// Score the relevance of a tool result for memory storage, in [0, 1].
///
/// High-value: data retrieval results, search findings.
/// Low-value: empty results, trivial confirmations.
/// Score = logarithmic length factor × per-tool weight.
fn score_result(tool: &str, text: &str) -> f64 {
    let byte_len = text.len();
    // Below ~20 bytes there is nothing worth remembering.
    if byte_len < 20 {
        return 0.0;
    }
    // Per-tool value weight: retrieval > execution > mutation confirmations.
    let weight = match tool {
        "Grep" | "Glob" => 0.7,
        // Data retrieval — highest value results
        "Read" | "spf_brain_search" | "spf_brain_recall" | "spf_brain_context" => 0.8,
        // Execution — results carry learning data
        "Bash" | "spf_web_fetch" | "spf_web_search" | "spf_rag_smart_search" => 0.6,
        // Chat/mesh — context for episodic memory
        "spf_chat_send" | "spf_mesh_call" => 0.5,
        // Mutation confirmation — moderate value
        "Write" | "Edit" | "spf_brain_store" | "spf_brain_index" => 0.4,
        // Everything else sits at the midpoint.
        _ => 0.5,
    };
    // Logarithmic size factor — diminishing returns past ~1000 chars.
    let size_factor = ((byte_len as f64).ln() / 10.0).min(1.0);
    (size_factor * weight).min(1.0)
}
// ============================================================================
// FL-2 — RESULT COMPRESSION
//
// Three tiers based on result size:
//   FULL:    < 500 chars  → pass through unchanged
//   SUMMARY: 500-5000     → first 8 lines + last 3 lines + byte/line stats
//   DIGEST:  > 5000       → first 200 chars + last 100 chars + recall hint
//
// Original always preserved in brain (>2000 threshold) before compression.
// Passthrough tools (brain, voice, chat, mesh) skip compression entirely.
// ============================================================================
/// FL-2: Compress result text based on size tiers.
/// Passthrough tools return text unchanged — already compact or real-time.
fn compress_result(tool: &str, text: &str) -> String {
    // File reads must flow through whole — truncation breaks non-Claude LLMs
    // (Qwen, Gemini) that cannot recover content via spf_brain_recall.
    if tool == "Read" {
        return text.to_string();
    }
    // Passthrough set: brain output is already compact; voice/chat/mesh are
    // real-time and must not be rewritten.
    const PASSTHROUGH: &[&str] = &[
        "spf_brain_search", "spf_brain_recall", "spf_brain_context",
        "spf_brain_store", "spf_brain_index", "spf_brain_list",
        "spf_brain_get_doc", "spf_brain_list_docs",
        "spf_voice_mode", "spf_voice_call", "spf_voice_team",
        "spf_chat_send", "spf_chat_history", "spf_chat_rooms",
        "spf_mesh_call", "spf_mesh_status", "spf_mesh_peers",
    ];
    if PASSTHROUGH.contains(&tool) {
        return text.to_string();
    }
    // Copyright 2026 Joseph Stone — All Rights Reserved
    // 24-hour grace window after process start — raw data flows uncompressed.
    // GATE_START is initialized on first call, so the window is per-process.
    static GATE_START: std::sync::LazyLock<std::time::Instant> =
        std::sync::LazyLock::new(std::time::Instant::now);
    if GATE_START.elapsed() < std::time::Duration::from_secs(86400) {
        return text.to_string();
    }
    let byte_len = text.len();
    // FULL tier: small results pass through unchanged.
    if byte_len < 500 {
        return text.to_string();
    }
    if byte_len <= 5000 {
        // SUMMARY tier — only worthwhile for line-heavy output.
        let all_lines: Vec<&str> = text.lines().collect();
        let n = all_lines.len();
        if n <= 15 {
            // Dense content (few long lines) — nothing useful to elide.
            return text.to_string();
        }
        // First 8 lines (context/header) + last 3 lines (summary/tail).
        let head = all_lines[..8].join("\n");
        let tail = all_lines[n - 3..].join("\n");
        return format!(
            "{}\n\n[FLINT: {} lines, {} bytes — showing head/tail]\n\n{}",
            head, n, byte_len, tail
        );
    }
    // DIGEST tier: first 200 chars + last 100 + stats + recall hint.
    let char_total = text.chars().count();
    let head: String = text.chars().take(200).collect();
    let tail: String = text.chars().skip(char_total.saturating_sub(100)).collect();
    let line_total = text.lines().count();
    format!(
        "{}\n\n[FLINT DIGEST: {} bytes, {} lines — original stored in brain collection=\"flint_results\".\n Use spf_brain_recall(collection=\"flint_results\") to retrieve full content.]\n\n{}",
        head, byte_len, line_total, tail
    )
}
// ============================================================================
// FL-3 — CONTEXT INJECTION
//
// Attaches brain context from pre-execution intercept to tool response.
// Claude sees result + relevant memories in one response — saves 1-3 separate
// brain_search calls per complex task.
//
// Context budget: max ~2000 chars (~500 tokens).
// "_flint_context" field is ignored by MCP protocol — additive, no breakage.
// ============================================================================
/// FL-3: Build context injection value from brain search hits.
/// Returns None for empty, error, or no-result responses so callers never
/// attach a useless "_flint_context" field.
fn build_context_injection(brain_hits: &str) -> Option<Value> {
    // Sentinel prefixes the brain layer uses for misses and failures.
    const MISS_PREFIXES: &[&str] =
        &["No results", "Brain not initialized", "Brain search error"];
    let useless = brain_hits.is_empty()
        || MISS_PREFIXES.iter().any(|p| brain_hits.starts_with(p));
    if useless {
        return None;
    }
    // Budget: max ~2000 chars (~500 tokens) to avoid bloating responses.
    let budgeted: String = brain_hits.chars().take(2000).collect();
    Some(serde_json::json!({
        "hits": budgeted,
        "source": "flint_intercept"
    }))
}
// ============================================================================
// BUILD ANCHOR — Brain-assisted source context for code write tools
//
// When the AI writes or edits a file, FLINT pre-loads relevant source context
// from the brain's spf_source collection (indexed at boot by index_spf_sources).
// This replaces manual file reads for Build Anchor Check — same data, fewer tokens.
//
// Called from flint_intercept() for Write/Edit tools.
// Returns empty string on failure — non-blocking, additive only.
// ============================================================================
/// Build Anchor context from brain — returns src file summaries
/// for the target file and its dependencies.
///
/// Queries brain's "spf_source" collection which contains all src/*.rs
/// files indexed at startup by brain_local::index_spf_sources().
/// Returns an empty string on any brain miss/error — non-blocking, additive.
pub fn build_anchor_context(target_file: &str) -> String {
    /// True when a brain response carries no usable context (miss/error sentinel).
    fn is_miss(s: &str) -> bool {
        s.starts_with("No results")
            || s.starts_with("Brain not initialized")
            || s.starts_with("Brain search error")
    }
    // Extract filename from full path for brain query
    let filename = target_file
        .rsplit('/')
        .next()
        .unwrap_or(target_file);
    // Query brain for target file's module content
    let file_ctx = crate::brain_local::brain_search(
        &format!("file:{} module functions structs", filename),
        "spf_source",
        3,
    );
    if is_miss(&file_ctx) {
        return String::new();
    }
    // Query brain for connected types/imports referenced by this file
    let dep_ctx = crate::brain_local::brain_search(
        &format!("imports dependencies types used by {}", filename),
        "spf_source",
        3,
    );
    let mut out = format!("BUILD ANCHOR (brain — spf_source):\n{}", file_ctx);
    if !is_miss(&dep_ctx) {
        out.push_str(&format!("\n\nCONNECTED TYPES:\n{}", dep_ctx));
    }
    // Budget: cap at ~3000 chars (~750 tokens) to avoid bloating intercept.
    // BUGFIX: String::truncate panics when the cut lands inside a multi-byte
    // UTF-8 sequence (brain output contains em-dashes and other non-ASCII) —
    // back off to the nearest char boundary before truncating.
    if out.len() > 3000 {
        let mut cut = 3000;
        while !out.is_char_boundary(cut) {
            cut -= 1;
        }
        out.truncate(cut);
        out.push_str("\n[FLINT: anchor context truncated at 3000 chars]");
    }
    out
}
// ============================================================================
// ROUTER LOOP
// ============================================================================
/// Main body of the flint-memory-router thread. Never returns.
///
/// Startup: restores the FL-5 confusion matrix and FL-6 FP-locked signals
/// from LMDB5 so gate-training state survives process restarts.
/// Steady state, every SIGNAL_DRAIN_INTERVAL tick:
///   - DURING: route_signals() drains gate signals into the brain
///   - DURING: check_drop_folder() every DROP_CHECK_INTERVAL
///   - AFTER:  run_tiered_promotion() + metrics every TIER_LOOP_INTERVAL
///   - FL-10:  auto-train when 1h has elapsed OR >= 16 tlog:* keys exist
fn run_router(
    collector: Arc<GateTrainingCollector>,
    transformer: Option<Arc<RwLock<TransformerState>>>,
    state: Arc<ServerState>,
) {
    // FL-5 + FL-6: Restore confusion matrix and FP-locked signals from LMDB
    let cm_db_path = spf_root().join("LIVE/LMDB5/LMDB5.DB");
    if let Ok(db) = AgentStateDb::open(&cm_db_path) {
        // FL-5: Confusion matrix
        if let Ok(Some(saved)) = db.get_state_typed::<crate::gate_training::ConfusionMatrix>("training:confusion_matrix") {
            let total = saved.total();
            collector.restore_matrix(saved);
            eprintln!("[FLINT-MR] Restored confusion matrix: {} decisions", total);
        }
        // FL-6: FP-locked signals
        if let Ok(Some(fp_signals)) = db.get_state_typed::<Vec<crate::gate_training::TrainingSignal>>("training:fp_locked") {
            let count = fp_signals.len();
            collector.restore_fp_locked(fp_signals);
            eprintln!("[FLINT-MR] Restored {} FP-locked signals", count);
        }
    }
    let mut last_drop_check = Instant::now();
    let mut last_tier_run = Instant::now();
    let mut last_train = Instant::now();
    loop {
        // Tick cadence — every task below is gated on coarser elapsed checks.
        std::thread::sleep(SIGNAL_DRAIN_INTERVAL);
        // DURING: drain gate signals and route to brain
        route_signals(&collector);
        // DURING: watch knowledge/ drop folder for new user files
        if last_drop_check.elapsed() >= DROP_CHECK_INTERVAL {
            check_drop_folder();
            last_drop_check = Instant::now();
        }
        // AFTER: tiered memory promotion + metrics write
        if last_tier_run.elapsed() >= TIER_LOOP_INTERVAL {
            run_tiered_promotion();
            write_flint_metrics(&state);
            last_tier_run = Instant::now();
        }
        // FL-10: Auto-train — 1hr interval OR 16+ tlog:* signals in LMDB.
        // LMDB-based count captures ALL signal sources: local gate decisions,
        // mesh brain_sync, pipeline workers, evil/good user labels.
        // Survives restarts. handle_train() deletes consumed keys.
        if transformer.is_some() {
            // NOTE(review): LMDB5 is reopened and the whole key space scanned
            // on every 30s tick while a transformer is configured — cheap for
            // now, but a caching candidate if the key space grows.
            let tlog_count = {
                let db_path = spf_root().join("LIVE/LMDB5/LMDB5.DB");
                let mut count = 0usize;
                if let Ok(db) = AgentStateDb::open(&db_path) {
                    if let Ok(keys) = db.list_state_keys() {
                        count = keys.iter().filter(|k| k.starts_with("tlog:")).count();
                    }
                }
                count
            };
            if last_train.elapsed() >= Duration::from_secs(3600) || tlog_count >= 16 {
                let config = crate::config::TransformerConfig::load(
                    &spf_root().join("LIVE/CONFIG/transformer.json"),
                )
                .unwrap_or_default();
                let args = serde_json::json!({"batch_size": config.batch_size});
                let result = crate::transformer_tools::handle_train(&transformer, &args, &config);
                eprintln!("[FLINT-MR] Auto-train (tlog: {}): {}", tlog_count, result);
                last_train = Instant::now();
            }
        }
    }
}
// ============================================================================
// R3-10 (MB-FM) — FLINT Auto-Metrics
//
// Writes FLINT_METRICS.txt via dispatch::call() with Source::Transformer.
// Full gate pipeline: validate_write → content inspect → execute.
// Runs on hourly cycle (same as tiered promotion). Best-effort — if lock
// contended or gate blocks, logs and continues. Never stalls the router.
// ============================================================================
fn write_flint_metrics(state: &Arc<ServerState>) {
let agent_db = match state.agent_db.as_ref() {
Some(db) => db,
None => return,
};
let working = agent_db.get_by_type(MemoryType::Working).map(|v| v.len()).unwrap_or(0);
let fact = agent_db.get_by_type(MemoryType::Fact).map(|v| v.len()).unwrap_or(0);
let pinned = agent_db.get_by_type(MemoryType::Pinned).map(|v| v.len()).unwrap_or(0);
let (total_mem, sessions, state_keys, tags) = agent_db.db_stats().unwrap_or((0, 0, 0, 0));
let timestamp = chrono::Utc::now().to_rfc3339();
let content = format!(
"# FLINT METRICS — Auto-generated by FLINT router\n\
# Updated: {}\n\n\
Working memories: {}\n\
Fact memories: {}\n\
Pinned memories: {}\n\
Total memories: {}\n\
Sessions: {}\n\
State keys: {}\n\
Tags: {}\n",
timestamp, working, fact, pinned, total_mem, sessions, state_keys, tags
);
let path = spf_root()
.join("LIVE/PROJECTS/PROJECTS/FLINT_METRICS.txt")
.to_string_lossy()
.to_string();
let args = serde_json::json!({
"file_path": path,
"content": content,
});
let resp = crate::dispatch::call(
state,
crate::dispatch::Source::Transformer {
role: "flint-router".into(),
model_id: "memory".into(),
},
"Write",
&args,
);
if resp.status != "ok" {
eprintln!("[FLINT-MR] metrics write: {}", resp.result);
}
}
// ============================================================================
// DURING — Gate Signal Routing
//
// FLINT decides: what gets stored, which collection, what relevance score.
// MiniLM (called inside brain_store) handles the actual embedding — it is
// a utility, not a decision maker. FLINT routes. MiniLM indexes.
// ============================================================================
/// Drain pending gate signals and route each into the brain.
///
/// FLINT decides relevance (score_signal), destination collection
/// (route_collection), and the stored text (format_signal_text). MiniLM —
/// inside brain_store — only embeds; it makes no decisions.
///
/// Per-signal side effects: a doc file in LIVE/BRAIN/DOCS/, a vector in the
/// chosen collection, and a tlog:* training entry in LMDB. Returns the number
/// of signals drained this cycle.
fn route_signals(collector: &Arc<GateTrainingCollector>) -> usize {
    let signals = collector.drain_signals();
    if signals.is_empty() {
        return 0;
    }
    let count = signals.len();
    // Open agent_state LMDB once per drain cycle — feeds tiered promotion
    let db_path = spf_root().join("LIVE/LMDB5/LMDB5.DB");
    let agent_db = AgentStateDb::open(&db_path).ok();
    for signal in &signals {
        // FLINT scoring — how relevant is this signal to FLINT's learning
        let relevance = score_signal(signal);
        // Noise filter: skip low-value routine allowed calls.
        // False positives and non-trivial evil scores are never dropped.
        if relevance < MIN_RELEVANCE && !signal.false_positive && signal.evil_score < 0.1 {
            continue;
        }
        // FLINT routing decision — which collection does this belong in
        let collection = route_collection(signal);
        // Format signal as human-readable text (this is the DATA stored on disk)
        let text = format_signal_text(signal, relevance);
        // Date portion of the RFC3339 timestamp (first 10 ASCII bytes,
        // YYYY-MM-DD). BUGFIX: guarded with .min() — same as
        // flint_process_result() — so a malformed short timestamp cannot
        // panic the router thread with an out-of-range slice.
        let ts_date = &signal.timestamp[..signal.timestamp.len().min(10)];
        let title = format!("gate:{}:{}", signal.tool, ts_date);
        // Write data file to LIVE/BRAIN/DOCS/ — source of truth
        write_brain_doc(&text, &title);
        // Index into brain: MiniLM embeds text → vector stored in brain LMDB.
        // Vector points back to the data file location.
        let store_result = crate::brain_local::brain_store(&text, &title, collection);
        // LM-2: Track consecutive store failures — trigger memory expiry to free space
        {
            // Block-scoped static: one counter shared across all calls/threads.
            static STORE_FAILURES: std::sync::atomic::AtomicU32 = std::sync::atomic::AtomicU32::new(0);
            if store_result.contains("error") || store_result.contains("not initialized") {
                let failures = STORE_FAILURES.fetch_add(1, std::sync::atomic::Ordering::Relaxed) + 1;
                if failures >= 3 {
                    eprintln!("[FLINT-MR] brain_store failed {} consecutive times — expiring old memories", failures);
                    if let Some(ref db) = agent_db {
                        match db.expire_memories() {
                            Ok(n) => eprintln!("[FLINT-MR] expired {} old memories", n),
                            Err(e) => eprintln!("[FLINT-MR] expire failed: {}", e),
                        }
                    }
                    STORE_FAILURES.store(0, std::sync::atomic::Ordering::Relaxed);
                }
            } else {
                STORE_FAILURES.store(0, std::sync::atomic::Ordering::Relaxed);
            }
        }
        // ── TRAINING SIGNAL: tlog:* entry for FLINT handle_train() consumption ──────
        // Key: tlog:{timestamp} — handle_train() reads tlog:* entries instead of
        // draining the collector (which route_signals already drained).
        // Eliminates the drain race that starved training of signals.
        // NOTE: Working memories now come ONLY from flint_process_result() above.
        // route_signals feeds brain vectors + tlogs for training, NOT Working memories.
        if let Some(ref db) = agent_db {
            if let Ok(json) = serde_json::to_string(signal) {
                let tlog_key = format!("tlog:{}", signal.timestamp);
                if let Err(e) = db.set_state(&tlog_key, &json) {
                    eprintln!("[FLINT-MR] tlog persist error: {}", e);
                }
            }
        }
    }
    // FL-5 + FL-6: Persist confusion matrix and FP-locked signals to LMDB.
    // Survives restarts — restored in run_router() at startup.
    let cm_db_path = spf_root().join("LIVE/LMDB5/LMDB5.DB");
    if let Ok(cm_db) = AgentStateDb::open(&cm_db_path) {
        // FL-5: Confusion matrix
        let matrix = collector.confusion_matrix();
        if matrix.total() > 0 {
            if let Err(e) = cm_db.set_state_typed("training:confusion_matrix", &matrix) {
                eprintln!("[FLINT-MR] confusion matrix persist error: {}", e);
            }
        }
        // FL-6: FP-locked signals — security failures that must survive restarts
        let fp_locked = collector.get_fp_locked();
        if !fp_locked.is_empty() {
            if let Err(e) = cm_db.set_state_typed("training:fp_locked", &fp_locked) {
                eprintln!("[FLINT-MR] FP-locked persist error: {}", e);
            }
        }
    }
    count
}
/// FLINT scoring — relevance of this signal for learning, in [0, 1].
///
/// Evil and false-positive signals are maximum value — FLINT must learn these
/// patterns fast. Blocked calls are high value (gate decision data). Routine
/// allowed calls get a low baseline and are filtered by MIN_RELEVANCE.
fn score_signal(signal: &TrainingSignal) -> f64 {
    // Maximum priority: confirmed evil or a gate mistake (false positive).
    if signal.false_positive || signal.evil_score > 0.4 {
        return 1.0;
    }
    let weight = signal.weight() as f64;
    let label_abs = signal.label().abs() as f64;
    let evil = signal.evil_score as f64;
    let repeat = (signal.recent_call_count as f64 * 0.1).min(1.0);
    let base = weight * label_abs;
    if !signal.allowed {
        // Blocked call — gate decision data.
        (base * 0.8 + evil + repeat).min(1.0)
    } else if signal.user_override {
        // User override — behavioral pattern worth keeping.
        (base * 0.6 + repeat).min(1.0)
    } else {
        // Routine allowed traffic — low baseline.
        (base * 0.3 + evil + repeat * 0.5).min(1.0)
    }
}
/// FLINT routing — which brain collection does this signal belong in.
///
/// flint_training: gate decision data (threats, blocks, corrections, FPs,
///                 and regular allowed traffic for gate alignment)
/// flint_episodic: behavioral patterns (user overrides)
fn route_collection(signal: &TrainingSignal) -> &'static str {
    // Threats, gate mistakes, and blocks always land in training data.
    let threat_or_block =
        !signal.allowed || signal.false_positive || signal.evil_score > 0.4;
    if !threat_or_block && signal.user_override {
        "flint_episodic"
    } else {
        "flint_training"
    }
}
/// Format a TrainingSignal as readable text for storage and embedding.
///
/// This single-line rendering is both the on-disk doc content written by
/// write_brain_doc() and the text MiniLM embeds — keep the field layout
/// stable, or newly embedded vectors drift from older ones.
fn format_signal_text(signal: &TrainingSignal, relevance: f64) -> String {
    format!(
        "GATE SIGNAL | tool={} source={} allowed={} label={:.1} weight={:.1} evil={:.2} relevance={:.2} fp={} override={} | context=[{}] | ts={}",
        signal.tool,
        signal.source,
        signal.allowed,
        signal.label(),
        signal.weight(),
        signal.evil_score,
        relevance,
        signal.false_positive,
        signal.user_override,
        signal.preceding_tools.join(","),
        signal.timestamp,
    )
}
/// Write data file to LIVE/BRAIN/DOCS/ — this is the source of truth.
/// Vectors in the brain LMDB point back to these files.
///
/// Filename = 8 hex digits of millis-since-epoch + 8 hex digits of a stable
/// multiply-by-31 hash of the title, so the same title written in the same
/// millisecond collapses to one file. All failures are logged and swallowed.
fn write_brain_doc(text: &str, title: &str) {
    let docs_dir = spf_root().join("LIVE/BRAIN/DOCS");
    // create_dir_all succeeds when the directory already exists, so the
    // previous exists() pre-check was redundant (and racy under TOCTOU).
    if let Err(e) = std::fs::create_dir_all(&docs_dir) {
        eprintln!("[FLINT-MR] failed to create BRAIN/DOCS: {}", e);
        return;
    }
    let ts = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_millis())
        .unwrap_or(0);
    // Stable hash from title for reproducible filenames
    let hash: u64 = title
        .bytes()
        .fold(0u64, |acc, b| acc.wrapping_mul(31).wrapping_add(b as u64));
    let filename = format!("{:08x}{:08x}.txt", (ts & 0xFFFFFFFF) as u32, hash as u32);
    let path = docs_dir.join(&filename);
    if let Err(e) = std::fs::write(&path, text) {
        eprintln!("[FLINT-MR] failed to write brain doc {}: {}", filename, e);
    }
}
// ============================================================================
// DURING — Knowledge Drop Folder Watcher
//
// User drops .md / .txt / .rs / .json files into:
// LIVE/TMP/stoneshell-brain/knowledge/
// FLINT auto-indexes them into the "flint_knowledge" brain collection.
// A .indexed marker file tracks which files have already been processed.
// ============================================================================
/// Scan the knowledge/ drop folder and index any new user-dropped files.
///
/// The .indexed marker file (one filename per line, in the same directory)
/// records what has already been processed; it is rewritten with the union
/// after each pass so files are never re-indexed.
fn check_drop_folder() {
    let drop_dir = spf_root().join("LIVE/TMP/stoneshell-brain/knowledge");
    if !drop_dir.exists() {
        return;
    }
    let marker = drop_dir.join(".indexed");
    // Previously processed filenames — a missing marker reads as empty.
    let already: std::collections::HashSet<String> = std::fs::read_to_string(&marker)
        .unwrap_or_default()
        .lines()
        .filter(|l| !l.is_empty())
        .map(str::to_owned)
        .collect();
    let Ok(entries) = std::fs::read_dir(&drop_dir) else {
        return;
    };
    let mut fresh: Vec<String> = Vec::new();
    for entry in entries.flatten() {
        let path = entry.path();
        if !path.is_file() {
            continue;
        }
        let name = path
            .file_name()
            .and_then(|n| n.to_str())
            .unwrap_or("")
            .to_string();
        // Hidden files (including the marker itself) are never indexed.
        if name.starts_with('.') {
            continue;
        }
        // Only known text formats are indexable.
        let ext = path.extension().and_then(|e| e.to_str()).unwrap_or("");
        if !matches!(ext, "md" | "txt" | "rs" | "json") {
            continue;
        }
        if already.contains(&name) {
            continue;
        }
        // MiniLM (inside brain_index_path) embeds and stores the document.
        let result =
            crate::brain_local::brain_index_path(&path.to_string_lossy(), "flint_knowledge");
        eprintln!("[FLINT-MR] indexed drop file '{}': {}", name, result);
        fresh.push(name);
    }
    if !fresh.is_empty() {
        // Persist the union so the next pass skips everything seen so far.
        let mut all: Vec<String> = already.into_iter().collect();
        all.extend(fresh);
        let _ = std::fs::write(&marker, all.join("\n") + "\n");
    }
}
// ============================================================================
// AFTER — Tiered Promotion Loop (MB-FT)
//
// FLINT scores all Working memories by relevance * access frequency.
// Top 20% → promoted to next tier (longer TTL).
// Special rule: if >50% are active → promote all of them.
// Rest expire naturally. Data files for expired memories are not deleted
// (brain vectors become orphaned — acceptable, future cleanup task).
//
// Tiers:
// Working (24h TTL) → Fact (7day TTL)
// Fact (7day TTL) → Pinned (never expires)
// ============================================================================
/// AFTER — tiered promotion pass (MB-FT).
///
/// Opens LMDB5, expires TTL-dead memories, then promotes the most valuable
/// survivors up one tier:
///   Working (24h TTL) → Fact (7day TTL) → Pinned (never expires)
///
/// On DB-open failure the pass is skipped (logged, not fatal) so the
/// background thread keeps running.
fn run_tiered_promotion() {
    let db_path = spf_root().join("LIVE/LMDB5/LMDB5.DB");
    let db = match AgentStateDb::open(&db_path) {
        Ok(d) => d,
        Err(e) => {
            eprintln!("[FLINT-MR] tiered promotion: cannot open LMDB5: {}", e);
            return;
        }
    };
    // Expire TTL-expired memories first — clean up before scoring.
    match db.expire_memories() {
        Ok(n) => {
            if n > 0 {
                eprintln!("[FLINT-MR] expired {} stale memories", n);
            }
        }
        Err(e) => eprintln!("[FLINT-MR] expire_memories error: {}", e),
    }
    // Labels now match the documented tier TTLs; the previous "1h→12h" /
    // "12h→48h" strings contradicted the 24h/7day scheme and produced
    // misleading `promoted:` tags and log lines.
    // FLINT: Working (24h) → Fact (7day)
    promote_tier(&db, MemoryType::Working, MemoryType::Fact, "24h→7d");
    // FLINT: Fact (7day) → Pinned (never)
    promote_tier(&db, MemoryType::Fact, MemoryType::Pinned, "7d→pinned");
}
/// Promote the top N% of memories from one tier to the next.
/// FLINT scores by relevance * (1 + access_count * 0.1).
/// High access frequency signals a memory that FLINT keeps returning to —
/// these are the most valuable to preserve long-term.
fn promote_tier(db: &AgentStateDb, from: MemoryType, to: MemoryType, label: &str) {
    let entries = match db.get_by_type(from) {
        Ok(m) => m,
        Err(e) => {
            eprintln!("[FLINT-MR] get_by_type({:?}) error: {}", from, e);
            return;
        }
    };
    if entries.is_empty() {
        return;
    }
    let total = entries.len();
    // FLINT score: relevance weighted by how often the memory was re-accessed.
    let mut ranked: Vec<(f64, &crate::agent_state::MemoryEntry)> = entries
        .iter()
        .map(|m| (m.relevance * (1.0 + m.access_count as f64 * 0.1), m))
        .collect();
    // Highest score first; NaN scores are treated as equal (no panic).
    ranked.sort_by(|lhs, rhs| {
        rhs.0
            .partial_cmp(&lhs.0)
            .unwrap_or(std::cmp::Ordering::Equal)
    });
    // Special rule: when the "active" fraction (score > 0.5) exceeds the
    // threshold, the entire tier is promoted rather than just the top slice.
    let active = ranked.iter().filter(|(score, _)| *score > 0.5).count();
    let take_all = active as f64 / total as f64 > PROMOTE_ALL_THRESHOLD;
    let quota = if take_all {
        total
    } else {
        // Always promote at least one entry, even in tiny tiers.
        ((total as f64 * PROMOTE_TOP_PERCENT).ceil() as usize).max(1)
    };
    let mut moved = 0usize;
    for (_, entry) in ranked.iter().take(quota) {
        // Tag with the origin-tier label so promotion history stays traceable.
        let mut tags = entry.tags.clone();
        tags.push(format!("promoted:{}", label));
        match db.create_memory(&entry.content, to, tags, &entry.source) {
            Ok(_) => {
                // Drop the old-tier record — prevents re-promotion and LMDB bloat.
                let _ = db.forget(&entry.id);
                moved += 1;
            }
            Err(e) => eprintln!("[FLINT-MR] promote error ({}): {}", label, e),
        }
    }
    if moved > 0 {
        eprintln!(
            "[FLINT-MR] promoted {}/{} memories ({}{})",
            moved,
            total,
            label,
            if take_all { " — all active" } else { "" }
        );
    }
}