| |
| |
| |
| |
| |
| |
| |
| |
|
|
| use anyhow::{anyhow, Result}; |
| use heed::types::*; |
| use heed::{Database, Env, EnvOpenOptions}; |
| use serde::{Deserialize, Serialize}; |
| use std::path::Path; |
|
|
| |
| use crate::config::{ |
| EnforceMode, TierThreshold, TierConfig, FormulaConfig, |
| ToolWeight, ComplexityWeights, SpfConfig, |
| CommandPerm, |
| }; |
|
|
| const MAX_DB_SIZE: usize = 10 * 1024 * 1024; |
|
|
| |
/// LMDB-backed configuration store for SPF, split across four named databases
/// inside a single environment.
pub struct SpfConfigDb {
    /// Shared LMDB environment; all databases below live in it.
    env: Env,

    /// Generic string config, keyed "namespace:key" -> string value.
    config: Database<Str, Str>,

    /// Path rules, keyed "allowed:<path>" / "blocked:<path>" -> true.
    paths: Database<Str, SerdeBincode<bool>>,

    /// Dangerous command substrings -> severity (clamped to 0-10 on insert).
    patterns: Database<Str, SerdeBincode<u8>>,

    /// Command whitelist, keyed "<context>:<command>" -> permissions.
    commands: Database<Str, SerdeBincode<CommandPerm>>,
}
|
|
| |
| |
| |
|
|
| impl SpfConfigDb { |
| |
    /// Opens (creating the directory if needed) the LMDB environment at
    /// `path` and the four named databases used by SPF.
    pub fn open(path: &Path) -> Result<Self> {
        std::fs::create_dir_all(path)?;

        // SAFETY: heed marks `EnvOpenOptions::open` unsafe because the caller
        // must ensure the same environment directory is not opened twice in
        // one process and that `path` is a directory we control — we just
        // created it above.
        // NOTE(review): only 4 databases are created below but max_dbs is 9 —
        // presumably headroom for future DBs; confirm before lowering.
        let env = unsafe {
            EnvOpenOptions::new()
                .map_size(MAX_DB_SIZE)
                .max_dbs(9)
                .open(path)?
        };

        let mut wtxn = env.write_txn()?;
        let config = env.create_database(&mut wtxn, Some("config"))?;
        let paths = env.create_database(&mut wtxn, Some("paths"))?;
        let patterns = env.create_database(&mut wtxn, Some("patterns"))?;
        let commands = env.create_database(&mut wtxn, Some("commands"))?;
        wtxn.commit()?;

        log::info!("SPF Config LMDB opened at {:?}", path);
        Ok(Self { env, config, paths, patterns, commands })
    }
|
|
| |
| |
| |
|
|
| |
| pub fn get(&self, namespace: &str, key: &str) -> Result<Option<String>> { |
| let full_key = format!("{}:{}", namespace, key); |
| let rtxn = self.env.read_txn()?; |
| Ok(self.config.get(&rtxn, &full_key)?.map(|s| s.to_string())) |
| } |
|
|
| |
| pub fn set(&self, namespace: &str, key: &str, value: &str) -> Result<()> { |
| let full_key = format!("{}:{}", namespace, key); |
| let mut wtxn = self.env.write_txn()?; |
| self.config.put(&mut wtxn, &full_key, value)?; |
| wtxn.commit()?; |
| Ok(()) |
| } |
|
|
| |
| pub fn get_typed<T: for<'de> Deserialize<'de>>(&self, namespace: &str, key: &str) -> Result<Option<T>> { |
| match self.get(namespace, key)? { |
| Some(json) => Ok(Some(serde_json::from_str(&json)?)), |
| None => Ok(None), |
| } |
| } |
|
|
| |
| pub fn set_typed<T: Serialize>(&self, namespace: &str, key: &str, value: &T) -> Result<()> { |
| let json = serde_json::to_string(value)?; |
| self.set(namespace, key, &json) |
| } |
|
|
| |
| |
| |
|
|
| |
| pub fn allow_path(&self, path: &str) -> Result<()> { |
| let key = format!("allowed:{}", path); |
| let mut wtxn = self.env.write_txn()?; |
| self.paths.put(&mut wtxn, &key, &true)?; |
| wtxn.commit()?; |
| Ok(()) |
| } |
|
|
| |
| pub fn block_path(&self, path: &str) -> Result<()> { |
| let key = format!("blocked:{}", path); |
| let mut wtxn = self.env.write_txn()?; |
| self.paths.put(&mut wtxn, &key, &true)?; |
| wtxn.commit()?; |
| Ok(()) |
| } |
|
|
| |
| pub fn remove_path_rule(&self, rule_type: &str, path: &str) -> Result<bool> { |
| let key = format!("{}:{}", rule_type, path); |
| let mut wtxn = self.env.write_txn()?; |
| let deleted = self.paths.delete(&mut wtxn, &key)?; |
| wtxn.commit()?; |
| Ok(deleted) |
| } |
|
|
| |
| pub fn is_path_allowed(&self, path: &str) -> Result<bool> { |
| let canonical = match std::fs::canonicalize(path) { |
| Ok(p) => p.to_string_lossy().to_string(), |
| Err(_) => { |
| if path.contains("..") { |
| return Ok(false); |
| } |
| path.to_string() |
| } |
| }; |
| let rtxn = self.env.read_txn()?; |
| let iter = self.paths.iter(&rtxn)?; |
|
|
| for result in iter { |
| let (key, _) = result?; |
| if key.starts_with("allowed:") { |
| let allowed_path = &key[8..]; |
| if canonical.starts_with(allowed_path) { |
| return Ok(true); |
| } |
| } |
| } |
| Ok(false) |
| } |
|
|
| |
| pub fn is_path_blocked(&self, path: &str) -> Result<bool> { |
| let canonical = match std::fs::canonicalize(path) { |
| Ok(p) => p.to_string_lossy().to_string(), |
| Err(_) => { |
| if path.contains("..") { |
| return Ok(true); |
| } |
| path.to_string() |
| } |
| }; |
|
|
| let rtxn = self.env.read_txn()?; |
| let iter = self.paths.iter(&rtxn)?; |
|
|
| for result in iter { |
| let (key, _) = result?; |
| if key.starts_with("blocked:") { |
| let blocked_path = &key[8..]; |
| if canonical.starts_with(blocked_path) { |
| return Ok(true); |
| } |
| } |
| } |
| Ok(false) |
| } |
|
|
| |
| pub fn list_path_rules(&self) -> Result<Vec<(String, String)>> { |
| let rtxn = self.env.read_txn()?; |
| let iter = self.paths.iter(&rtxn)?; |
|
|
| let mut rules = Vec::new(); |
| for result in iter { |
| let (key, _) = result?; |
| if let Some((rule_type, path)) = key.split_once(':') { |
| rules.push((rule_type.to_string(), path.to_string())); |
| } |
| } |
| Ok(rules) |
| } |
|
|
| |
| |
| |
|
|
| |
| pub fn add_dangerous_pattern(&self, pattern: &str, severity: u8) -> Result<()> { |
| let mut wtxn = self.env.write_txn()?; |
| self.patterns.put(&mut wtxn, pattern, &severity.min(10))?; |
| wtxn.commit()?; |
| Ok(()) |
| } |
|
|
| |
| pub fn check_dangerous(&self, command: &str) -> Result<Option<u8>> { |
| let rtxn = self.env.read_txn()?; |
| let iter = self.patterns.iter(&rtxn)?; |
|
|
| let mut max_severity: Option<u8> = None; |
| for result in iter { |
| let (pattern, severity) = result?; |
| if command.contains(pattern) { |
| max_severity = Some(max_severity.map_or(severity, |s| s.max(severity))); |
| } |
| } |
| Ok(max_severity) |
| } |
|
|
| |
| pub fn list_dangerous_patterns(&self) -> Result<Vec<(String, u8)>> { |
| let rtxn = self.env.read_txn()?; |
| let iter = self.patterns.iter(&rtxn)?; |
|
|
| let mut patterns = Vec::new(); |
| for result in iter { |
| let (pattern, severity) = result?; |
| patterns.push((pattern.to_string(), severity)); |
| } |
| Ok(patterns) |
| } |
|
|
| |
| |
| |
| |
| |
|
|
| |
| pub fn add_command(&self, context: &str, cmd: &str, perm: CommandPerm) -> Result<()> { |
| let key = format!("{}:{}", context, cmd); |
| let mut wtxn = self.env.write_txn()?; |
| self.commands.put(&mut wtxn, &key, &perm)?; |
| wtxn.commit()?; |
| Ok(()) |
| } |
|
|
| |
| pub fn remove_command(&self, context: &str, cmd: &str) -> Result<bool> { |
| let key = format!("{}:{}", context, cmd); |
| let mut wtxn = self.env.write_txn()?; |
| let deleted = self.commands.delete(&mut wtxn, &key)?; |
| wtxn.commit()?; |
| Ok(deleted) |
| } |
|
|
| |
| pub fn list_commands(&self, context: &str) -> Result<Vec<(String, CommandPerm)>> { |
| let prefix = format!("{}:", context); |
| let rtxn = self.env.read_txn()?; |
| let iter = self.commands.iter(&rtxn)?; |
|
|
| let mut cmds = Vec::new(); |
| for result in iter { |
| let (key, perm) = result?; |
| if key.starts_with(&prefix) { |
| let cmd_name = &key[prefix.len()..]; |
| cmds.push((cmd_name.to_string(), perm)); |
| } |
| } |
| Ok(cmds) |
| } |
|
|
| |
| pub fn add_user_fs_path(&self, path: &str) -> Result<()> { |
| let mut paths = self.list_user_fs_paths()?; |
| if !paths.contains(&path.to_string()) { |
| paths.push(path.to_string()); |
| self.set_typed("spf", "user_fs_paths", &paths)?; |
| } |
| Ok(()) |
| } |
|
|
| |
| pub fn remove_user_fs_path(&self, path: &str) -> Result<bool> { |
| let mut paths = self.list_user_fs_paths()?; |
| let before = paths.len(); |
| paths.retain(|p| p != path); |
| if paths.len() < before { |
| self.set_typed("spf", "user_fs_paths", &paths)?; |
| Ok(true) |
| } else { |
| Ok(false) |
| } |
| } |
|
|
| |
| pub fn list_user_fs_paths(&self) -> Result<Vec<String>> { |
| Ok(self.get_typed::<Vec<String>>("spf", "user_fs_paths")? |
| .unwrap_or_default()) |
| } |
|
|
| |
| |
| |
|
|
| |
| pub fn get_tiers(&self) -> Result<TierConfig> { |
| self.get_typed::<TierConfig>("spf", "tiers")? |
| .ok_or_else(|| anyhow!("Tier config not found")) |
| } |
|
|
| |
    /// Persists the tier configuration under "spf:tiers".
    pub fn set_tiers(&self, tiers: &TierConfig) -> Result<()> {
        self.set_typed("spf", "tiers", tiers)
    }
|
|
| |
| |
| pub fn get_tier_for_c(&self, c: u64) -> Result<(&'static str, u8, u8, bool)> { |
| let tiers = self.get_tiers()?; |
|
|
| if c < tiers.simple.max_c { |
| Ok(("SIMPLE", tiers.simple.analyze_percent, tiers.simple.build_percent, tiers.simple.requires_approval)) |
| } else if c < tiers.light.max_c { |
| Ok(("LIGHT", tiers.light.analyze_percent, tiers.light.build_percent, tiers.light.requires_approval)) |
| } else if c < tiers.medium.max_c { |
| Ok(("MEDIUM", tiers.medium.analyze_percent, tiers.medium.build_percent, tiers.medium.requires_approval)) |
| } else { |
| Ok(("CRITICAL", tiers.critical.analyze_percent, tiers.critical.build_percent, tiers.critical.requires_approval)) |
| } |
| } |
|
|
| |
| |
| |
|
|
| |
| pub fn get_formula(&self) -> Result<FormulaConfig> { |
| self.get_typed::<FormulaConfig>("spf", "formula")? |
| .ok_or_else(|| anyhow!("Formula config not found")) |
| } |
|
|
| |
    /// Persists the formula configuration under "spf:formula".
    pub fn set_formula(&self, formula: &FormulaConfig) -> Result<()> {
        self.set_typed("spf", "formula", formula)
    }
|
|
| |
| |
| |
|
|
| |
| pub fn get_weights(&self) -> Result<ComplexityWeights> { |
| self.get_typed::<ComplexityWeights>("spf", "weights")? |
| .ok_or_else(|| anyhow!("Complexity weights not found")) |
| } |
|
|
| |
    /// Persists the complexity weights under "spf:weights".
    pub fn set_weights(&self, weights: &ComplexityWeights) -> Result<()> {
        self.set_typed("spf", "weights", weights)
    }
|
|
| |
| pub fn get_tool_weight(&self, tool: &str) -> Result<ToolWeight> { |
| let weights = self.get_weights()?; |
| Ok(match tool.to_lowercase().as_str() { |
| "edit" => weights.edit, |
| "write" => weights.write, |
| "bash_dangerous" => weights.bash_dangerous, |
| "bash_git" => weights.bash_git, |
| "bash_piped" => weights.bash_piped, |
| "bash_simple" | "bash" => weights.bash_simple, |
| "read" => weights.read, |
| "search" | "glob" | "grep" => weights.search, |
| _ => weights.unknown, |
| }) |
| } |
|
|
| |
| |
| |
|
|
| |
| pub fn get_enforce_mode(&self) -> Result<EnforceMode> { |
| self.get_typed::<EnforceMode>("spf", "enforce_mode")? |
| .ok_or_else(|| anyhow!("Enforce mode not found")) |
| } |
|
|
| |
    /// Persists the enforce mode under "spf:enforce_mode".
    pub fn set_enforce_mode(&self, mode: &EnforceMode) -> Result<()> {
        self.set_typed("spf", "enforce_mode", mode)
    }
|
|
| |
| |
| |
|
|
| |
    /// Seeds the store with first-run defaults.
    /// Idempotent: returns immediately if "spf:version" already exists.
    pub fn init_defaults(&self) -> Result<()> {
        // The presence of a version key marks an already-initialized store.
        if self.get("spf", "version")?.is_some() {
            return Ok(());
        }

        // Core flags.
        self.set("spf", "version", "1.0.0")?;
        self.set_enforce_mode(&EnforceMode::Max)?;
        self.set("spf", "require_read_before_edit", "true")?;
        self.set("spf", "max_write_size", "100000")?;

        // Tier bands: analyze/build percentages per complexity range.
        // Only CRITICAL requires explicit approval in the initial defaults.
        self.set_tiers(&TierConfig {
            simple: TierThreshold { max_c: 500, analyze_percent: 40, build_percent: 60, requires_approval: false },
            light: TierThreshold { max_c: 2000, analyze_percent: 60, build_percent: 40, requires_approval: false },
            medium: TierThreshold { max_c: 10000, analyze_percent: 75, build_percent: 25, requires_approval: false },
            critical: TierThreshold { max_c: u64::MAX, analyze_percent: 95, build_percent: 5, requires_approval: true },
        })?;

        // Complexity formula constants; their exact semantics are defined by
        // the formula evaluator elsewhere in the crate.
        self.set_formula(&FormulaConfig {
            w_eff: 40000.0,
            e: std::f64::consts::E,
            basic_power: 1,
            deps_power: 7,
            complex_power: 10,
            files_multiplier: 10,
        })?;

        // Per-tool complexity weights (higher basic = riskier tool).
        self.set_weights(&ComplexityWeights {
            edit: ToolWeight { basic: 10, dependencies: 2, complex: 1, files: 1 },
            write: ToolWeight { basic: 20, dependencies: 2, complex: 1, files: 1 },
            bash_dangerous: ToolWeight { basic: 50, dependencies: 5, complex: 2, files: 1 },
            bash_git: ToolWeight { basic: 30, dependencies: 3, complex: 1, files: 1 },
            bash_piped: ToolWeight { basic: 20, dependencies: 3, complex: 1, files: 1 },
            bash_simple: ToolWeight { basic: 10, dependencies: 1, complex: 0, files: 1 },
            read: ToolWeight { basic: 5, dependencies: 1, complex: 0, files: 1 },
            search: ToolWeight { basic: 8, dependencies: 2, complex: 0, files: 1 },
            unknown: ToolWeight { basic: 20, dependencies: 3, complex: 1, files: 1 },
        })?;

        // Allow the user's home directory by default (trailing '/' keeps the
        // prefix match on a path-component boundary).
        let home = crate::paths::actual_home().to_string_lossy();
        self.allow_path(&format!("{}/", home))?;

        // Block system locations and SPF's own source/config/state so the
        // agent cannot tamper with them.
        let root = crate::paths::spf_root().to_string_lossy();
        self.block_path("/tmp")?;
        self.block_path("/etc")?;
        self.block_path("/usr")?;
        self.block_path("/system")?;
        self.block_path(&crate::paths::system_pkg_path())?;
        self.block_path(&format!("{}/src/", root))?;
        self.block_path(&format!("{}/LIVE/SPF_FS/blobs/", root))?;
        self.block_path(&format!("{}/Cargo.toml", root))?;
        self.block_path(&format!("{}/Cargo.lock", root))?;
        self.block_path(&format!("{}/.claude/", home))?;
        // SPF runtime databases and scripts.
        self.block_path(&format!("{}/LIVE/CONFIG.DB", root))?;
        self.block_path(&format!("{}/LIVE/LMDB5/", root))?;
        self.block_path(&format!("{}/LIVE/state/", root))?;
        self.block_path(&format!("{}/LIVE/storage/", root))?;
        self.block_path(&format!("{}/hooks/", root))?;
        self.block_path(&format!("{}/scripts/", root))?;

        // Seed dangerous command substrings with severities (0-10).
        self.add_dangerous_pattern("rm -rf /", 10)?;
        self.add_dangerous_pattern("rm -rf ~", 10)?;
        self.add_dangerous_pattern("dd if=", 9)?;
        self.add_dangerous_pattern("> /dev/", 9)?;
        self.add_dangerous_pattern("chmod 777", 7)?;
        self.add_dangerous_pattern("curl | sh", 8)?;
        self.add_dangerous_pattern("wget | sh", 8)?;
        self.add_dangerous_pattern("curl|sh", 8)?;
        self.add_dangerous_pattern("wget|sh", 8)?;

        log::info!("SPF Config LMDB initialized with defaults");
        Ok(())
    }
|
|
| |
| |
| |
    /// Enforces the current tier-approval policy (every tier requires
    /// approval) on the stored tier config, persisting only when a flag
    /// actually changed. Always stamps "spf:version" to "3.0.0" afterwards.
    pub fn sync_tier_approval(&self) -> Result<()> {
        let mut tiers = self.get_tiers()?;
        let mut changed = false;

        // Desired requires_approval flag per tier (currently: all true).
        let policy: [(&str, bool); 4] = [
            ("SIMPLE", true),
            ("LIGHT", true),
            ("MEDIUM", true),
            ("CRITICAL", true),
        ];

        // Mutable references in the same order as `policy` above.
        let tier_refs = [
            &mut tiers.simple,
            &mut tiers.light,
            &mut tiers.medium,
            &mut tiers.critical,
        ];

        for (i, (name, required)) in policy.iter().enumerate() {
            if tier_refs[i].requires_approval != *required {
                log::info!("SPF sync: {} requires_approval {} → {}", name, tier_refs[i].requires_approval, required);
                tier_refs[i].requires_approval = *required;
                changed = true;
            }
        }

        if changed {
            self.set_tiers(&tiers)?;
            log::info!("SPF tier approval policy synced");
        }

        // NOTE(review): the version is stamped even when nothing changed —
        // presumably intentional as a migration marker; confirm.
        self.set("spf", "version", "3.0.0")?;

        Ok(())
    }
|
|
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
    /// Re-asserts the hardcoded critical block list on every call, so rules
    /// deleted from the DB are restored. `block_path` is an upsert, so
    /// re-adding existing rules is harmless.
    pub fn sync_blocked_paths(&self) -> Result<()> {
        let root = crate::paths::spf_root().to_string_lossy().to_string();
        let home = crate::paths::actual_home().to_string_lossy().to_string();

        let critical_paths = [
            // Agent instruction files.
            format!("{}/CLAUDE.md", root),
            format!("{}/LIVE/CLAUDE.md", root),
            format!("{}/LIVE/PROJECTS/PROJECTS/CLAUDE.md", root),
            // Claude client configuration.
            format!("{}/.claude.json", root),
            format!("{}/.claude.json", home),
            format!("{}/.claude/", home),
            // Hardcoded policy rules.
            format!("{}/LIVE/PROJECTS/PROJECTS/HARDCODE-RULES.md", root),
            // SPF config directory.
            format!("{}/LIVE/CONFIG/", root),
            // SPF's own source and manifests.
            format!("{}/src/", root),
            format!("{}/Cargo.toml", root),
            format!("{}/Cargo.lock", root),
            // Hooks, scripts and runtime databases/state.
            format!("{}/hooks/", root),
            format!("{}/scripts/", root),
            format!("{}/LIVE/CONFIG.DB", root),
            format!("{}/LIVE/LMDB5/", root),
            format!("{}/LIVE/SPF_FS/blobs/", root),
            format!("{}/LIVE/state/", root),
            format!("{}/LIVE/storage/", root),
            // System locations.
            "/tmp".to_string(),
            "/etc".to_string(),
            "/usr".to_string(),
            "/system".to_string(),
            crate::paths::system_pkg_path(),
        ];

        for path in &critical_paths {
            self.block_path(path)?;
        }

        log::info!("SPF blocked paths synced: {} critical paths enforced", critical_paths.len());
        Ok(())
    }
|
|
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
    /// One-time migration from the old blacklist model to the command
    /// whitelist. Runs only when the commands DB is empty; a fresh install
    /// (no dangerous patterns either) is left empty, which means default-deny.
    pub fn sync_command_whitelist(&self) -> Result<()> {
        // If any commands exist, the whitelist has already been populated.
        let rtxn = self.env.read_txn()?;
        let count = self.commands.stat(&rtxn)?.entries;
        drop(rtxn);

        if count > 0 {
            // Already migrated/populated — nothing to do.
            return Ok(());
        }

        // Distinguish "fresh install" from "pre-whitelist install": an
        // existing install will have dangerous patterns seeded already.
        let patterns = self.list_dangerous_patterns()?;
        if patterns.is_empty() {
            // Fresh install: leave the commands DB empty (default-deny).
            log::info!("SPF whitelist: fresh install, commands DB empty (default-deny)");
            return Ok(());
        }

        log::info!("SPF whitelist: migrating from blacklist to whitelist...");

        // Read-only commands permitted against the user's filesystem.
        let user_fs_read: &[&str] = &[
            "echo", "grep", "git", "date", "uname", "whoami", "pwd",
            "env", "which", "sort", "uniq", "tr", "cut", "jq",
            "diff", "sha256sum", "md5sum", "basename", "dirname", "type",
        ];
        for cmd in user_fs_read {
            self.add_command("user_fs", cmd, CommandPerm::read_only())?;
        }

        // Commands permitted inside the sandbox, with graded permissions.
        let sandbox_full: &[(&str, CommandPerm)] = &[
            // Build toolchains: full access.
            ("cargo", CommandPerm::full()),
            ("rustc", CommandPerm::full()),
            ("gcc", CommandPerm::full()),
            ("make", CommandPerm::full()),
            ("cmake", CommandPerm::full()),
            ("npm", CommandPerm::full()),
            ("node", CommandPerm::full()),
            ("python", CommandPerm::full()),
            ("python3", CommandPerm::full()),
            ("pip", CommandPerm::full()),
            // Version control.
            ("git", CommandPerm::read_write()),
            // Archives.
            ("tar", CommandPerm::read_write()),
            ("gzip", CommandPerm::read_write()),
            ("unzip", CommandPerm::read_write()),
            // File manipulation.
            ("cp", CommandPerm::read_write()),
            ("mv", CommandPerm::read_write()),
            ("rm", CommandPerm::read_write()),
            ("mkdir", CommandPerm::read_write()),
            ("touch", CommandPerm::read_write()),
            ("chmod", CommandPerm::read_write()),
            ("ln", CommandPerm::read_write()),
            ("tee", CommandPerm::read_write()),
            ("sed", CommandPerm::read_write()),
            ("sort", CommandPerm::read_write()),
            // Tools that can spawn subcommands (-exec / system()).
            ("find", CommandPerm { read: true, write: false, execute: true }),
            ("awk", CommandPerm { read: true, write: false, execute: true }),
            // Read-only inspection tools.
            ("cat", CommandPerm::read_only()),
            ("head", CommandPerm::read_only()),
            ("tail", CommandPerm::read_only()),
            ("grep", CommandPerm::read_only()),
            ("ls", CommandPerm::read_only()),
            ("echo", CommandPerm::read_only()),
            ("printf", CommandPerm::read_only()),
            ("diff", CommandPerm::read_only()),
            ("wc", CommandPerm::read_only()),
            ("jq", CommandPerm::read_only()),
            ("xxd", CommandPerm::read_only()),
            // Network fetch: read-only.
            ("curl", CommandPerm::read_only()),
            ("wget", CommandPerm::read_only()),
        ];
        for (cmd, perm) in sandbox_full {
            self.add_command("sandbox", cmd, *perm)?;
        }

        // Seed the user-fs path list with the home directory (trailing '/'
        // keeps prefix matching on a component boundary).
        let home = crate::paths::actual_home().to_string_lossy().to_string();
        self.add_user_fs_path(&format!("{}/", home))?;

        log::info!(
            "SPF whitelist: migration complete — {} user_fs cmds, {} sandbox cmds, user_fs_paths seeded",
            user_fs_read.len(), sandbox_full.len()
        );

        Ok(())
    }
|
|
| |
| pub fn stats(&self) -> Result<(u64, u64, u64)> { |
| let rtxn = self.env.read_txn()?; |
| let config_stat = self.config.stat(&rtxn)?; |
| let paths_stat = self.paths.stat(&rtxn)?; |
| let patterns_stat = self.patterns.stat(&rtxn)?; |
| Ok((config_stat.entries as u64, paths_stat.entries as u64, patterns_stat.entries as u64)) |
| } |
|
|
| |
| pub fn stats_full(&self) -> Result<(u64, u64, u64, u64)> { |
| let rtxn = self.env.read_txn()?; |
| let config_stat = self.config.stat(&rtxn)?; |
| let paths_stat = self.paths.stat(&rtxn)?; |
| let patterns_stat = self.patterns.stat(&rtxn)?; |
| let commands_stat = self.commands.stat(&rtxn)?; |
| Ok((config_stat.entries as u64, paths_stat.entries as u64, |
| patterns_stat.entries as u64, commands_stat.entries as u64)) |
| } |
|
|
| |
| |
| |
|
|
| |
| |
    /// Assembles the full in-memory `SpfConfig` snapshot, running all
    /// initialization/sync steps first (defaults, tier approval policy,
    /// blocked-path re-enforcement, whitelist migration).
    pub fn load_full_config(&self) -> Result<SpfConfig> {
        // Order matters: defaults must exist before syncs read/modify them.
        self.init_defaults()?;
        self.sync_tier_approval()?;
        // Re-assert hardcoded critical blocks on every load.
        self.sync_blocked_paths()?;
        // One-time blacklist -> whitelist migration (no-op afterwards).
        self.sync_command_whitelist()?;

        // Split stored path rules into allow and deny lists.
        let path_rules = self.list_path_rules()?;
        let mut allowed_paths = Vec::new();
        let mut blocked_paths = Vec::new();
        for (rule_type, path) in path_rules {
            match rule_type.as_str() {
                "allowed" => allowed_paths.push(path),
                "blocked" => blocked_paths.push(path),
                _ => {}
            }
        }

        // Only the pattern strings are needed here; severities are dropped.
        let dangerous_commands: Vec<String> = self.list_dangerous_patterns()?
            .into_iter()
            .map(|(pattern, _)| pattern)
            .collect();

        // Command whitelists per context, plus user-fs roots.
        let user_cmds = self.list_commands("user_fs")?;
        let sandbox_cmds = self.list_commands("sandbox")?;
        let user_fs_paths = self.list_user_fs_paths()?;

        // Scalar settings with conservative fallbacks.
        let version = self.get("spf", "version")?.unwrap_or_else(|| "1.0.0".to_string());
        let require_read = self.get("spf", "require_read_before_edit")?
            .map(|s| s == "true").unwrap_or(true);
        let max_write = self.get("spf", "max_write_size")?
            .and_then(|s| s.parse().ok()).unwrap_or(100_000);

        // Assemble the snapshot; tier/formula/weights errors propagate.
        Ok(SpfConfig {
            version,
            enforce_mode: self.get_enforce_mode()?,
            allowed_paths,
            blocked_paths,
            require_read_before_edit: require_read,
            max_write_size: max_write,
            tiers: self.get_tiers()?,
            formula: self.get_formula()?,
            complexity_weights: self.get_weights()?,
            dangerous_commands,
            // Hardcoded git force/reset flags to watch for.
            git_force_patterns: vec![
                "--force".to_string(),
                "--hard".to_string(),
                "-f".to_string(),
            ],
            // NOTE(review): target collection type is decided by the
            // SpfConfig field types; `collect` just converts.
            allowed_commands_user: user_cmds.into_iter().collect(),
            allowed_commands_sandbox: sandbox_cmds.into_iter().collect(),
            user_fs_paths,
        })
    }
| } |
|
|