// SPF Smart Gateway - BPE Tokenizer
// Copyright 2026 Joseph Stone - All Rights Reserved
//
// Byte-Pair Encoding tokenizer for SPF Transformer.
// Trains on SPF corpus (brain data, source code, rules).
// Pure Rust. No external tokenizer dependencies.
//
// Depends on: serde_json (JSON parsing in from_json only); otherwise Layer 0
use std::collections::HashMap;
// ============================================================================
// SPECIAL TOKENS
// ============================================================================
pub const PAD_TOKEN: &str = "[PAD]";
pub const BOS_TOKEN: &str = "[BOS]";
pub const EOS_TOKEN: &str = "[EOS]";
pub const UNK_TOKEN: &str = "[UNK]";
pub const TOOL_TOKEN: &str = "[TOOL]";
pub const GATE_TOKEN: &str = "[GATE]";
pub const USER_TOKEN: &str = "[USER]";
pub const SPF_TOKEN: &str = "[SPF]";
pub const ALLOWED_TOKEN: &str = "[ALLOWED]";
pub const BLOCKED_TOKEN: &str = "[BLOCKED]";
pub const PAD_ID: u32 = 0;
pub const BOS_ID: u32 = 1;
pub const EOS_ID: u32 = 2;
pub const UNK_ID: u32 = 3;
pub const TOOL_ID: u32 = 4;
pub const GATE_ID: u32 = 5;
pub const USER_ID: u32 = 6;
pub const SPF_ID: u32 = 7;
pub const ALLOWED_ID: u32 = 8;
pub const BLOCKED_ID: u32 = 9;
const NUM_SPECIAL: u32 = 10;
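//
// How these tokens might frame a gated exchange (an illustrative sketch only;
// the actual prompt layout is defined by the training data, not by this file):
//
//   [BOS] [USER] read config [TOOL] fs_read [GATE] [ALLOWED] [EOS]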
// ============================================================================
// BPE TOKENIZER
// ============================================================================
/// Byte-Pair Encoding tokenizer with SPF-specific special tokens
#[derive(Clone)]
pub struct Tokenizer {
/// Token string → ID
token_to_id: HashMap<String, u32>,
/// ID → token string
id_to_token: HashMap<u32, String>,
/// Merge rules: (pair) → merged token, ordered by priority
merges: Vec<(String, String)>,
/// Vocabulary size
pub vocab_size: u32,
}
impl Tokenizer {
/// Create a new tokenizer with only special tokens (untrained)
pub fn new() -> Self {
let mut tok = Self {
token_to_id: HashMap::new(),
id_to_token: HashMap::new(),
merges: Vec::new(),
vocab_size: NUM_SPECIAL,
};
// Register special tokens
let specials = [
PAD_TOKEN, BOS_TOKEN, EOS_TOKEN, UNK_TOKEN, TOOL_TOKEN,
GATE_TOKEN, USER_TOKEN, SPF_TOKEN, ALLOWED_TOKEN, BLOCKED_TOKEN,
];
for (i, &s) in specials.iter().enumerate() {
tok.token_to_id.insert(s.to_string(), i as u32);
tok.id_to_token.insert(i as u32, s.to_string());
}
tok
}
/// Load tokenizer vocabulary from JSON file.
/// Falls back to untrained tokenizer if file doesn't exist.
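    ///
    /// Minimal usage sketch (path is illustrative):
    /// ```ignore
    /// let tok = Tokenizer::load("models/tokenizer.json").unwrap();
    /// ```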
pub fn load(path: &str) -> Result<Self, String> {
let file_path = std::path::Path::new(path);
if !file_path.exists() {
// No trained tokenizer yet — use untrained (special tokens + byte fallback)
return Ok(Self::new());
}
let content = std::fs::read_to_string(file_path)
.map_err(|e| format!("Read error: {}", e))?;
Self::from_json(&content)
}
/// Save tokenizer vocabulary to JSON file.
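    ///
    /// Sketch (path is illustrative):
    /// ```ignore
    /// tok.save("models/tokenizer.json").unwrap();
    /// ```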
pub fn save(&self, path: &str) -> Result<(), String> {
let json = self.to_json();
std::fs::write(path, json)
.map_err(|e| format!("Write error: {}", e))
}
    /// Train BPE on a corpus of text.
    /// `target_vocab` is the desired vocabulary size (special tokens + 256 base
    /// character tokens + learned merges); the number of merges is derived from it.
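    ///
    /// Minimal training sketch (corpus and target size are illustrative):
    /// ```ignore
    /// let mut tok = Tokenizer::new();
    /// tok.train("the cat sat on the mat", 512);
    /// assert!(tok.vocab_size > 266); // 10 special + 256 base tokens, plus merges
    /// ```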
pub fn train(&mut self, corpus: &str, target_vocab: u32) {
        let num_merges = target_vocab.saturating_sub(NUM_SPECIAL + 256); // 256 base single-char tokens
        // Step 1: Initialize the base vocabulary with single-character tokens
        // for code points 0..=255
for b in 0u8..=255 {
let token = format!("{}", b as char);
let id = NUM_SPECIAL + b as u32;
self.token_to_id.insert(token.clone(), id);
self.id_to_token.insert(id, token);
}
self.vocab_size = NUM_SPECIAL + 256;
        // Step 2: Split the corpus into whitespace-separated words, each a
        // sequence of single-character tokens
let mut words: Vec<Vec<String>> = corpus
.split_whitespace()
.map(|w| w.chars().map(|c| c.to_string()).collect())
.collect();
// Step 3: Iteratively merge most frequent pairs
for _ in 0..num_merges {
// Count all adjacent pairs
let mut pair_counts: HashMap<(String, String), u64> = HashMap::new();
for word in &words {
for pair in word.windows(2) {
*pair_counts
.entry((pair[0].clone(), pair[1].clone()))
.or_insert(0) += 1;
}
}
if pair_counts.is_empty() {
break;
}
            // Find the most frequent pair (ties broken arbitrarily by HashMap iteration order)
let best_pair = pair_counts
.iter()
.max_by_key(|(_, &count)| count)
.map(|(pair, _)| pair.clone());
let (left, right) = match best_pair {
Some(p) => p,
None => break,
};
            // Create the merged token and register it in the vocabulary.
            // Guard against re-registering a string that an earlier merge already
            // produced (e.g. "a"+"bc" and "ab"+"c" both yield "abc"), which would
            // otherwise leave a dangling ID behind.
            let merged = format!("{}{}", left, right);
            if !self.token_to_id.contains_key(&merged) {
                let id = self.vocab_size;
                self.token_to_id.insert(merged.clone(), id);
                self.id_to_token.insert(id, merged.clone());
                self.vocab_size += 1;
            }
            self.merges.push((left.clone(), right.clone()));
// Apply merge to all words
for word in &mut words {
let mut i = 0;
while i + 1 < word.len() {
if word[i] == left && word[i + 1] == right {
word[i] = merged.clone();
word.remove(i + 1);
} else {
i += 1;
}
}
}
}
}
/// Encode text to token IDs
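    ///
    /// Sketch (assumes `tok` was trained on text containing these words):
    /// ```ignore
    /// let ids = tok.encode("the cat");
    /// assert!(ids.iter().all(|&id| id != UNK_ID));
    /// ```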
pub fn encode(&self, text: &str) -> Vec<u32> {
let mut tokens = Vec::new();
for word in text.split_whitespace() {
            // Whole word is already a known token (special token or fully merged word)
if let Some(&id) = self.token_to_id.get(word) {
tokens.push(id);
continue;
}
            // Split the word into single-character tokens
let mut chars: Vec<String> = word.chars().map(|c| c.to_string()).collect();
// Apply merges in order
for (left, right) in &self.merges {
let merged = format!("{}{}", left, right);
let mut i = 0;
while i + 1 < chars.len() {
if chars[i] == *left && chars[i + 1] == *right {
chars[i] = merged.clone();
chars.remove(i + 1);
} else {
i += 1;
}
}
}
// Convert to IDs
for ch in &chars {
match self.token_to_id.get(ch) {
Some(&id) => tokens.push(id),
None => tokens.push(UNK_ID),
}
}
}
tokens
}
    /// Decode token IDs back to text.
    ///
    /// Note: decoded tokens are concatenated directly, so whitespace from the
    /// original input is not restored.
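    ///
    /// Sketch (assumes a trained tokenizer; see `train`):
    /// ```ignore
    /// let ids = tok.encode("the cat");
    /// assert_eq!(tok.decode(&ids), "thecat"); // whitespace is not restored
    /// ```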
pub fn decode(&self, ids: &[u32]) -> String {
let mut parts = Vec::new();
for &id in ids {
match self.id_to_token.get(&id) {
Some(token) => {
// Skip special tokens in output (except content ones)
match id {
PAD_ID | BOS_ID | EOS_ID => continue,
_ => parts.push(token.clone()),
}
}
None => parts.push(UNK_TOKEN.to_string()),
}
}
parts.join("")
}
/// Encode with BOS/EOS wrapping
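    ///
    /// Sketch:
    /// ```ignore
    /// let ids = tok.encode_with_special("test");
    /// assert_eq!(ids.first(), Some(&BOS_ID));
    /// assert_eq!(ids.last(), Some(&EOS_ID));
    /// ```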
pub fn encode_with_special(&self, text: &str) -> Vec<u32> {
let mut ids = vec![BOS_ID];
ids.extend(self.encode(text));
ids.push(EOS_ID);
ids
}
/// Get token string for an ID
pub fn get_token(&self, id: u32) -> Option<&str> {
self.id_to_token.get(&id).map(|s| s.as_str())
}
/// Get ID for a token string
pub fn get_id(&self, token: &str) -> Option<u32> {
self.token_to_id.get(token).copied()
}
/// Serialize tokenizer state to JSON
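    ///
    /// Output is a single JSON object of the form (values illustrative):
    /// `{"vocab_size":266,"merges":[["t","h"]],"vocab":{"[PAD]":0}}`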
pub fn to_json(&self) -> String {
        // Escape backslashes before quotes so the output stays valid JSON
        // even for tokens that contain '\' or '"'
        fn esc(s: &str) -> String {
            s.replace('\\', "\\\\").replace('"', "\\\"")
        }
        let merges_json: Vec<String> = self.merges
            .iter()
            .map(|(l, r)| format!("[\"{}\",\"{}\"]", esc(l), esc(r)))
            .collect();
        let vocab_json: Vec<String> = self.token_to_id
            .iter()
            .map(|(k, v)| format!("\"{}\":{}", esc(k), v))
            .collect();
format!(
"{{\"vocab_size\":{},\"merges\":[{}],\"vocab\":{{{}}}}}",
self.vocab_size,
merges_json.join(","),
vocab_json.join(",")
)
}
/// Deserialize tokenizer from JSON string
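    ///
    /// Sketch (round-trips the output of `to_json`):
    /// ```ignore
    /// let json = tok.to_json();
    /// let restored = Tokenizer::from_json(&json).unwrap();
    /// assert_eq!(restored.vocab_size, tok.vocab_size);
    /// ```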
pub fn from_json(json: &str) -> Result<Self, String> {
let parsed: serde_json::Value = serde_json::from_str(json)
.map_err(|e| format!("Tokenizer JSON parse error: {}", e))?;
let mut tok = Self::new();
// Load vocab
if let Some(vocab) = parsed.get("vocab").and_then(|v| v.as_object()) {
for (token, id) in vocab {
if let Some(id_num) = id.as_u64() {
let id32 = id_num as u32;
tok.token_to_id.insert(token.clone(), id32);
tok.id_to_token.insert(id32, token.clone());
}
}
}
// Load merges
if let Some(merges) = parsed.get("merges").and_then(|m| m.as_array()) {
for pair in merges {
if let Some(arr) = pair.as_array() {
if arr.len() == 2 {
if let (Some(l), Some(r)) = (arr[0].as_str(), arr[1].as_str()) {
tok.merges.push((l.to_string(), r.to_string()));
}
}
}
}
}
// Load vocab size
if let Some(vs) = parsed.get("vocab_size").and_then(|v| v.as_u64()) {
tok.vocab_size = vs as u32;
} else {
tok.vocab_size = tok.token_to_id.len() as u32;
}
Ok(tok)
}
}
// ============================================================================
// TESTS
// ============================================================================
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_special_tokens() {
let tok = Tokenizer::new();
assert_eq!(tok.get_id(PAD_TOKEN), Some(PAD_ID));
assert_eq!(tok.get_id(BOS_TOKEN), Some(BOS_ID));
assert_eq!(tok.get_id(TOOL_TOKEN), Some(TOOL_ID));
assert_eq!(tok.get_id(GATE_TOKEN), Some(GATE_ID));
assert_eq!(tok.get_token(SPF_ID), Some(SPF_TOKEN));
}
#[test]
fn test_train_and_encode() {
let mut tok = Tokenizer::new();
let corpus = "the cat sat on the mat the cat the mat";
tok.train(corpus, 300);
let ids = tok.encode("the cat");
assert!(!ids.is_empty());
assert!(ids.iter().all(|&id| id != UNK_ID));
}
#[test]
fn test_roundtrip() {
let mut tok = Tokenizer::new();
tok.train("hello world hello world", 300);
let text = "hello";
let ids = tok.encode(text);
let decoded = tok.decode(&ids);
assert_eq!(decoded, text);
}
#[test]
fn test_encode_with_special() {
let mut tok = Tokenizer::new();
tok.train("test data", 300);
let ids = tok.encode_with_special("test");
assert_eq!(ids[0], BOS_ID);
assert_eq!(*ids.last().unwrap(), EOS_ID);
}
#[test]
fn test_unknown_token() {
        let tok = Tokenizer::new(); // untrained: only special tokens, no base character vocab
        assert_eq!(tok.vocab_size, NUM_SPECIAL);
        // With no trained character vocabulary, every ordinary character
        // falls back to [UNK]; this exercises the UNK fallback path
        let ids = tok.encode("xyz");
        assert!(!ids.is_empty());
        assert!(ids.iter().all(|&id| id == UNK_ID));
}
#[test]
fn test_json_roundtrip() {
let mut tok = Tokenizer::new();
tok.train("the cat sat on the mat", 300);
let json = tok.to_json();
let tok2 = Tokenizer::from_json(&json).unwrap();
assert_eq!(tok.vocab_size, tok2.vocab_size);
assert_eq!(tok.merges.len(), tok2.merges.len());
let ids1 = tok.encode("the cat");
let ids2 = tok2.encode("the cat");
assert_eq!(ids1, ids2);
}
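    // Decode concatenates tokens with no separator, so multi-word input loses
    // its whitespace on round-trip; this test pins that behavior down.
    #[test]
    fn test_decode_multi_word_loses_whitespace() {
        let mut tok = Tokenizer::new();
        tok.train("the cat sat on the mat", 300);
        let ids = tok.encode("the cat");
        assert_eq!(tok.decode(&ids), "thecat");
    }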
}