//! Byte-pair-encoding (BPE) tokenizer with reserved special tokens.
//!
//! The vocabulary is seeded with ten control tokens and the 256 Latin-1
//! single-character tokens; `train` then learns greedy pair merges over a
//! whitespace-split corpus. Vocabularies round-trip through a small
//! hand-rolled JSON writer and a serde_json-based reader.

use std::collections::HashMap;

// Special control tokens; each has a fixed id defined below.
pub const PAD_TOKEN: &str = "[PAD]";
pub const BOS_TOKEN: &str = "[BOS]";
pub const EOS_TOKEN: &str = "[EOS]";
pub const UNK_TOKEN: &str = "[UNK]";
pub const TOOL_TOKEN: &str = "[TOOL]";
pub const GATE_TOKEN: &str = "[GATE]";
pub const USER_TOKEN: &str = "[USER]";
pub const SPF_TOKEN: &str = "[SPF]";
pub const ALLOWED_TOKEN: &str = "[ALLOWED]";
pub const BLOCKED_TOKEN: &str = "[BLOCKED]";

// Fixed ids for the special tokens above, in the same order.
pub const PAD_ID: u32 = 0;
pub const BOS_ID: u32 = 1;
pub const EOS_ID: u32 = 2;
pub const UNK_ID: u32 = 3;
pub const TOOL_ID: u32 = 4;
pub const GATE_ID: u32 = 5;
pub const USER_ID: u32 = 6;
pub const SPF_ID: u32 = 7;
pub const ALLOWED_ID: u32 = 8;
pub const BLOCKED_ID: u32 = 9;

/// Number of reserved special-token ids.
const NUM_SPECIAL: u32 = 10;
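
// Added guard (not in the original): keeps NUM_SPECIAL in sync with the id
// list above at compile time; assumes BLOCKED_ID stays the highest id.
const _: () = assert!(BLOCKED_ID + 1 == NUM_SPECIAL);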

/// A minimal greedy BPE tokenizer with a reserved special-token prefix.
#[derive(Clone)]
pub struct Tokenizer {
    /// Token string -> id.
    token_to_id: HashMap<String, u32>,
    /// Id -> token string (inverse of `token_to_id`).
    id_to_token: HashMap<u32, String>,
    /// Learned merges, in the order they were learned and must be applied.
    merges: Vec<(String, String)>,
    /// Number of ids assigned so far.
    pub vocab_size: u32,
}

impl Tokenizer {
    /// Create a tokenizer containing only the special tokens.
    pub fn new() -> Self {
        let mut tok = Self {
            token_to_id: HashMap::new(),
            id_to_token: HashMap::new(),
            merges: Vec::new(),
            vocab_size: NUM_SPECIAL,
        };
        let specials = [
            PAD_TOKEN, BOS_TOKEN, EOS_TOKEN, UNK_TOKEN, TOOL_TOKEN,
            GATE_TOKEN, USER_TOKEN, SPF_TOKEN, ALLOWED_TOKEN, BLOCKED_TOKEN,
        ];
        for (i, &s) in specials.iter().enumerate() {
            tok.token_to_id.insert(s.to_string(), i as u32);
            tok.id_to_token.insert(i as u32, s.to_string());
        }
        tok
    }

    /// Load a tokenizer from a JSON file. A missing file is not an error;
    /// it yields a fresh, untrained tokenizer.
    pub fn load(path: &str) -> Result<Self, String> {
        let file_path = std::path::Path::new(path);
        if !file_path.exists() {
            return Ok(Self::new());
        }
        let content = std::fs::read_to_string(file_path)
            .map_err(|e| format!("Read error: {}", e))?;
        Self::from_json(&content)
    }

    /// Serialize to JSON and write it to `path`.
    pub fn save(&self, path: &str) -> Result<(), String> {
        let json = self.to_json();
        std::fs::write(path, json)
            .map_err(|e| format!("Write error: {}", e))
    }

    /// Train greedy BPE merges on `corpus` until the vocabulary reaches
    /// `target_vocab` ids (specials + 256 single-character seeds + merges),
    /// or until no repeated pair remains.
    pub fn train(&mut self, corpus: &str, target_vocab: u32) {
        let num_merges = target_vocab.saturating_sub(NUM_SPECIAL + 256);

        // Seed the vocabulary with the 256 Latin-1 single-character tokens
        // so any such character can always be encoded after training.
        for b in 0u8..=255 {
            let token = (b as char).to_string();
            let id = NUM_SPECIAL + b as u32;
            self.token_to_id.insert(token.clone(), id);
            self.id_to_token.insert(id, token);
        }
        self.vocab_size = NUM_SPECIAL + 256;

        // Represent each whitespace-separated word as single-character
        // symbols that successive merges will fuse.
        let mut words: Vec<Vec<String>> = corpus
            .split_whitespace()
            .map(|w| w.chars().map(|c| c.to_string()).collect())
            .collect();

        for _ in 0..num_merges {
            // Count all adjacent symbol pairs across the corpus.
            let mut pair_counts: HashMap<(String, String), u64> = HashMap::new();
            for word in &words {
                for pair in word.windows(2) {
                    *pair_counts
                        .entry((pair[0].clone(), pair[1].clone()))
                        .or_insert(0) += 1;
                }
            }

            if pair_counts.is_empty() {
                break;
            }

            // Pick the most frequent pair; break count ties lexicographically
            // so training is reproducible despite HashMap iteration order.
            let best_pair = pair_counts
                .iter()
                .max_by(|(pa, ca), (pb, cb)| ca.cmp(cb).then_with(|| pb.cmp(pa)))
                .map(|(pair, _)| pair.clone());

            let (left, right) = match best_pair {
                Some(p) => p,
                None => break,
            };

            // Register the merged pair as a new token.
            let merged = format!("{}{}", left, right);
            let id = self.vocab_size;
            self.token_to_id.insert(merged.clone(), id);
            self.id_to_token.insert(id, merged.clone());
            self.merges.push((left.clone(), right.clone()));
            self.vocab_size += 1;

            // Apply the merge everywhere it occurs.
            for word in &mut words {
                let mut i = 0;
                while i + 1 < word.len() {
                    if word[i] == left && word[i + 1] == right {
                        word[i] = merged.clone();
                        word.remove(i + 1);
                    } else {
                        i += 1;
                    }
                }
            }
        }
    }

    /// Encode `text` into token ids. Splits on whitespace, tries a
    /// whole-word vocabulary hit first, then replays the learned merges in
    /// training order; symbols still unknown afterwards become `UNK_ID`.
    pub fn encode(&self, text: &str) -> Vec<u32> {
        let mut tokens = Vec::new();

        for word in text.split_whitespace() {
            // Fast path: the whole word (or a special token) is in the vocab.
            if let Some(&id) = self.token_to_id.get(word) {
                tokens.push(id);
                continue;
            }

            // Start from single characters and apply merges in order.
            let mut chars: Vec<String> = word.chars().map(|c| c.to_string()).collect();

            for (left, right) in &self.merges {
                let merged = format!("{}{}", left, right);
                let mut i = 0;
                while i + 1 < chars.len() {
                    if chars[i] == *left && chars[i + 1] == *right {
                        chars[i] = merged.clone();
                        chars.remove(i + 1);
                    } else {
                        i += 1;
                    }
                }
            }

            // Map the remaining symbols to ids, falling back to UNK.
            for ch in &chars {
                match self.token_to_id.get(ch) {
                    Some(&id) => tokens.push(id),
                    None => tokens.push(UNK_ID),
                }
            }
        }

        tokens
    }

    /// Decode ids back to text. PAD/BOS/EOS are dropped; unknown ids render
    /// as `[UNK]`. Note that whitespace is not reconstructed: `encode`
    /// discards it, so tokens are concatenated without separators.
    pub fn decode(&self, ids: &[u32]) -> String {
        let mut parts = Vec::new();
        for &id in ids {
            match self.id_to_token.get(&id) {
                Some(token) => {
                    // Skip pure control tokens; keep everything else verbatim.
                    match id {
                        PAD_ID | BOS_ID | EOS_ID => continue,
                        _ => parts.push(token.clone()),
                    }
                }
                None => parts.push(UNK_TOKEN.to_string()),
            }
        }
        parts.join("")
    }

    /// Encode `text` wrapped in BOS/EOS markers.
    pub fn encode_with_special(&self, text: &str) -> Vec<u32> {
        let mut ids = vec![BOS_ID];
        ids.extend(self.encode(text));
        ids.push(EOS_ID);
        ids
    }

    /// Look up the token string for `id`, if any.
    pub fn get_token(&self, id: u32) -> Option<&str> {
        self.id_to_token.get(&id).map(|s| s.as_str())
    }

    /// Look up the id for `token`, if any.
    pub fn get_id(&self, token: &str) -> Option<u32> {
        self.token_to_id.get(token).copied()
    }

    /// Escape a string for embedding in a JSON string literal. The
    /// character-level vocabulary contains `"`, `\`, and control characters,
    /// all of which must be escaped for the output to be valid JSON.
    fn escape_json(s: &str) -> String {
        let mut out = String::with_capacity(s.len());
        for c in s.chars() {
            match c {
                '"' => out.push_str("\\\""),
                '\\' => out.push_str("\\\\"),
                '\n' => out.push_str("\\n"),
                '\r' => out.push_str("\\r"),
                '\t' => out.push_str("\\t"),
                c if (c as u32) < 0x20 => out.push_str(&format!("\\u{:04x}", c as u32)),
                c => out.push(c),
            }
        }
        out
    }

    /// Serialize the vocabulary, merges, and size as a JSON object.
    pub fn to_json(&self) -> String {
        let merges_json: Vec<String> = self.merges
            .iter()
            .map(|(l, r)| format!("[\"{}\",\"{}\"]", Self::escape_json(l), Self::escape_json(r)))
            .collect();

        let vocab_json: Vec<String> = self.token_to_id
            .iter()
            .map(|(k, v)| format!("\"{}\":{}", Self::escape_json(k), v))
            .collect();

        format!(
            "{{\"vocab_size\":{},\"merges\":[{}],\"vocab\":{{{}}}}}",
            self.vocab_size,
            merges_json.join(","),
            vocab_json.join(",")
        )
    }

    /// Parse a tokenizer from the JSON produced by `to_json`.
    pub fn from_json(json: &str) -> Result<Self, String> {
        let parsed: serde_json::Value = serde_json::from_str(json)
            .map_err(|e| format!("Tokenizer JSON parse error: {}", e))?;

        let mut tok = Self::new();

        // Restore the vocabulary (special tokens are simply re-inserted).
        if let Some(vocab) = parsed.get("vocab").and_then(|v| v.as_object()) {
            for (token, id) in vocab {
                if let Some(id_num) = id.as_u64() {
                    let id32 = id_num as u32;
                    tok.token_to_id.insert(token.clone(), id32);
                    tok.id_to_token.insert(id32, token.clone());
                }
            }
        }

        // Restore the merge list, preserving order.
        if let Some(merges) = parsed.get("merges").and_then(|m| m.as_array()) {
            for pair in merges {
                if let Some(arr) = pair.as_array() {
                    if arr.len() == 2 {
                        if let (Some(l), Some(r)) = (arr[0].as_str(), arr[1].as_str()) {
                            tok.merges.push((l.to_string(), r.to_string()));
                        }
                    }
                }
            }
        }

        // Prefer the stored size; fall back to counting the vocabulary.
        if let Some(vs) = parsed.get("vocab_size").and_then(|v| v.as_u64()) {
            tok.vocab_size = vs as u32;
        } else {
            tok.vocab_size = tok.token_to_id.len() as u32;
        }

        Ok(tok)
    }
}
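
// Convenience impl (not in the original file): lets callers write
// `Tokenizer::default()`; it simply delegates to `new`.
impl Default for Tokenizer {
    fn default() -> Self {
        Self::new()
    }
}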

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_special_tokens() {
        let tok = Tokenizer::new();
        assert_eq!(tok.get_id(PAD_TOKEN), Some(PAD_ID));
        assert_eq!(tok.get_id(BOS_TOKEN), Some(BOS_ID));
        assert_eq!(tok.get_id(TOOL_TOKEN), Some(TOOL_ID));
        assert_eq!(tok.get_id(GATE_TOKEN), Some(GATE_ID));
        assert_eq!(tok.get_token(SPF_ID), Some(SPF_TOKEN));
    }
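
    // Illustrative test (added): a special token written verbatim in the
    // input hits the whole-word fast path in `encode`, even untrained.
    #[test]
    fn test_encode_special_token_text() {
        let tok = Tokenizer::new();
        assert_eq!(tok.encode("[TOOL]"), vec![TOOL_ID]);
    }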

    #[test]
    fn test_train_and_encode() {
        let mut tok = Tokenizer::new();
        let corpus = "the cat sat on the mat the cat the mat";
        tok.train(corpus, 300);

        let ids = tok.encode("the cat");
        assert!(!ids.is_empty());
        assert!(ids.iter().all(|&id| id != UNK_ID));
    }
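
    // Illustrative test (added): with a merge budget this large the corpus
    // merges fully, so a frequent word compresses to a single token.
    #[test]
    fn test_merges_compress_frequent_word() {
        let mut tok = Tokenizer::new();
        tok.train("the cat sat on the mat the cat the mat", 300);
        assert_eq!(tok.encode("the").len(), 1);
    }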

    #[test]
    fn test_roundtrip() {
        let mut tok = Tokenizer::new();
        tok.train("hello world hello world", 300);

        let text = "hello";
        let ids = tok.encode(text);
        let decoded = tok.decode(&ids);
        assert_eq!(decoded, text);
    }
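
    // Added test documenting a limitation of this design: `encode` splits on
    // whitespace and `decode` joins with "", so inter-word spaces are lost.
    #[test]
    fn test_roundtrip_drops_spaces() {
        let mut tok = Tokenizer::new();
        tok.train("hello world hello world", 300);
        let ids = tok.encode("hello world");
        assert_eq!(tok.decode(&ids), "helloworld");
    }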

    #[test]
    fn test_encode_with_special() {
        let mut tok = Tokenizer::new();
        tok.train("test data", 300);

        let ids = tok.encode_with_special("test");
        assert_eq!(ids[0], BOS_ID);
        assert_eq!(*ids.last().unwrap(), EOS_ID);
    }
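
    // Added test: `decode` filters PAD/BOS/EOS, so wrapping with
    // `encode_with_special` and decoding returns the bare text.
    #[test]
    fn test_decode_skips_control_tokens() {
        let mut tok = Tokenizer::new();
        tok.train("test data", 300);
        let ids = tok.encode_with_special("test");
        assert_eq!(tok.decode(&ids), "test");
    }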

    #[test]
    fn test_unknown_token() {
        let tok = Tokenizer::new();
        // A fresh tokenizer carries only the reserved special tokens.
        assert_eq!(tok.vocab_size, NUM_SPECIAL);
    }
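
    // Added test: without training there is no character vocabulary, so
    // ordinary text falls back to UNK_ID per character.
    #[test]
    fn test_untrained_encode_is_unk() {
        let tok = Tokenizer::new();
        assert_eq!(tok.encode("abc"), vec![UNK_ID; 3]);
    }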

    #[test]
    fn test_json_roundtrip() {
        let mut tok = Tokenizer::new();
        tok.train("the cat sat on the mat", 300);

        let json = tok.to_json();
        let tok2 = Tokenizer::from_json(&json).unwrap();

        assert_eq!(tok.vocab_size, tok2.vocab_size);
        assert_eq!(tok.merges.len(), tok2.merges.len());

        let ids1 = tok.encode("the cat");
        let ids2 = tok2.encode("the cat");
        assert_eq!(ids1, ids2);
    }
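
    // Added test (assumes a writable temp dir): exercises `save` and `load`
    // end to end through the filesystem.
    #[test]
    fn test_save_load_roundtrip() {
        let mut tok = Tokenizer::new();
        tok.train("the cat sat on the mat", 300);
        let path = std::env::temp_dir().join("tokenizer_save_load_test.json");
        let path = path.to_str().unwrap();
        tok.save(path).unwrap();
        let tok2 = Tokenizer::load(path).unwrap();
        assert_eq!(tok.encode("the cat"), tok2.encode("the cat"));
        let _ = std::fs::remove_file(path);
    }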
}