| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| use serde::{Deserialize, Serialize}; |
|
|
| |
| |
| |
|
|
| |
| |
| |
| |
/// Global singleton holding the currently open voice session, if any.
/// `None` means no session has been started (or the last one was dropped).
pub static VOICE_SESSION: std::sync::Mutex<Option<VoiceSession>> = std::sync::Mutex::new(None);
|
|
| |
| |
| |
|
|
| |
| #[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)] |
| pub enum VoiceMode { |
| |
| Light, |
| |
| Rich, |
| } |
|
|
| impl Default for VoiceMode { |
| fn default() -> Self { Self::Light } |
| } |
|
|
| |
/// User-facing voice preferences (persisted via serde).
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct VoiceSettings {
    // Quality tier for the agent's text-to-speech engine.
    pub agent_tts_mode: VoiceMode,
    // Quality tier for the agent's speech-to-text engine.
    pub agent_stt_mode: VoiceMode,
    // Quality tier requested for peer-to-peer audio.
    pub peer_quality: VoiceMode,
    // When true, incoming calls are answered without prompting.
    pub auto_accept_calls: bool,
    // Idle timeout in seconds (default 300 — see `Default` impl).
    pub idle_timeout_secs: u32,
}
|
|
| impl Default for VoiceSettings { |
| fn default() -> Self { |
| Self { |
| agent_tts_mode: VoiceMode::Light, |
| agent_stt_mode: VoiceMode::Light, |
| peer_quality: VoiceMode::Light, |
| auto_accept_calls: false, |
| idle_timeout_secs: 300, |
| } |
| } |
| } |
|
|
| |
/// Which side initiated a call.
#[derive(Debug, Clone, PartialEq)]
pub enum CallDirection {
    /// We placed the call.
    Outgoing,
    /// The remote peer placed the call.
    Incoming,
}
|
|
| |
/// Lifecycle phase of a call.
#[derive(Debug, Clone, PartialEq)]
pub enum CallPhase {
    /// Call requested but not yet answered.
    Ringing,
    /// Call answered; considered in progress.
    Active,
    /// Call finished.
    Ended,
}
|
|
| |
/// State of a single one-to-one call.
#[derive(Debug, Clone)]
pub struct CallState {
    // Remote peer's key/identity string.
    pub peer_key: String,
    // Remote peer's display name.
    pub peer_name: String,
    // Which side initiated the call.
    pub direction: CallDirection,
    // Current lifecycle phase.
    pub phase: CallPhase,
    // When this state was created; basis for `elapsed_secs()`.
    pub started_at: std::time::Instant,
}
|
|
| impl CallState { |
| |
| pub fn is_active(&self) -> bool { |
| matches!(self.phase, CallPhase::Active) |
| } |
| |
| pub fn is_ringing(&self) -> bool { |
| matches!(self.phase, CallPhase::Ringing) |
| } |
| |
| pub fn is_in_progress(&self) -> bool { |
| matches!(self.phase, CallPhase::Ringing | CallPhase::Active) |
| } |
| |
| pub fn elapsed_secs(&self) -> u64 { |
| self.started_at.elapsed().as_secs() |
| } |
| } |
|
|
| |
/// Global voice settings, shared across threads behind a mutex.
/// NOTE(review): the initializer hand-duplicates `VoiceSettings::default()`
/// because `Default::default()` cannot run in a `const` static initializer —
/// keep the two in sync if defaults change.
pub static VOICE_SETTINGS: std::sync::Mutex<VoiceSettings> = std::sync::Mutex::new(VoiceSettings {
    agent_tts_mode: VoiceMode::Light,
    agent_stt_mode: VoiceMode::Light,
    peer_quality: VoiceMode::Light,
    auto_accept_calls: false,
    idle_timeout_secs: 300,
});
|
|
| |
/// Global state of the current call, if any; `None` when no call exists.
pub static CALL_STATE: std::sync::Mutex<Option<CallState>> = std::sync::Mutex::new(None);
|
|
| |
| |
| |
/// Stub bindings for the `spf-voice` native layer (WB-3).
///
/// Every operation is a placeholder: the pipeline "opens" successfully but
/// reports no capabilities, capture yields no samples, and the codec/TTS
/// entry points fail with a descriptive error.
pub(crate) mod spf_voice {
    /// Snapshot of the capabilities the native layer reports.
    pub struct Status {
        pub tts_available: bool,
        pub input_available: bool,
        pub output_available: bool,
        pub codec_available: bool,
        pub pipeline_open: bool,
    }

    /// Opens the audio pipeline. The stub always succeeds.
    pub fn open() -> Result<(), String> {
        Ok(())
    }

    /// Closes the audio pipeline. The stub is a no-op.
    pub fn close() {}

    /// Captures up to `_ms` milliseconds of PCM. The stub returns no samples.
    pub fn listen(_ms: u64) -> Result<Vec<i16>, String> {
        Ok(Vec::new())
    }

    /// Speaks `_text` aloud. The stub always fails.
    pub fn speak(_text: &str) -> Result<(), String> {
        Err(String::from("spf-voice not available (WB-3 stub)"))
    }

    /// Encodes PCM samples. The stub always fails.
    pub fn encode(_pcm: &[i16]) -> Result<Vec<u8>, String> {
        Err(String::from("spf-voice not available (WB-3 stub)"))
    }

    /// Decodes compressed audio. The stub always fails.
    pub fn decode(_data: &[u8]) -> Result<Vec<i16>, String> {
        Err(String::from("spf-voice not available (WB-3 stub)"))
    }

    /// Plays PCM samples. The stub always fails.
    pub fn play(_pcm: &[i16]) -> Result<(), String> {
        Err(String::from("spf-voice not available (WB-3 stub)"))
    }

    /// Reports all capabilities as unavailable.
    pub fn status() -> Status {
        Status {
            tts_available: false,
            input_available: false,
            output_available: false,
            codec_available: false,
            pipeline_open: false,
        }
    }
}
|
|
| |
| |
| |
|
|
| |
| |
/// One encoded audio frame as carried between peers.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VoiceFrame {
    // Codec name: "opus" for encoded frames, "pcm" for raw 16-bit LE samples
    // (see `VoiceSession::capture_frame` / `play_frame`).
    pub codec: String,
    // Sample rate in Hz.
    pub sample_rate: u32,
    // Channel count (1 = mono).
    pub channels: u8,
    // Duration covered by this frame, in milliseconds.
    pub frame_duration_ms: u16,
    // Encoded payload bytes.
    pub data: Vec<u8>,
    // Sequence number assigned by the sender.
    pub seq: u64,
    // Sender identity string.
    pub from: String,
    // RFC 3339 timestamp of capture/synthesis.
    pub timestamp: String,

    // Team-channel id for group audio; `None` for direct calls. Defaults on
    // deserialize so older frames without the field still parse.
    #[serde(default)]
    pub team_id: Option<String>,
}
|
|
| |
| |
| |
|
|
| |
/// Audio/codec parameters for a voice session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VoiceConfig {
    // Codec name (default "opus").
    pub codec: String,
    // Sample rate in Hz (default 16000).
    pub sample_rate: u32,
    // Channel count (default 1, mono).
    pub channels: u8,
    // Frame duration in milliseconds (default 20).
    pub frame_duration_ms: u16,
    // Target encoder bitrate in bits/s (default 24000).
    pub bitrate: u32,
    // Whether voice-activity detection is requested (default true).
    pub vad_enabled: bool,
}
|
|
| impl Default for VoiceConfig { |
| fn default() -> Self { |
| Self { |
| codec: "opus".to_string(), |
| sample_rate: 16000, |
| channels: 1, |
| frame_duration_ms: 20, |
| bitrate: 24000, |
| vad_enabled: true, |
| } |
| } |
| } |
|
|
| impl VoiceConfig { |
| |
| |
| pub fn pcm_frame_bytes(&self) -> usize { |
| (self.sample_rate as usize * self.channels as usize * 2 |
| * self.frame_duration_ms as usize) / 1000 |
| } |
| } |
|
|
| |
| |
| |
|
|
| |
/// Source of captured audio frames.
pub trait AudioInput: Send + Sync {
    /// Reads one frame of audio bytes, or an error if capture fails.
    fn read_frame(&mut self) -> Result<Vec<u8>, VoiceError>;
    /// Whether this input can currently capture audio.
    fn is_available(&self) -> bool;
    /// The configuration this input captures with.
    fn config(&self) -> &VoiceConfig;
}
|
|
| |
/// Sink for playing raw PCM audio.
pub trait AudioOutput: Send + Sync {
    /// Writes one frame of PCM bytes for playback.
    fn write_frame(&mut self, pcm_data: &[u8]) -> Result<(), VoiceError>;
    /// Whether this output can currently play audio.
    fn is_available(&self) -> bool;
    /// The configuration this output plays with.
    fn config(&self) -> &VoiceConfig;
}
|
|
| |
/// Speech-to-text engine interface.
pub trait SpeechToText: Send + Sync {
    /// Transcribes the given frames into text.
    fn transcribe(&mut self, frames: &[VoiceFrame]) -> Result<String, VoiceError>;
    /// Whether this engine is usable.
    fn is_available(&self) -> bool;
    /// Language codes this engine supports.
    fn supported_languages(&self) -> Vec<String>;
}
|
|
| |
/// Text-to-speech engine interface.
pub trait TextToSpeech: Send + Sync {
    /// Synthesizes the text into one or more audio frames.
    fn synthesize(&mut self, text: &str) -> Result<Vec<VoiceFrame>, VoiceError>;
    /// Whether this engine is usable.
    fn is_available(&self) -> bool;
    /// Names of the voices this engine can speak with.
    fn available_voices(&self) -> Vec<String>;
}
|
|
| |
/// Errors produced by the voice subsystem; each carries a detail message.
#[derive(Debug, Clone)]
pub enum VoiceError {
    // The requested capability has no backing engine or device.
    NotAvailable(String),
    // Encoding/decoding or inference failure.
    CodecError(String),
    // Audio device or native-pipeline failure.
    DeviceError(String),
    // Bad or missing configuration/model files.
    ConfigError(String),
}
|
|
| impl std::fmt::Display for VoiceError { |
| fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { |
| match self { |
| VoiceError::NotAvailable(msg) => write!(f, "Not available: {}", msg), |
| VoiceError::CodecError(msg) => write!(f, "Codec error: {}", msg), |
| VoiceError::DeviceError(msg) => write!(f, "Device error: {}", msg), |
| VoiceError::ConfigError(msg) => write!(f, "Config error: {}", msg), |
| } |
| } |
| } |
|
|
| |
| |
| |
|
|
| |
/// Placeholder `AudioInput` that is never available.
pub struct StubAudioInput {
    config: VoiceConfig,
}
|
|
| impl StubAudioInput { |
| pub fn new() -> Self { |
| Self { config: VoiceConfig::default() } |
| } |
| } |
|
|
| impl AudioInput for StubAudioInput { |
| fn read_frame(&mut self) -> Result<Vec<u8>, VoiceError> { |
| Err(VoiceError::NotAvailable("Audio input not implemented".into())) |
| } |
|
|
| fn is_available(&self) -> bool { |
| false |
| } |
|
|
| fn config(&self) -> &VoiceConfig { |
| &self.config |
| } |
| } |
|
|
| |
/// Placeholder `AudioOutput` that is never available.
pub struct StubAudioOutput {
    config: VoiceConfig,
}
|
|
| impl StubAudioOutput { |
| pub fn new() -> Self { |
| Self { config: VoiceConfig::default() } |
| } |
| } |
|
|
| impl AudioOutput for StubAudioOutput { |
| fn write_frame(&mut self, _pcm_data: &[u8]) -> Result<(), VoiceError> { |
| Err(VoiceError::NotAvailable("Audio output not implemented".into())) |
| } |
|
|
| fn is_available(&self) -> bool { |
| false |
| } |
|
|
| fn config(&self) -> &VoiceConfig { |
| &self.config |
| } |
| } |
|
|
| |
/// Placeholder `SpeechToText` engine that is never available.
pub struct StubSTT;
|
|
| impl SpeechToText for StubSTT { |
| fn transcribe(&mut self, _frames: &[VoiceFrame]) -> Result<String, VoiceError> { |
| Err(VoiceError::NotAvailable("Speech-to-text not implemented".into())) |
| } |
|
|
| fn is_available(&self) -> bool { |
| false |
| } |
|
|
| fn supported_languages(&self) -> Vec<String> { |
| vec![] |
| } |
| } |
|
|
| |
/// Placeholder `TextToSpeech` engine that is never available.
pub struct StubTTS;
|
|
| impl TextToSpeech for StubTTS { |
| fn synthesize(&mut self, _text: &str) -> Result<Vec<VoiceFrame>, VoiceError> { |
| Err(VoiceError::NotAvailable("Text-to-speech not implemented".into())) |
| } |
|
|
| fn is_available(&self) -> bool { |
| false |
| } |
|
|
| fn available_voices(&self) -> Vec<String> { |
| vec![] |
| } |
| } |
|
|
| |
|
|
| |
|
|
| |
|
|
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
/// Minimal byte-level (GPT-2-style) tokenizer for decoding Whisper output ids.
#[cfg(feature = "voice-stt")]
pub struct VoiceTokenizer {
    // id -> token string (reverse of vocab.json).
    id_to_token: std::collections::HashMap<u32, String>,
    // token string -> id (vocab.json plus added_tokens.json).
    token_to_id_map: std::collections::HashMap<String, u32>,
    // Inverse of the byte-level encoder's byte->unicode table.
    unicode_to_byte: std::collections::HashMap<char, u8>,
    // Ids of tokens shaped like "<|...|>", skippable during decode.
    special_ids: std::collections::HashSet<u32>,
}
|
|
#[cfg(feature = "voice-stt")]
impl VoiceTokenizer {
    /// Loads a byte-level vocabulary from `vocab.json`, merging in
    /// `added_tokens.json` from the same directory when present.
    ///
    /// # Errors
    /// `VoiceError::ConfigError` if the vocab file cannot be read or parsed.
    pub fn from_file(vocab_path: &std::path::Path) -> Result<Self, VoiceError> {
        let json = std::fs::read_to_string(vocab_path)
            .map_err(|e| VoiceError::ConfigError(format!("vocab.json read: {}", e)))?;
        let mut token_to_id_map: std::collections::HashMap<String, u32> =
            serde_json::from_str(&json)
                .map_err(|e| VoiceError::ConfigError(format!("vocab.json parse: {}", e)))?;

        // added_tokens.json is optional: a missing or malformed file is ignored
        // rather than failing the load.
        if let Some(parent) = vocab_path.parent() {
            let added_path = parent.join("added_tokens.json");
            if let Ok(added_json) = std::fs::read_to_string(&added_path) {
                if let Ok(added) = serde_json::from_str::<std::collections::HashMap<String, u32>>(&added_json) {
                    token_to_id_map.extend(added);
                }
            }
        }

        let unicode_to_byte = Self::build_unicode_to_byte();
        let mut special_ids = std::collections::HashSet::new();
        // Build the reverse map; tokens shaped like "<|...|>" are remembered as
        // special so decode() can skip them.
        let id_to_token: std::collections::HashMap<u32, String> = token_to_id_map
            .iter()
            .map(|(tok, &id)| {
                if tok.starts_with("<|") && tok.ends_with("|>") {
                    special_ids.insert(id);
                }
                (id, tok.clone())
            })
            .collect();

        Ok(Self { id_to_token, token_to_id_map, unicode_to_byte, special_ids })
    }

    /// Looks up the id for an exact token string, if present.
    pub fn token_to_id(&self, token: &str) -> Option<u32> {
        self.token_to_id_map.get(token).copied()
    }

    /// Decodes token ids to text: each token's chars are mapped back to bytes
    /// via the byte-level table, concatenated, and read as (lossy) UTF-8.
    /// Unknown ids and unmapped chars are silently dropped; the result is
    /// trimmed of surrounding whitespace.
    pub fn decode(&self, ids: &[u32], skip_special: bool) -> String {
        let bytes: Vec<u8> = ids
            .iter()
            .filter(|&&id| !skip_special || !self.special_ids.contains(&id))
            .filter_map(|id| self.id_to_token.get(id))
            .flat_map(|tok| {
                tok.chars().filter_map(|c| self.unicode_to_byte.get(&c).copied())
            })
            .collect();
        String::from_utf8_lossy(&bytes).trim().to_string()
    }

    /// Builds the inverse of the byte-level encoder's byte→unicode table:
    /// "visible" bytes map to their own char; every other byte was assigned a
    /// consecutive code point starting at U+0100, in ascending byte order.
    fn build_unicode_to_byte() -> std::collections::HashMap<char, u8> {
        // The byte ranges the encoder keeps as-is ('!'..'~', 0xA1..0xAC, 0xAE..0xFF).
        let visible: Vec<u8> = (b'!'..=b'~')
            .chain(b'\xa1'..=b'\xac')
            .chain(b'\xae'..=b'\xff')
            .collect();
        let visible_set: std::collections::HashSet<u8> = visible.iter().copied().collect();

        let mut map = std::collections::HashMap::with_capacity(256);

        // Visible bytes map to themselves.
        for &b in &visible {
            map.insert(b as char, b);
        }

        // Remaining bytes get consecutive code points from 256 upward.
        let mut cp = 256u32;
        for b in 0u8..=255 {
            if !visible_set.contains(&b) {
                if let Some(c) = char::from_u32(cp) {
                    map.insert(c, b);
                }
                cp += 1;
            }
        }
        map
    }
}
|
|
| |
| |
| |
| |
| |
| |
|
|
| #[cfg(feature = "voice-stt")] |
| pub struct CandleWhisperSTT { |
| model: candle_transformers::models::whisper::model::Whisper, |
| tokenizer: VoiceTokenizer, |
| mel_filters: Vec<f32>, |
| config: candle_transformers::models::whisper::Config, |
| device: candle_core::Device, |
| eot_token: u32, |
| sot_token: u32, |
| transcribe_token: u32, |
| no_timestamps_token: u32, |
| } |
|
|
| |
| |
// SAFETY: asserted, not proven locally — this claims the candle model/device
// fields are safe to move across threads (the struct is only used behind
// `&mut self`). NOTE(review): verify the contained candle types are actually
// `Send`-safe; an `unsafe impl Send` silences the compiler's check entirely.
#[cfg(feature = "voice-stt")]
unsafe impl Send for CandleWhisperSTT {}
|
|
#[cfg(feature = "voice-stt")]
impl CandleWhisperSTT {
    /// Loads a Whisper model from `model_dir`, which must contain
    /// `config.json`, `vocab.json`, `melfilters.bytes` (little-endian f32s),
    /// and `model.safetensors`. Always runs on CPU.
    ///
    /// # Errors
    /// `DeviceError`/`ConfigError` if any file is missing, unparseable, or a
    /// required special token is absent from the vocabulary.
    pub fn new(model_dir: &std::path::Path) -> Result<Self, VoiceError> {
        let config_str = std::fs::read_to_string(model_dir.join("config.json"))
            .map_err(|e| VoiceError::DeviceError(format!("Whisper config.json: {}", e)))?;
        let config: candle_transformers::models::whisper::Config =
            serde_json::from_str(&config_str)
                .map_err(|e| VoiceError::ConfigError(format!("Whisper config parse: {}", e)))?;

        let tokenizer = VoiceTokenizer::from_file(&model_dir.join("vocab.json"))?;

        // Resolve the special ids the greedy decoder needs up front so a bad
        // vocabulary fails at load time, not mid-transcription.
        let eot_token = tokenizer
            .token_to_id(candle_transformers::models::whisper::EOT_TOKEN)
            .ok_or_else(|| VoiceError::ConfigError("EOT token missing".into()))?;
        let sot_token = tokenizer
            .token_to_id(candle_transformers::models::whisper::SOT_TOKEN)
            .ok_or_else(|| VoiceError::ConfigError("SOT token missing".into()))?;
        let transcribe_token = tokenizer
            .token_to_id(candle_transformers::models::whisper::TRANSCRIBE_TOKEN)
            .ok_or_else(|| VoiceError::ConfigError("TRANSCRIBE token missing".into()))?;
        let no_timestamps_token = tokenizer
            .token_to_id(candle_transformers::models::whisper::NO_TIMESTAMPS_TOKEN)
            .ok_or_else(|| VoiceError::ConfigError("NO_TIMESTAMPS token missing".into()))?;

        // Mel filterbank: raw little-endian f32 values, 4 bytes each.
        let mel_bytes = std::fs::read(model_dir.join("melfilters.bytes"))
            .map_err(|e| VoiceError::DeviceError(format!("melfilters.bytes: {}", e)))?;
        let mel_filters: Vec<f32> = mel_bytes
            .chunks_exact(4)
            .map(|b| f32::from_le_bytes(b.try_into().unwrap()))
            .collect();

        // SAFETY: from_mmaped_safetensors memory-maps the weights file; the
        // caller must not modify the file while the model is alive.
        let device = candle_core::Device::Cpu;
        let vb = unsafe {
            candle_nn::VarBuilder::from_mmaped_safetensors(
                &[model_dir.join("model.safetensors")],
                candle_transformers::models::whisper::DTYPE,
                &device,
            )
            .map_err(|e| VoiceError::DeviceError(format!("Weights load: {}", e)))?
        };
        let model =
            candle_transformers::models::whisper::model::Whisper::load(&vb, config.clone())
                .map_err(|e| VoiceError::DeviceError(format!("Model init: {}", e)))?;

        eprintln!("[SPF-STT] Candle Whisper loaded from {:?}", model_dir);
        Ok(Self {
            model,
            tokenizer,
            mel_filters,
            config,
            device,
            eot_token,
            sot_token,
            transcribe_token,
            no_timestamps_token,
        })
    }

    /// Converts f32 PCM samples (expected mono, model sample rate) to a mel
    /// spectrogram tensor of shape (1, n_mel, frames).
    pub fn audio_to_mel(&self, samples: &[f32]) -> Result<candle_core::Tensor, VoiceError> {
        let mel = candle_transformers::models::whisper::audio::pcm_to_mel(
            &self.config,
            samples,
            &self.mel_filters,
        );
        let mel_len = mel.len();
        let n_mel = self.config.num_mel_bins;
        candle_core::Tensor::from_vec(mel, (1, n_mel, mel_len / n_mel), &self.device)
            .map_err(|e| VoiceError::DeviceError(format!("Mel tensor: {}", e)))
    }

    /// Runs the encoder once, then greedily decodes token-by-token (argmax,
    /// no sampling/beam search) until EOT or the model's position limit.
    pub fn decode_segment(&mut self, mel: &candle_core::Tensor) -> Result<String, VoiceError> {
        let audio_features = self.model.encoder.forward(mel, true)
            .map_err(|e| VoiceError::DeviceError(format!("Encoder: {}", e)))?;

        // Prompt: SOT + TRANSCRIBE + NO_TIMESTAMPS (plain-text transcription).
        let mut tokens: Vec<u32> =
            vec![self.sot_token, self.transcribe_token, self.no_timestamps_token];
        let max_tokens = self.config.max_target_positions;

        for i in 0..max_tokens {
            let tokens_t = candle_core::Tensor::new(tokens.as_slice(), &self.device)
                .and_then(|t| t.unsqueeze(0))
                .map_err(|e| VoiceError::DeviceError(format!("Token tensor: {}", e)))?;

            // Third argument resets the decoder cache on the first step only —
            // NOTE(review): confirm this matches the candle decoder's flush flag.
            let ys = self.model.decoder.forward(&tokens_t, &audio_features, i == 0)
                .map_err(|e| VoiceError::DeviceError(format!("Decoder step {}: {}", i, e)))?;

            let (_, seq_len, _) = ys.dims3()
                .map_err(|e| VoiceError::DeviceError(format!("dims3: {}", e)))?;
            if seq_len == 0 { break; }

            // Project only the last position's hidden state to vocab logits.
            let logits = self.model.decoder
                .final_linear(&ys.i((..1, seq_len - 1..))
                    .map_err(|e| VoiceError::DeviceError(format!("Slice: {}", e)))?)
                .and_then(|t| t.i(0))
                .and_then(|t| t.i(0))
                .map_err(|e| VoiceError::DeviceError(format!("Logits: {}", e)))?;

            let logits_v: Vec<f32> = logits.to_vec1()
                .map_err(|e| VoiceError::DeviceError(format!("to_vec1: {}", e)))?;

            // Greedy argmax; an empty logits vector degenerates to EOT.
            let next_token = logits_v
                .iter()
                .enumerate()
                .max_by(|(_, a), (_, b)| a.total_cmp(b))
                .map(|(idx, _)| idx as u32)
                .unwrap_or(self.eot_token);

            if next_token == self.eot_token || tokens.len() >= max_tokens {
                break;
            }
            tokens.push(next_token);
        }

        // skip_special = true drops the SOT/TRANSCRIBE/... markers from the text.
        let text = self.tokenizer.decode(&tokens, true);
        Ok(text)
    }
}
|
|
#[cfg(feature = "voice-stt")]
impl SpeechToText for CandleWhisperSTT {
    /// Transcribes the concatenated frames. The payloads are interpreted as
    /// 16-bit little-endian PCM and normalized to f32 in [-1, 1).
    /// NOTE(review): Opus-coded frames are NOT decoded here — confirm callers
    /// only pass raw PCM frames to this path.
    fn transcribe(&mut self, frames: &[VoiceFrame]) -> Result<String, VoiceError> {
        if frames.is_empty() {
            return Ok(String::new());
        }

        let samples: Vec<f32> = frames
            .iter()
            .flat_map(|f| f.data.chunks_exact(2))
            .map(|pair| i16::from_le_bytes([pair[0], pair[1]]) as f32 / 32768.0)
            .collect();
        let mel = self.audio_to_mel(&samples)?;
        self.decode_segment(&mel)
    }

    /// A successfully constructed model is always considered available.
    fn is_available(&self) -> bool { true }

    /// A fixed list of language codes advertised for this engine.
    fn supported_languages(&self) -> Vec<String> {
        vec![
            "en".into(), "es".into(), "fr".into(), "de".into(), "it".into(),
            "pt".into(), "nl".into(), "ja".into(), "ko".into(), "zh".into(),
            "ru".into(), "ar".into(), "hi".into(), "pl".into(), "sv".into(),
        ]
    }
}
|
|
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
/// Transcript segments produced by the streaming thread, drained by
/// `stream_read()`. Also carries "\n" and "[REPLY]" marker entries pushed by
/// voice commands.
pub static TRANSCRIPT_QUEUE: std::sync::Mutex<Vec<String>> =
    std::sync::Mutex::new(Vec::new());
|
|
| |
| |
/// True while the background streaming thread should keep capturing.
/// Cleared by `stream_off()`, an "end stream" voice command, or a capture error.
pub static STREAM_ACTIVE: std::sync::atomic::AtomicBool =
    std::sync::atomic::AtomicBool::new(false);
|
|
| |
| |
| |
/// Starts a background thread that continuously captures audio via `spf_voice`,
/// transcribes it with Whisper, and pushes text into `TRANSCRIPT_QUEUE`.
/// Recognized voice commands ("end stream", "enter", "reply", "clear",
/// "read back", "start stream") are intercepted instead of being queued.
///
/// # Errors
/// `DeviceError` if a stream is already active, or any model-load error from
/// `CandleWhisperSTT::new`.
#[cfg(feature = "voice-stt")]
pub fn stream_on(model_dir: std::path::PathBuf) -> Result<(), VoiceError> {
    if STREAM_ACTIVE.load(std::sync::atomic::Ordering::SeqCst) {
        return Err(VoiceError::DeviceError(
            "Stream already active — call stream_off first".into(),
        ));
    }

    // Load the model before spawning so a bad model dir fails synchronously.
    let mut stt = CandleWhisperSTT::new(&model_dir)?;

    STREAM_ACTIVE.store(true, std::sync::atomic::Ordering::SeqCst);

    // Start from an empty transcript buffer.
    if let Ok(mut q) = TRANSCRIPT_QUEUE.lock() { q.clear(); }

    std::thread::spawn(move || {
        eprintln!("[SPF-STREAM] Streaming started — via spf-voice, 16 kHz mono, continuous");

        // Capture granularity: each listen() call blocks for one chunk.
        const CHUNK_MS: u64 = 10_000;

        loop {
            if !STREAM_ACTIVE.load(std::sync::atomic::Ordering::SeqCst) { break; }

            match spf_voice::listen(CHUNK_MS) {
                Ok(pcm_i16) => {
                    if pcm_i16.is_empty() { continue; }

                    // Normalize i16 PCM to f32 in [-1, 1) for the mel frontend.
                    let f32_samples: Vec<f32> = pcm_i16.iter()
                        .map(|&s| s as f32 / 32768.0)
                        .collect();

                    match stt.audio_to_mel(&f32_samples) {
                        Ok(mel) => match stt.decode_segment(&mel) {
                            Ok(text) if !text.is_empty() => {
                                eprintln!("[SPF-STREAM] → {}", text);
                                // Command matching is case-insensitive and ignores
                                // surrounding punctuation.
                                let lower = text.to_lowercase();
                                let trimmed = lower.trim().trim_matches(|c: char| !c.is_alphanumeric() && c != ' ');

                                let is_end = trimmed.contains("end stream") || trimmed.contains("stop stream");
                                let is_enter = trimmed == "enter" || trimmed.ends_with(" enter")
                                    || trimmed == "new line" || trimmed == "newline";
                                let is_reply = trimmed == "reply" || trimmed.ends_with(" reply");
                                let is_clear = trimmed == "clear" || trimmed.starts_with("clear ");
                                let is_read_back = trimmed.contains("read back") || trimmed.contains("read it back");
                                let is_start = trimmed.contains("start stream");
                                let is_command = is_end || is_enter || is_reply || is_clear || is_read_back || is_start;

                                if is_command {
                                    // Commands mutate the queue/state instead of being queued.
                                    if is_clear {
                                        if let Ok(mut q) = TRANSCRIPT_QUEUE.lock() { q.clear(); }
                                        eprintln!("[SPF-STREAM] CMD: clear — buffer cleared");
                                    } else if is_read_back {
                                        let current = TRANSCRIPT_QUEUE.lock()
                                            .map(|q| q.join(" ")).unwrap_or_default();
                                        if !current.is_empty() {
                                            let _ = spf_voice::speak(&format!("You said: {}", current));
                                        } else {
                                            let _ = spf_voice::speak("Nothing to read back");
                                        }
                                        eprintln!("[SPF-STREAM] CMD: read back");
                                    } else if is_reply {
                                        if let Ok(mut q) = TRANSCRIPT_QUEUE.lock() {
                                            q.push("[REPLY]".to_string());
                                        }
                                        eprintln!("[SPF-STREAM] CMD: reply — marker pushed");
                                    } else if is_enter {
                                        if let Ok(mut q) = TRANSCRIPT_QUEUE.lock() {
                                            q.push("\n".to_string());
                                        }
                                        eprintln!("[SPF-STREAM] CMD: enter — newline pushed");
                                    } else if is_start {
                                        eprintln!("[SPF-STREAM] CMD: start stream — already running");
                                    }
                                    // "end stream" is checked separately so it also fires
                                    // when combined with another command phrase.
                                    if is_end {
                                        eprintln!("[SPF-STREAM] CMD: end stream — stopping");
                                        STREAM_ACTIVE.store(false, std::sync::atomic::Ordering::SeqCst);
                                        break;
                                    }
                                } else {
                                    // Plain speech: append to the transcript buffer.
                                    if let Ok(mut q) = TRANSCRIPT_QUEUE.lock() {
                                        q.push(text);
                                    }
                                }
                            }
                            Ok(_) => {}
                            Err(e) => eprintln!("[SPF-STREAM] Transcribe error: {}", e),
                        },
                        Err(e) => eprintln!("[SPF-STREAM] Mel error: {}", e),
                    }
                }
                Err(e) => {
                    // Capture failure is fatal for the stream.
                    eprintln!("[SPF-STREAM] Listen error: {} — stopping stream", e);
                    STREAM_ACTIVE.store(false, std::sync::atomic::Ordering::SeqCst);
                    break;
                }
            }
        }

        // Redundant after the break paths above, but guarantees the flag clears
        // on every exit route.
        STREAM_ACTIVE.store(false, std::sync::atomic::Ordering::SeqCst);
        eprintln!("[SPF-STREAM] Streaming stopped");
    });

    Ok(())
}
|
|
| |
| pub fn stream_off() { |
| STREAM_ACTIVE.store(false, std::sync::atomic::Ordering::SeqCst); |
| eprintln!("[SPF-STREAM] Stop signal sent"); |
| } |
|
|
| |
| |
| pub fn stream_read() -> Vec<String> { |
| TRANSCRIPT_QUEUE |
| .lock() |
| .map(|mut q| std::mem::take(&mut *q)) |
| .unwrap_or_default() |
| } |
|
|
| |
| |
| |
|
|
/// Piper neural text-to-speech backed by an ONNX Runtime session.
/// NOTE(review): `OrtSession` is not defined in this chunk — presumably an
/// alias for the ort session type declared elsewhere in the file; confirm.
#[cfg(feature = "voice-tts")]
pub struct PiperTTS {
    // Loaded ONNX inference session for the Piper voice model.
    session: OrtSession,
    // Frame/channel parameters used when packaging output frames.
    config: VoiceConfig,
    // Output sample rate read from the voice's JSON config (default 22050).
    model_sample_rate: u32,
}
|
|
#[cfg(feature = "voice-tts")]
impl PiperTTS {
    /// Loads a Piper ONNX voice model plus its JSON config.
    ///
    /// # Errors
    /// `DeviceError` if the ONNX model fails to load; `ConfigError` if the
    /// JSON config cannot be read or parsed.
    pub fn new(model_path: &str, config_path: &str) -> Result<Self, VoiceError> {
        let session = OrtSession::builder()
            .and_then(|b| b.commit_from_file(model_path))
            .map_err(|e| VoiceError::DeviceError(format!("ONNX model load: {}", e)))?;

        // Only audio.sample_rate is consumed from the config; a missing value
        // falls back to Piper's common 22050 Hz.
        let config_data = std::fs::read_to_string(config_path)
            .map_err(|e| VoiceError::ConfigError(format!("Piper config read: {}", e)))?;
        let config_json: serde_json::Value = serde_json::from_str(&config_data)
            .map_err(|e| VoiceError::ConfigError(format!("Piper config parse: {}", e)))?;
        let model_sample_rate = config_json["audio"]["sample_rate"].as_u64().unwrap_or(22050) as u32;

        Ok(Self {
            session,
            config: VoiceConfig::default(),
            model_sample_rate,
        })
    }
}
|
|
#[cfg(feature = "voice-tts")]
impl TextToSpeech for PiperTTS {
    /// Synthesizes `text` to PCM frames via ONNX inference.
    /// NOTE(review): input ids are raw `char` code points, not phonemized —
    /// output quality depends on the model accepting codepoint ids; confirm
    /// this matches the exported Piper model's expectations.
    fn synthesize(&mut self, text: &str) -> Result<Vec<VoiceFrame>, VoiceError> {
        let phoneme_ids: Vec<i64> = text.chars()
            .map(|c| c as i64)
            .collect();

        // Piper expects three inputs: ids (1×N), lengths (1), and scales
        // (noise, length, noise-w).
        let input_len = phoneme_ids.len();
        let input_tensor = ort::Value::from_array(
            ort::ArrayExtensions::into_dyn(
                ndarray::Array2::from_shape_vec((1, input_len), phoneme_ids)
                    .map_err(|e| VoiceError::CodecError(format!("Tensor shape: {}", e)))?
            )
        ).map_err(|e| VoiceError::CodecError(format!("Input tensor: {}", e)))?;

        let input_lengths = ort::Value::from_array(
            ndarray::Array1::from_vec(vec![input_len as i64]).into_dyn()
        ).map_err(|e| VoiceError::CodecError(format!("Length tensor: {}", e)))?;

        let scales = ort::Value::from_array(
            ndarray::Array1::from_vec(vec![0.667f32, 1.0, 0.8]).into_dyn()
        ).map_err(|e| VoiceError::CodecError(format!("Scales tensor: {}", e)))?;

        let outputs = self.session.run(ort::inputs![input_tensor, input_lengths, scales]
            .map_err(|e| VoiceError::CodecError(format!("Run inputs: {}", e)))?)
            .map_err(|e| VoiceError::CodecError(format!("ONNX inference: {}", e)))?;

        // First output is the waveform as f32 samples.
        let audio_f32: Vec<f32> = outputs[0]
            .try_extract_tensor::<f32>()
            .map_err(|e| VoiceError::CodecError(format!("Extract audio: {}", e)))?
            .view()
            .iter()
            .copied()
            .collect();

        // Convert f32 [-1, 1] to 16-bit little-endian PCM bytes.
        let pcm_bytes: Vec<u8> = audio_f32.iter()
            .flat_map(|&s| {
                let clamped = (s * 32767.0).clamp(-32768.0, 32767.0) as i16;
                clamped.to_le_bytes()
            })
            .collect();

        // Frame size at the MODEL's sample rate (not self.config.sample_rate).
        let frame_bytes = (self.model_sample_rate as usize * self.config.channels as usize * 2
            * self.config.frame_duration_ms as usize) / 1000;
        if frame_bytes == 0 {
            return Err(VoiceError::ConfigError("Frame size zero".into()));
        }

        // Package as raw-PCM frames; the final chunk may be shorter.
        let timestamp = chrono::Utc::now().to_rfc3339();
        let frames: Vec<VoiceFrame> = pcm_bytes.chunks(frame_bytes)
            .enumerate()
            .map(|(i, chunk)| VoiceFrame {
                codec: "pcm".into(),
                sample_rate: self.model_sample_rate,
                channels: self.config.channels,
                frame_duration_ms: self.config.frame_duration_ms,
                data: chunk.to_vec(),
                seq: i as u64,
                from: String::new(),
                timestamp: timestamp.clone(),
                team_id: None,
            })
            .collect();
        Ok(frames)
    }

    /// A successfully constructed Piper session is always considered available.
    fn is_available(&self) -> bool {
        true
    }

    /// Single built-in voice identifier.
    fn available_voices(&self) -> Vec<String> {
        vec!["piper-default".into()]
    }
}
|
|
| |
| |
| |
|
|
| |
| |
| |
/// One end-to-end voice pipeline: native layer + optional STT/TTS engines,
/// plus frame counters. Created idle; engines attach in `start()`.
pub struct VoiceSession {
    // Codec/frame parameters used for capture and playback.
    pub config: VoiceConfig,
    // Attached engines; `None` until `start()` and after `stop()`.
    input: Option<Box<dyn AudioInput>>,
    output: Option<Box<dyn AudioOutput>>,
    stt: Option<Box<dyn SpeechToText>>,
    tts: Option<Box<dyn TextToSpeech>>,
    // True between a successful start() and stop().
    active: bool,
    // Frames captured (also used as the outgoing sequence number).
    frames_sent: u64,
    // Frames played back.
    frames_received: u64,
}
|
|
impl VoiceSession {
    /// Creates an inactive session; no devices or engines are attached until
    /// `start()` is called.
    pub fn new(config: VoiceConfig) -> Self {
        Self {
            config,
            input: None,
            output: None,
            stt: None,
            tts: None,
            active: false,
            frames_sent: 0,
            frames_received: 0,
        }
    }

    /// Opens the spf-voice pipeline and attaches engines: stub audio I/O
    /// always; Candle Whisper STT and Piper TTS only when their cargo features
    /// are enabled and their model files load. Engine failures are logged but
    /// non-fatal.
    ///
    /// # Errors
    /// `DeviceError` if the native pipeline cannot be opened.
    pub fn start(&mut self) -> Result<(), VoiceError> {
        spf_voice::open()
            .map_err(|e| VoiceError::DeviceError(format!("spf_voice::open failed: {}", e)))?;

        // Stubs keep the trait objects populated even with no real devices.
        self.input = Some(Box::new(StubAudioInput::new()));
        self.output = Some(Box::new(StubAudioOutput::new()));

        #[cfg(feature = "voice-stt")]
        {
            let model_dir = crate::paths::spf_root().join("LIVE/MODELS/whisper-tiny");
            match CandleWhisperSTT::new(&model_dir) {
                Ok(stt) => {
                    eprintln!("[SPF-VOICE] STT: Candle Whisper active");
                    self.stt = Some(Box::new(stt));
                }
                Err(e) => { eprintln!("[SPF-VOICE] Whisper STT unavailable: {}", e); }
            }
        }

        // Log what the native layer reports, for diagnostics.
        let svs = spf_voice::status();
        eprintln!("[SPF-VOICE] spf-voice: tts={} input={} output={} codec={}",
            svs.tts_available, svs.input_available, svs.output_available, svs.codec_available);

        #[cfg(feature = "voice-tts")]
        {
            let model_dir = crate::paths::spf_root().join("LIVE/MODELS/piper");
            let model = model_dir.join("en_US-lessac-medium.onnx");
            let cfg = model_dir.join("en_US-lessac-medium.onnx.json");
            if model.exists() && cfg.exists() {
                match PiperTTS::new(&model.to_string_lossy(), &cfg.to_string_lossy()) {
                    Ok(piper) => {
                        self.tts = Some(Box::new(piper));
                        eprintln!("[SPF-VOICE] TTS: Piper neural TTS active");
                    }
                    Err(e) => { eprintln!("[SPF-VOICE] Piper TTS unavailable: {}", e); }
                }
            }
        }

        self.active = true;
        eprintln!("[SPF-VOICE] Session started");
        Ok(())
    }

    /// Closes the native pipeline, drops all engines, and resets counters.
    pub fn stop(&mut self) {
        spf_voice::close();

        self.input = None;
        self.output = None;
        self.stt = None;
        self.tts = None;
        self.active = false;
        self.frames_sent = 0;
        self.frames_received = 0;
        eprintln!("[SPF-VOICE] Session stopped");
    }

    /// True between a successful `start()` and the next `stop()`.
    pub fn is_active(&self) -> bool {
        self.active
    }

    /// Capability snapshot combining the native layer's report with this
    /// session's own STT/TTS engines.
    pub fn status(&self) -> VoiceStatus {
        let svs = spf_voice::status();
        VoiceStatus {
            audio_input_available: svs.input_available,
            audio_output_available: svs.output_available,
            stt_available: self.stt.as_ref().map_or(false, |s| s.is_available()),
            // TTS counts as available if either the native layer or the local
            // engine reports it.
            tts_available: svs.tts_available
                || self.tts.as_ref().map_or(false, |t| t.is_available()),
            pipeline_open: svs.pipeline_open,
            codec: self.config.codec.clone(),
            sample_rate: self.config.sample_rate,
            channels: self.config.channels,
        }
    }

    /// Captures one frame from the native layer; Opus-encodes when the codec
    /// is available, otherwise falls back to raw 16-bit LE PCM.
    pub fn capture_frame(&mut self, from_identity: &str) -> Result<VoiceFrame, VoiceError> {
        let pcm_i16 = spf_voice::listen(self.config.frame_duration_ms as u64)
            .map_err(|e| VoiceError::DeviceError(e.to_string()))?;

        let (codec, encoded) = match spf_voice::encode(&pcm_i16) {
            Ok(data) => ("opus".to_string(), data),
            Err(_) => {
                // Codec unavailable: ship raw little-endian PCM instead.
                let pcm_bytes: Vec<u8> = pcm_i16.iter()
                    .flat_map(|s| s.to_le_bytes())
                    .collect();
                ("pcm".to_string(), pcm_bytes)
            }
        };

        self.frames_sent += 1;
        Ok(VoiceFrame {
            codec,
            sample_rate: self.config.sample_rate,
            channels: self.config.channels,
            frame_duration_ms: self.config.frame_duration_ms,
            data: encoded,
            seq: self.frames_sent,
            from: from_identity.to_string(),
            timestamp: chrono::Utc::now().to_rfc3339(),
            team_id: None,
        })
    }

    /// Decodes (if Opus) and plays one received frame through the native layer.
    pub fn play_frame(&mut self, frame: &VoiceFrame) -> Result<(), VoiceError> {
        let pcm_i16: Vec<i16> = if frame.codec == "opus" {
            spf_voice::decode(&frame.data)
                .map_err(|e| VoiceError::CodecError(e.to_string()))?
        } else {
            // Any other codec value is treated as raw 16-bit LE PCM.
            frame.data.chunks_exact(2)
                .map(|pair| i16::from_le_bytes([pair[0], pair[1]]))
                .collect()
        };

        spf_voice::play(&pcm_i16)
            .map_err(|e| VoiceError::DeviceError(e.to_string()))?;
        self.frames_received += 1;
        Ok(())
    }

    /// Runs the attached STT engine over the given frames.
    ///
    /// # Errors
    /// `NotAvailable` when no STT engine is attached.
    pub fn transcribe(&mut self, frames: &[VoiceFrame]) -> Result<String, VoiceError> {
        let stt = self.stt.as_mut()
            .ok_or_else(|| VoiceError::NotAvailable("No STT engine".into()))?;
        stt.transcribe(frames)
    }

    /// Speaks text: first via the native layer (returns no frames on success),
    /// then via the local TTS engine (returns synthesized frames).
    ///
    /// # Errors
    /// `NotAvailable` when both paths are unavailable.
    pub fn speak(&mut self, text: &str) -> Result<Vec<VoiceFrame>, VoiceError> {
        match spf_voice::speak(text) {
            Ok(()) => return Ok(vec![]),
            Err(e) => {
                eprintln!("[SPF-VOICE] spf-voice speak failed: {} — trying fallback", e);
            }
        }

        if let Some(ref mut tts) = self.tts {
            tts.synthesize(text)
        } else {
            Err(VoiceError::NotAvailable("No TTS engine available".into()))
        }
    }

    /// JSON summary of session activity plus the current capability status.
    pub fn stats(&self) -> serde_json::Value {
        serde_json::json!({
            "active": self.active,
            "frames_sent": self.frames_sent,
            "frames_received": self.frames_received,
            "status": self.status().to_json_value(),
        })
    }
}
|
|
| |
| |
| |
|
|
| |
/// Serializable capability snapshot for diagnostics and status reporting.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VoiceStatus {
    pub audio_input_available: bool,
    pub audio_output_available: bool,
    pub stt_available: bool,
    pub tts_available: bool,
    pub pipeline_open: bool,
    // Codec/rate/channels echoed from the active (or default) VoiceConfig.
    pub codec: String,
    pub sample_rate: u32,
    pub channels: u8,
}
|
|
| impl VoiceStatus { |
| |
| pub fn from_stubs() -> Self { |
| let config = VoiceConfig::default(); |
| let svs = spf_voice::status(); |
| Self { |
| audio_input_available: svs.input_available, |
| audio_output_available: svs.output_available, |
| stt_available: false, |
| tts_available: svs.tts_available, |
| pipeline_open: svs.pipeline_open, |
| codec: config.codec, |
| sample_rate: config.sample_rate, |
| channels: config.channels, |
| } |
| } |
|
|
| |
| pub fn from_session(session: &VoiceSession) -> Self { |
| session.status() |
| } |
|
|
| pub fn to_json_value(&self) -> serde_json::Value { |
| let mode = if self.pipeline_open && (self.audio_input_available || self.audio_output_available) { |
| "pipeline_active" |
| } else if self.audio_input_available || self.audio_output_available { |
| "active" |
| } else if self.tts_available { |
| "tts_only" |
| } else { |
| "stubs_only" |
| }; |
| serde_json::json!({ |
| "status": mode, |
| "pipeline_open": self.pipeline_open, |
| "audio_input": self.audio_input_available, |
| "audio_output": self.audio_output_available, |
| "stt": self.stt_available, |
| "tts": self.tts_available, |
| "config": { |
| "codec": self.codec, |
| "sample_rate": self.sample_rate, |
| "channels": self.channels, |
| } |
| }) |
| } |
| } |
|
|
| |
| |
| |
|
|
| |
| pub fn frame_to_bytes(frame: &VoiceFrame) -> Result<Vec<u8>, String> { |
| serde_json::to_vec(frame).map_err(|e| format!("Serialize error: {}", e)) |
| } |
|
|
| |
| pub fn frame_from_bytes(data: &[u8]) -> Result<VoiceFrame, String> { |
| serde_json::from_slice(data).map_err(|e| format!("Deserialize error: {}", e)) |
| } |
|
|
| |
| |
| |
|
|
| |
/// A peer currently joined to a team channel.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ActiveMember {
    // Peer key/identity string.
    pub peer_key: String,
    // Display name supplied at join time.
    pub name: String,
    // RFC 3339 timestamp of when the peer joined.
    pub joined_at: String,
}
|
|
| |
/// A multi-party voice channel: an invited roster plus currently joined peers.
#[derive(Debug, Clone)]
pub struct TeamChannel {
    // Stable channel identifier.
    pub team_id: String,
    // Human-readable channel name.
    pub name: String,
    // Peers invited/added to the channel (never shrinks via leave()).
    pub members: Vec<String>,
    // Peers currently joined.
    pub active_members: Vec<ActiveMember>,
    // Audio parameters for the channel.
    pub config: VoiceConfig,
    // RFC 3339 creation timestamp.
    pub created_at: String,
    // Whether the channel accepts activity (set true at creation).
    pub is_open: bool,
}
|
|
| impl TeamChannel { |
| |
| pub fn new(team_id: &str, name: &str) -> Self { |
| Self { |
| team_id: team_id.to_string(), |
| name: name.to_string(), |
| members: Vec::new(), |
| active_members: Vec::new(), |
| config: VoiceConfig::default(), |
| created_at: chrono::Utc::now().to_rfc3339(), |
| is_open: true, |
| } |
| } |
|
|
| |
| pub fn add_member(&mut self, peer_key: &str) { |
| if !self.members.contains(&peer_key.to_string()) { |
| self.members.push(peer_key.to_string()); |
| } |
| } |
|
|
| |
| pub fn join(&mut self, peer_key: &str, display_name: &str) { |
| let already_active = self.active_members.iter().any(|m| m.peer_key == peer_key); |
| if !already_active { |
| self.active_members.push(ActiveMember { |
| peer_key: peer_key.to_string(), |
| name: display_name.to_string(), |
| joined_at: chrono::Utc::now().to_rfc3339(), |
| }); |
| } |
| |
| self.add_member(peer_key); |
| } |
|
|
| |
| pub fn leave(&mut self, peer_key: &str) -> bool { |
| let before = self.active_members.len(); |
| self.active_members.retain(|m| m.peer_key != peer_key); |
| self.active_members.len() < before |
| } |
|
|
| |
| pub fn active_count(&self) -> usize { |
| self.active_members.len() |
| } |
|
|
| |
| pub fn member_count(&self) -> usize { |
| self.members.len() |
| } |
|
|
| |
| pub fn active_peer_keys(&self) -> Vec<String> { |
| self.active_members.iter().map(|m| m.peer_key.clone()).collect() |
| } |
|
|
| |
| pub fn summary_line(&self) -> String { |
| format!("[{}] {} — {} invited, {} active, {}", |
| self.team_id, self.name, |
| self.member_count(), self.active_count(), |
| if self.is_open { "open" } else { "closed" }) |
| } |
| } |
|
|
| |
/// One team entry as persisted in `voice_teams.json`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VoiceTeamEntry {
    /// Stable team identifier.
    pub team_id: String,
    /// Display name for the channel created from this entry.
    pub name: String,
    /// Invited peer keys; defaults to empty when the field is absent.
    #[serde(default)]
    pub members: Vec<String>,
}
|
|
| |
/// On-disk voice-teams configuration (`LIVE/CONFIG/voice_teams.json`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VoiceTeamsConfig {
    /// Id of the channel guaranteed to exist after `init_voice_teams`.
    pub default_team: String,
    /// Whether inbound calls are accepted without prompting.
    /// NOTE(review): not read by the visible code in this chunk — confirm
    /// which call path consumes it.
    pub auto_accept_calls: bool,
    /// Configured team channels; defaults to empty when absent from JSON.
    #[serde(default)]
    pub teams: Vec<VoiceTeamEntry>,
}
|
|
| impl Default for VoiceTeamsConfig { |
| fn default() -> Self { |
| Self { |
| default_team: "default".to_string(), |
| auto_accept_calls: false, |
| teams: vec![VoiceTeamEntry { |
| team_id: "default".to_string(), |
| name: "Default Channel".to_string(), |
| members: Vec::new(), |
| }], |
| } |
| } |
| } |
|
|
| |
| |
/// Global registry of team voice channels, keyed by team id.
/// Starts empty; populated by `init_voice_teams` and consulted by
/// `handle_mesh_voice` to validate team audio.
pub static VOICE_TEAMS: std::sync::LazyLock<std::sync::Mutex<std::collections::HashMap<String, TeamChannel>>> =
    std::sync::LazyLock::new(|| std::sync::Mutex::new(std::collections::HashMap::new()));
|
|
| |
| |
| |
| pub fn init_voice_teams() { |
| let config_path = crate::paths::spf_root() |
| .join("LIVE/CONFIG/voice_teams.json"); |
|
|
| let cfg: VoiceTeamsConfig = if config_path.exists() { |
| match std::fs::read_to_string(&config_path) { |
| Ok(s) => serde_json::from_str(&s).unwrap_or_default(), |
| Err(e) => { |
| eprintln!("[SPF-VOICE] voice_teams.json read error: {} — using defaults", e); |
| VoiceTeamsConfig::default() |
| } |
| } |
| } else { |
| eprintln!("[SPF-VOICE] voice_teams.json not found at {:?} — using defaults", config_path); |
| VoiceTeamsConfig::default() |
| }; |
|
|
| let mut teams = VOICE_TEAMS.lock().unwrap_or_else(|e| e.into_inner()); |
| for entry in &cfg.teams { |
| |
| teams.entry(entry.team_id.clone()).or_insert_with(|| { |
| let mut ch = TeamChannel::new(&entry.team_id, &entry.name); |
| for m in &entry.members { |
| ch.add_member(m); |
| } |
| ch |
| }); |
| } |
| |
| teams.entry(cfg.default_team.clone()).or_insert_with(|| { |
| TeamChannel::new(&cfg.default_team, "Default Channel") |
| }); |
|
|
| eprintln!("[SPF-VOICE] Team channels ready: {} channel(s) loaded", teams.len()); |
| } |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| pub fn voice_audio_frame(voice_frame: &VoiceFrame) -> Result<crate::framing::Frame, String> { |
| let payload = frame_to_bytes(voice_frame)?; |
| Ok(crate::framing::Frame::new(crate::framing::StreamType::VoiceAudio, payload)) |
| } |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| pub fn handle_mesh_voice(frame: &crate::framing::Frame, peer_key: &str) -> Option<crate::framing::Frame> { |
| let peer_short = &peer_key[..8.min(peer_key.len())]; |
|
|
| |
| match frame_from_bytes(&frame.payload) { |
| Ok(voice_frame) => { |
| eprintln!("[SPF-VOICE] Audio frame from {} seq={} codec={} {} bytes{}", |
| peer_short, voice_frame.seq, voice_frame.codec, voice_frame.data.len(), |
| voice_frame.team_id.as_deref().map(|id| format!(" team={}", id)).unwrap_or_default()); |
|
|
| |
| if let Some(ref team_id) = voice_frame.team_id { |
| |
| if let Ok(mut teams) = VOICE_TEAMS.lock() { |
| if let Some(channel) = teams.get_mut(team_id) { |
| |
| let is_member = channel.active_members.iter() |
| .any(|m| m.peer_key == peer_key); |
| if is_member { |
| |
| |
| if let Ok(mut lock) = VOICE_SESSION.lock() { |
| if let Some(ref mut vs) = *lock { |
| if vs.is_active() { |
| if let Err(e) = vs.play_frame(&voice_frame) { |
| eprintln!("[SPF-VOICE] Team playback error team={} seq={}: {}", |
| team_id, voice_frame.seq, e); |
| } |
| } |
| } |
| } |
| } else { |
| |
| eprintln!("[SPF-VOICE] Rejected team audio from non-member {} team={}", |
| peer_short, team_id); |
| return None; |
| } |
| } else { |
| |
| eprintln!("[SPF-VOICE] Rejected audio for unknown team={} from {}", |
| team_id, peer_short); |
| return None; |
| } |
| } |
| } else { |
| |
| |
| let in_active_call = { |
| let cs = CALL_STATE.lock().unwrap_or_else(|e| e.into_inner()); |
| cs.as_ref().map_or(false, |c| c.peer_key == peer_key && c.is_active()) |
| }; |
| if in_active_call { |
| if let Ok(mut lock) = VOICE_SESSION.lock() { |
| if let Some(ref mut vs) = *lock { |
| if vs.is_active() { |
| if let Err(e) = vs.play_frame(&voice_frame) { |
| eprintln!("[SPF-VOICE] Peer playback error seq={}: {}", voice_frame.seq, e); |
| } |
| } |
| } |
| } |
| } else { |
| |
| eprintln!("[SPF-VOICE] Rejected audio from {} — no active call", peer_short); |
| return None; |
| } |
| } |
|
|
| |
| let ack = serde_json::json!({ |
| "type": "voice_ack", |
| "status": "received", |
| "seq": voice_frame.seq, |
| "codec": voice_frame.codec, |
| "bytes_received": voice_frame.data.len(), |
| "voice_available": spf_voice::status().output_available, |
| }); |
| Some(crate::framing::Frame::new( |
| crate::framing::StreamType::VoiceAudio, |
| serde_json::to_vec(&ack).unwrap_or_default(), |
| )) |
| } |
|
|
| Err(_parse_err) => { |
| |
| |
| |
| if let Ok(json_val) = serde_json::from_slice::<serde_json::Value>(&frame.payload) { |
| match json_val.get("type").and_then(|t| t.as_str()) { |
| Some("voice_ack") => { |
| |
| let seq = json_val.get("seq").and_then(|s| s.as_u64()).unwrap_or(0); |
| eprintln!("[SPF-VOICE] Ack from {} seq={}", peer_short, seq); |
| |
| None |
| } |
| Some("voice_status") => { |
| |
| let status = json_val.get("status").and_then(|s| s.as_str()).unwrap_or("unknown"); |
| eprintln!("[SPF-VOICE] Status from {}: {}", peer_short, status); |
| |
| None |
| } |
| Some(other_type) => { |
| |
| |
| |
| eprintln!("[SPF-VOICE] Rejected unknown type '{}' on VoiceAudio stream from {} — must use ToolRpc for signaling", |
| other_type, peer_short); |
| None |
| } |
| None => { |
| |
| eprintln!("[SPF-VOICE] Untyped JSON on VoiceAudio stream from {} — dropped", |
| peer_short); |
| None |
| } |
| } |
| } else { |
| |
| eprintln!("[SPF-VOICE] Non-JSON non-VoiceFrame payload from {} ({} bytes) — dropped", |
| peer_short, frame.payload.len()); |
| None |
| } |
| } |
| } |
| } |
|
|
| |
| |
| |
|
|
// Unit tests for the voice module: config defaults, stub backends,
// session lifecycle, frame (de)serialization, and mesh-audio handling.
#[cfg(test)]
mod tests {
    use super::*;

    // Default VoiceConfig: opus, 16 kHz mono, 20 ms frames, VAD enabled.
    #[test]
    fn test_voice_config_defaults() {
        let config = VoiceConfig::default();
        assert_eq!(config.codec, "opus");
        assert_eq!(config.sample_rate, 16000);
        assert_eq!(config.channels, 1);
        assert_eq!(config.frame_duration_ms, 20);
        assert!(config.vad_enabled);
    }

    // 16 kHz * 0.020 s * 1 ch * 2 bytes/sample = 640 bytes per PCM frame.
    #[test]
    fn test_pcm_frame_bytes() {
        let config = VoiceConfig::default();
        assert_eq!(config.pcm_frame_bytes(), 640);
    }

    // Stub input backend reports unavailable and refuses to read.
    #[test]
    fn test_stub_audio_input() {
        let mut input = StubAudioInput::new();
        assert!(!input.is_available());
        assert!(input.read_frame().is_err());
    }

    // Stub output backend reports unavailable and refuses to write.
    #[test]
    fn test_stub_audio_output() {
        let mut output = StubAudioOutput::new();
        assert!(!output.is_available());
        assert!(output.write_frame(&[0u8; 640]).is_err());
    }

    // Stub STT: unavailable, transcribe fails, no language list.
    #[test]
    fn test_stub_stt() {
        let mut stt = StubSTT;
        assert!(!stt.is_available());
        assert!(stt.transcribe(&[]).is_err());
        assert!(stt.supported_languages().is_empty());
    }

    // Stub TTS: unavailable, synthesis fails, no voice list.
    #[test]
    fn test_stub_tts() {
        let mut tts = StubTTS;
        assert!(!tts.is_available());
        assert!(tts.synthesize("hello").is_err());
        assert!(tts.available_voices().is_empty());
    }

    // Status built from stubs reports "stubs_only" in its JSON form.
    #[test]
    fn test_voice_status_stubs() {
        let status = VoiceStatus::from_stubs();
        assert!(!status.audio_input_available);
        assert!(!status.stt_available);
        assert_eq!(status.codec, "opus");
        let json = status.to_json_value();
        assert_eq!(json["status"], "stubs_only");
    }

    // Display formatting of VoiceError variants.
    #[test]
    fn test_voice_error_display() {
        let err = VoiceError::NotAvailable("test".into());
        assert_eq!(format!("{}", err), "Not available: test");
        let err = VoiceError::CodecError("bad".into());
        assert_eq!(format!("{}", err), "Codec error: bad");
    }

    // VoiceFrame survives a serialize → deserialize round trip intact.
    #[test]
    fn test_voice_frame_serialize_roundtrip() {
        let frame = VoiceFrame {
            codec: "opus".into(),
            sample_rate: 16000,
            channels: 1,
            frame_duration_ms: 20,
            data: vec![0xAA, 0xBB, 0xCC],
            seq: 42,
            from: "531d83fa".into(),
            timestamp: "2026-02-28T12:00:00Z".into(),
            team_id: None,
        };
        let bytes = frame_to_bytes(&frame).unwrap();
        let loaded = frame_from_bytes(&bytes).unwrap();
        assert_eq!(loaded.codec, "opus");
        assert_eq!(loaded.seq, 42);
        assert_eq!(loaded.data, vec![0xAA, 0xBB, 0xCC]);
        assert_eq!(loaded.from, "531d83fa");
    }

    // Querying backend status must never panic; the flag values are
    // environment-dependent, so only touch them.
    #[test]
    fn test_spf_voice_status_no_panic() {
        let svs = spf_voice::status();
        let _ = svs.tts_available;
        let _ = svs.input_available;
        let _ = svs.output_available;
        let _ = svs.codec_available;
    }

    // A freshly-constructed session is inactive with zeroed counters
    // and no attached backends.
    #[test]
    fn test_voice_session_new() {
        let config = VoiceConfig::default();
        let session = VoiceSession::new(config);
        assert!(!session.is_active());
        assert_eq!(session.frames_sent, 0);
        assert_eq!(session.frames_received, 0);
        assert!(session.input.is_none());
        assert!(session.output.is_none());
        assert!(session.stt.is_none());
        assert!(session.tts.is_none());
    }

    // start() activates the session and attaches input/output; stop()
    // tears everything down and resets the counters.
    #[test]
    fn test_voice_session_start_stop() {
        let config = VoiceConfig::default();
        let mut session = VoiceSession::new(config);
        let result = session.start();
        assert!(result.is_ok());
        assert!(session.is_active());
        assert!(session.input.is_some());
        assert!(session.output.is_some());

        session.stop();
        assert!(!session.is_active());
        assert!(session.input.is_none());
        assert!(session.output.is_none());
        assert!(session.stt.is_none());
        assert!(session.tts.is_none());
        assert_eq!(session.frames_sent, 0);
        assert_eq!(session.frames_received, 0);
    }

    // After start(), status() reflects the default config; availability
    // flags depend on the host, so they are only read, not asserted.
    #[test]
    fn test_voice_session_status_after_start() {
        let config = VoiceConfig::default();
        let mut session = VoiceSession::new(config);
        session.start().unwrap();
        let status = session.status();
        let _ = status.audio_input_available;
        let _ = status.audio_output_available;
        let _ = status.stt_available;
        assert_eq!(status.codec, "opus");
        assert_eq!(status.sample_rate, 16000);
        assert_eq!(status.channels, 1);
        session.stop();
    }

    // stats() JSON shape for an idle session.
    #[test]
    fn test_voice_session_stats_json() {
        let config = VoiceConfig::default();
        let session = VoiceSession::new(config);
        let stats = session.stats();
        assert_eq!(stats["active"], false);
        assert_eq!(stats["frames_sent"], 0);
        assert_eq!(stats["frames_received"], 0);
        assert!(stats["status"].is_object());
    }

    // capture_frame with no input attached fails with NotAvailable
    // mentioning "input".
    #[test]
    fn test_voice_session_capture_without_input() {
        let config = VoiceConfig::default();
        let mut session = VoiceSession::new(config);
        let result = session.capture_frame("test_identity");
        assert!(result.is_err());
        match result {
            Err(VoiceError::NotAvailable(msg)) => assert!(msg.contains("input")),
            other => panic!("Expected NotAvailable, got: {:?}", other),
        }
    }

    // Even after start(), stub-backed capture still errors (stubs can't read).
    #[test]
    fn test_voice_session_capture_with_stubs() {
        let config = VoiceConfig::default();
        let mut session = VoiceSession::new(config);
        session.start().unwrap();
        let result = session.capture_frame("test_identity");
        assert!(result.is_err());
        session.stop();
    }

    // play_frame with no output attached must fail.
    #[test]
    fn test_voice_session_play_without_output() {
        let config = VoiceConfig::default();
        let mut session = VoiceSession::new(config);
        let frame = VoiceFrame {
            codec: "pcm".into(),
            sample_rate: 16000,
            channels: 1,
            frame_duration_ms: 20,
            data: vec![0u8; 640],
            seq: 1,
            from: "peer".into(),
            timestamp: "2026-03-01T00:00:00Z".into(),
            team_id: None,
        };
        let result = session.play_frame(&frame);
        assert!(result.is_err());
    }

    // speak() without a TTS backend fails with NotAvailable mentioning "TTS".
    #[test]
    fn test_voice_session_speak_without_tts() {
        let config = VoiceConfig::default();
        let mut session = VoiceSession::new(config);
        let result = session.speak("hello world");
        assert!(result.is_err());
        match result {
            Err(VoiceError::NotAvailable(msg)) => assert!(msg.contains("TTS")),
            other => panic!("Expected NotAvailable, got: {:?}", other),
        }
    }

    // transcribe() without an STT backend fails with NotAvailable mentioning "STT".
    #[test]
    fn test_voice_session_transcribe_without_stt() {
        let config = VoiceConfig::default();
        let mut session = VoiceSession::new(config);
        let result = session.transcribe(&[]);
        assert!(result.is_err());
        match result {
            Err(VoiceError::NotAvailable(msg)) => assert!(msg.contains("STT")),
            other => panic!("Expected NotAvailable, got: {:?}", other),
        }
    }

    // from_session on an idle session: everything unavailable, default codec.
    #[test]
    fn test_voice_status_from_session() {
        let config = VoiceConfig::default();
        let session = VoiceSession::new(config);
        let status = VoiceStatus::from_session(&session);
        assert!(!status.audio_input_available);
        assert!(!status.audio_output_available);
        assert!(!status.stt_available);
        assert!(!status.tts_available);
        assert_eq!(status.codec, "opus");
    }

    // Both audio devices available, pipeline closed → "active".
    #[test]
    fn test_voice_status_mode_active() {
        let status = VoiceStatus {
            audio_input_available: true,
            audio_output_available: true,
            stt_available: false,
            tts_available: false,
            pipeline_open: false,
            codec: "opus".into(),
            sample_rate: 16000,
            channels: 1,
        };
        let json = status.to_json_value();
        assert_eq!(json["status"], "active");
        assert_eq!(json["audio_input"], true);
        assert_eq!(json["audio_output"], true);
    }

    // TTS only, no audio devices → "tts_only".
    #[test]
    fn test_voice_status_mode_tts_only() {
        let status = VoiceStatus {
            audio_input_available: false,
            audio_output_available: false,
            stt_available: false,
            tts_available: true,
            pipeline_open: false,
            codec: "opus".into(),
            sample_rate: 16000,
            channels: 1,
        };
        let json = status.to_json_value();
        assert_eq!(json["status"], "tts_only");
        assert_eq!(json["tts"], true);
    }

    // Nothing available at all → "stubs_only".
    #[test]
    fn test_voice_status_mode_stubs_only() {
        let status = VoiceStatus {
            audio_input_available: false,
            audio_output_available: false,
            stt_available: false,
            tts_available: false,
            pipeline_open: false,
            codec: "opus".into(),
            sample_rate: 16000,
            channels: 1,
        };
        let json = status.to_json_value();
        assert_eq!(json["status"], "stubs_only");
    }

    // A single working device (input only) still counts as "active".
    #[test]
    fn test_voice_status_input_only_counts_as_active() {
        let status = VoiceStatus {
            audio_input_available: true,
            audio_output_available: false,
            stt_available: false,
            tts_available: false,
            pipeline_open: false,
            codec: "opus".into(),
            sample_rate: 16000,
            channels: 1,
        };
        let json = status.to_json_value();
        assert_eq!(json["status"], "active");
    }

    // A valid team-less VoiceFrame produces a voice_ack echoing seq/codec/size.
    // NOTE(review): relies on process-global CALL_STATE/VOICE_TEAMS state —
    // the frame has no team_id and no active call, so this asserts the ack
    // path as the code currently behaves; confirm against the handler's
    // gating if it changes.
    #[test]
    fn test_handle_mesh_voice_valid_frame() {
        let frame = VoiceFrame {
            codec: "pcm".into(),
            sample_rate: 16000,
            channels: 1,
            frame_duration_ms: 20,
            data: vec![0x00; 640],
            seq: 7,
            from: "test_peer".into(),
            timestamp: "2026-03-01T00:00:00Z".into(),
            team_id: None,
        };
        let serialized = frame_to_bytes(&frame).unwrap();
        let mesh_frame = crate::framing::Frame::new(
            crate::framing::StreamType::VoiceAudio,
            serialized,
        );
        let response = handle_mesh_voice(&mesh_frame, "abcdef1234567890");
        assert!(response.is_some());
        let resp = response.unwrap();
        let payload: serde_json::Value = serde_json::from_slice(&resp.payload).unwrap();
        assert_eq!(payload["type"], "voice_ack");
        assert_eq!(payload["status"], "received");
        assert_eq!(payload["seq"], 7);
        assert_eq!(payload["codec"], "pcm");
        assert_eq!(payload["bytes_received"], 640);
    }

    // Non-JSON garbage on the VoiceAudio stream is dropped silently.
    #[test]
    fn test_handle_mesh_voice_invalid_payload() {
        let mesh_frame = crate::framing::Frame::new(
            crate::framing::StreamType::VoiceAudio,
            vec![0xFF, 0xFE, 0xFD],
        );
        let response = handle_mesh_voice(&mesh_frame, "deadbeef12345678");
        assert!(response.is_none());
    }

    // An empty payload is likewise dropped.
    #[test]
    fn test_handle_mesh_voice_empty_payload() {
        let mesh_frame = crate::framing::Frame::new(
            crate::framing::StreamType::VoiceAudio,
            vec![],
        );
        let response = handle_mesh_voice(&mesh_frame, "1234abcd");
        assert!(response.is_none());
    }

    // A peer key shorter than 8 chars must not panic the prefix slicing.
    #[test]
    fn test_handle_mesh_voice_short_peer_key() {
        let frame = VoiceFrame {
            codec: "pcm".into(),
            sample_rate: 16000,
            channels: 1,
            frame_duration_ms: 20,
            data: vec![0x01],
            seq: 0,
            from: "short".into(),
            timestamp: "2026-03-01T00:00:00Z".into(),
            team_id: None,
        };
        let serialized = frame_to_bytes(&frame).unwrap();
        let mesh_frame = crate::framing::Frame::new(
            crate::framing::StreamType::VoiceAudio,
            serialized,
        );
        let response = handle_mesh_voice(&mesh_frame, "abcd");
        assert!(response.is_some());
    }

    // 48 kHz stereo, 10 ms: 48000 * 0.010 * 2 ch * 2 bytes = 1920.
    #[test]
    fn test_voice_config_custom_stereo() {
        let config = VoiceConfig {
            codec: "pcm".into(),
            sample_rate: 48000,
            channels: 2,
            frame_duration_ms: 10,
            bitrate: 64000,
            vad_enabled: false,
        };
        assert_eq!(config.pcm_frame_bytes(), 1920);
        assert!(!config.vad_enabled);
    }

    // 8 kHz mono, 20 ms: 8000 * 0.020 * 1 ch * 2 bytes = 320.
    #[test]
    fn test_voice_config_8khz_mono() {
        let config = VoiceConfig {
            codec: "opus".into(),
            sample_rate: 8000,
            channels: 1,
            frame_duration_ms: 20,
            bitrate: 12000,
            vad_enabled: true,
        };
        assert_eq!(config.pcm_frame_bytes(), 320);
    }

    // Every VoiceError variant has the expected Display prefix.
    #[test]
    fn test_voice_error_all_variants() {
        let errors = vec![
            VoiceError::NotAvailable("na".into()),
            VoiceError::CodecError("ce".into()),
            VoiceError::DeviceError("de".into()),
            VoiceError::ConfigError("cfg".into()),
        ];
        let expected = vec![
            "Not available: na",
            "Codec error: ce",
            "Device error: de",
            "Config error: cfg",
        ];
        for (err, exp) in errors.iter().zip(expected.iter()) {
            assert_eq!(format!("{}", err), *exp);
        }
    }

    // Deserialization failures surface a "Deserialize" error string.
    #[test]
    fn test_frame_from_bytes_error() {
        let result = frame_from_bytes(b"not json");
        assert!(result.is_err());
        let err_msg = result.unwrap_err();
        assert!(err_msg.contains("Deserialize"));
    }

    // Clone preserves all VoiceFrame fields.
    #[test]
    fn test_voice_frame_clone() {
        let frame = VoiceFrame {
            codec: "opus".into(),
            sample_rate: 16000,
            channels: 1,
            frame_duration_ms: 20,
            data: vec![1, 2, 3],
            seq: 99,
            from: "sender".into(),
            timestamp: "2026-03-01T00:00:00Z".into(),
            team_id: None,
        };
        let cloned = frame.clone();
        assert_eq!(cloned.seq, 99);
        assert_eq!(cloned.data, vec![1, 2, 3]);
        assert_eq!(cloned.from, "sender");
    }
}
|
|
|
|