// SPFsmartGATE / src/voice.rs
// (Repository-listing metadata removed: "JosephStoneCellAI — Upload 45 files — 1269259 verified";
// those lines were web-scrape residue, not Rust source.)
// SPF Smart Gateway - Voice Pipeline (Block Q + Block CC)
// Copyright 2026 Joseph Stone - All Rights Reserved
//
// Voice communication over mesh with real + fallback implementations.
//
// Mesh stream type: 0x03 (framing::StreamType::VoiceAudio)
//
// Architecture:
// AudioInput → encode → VoiceFrame → mesh (0x03) → decode → AudioOutput
// AudioInput → STT → text → ChatEngine (Block P)
// ChatEngine → TTS → VoiceFrame → AudioOutput
//
// Implementation tiers:
// Always available: spf-voice crate (espeak-ng FFI in-process + cpal + opus static), VoiceSession, stubs
// Feature "voice-stt": CandleWhisperSTT (candle-transformers, no C deps)
// Feature "voice-tts": PiperTTS (ort/ONNX)
//
// Depends on: framing.rs (Block F, StreamType::VoiceAudio)
use serde::{Deserialize, Serialize};
// ============================================================================
// SHARED VOICE SESSION STATE (Block LL)
// ============================================================================
/// Global voice session — shared between MCP handler (mcp.rs) and mesh handler.
/// Mutex::new is const since Rust 1.63 — no lazy_static needed.
/// MCP "speak" action plays TTS through this.
/// Mesh voice receive plays incoming audio through this.
/// `None` = no session has been constructed yet; `Some` = session exists
/// (it may still be inactive until its pipeline is started).
pub static VOICE_SESSION: std::sync::Mutex<Option<VoiceSession>> = std::sync::Mutex::new(None);
// ============================================================================
// VOICE LINE ARCHITECTURE (Block MM)
// ============================================================================
/// Voice mode: Light (zero battery) vs Rich (full pipeline)
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub enum VoiceMode {
/// spf-voice in-process TTS (espeak-ng FFI), zero session, zero battery
Light,
/// spf-voice full pipeline + Piper TTS (voice-tts), persistent session
Rich,
}
impl Default for VoiceMode {
fn default() -> Self { Self::Light }
}
/// Voice settings for both Agent Line and Peer Line.
/// Persisted via serde; the runtime copy lives in the VOICE_SETTINGS static.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct VoiceSettings {
    /// Agent Line TTS mode: Light (spf-voice/espeak-ng FFI) | Rich (spf-voice + Piper ONNX)
    pub agent_tts_mode: VoiceMode,
    /// Agent Line STT mode: Light (text input) | Rich (whisper)
    pub agent_stt_mode: VoiceMode,
    /// Peer Line audio quality: Light (12kHz) | Rich (24kHz)
    pub peer_quality: VoiceMode,
    /// Auto-answer incoming peer calls (default: false)
    pub auto_accept_calls: bool,
    /// Auto end-call after this many seconds of silence (default: 300 = 5 min)
    pub idle_timeout_secs: u32,
}
impl Default for VoiceSettings {
    /// Conservative defaults: every pipeline stage Light (minimum battery),
    /// incoming calls ring rather than auto-answer, 5-minute idle hangup.
    fn default() -> Self {
        Self {
            // All three stages start in the zero-battery Light tier.
            agent_tts_mode: VoiceMode::Light,
            agent_stt_mode: VoiceMode::Light,
            peer_quality: VoiceMode::Light,
            // Require explicit accept for incoming peer calls.
            auto_accept_calls: false,
            // 5 minutes of silence before auto end-call.
            idle_timeout_secs: 5 * 60,
        }
    }
}
/// Call direction — who initiated the call.
/// Runtime-only (no serde): lives inside CallState behind the CALL_STATE mutex.
#[derive(Debug, Clone, PartialEq)]
pub enum CallDirection {
    /// We placed the call (outbound ring sent)
    Outgoing,
    /// Peer placed the call (inbound ring received)
    Incoming,
}
/// Call lifecycle phase.
/// Transitions: Ringing → Active (accept) or Ringing → Ended (reject/timeout),
/// and Active → Ended (hangup).
#[derive(Debug, Clone, PartialEq)]
pub enum CallPhase {
    /// Ring sent or received — waiting for accept or reject
    Ringing,
    /// Call accepted — audio flowing (or ready to flow when cpal enabled)
    Active,
    /// Call ended — CALL_STATE is cleared immediately after this phase
    Ended,
}
/// Active call state for Peer Line (SPF ↔ SPF).
/// Held in the CALL_STATE static; runtime-only (Instant is not serializable).
#[derive(Debug, Clone)]
pub struct CallState {
    /// Full mesh peer key of the other party
    pub peer_key: String,
    /// Display name — short key prefix if peer name unknown
    pub peer_name: String,
    /// Whether we initiated (Outgoing) or received (Incoming)
    pub direction: CallDirection,
    /// Current lifecycle phase
    pub phase: CallPhase,
    /// When the call was initiated — used for duration tracking
    pub started_at: std::time::Instant,
}
impl CallState {
    /// True when audio is flowing (phase == Active).
    pub fn is_active(&self) -> bool {
        self.phase == CallPhase::Active
    }
    /// True while waiting on accept/reject (phase == Ringing).
    pub fn is_ringing(&self) -> bool {
        self.phase == CallPhase::Ringing
    }
    /// True when the call is Ringing or Active — used to refuse a second
    /// outbound call while one is already underway.
    pub fn is_in_progress(&self) -> bool {
        self.is_ringing() || self.is_active()
    }
    /// Whole seconds elapsed since the call was initiated.
    pub fn elapsed_secs(&self) -> u64 {
        let since_start = self.started_at.elapsed();
        since_start.as_secs()
    }
}
/// Voice settings — persistent across actions (Block MM).
/// NOTE: this const initialiser must stay in sync with `VoiceSettings::default()`;
/// a const context cannot call `Default::default()`, so the values are duplicated.
pub static VOICE_SETTINGS: std::sync::Mutex<VoiceSettings> = std::sync::Mutex::new(VoiceSettings {
    agent_tts_mode: VoiceMode::Light,
    agent_stt_mode: VoiceMode::Light,
    peer_quality: VoiceMode::Light,
    auto_accept_calls: false,
    idle_timeout_secs: 300,
});
/// Active peer call state — None when no call active (Block MM)
pub static CALL_STATE: std::sync::Mutex<Option<CallState>> = std::sync::Mutex::new(None);
// Inline spf-voice stub module (what follows is a module, not an import block).
// WB-3: spf-voice crate removed (Android-only deps). Inline stub satisfies all call sites.
// pub(crate) — accessible from mcp.rs via `use crate::voice::spf_voice`
pub(crate) mod spf_voice {
    //! Stub replacement for the removed spf-voice crate (WB-3).
    //! Every audio-producing call fails with `STUB_ERR`; pipeline management
    //! calls are no-ops so callers can run the control flow unchanged.

    /// Single shared error message — previously duplicated in four functions.
    const STUB_ERR: &str = "spf-voice not available (WB-3 stub)";

    /// Status of the voice pipeline hardware — all false in stub mode.
    pub struct Status {
        pub tts_available: bool,
        pub input_available: bool,
        pub output_available: bool,
        pub codec_available: bool,
        pub pipeline_open: bool,
    }
    /// Open the persistent audio pipeline. Stub: always succeeds (no-op).
    pub fn open() -> Result<(), String> { Ok(()) }
    /// Close the persistent audio pipeline. Stub: no-op.
    pub fn close() {}
    /// Capture `_ms` milliseconds of audio. Stub: returns empty buffer.
    pub fn listen(_ms: u64) -> Result<Vec<i16>, String> { Ok(Vec::new()) }
    /// Synthesise `_text` to speech. Stub: not available.
    pub fn speak(_text: &str) -> Result<(), String> {
        Err(STUB_ERR.into())
    }
    /// Encode PCM i16 → Opus bytes. Stub: not available.
    pub fn encode(_pcm: &[i16]) -> Result<Vec<u8>, String> {
        Err(STUB_ERR.into())
    }
    /// Decode Opus bytes → PCM i16. Stub: not available.
    pub fn decode(_data: &[u8]) -> Result<Vec<i16>, String> {
        Err(STUB_ERR.into())
    }
    /// Play PCM i16 via speaker. Stub: not available.
    pub fn play(_pcm: &[i16]) -> Result<(), String> {
        Err(STUB_ERR.into())
    }
    /// Query hardware pipeline status. Stub: all capabilities false.
    pub fn status() -> Status {
        Status {
            tts_available: false,
            input_available: false,
            output_available: false,
            codec_available: false,
            pipeline_open: false,
        }
    }
}
// ============================================================================
// VOICE FRAME
// ============================================================================
/// A single voice audio frame for mesh transport.
/// Payload of framing::StreamType::VoiceAudio (0x03).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VoiceFrame {
    /// Audio codec identifier ("opus" is the target; "pcm" = raw i16 LE samples)
    pub codec: String,
    /// Sample rate in Hz
    pub sample_rate: u32,
    /// Number of audio channels (1 = mono, 2 = stereo)
    pub channels: u8,
    /// Frame duration in milliseconds
    pub frame_duration_ms: u16,
    /// Encoded audio data (codec-dependent)
    pub data: Vec<u8>,
    /// Sequence number for ordering
    pub seq: u64,
    /// Sender identity (short pub_key hash)
    pub from: String,
    /// Timestamp (RFC3339)
    pub timestamp: String,
    /// Team channel ID — None = peer-to-peer call, Some(id) = team channel frame
    /// serde(default) ensures old frames without this field still deserialize correctly
    #[serde(default)]
    pub team_id: Option<String>,
}
// ============================================================================
// VOICE CONFIGURATION
// ============================================================================
/// Voice system configuration.
/// Defaults target low-bandwidth speech (Opus, 16 kHz mono, 20 ms, 24 kbps);
/// see `pcm_frame_bytes()` for the derived uncompressed frame size.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VoiceConfig {
    /// Audio codec (target: "opus")
    pub codec: String,
    /// Sample rate in Hz (target: 16000 for speech)
    pub sample_rate: u32,
    /// Channels (1 = mono — sufficient for speech)
    pub channels: u8,
    /// Frame duration in ms (20ms is standard for Opus)
    pub frame_duration_ms: u16,
    /// Bitrate in bits/sec (target: 24000 for speech quality)
    pub bitrate: u32,
    /// Enable voice activity detection
    pub vad_enabled: bool,
}
impl Default for VoiceConfig {
fn default() -> Self {
Self {
codec: "opus".to_string(),
sample_rate: 16000,
channels: 1,
frame_duration_ms: 20,
bitrate: 24000,
vad_enabled: true,
}
}
}
impl VoiceConfig {
    /// Size in bytes of one uncompressed PCM frame:
    /// sample_rate × channels × 2 (16-bit samples) × frame_duration / 1000.
    pub fn pcm_frame_bytes(&self) -> usize {
        const BYTES_PER_SAMPLE: usize = 2; // 16-bit PCM
        let bytes_per_second =
            self.sample_rate as usize * self.channels as usize * BYTES_PER_SAMPLE;
        bytes_per_second * self.frame_duration_ms as usize / 1000
    }
}
// ============================================================================
// VOICE TRAITS — Interfaces for future implementation
// ============================================================================
/// Audio input source (microphone, file, stream).
/// Send + Sync because implementations are boxed into VoiceSession as
/// `Box<dyn AudioInput>`.
pub trait AudioInput: Send + Sync {
    /// Read one frame of PCM audio. Returns raw PCM bytes.
    fn read_frame(&mut self) -> Result<Vec<u8>, VoiceError>;
    /// Check if input is available
    fn is_available(&self) -> bool;
    /// Get input configuration
    fn config(&self) -> &VoiceConfig;
}
/// Audio output sink (speaker, file, stream).
/// Send + Sync because implementations are boxed into VoiceSession as
/// `Box<dyn AudioOutput>`.
pub trait AudioOutput: Send + Sync {
    /// Write one frame of PCM audio
    fn write_frame(&mut self, pcm_data: &[u8]) -> Result<(), VoiceError>;
    /// Check if output is available
    fn is_available(&self) -> bool;
    /// Get output configuration
    fn config(&self) -> &VoiceConfig;
}
/// Speech-to-Text engine.
/// Implemented by StubSTT (always unavailable) and, behind "voice-stt",
/// CandleWhisperSTT.
pub trait SpeechToText: Send + Sync {
    /// Transcribe audio frames to text
    fn transcribe(&mut self, frames: &[VoiceFrame]) -> Result<String, VoiceError>;
    /// Check if STT engine is available
    fn is_available(&self) -> bool;
    /// Get supported languages
    fn supported_languages(&self) -> Vec<String>;
}
/// Text-to-Speech engine.
/// Implemented by StubTTS (always unavailable) and, behind "voice-tts",
/// PiperTTS.
pub trait TextToSpeech: Send + Sync {
    /// Synthesize text to audio frames
    fn synthesize(&mut self, text: &str) -> Result<Vec<VoiceFrame>, VoiceError>;
    /// Check if TTS engine is available
    fn is_available(&self) -> bool;
    /// Get available voices
    fn available_voices(&self) -> Vec<String>;
}
/// Voice system error — one variant per failure domain.
#[derive(Debug, Clone)]
pub enum VoiceError {
    /// Feature not compiled in / backend missing
    NotAvailable(String),
    /// Codec error (encode/decode failure)
    CodecError(String),
    /// Hardware/device error
    DeviceError(String),
    /// Configuration error
    ConfigError(String),
}
impl std::fmt::Display for VoiceError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            VoiceError::NotAvailable(msg) => write!(f, "Not available: {}", msg),
            VoiceError::CodecError(msg) => write!(f, "Codec error: {}", msg),
            VoiceError::DeviceError(msg) => write!(f, "Device error: {}", msg),
            VoiceError::ConfigError(msg) => write!(f, "Config error: {}", msg),
        }
    }
}
// Fix: the type had Display but not std::error::Error, so it could not be used
// with `Box<dyn Error>`, `?`-conversion into error-trait holders, or
// error-chaining helpers. The marker impl costs nothing (source() defaults to None).
impl std::error::Error for VoiceError {}
// ============================================================================
// STUB IMPLEMENTATIONS — All return "not available"
// ============================================================================
/// Stub audio input — always returns NotAvailable.
/// Placeholder kept so VoiceSession can hold a `Box<dyn AudioInput>`; real
/// capture goes through spf_voice::listen().
pub struct StubAudioInput {
    config: VoiceConfig,
}
impl StubAudioInput {
    /// Build a stub carrying the default `VoiceConfig`.
    pub fn new() -> Self {
        Self { config: VoiceConfig::default() }
    }
}
// Fix: a zero-argument `new()` should be mirrored by `Default`
// (clippy::new_without_default) so the type works with generic code.
impl Default for StubAudioInput {
    fn default() -> Self {
        Self::new()
    }
}
impl AudioInput for StubAudioInput {
    /// Always fails — there is no capture backend behind the stub.
    fn read_frame(&mut self) -> Result<Vec<u8>, VoiceError> {
        Err(VoiceError::NotAvailable("Audio input not implemented".into()))
    }
    fn is_available(&self) -> bool {
        false
    }
    fn config(&self) -> &VoiceConfig {
        &self.config
    }
}
/// Stub audio output — always returns NotAvailable.
/// Placeholder kept so VoiceSession can hold a `Box<dyn AudioOutput>`; real
/// playback goes through spf_voice::play().
pub struct StubAudioOutput {
    config: VoiceConfig,
}
impl StubAudioOutput {
    /// Build a stub carrying the default `VoiceConfig`.
    pub fn new() -> Self {
        Self { config: VoiceConfig::default() }
    }
}
// Fix: a zero-argument `new()` should be mirrored by `Default`
// (clippy::new_without_default) so the type works with generic code.
impl Default for StubAudioOutput {
    fn default() -> Self {
        Self::new()
    }
}
impl AudioOutput for StubAudioOutput {
    /// Always fails — there is no playback backend behind the stub.
    fn write_frame(&mut self, _pcm_data: &[u8]) -> Result<(), VoiceError> {
        Err(VoiceError::NotAvailable("Audio output not implemented".into()))
    }
    fn is_available(&self) -> bool {
        false
    }
    fn config(&self) -> &VoiceConfig {
        &self.config
    }
}
/// Stub STT — compiled in when no real engine exists; every call reports
/// NotAvailable.
pub struct StubSTT;
impl SpeechToText for StubSTT {
    /// Always fails: no engine behind this stub.
    fn transcribe(&mut self, _frames: &[VoiceFrame]) -> Result<String, VoiceError> {
        Err(VoiceError::NotAvailable("Speech-to-text not implemented".into()))
    }
    /// Never available.
    fn is_available(&self) -> bool {
        false
    }
    /// No engine — no languages.
    fn supported_languages(&self) -> Vec<String> {
        Vec::new()
    }
}
/// Stub TTS — compiled in when no real engine exists; every call reports
/// NotAvailable.
pub struct StubTTS;
impl TextToSpeech for StubTTS {
    /// Always fails: no engine behind this stub.
    fn synthesize(&mut self, _text: &str) -> Result<Vec<VoiceFrame>, VoiceError> {
        Err(VoiceError::NotAvailable("Text-to-speech not implemented".into()))
    }
    /// Never available.
    fn is_available(&self) -> bool {
        false
    }
    /// No engine — no voices.
    fn available_voices(&self) -> Vec<String> {
        Vec::new()
    }
}
// EspeakTTS removed — TTS is now handled by spf-voice crate (in-process FFI, no subprocess)
// OpusCodec removed — encode/decode now handled by spf-voice crate (libopus.a static FFI)
// CpalAudioInput removed — microphone capture now handled by spf-voice crate (spf_voice::listen())
// CpalAudioOutput removed — speaker playback now handled by spf-voice crate (spf_voice::play())
// ============================================================================
// VOICE TOKENIZER — GPT2 byte-level BPE decode, zero external crates
//
// Whisper uses GPT2's byte-level BPE tokenizer. Every token string is encoded
// so that each character maps to exactly one byte via the GPT2 Unicode↔byte
// table. To decode: char → byte (via table), collect bytes, UTF-8 decode.
//
// Reads vocab.json: {"<|endoftext|>": 50256, "the": 1, "Ġthe": 262, ...}
// ============================================================================
#[cfg(feature = "voice-stt")]
pub struct VoiceTokenizer {
    /// token id → token string (inverse of vocab.json, built at load time)
    id_to_token: std::collections::HashMap<u32, String>,
    /// token string → id, exactly as read from vocab.json (+ added_tokens.json)
    token_to_id_map: std::collections::HashMap<String, u32>,
    /// GPT2 byte-level BPE table: printable char → original byte (decode direction)
    unicode_to_byte: std::collections::HashMap<char, u8>,
    /// IDs of tokens shaped like <|...|> — skippable during decode
    special_ids: std::collections::HashSet<u32>,
}
#[cfg(feature = "voice-stt")]
impl VoiceTokenizer {
    /// Load from vocab.json: {"token_string": token_id, ...}
    ///
    /// # Errors
    /// ConfigError if vocab.json cannot be read or parsed.
    pub fn from_file(vocab_path: &std::path::Path) -> Result<Self, VoiceError> {
        let json = std::fs::read_to_string(vocab_path)
            .map_err(|e| VoiceError::ConfigError(format!("vocab.json read: {}", e)))?;
        let mut token_to_id_map: std::collections::HashMap<String, u32> =
            serde_json::from_str(&json)
                .map_err(|e| VoiceError::ConfigError(format!("vocab.json parse: {}", e)))?;
        // Merge added_tokens.json if present — Whisper special tokens live here.
        // Best-effort: a missing or malformed file is silently ignored.
        if let Some(parent) = vocab_path.parent() {
            let added_path = parent.join("added_tokens.json");
            if let Ok(added_json) = std::fs::read_to_string(&added_path) {
                if let Ok(added) = serde_json::from_str::<std::collections::HashMap<String, u32>>(&added_json) {
                    token_to_id_map.extend(added);
                }
            }
        }
        let unicode_to_byte = Self::build_unicode_to_byte();
        let mut special_ids = std::collections::HashSet::new();
        // Build the inverse map; while walking, record tokens shaped <|...|>
        // as specials so decode() can skip them.
        let id_to_token: std::collections::HashMap<u32, String> = token_to_id_map
            .iter()
            .map(|(tok, &id)| {
                if tok.starts_with("<|") && tok.ends_with("|>") {
                    special_ids.insert(id);
                }
                (id, tok.clone())
            })
            .collect();
        Ok(Self { id_to_token, token_to_id_map, unicode_to_byte, special_ids })
    }
    /// Look up token string → ID.
    pub fn token_to_id(&self, token: &str) -> Option<u32> {
        self.token_to_id_map.get(token).copied()
    }
    /// Decode token IDs → UTF-8 text. Skips special tokens when requested.
    /// Unknown IDs and chars outside the GPT2 byte table are silently dropped;
    /// invalid UTF-8 becomes U+FFFD via from_utf8_lossy. Result is trimmed.
    pub fn decode(&self, ids: &[u32], skip_special: bool) -> String {
        let bytes: Vec<u8> = ids
            .iter()
            .filter(|&&id| !skip_special || !self.special_ids.contains(&id))
            .filter_map(|id| self.id_to_token.get(id))
            .flat_map(|tok| {
                tok.chars().filter_map(|c| self.unicode_to_byte.get(&c).copied())
            })
            .collect();
        String::from_utf8_lossy(&bytes).trim().to_string()
    }
    /// GPT2 Unicode↔byte mapping (inverse, for decoding).
    /// Printable bytes (33–126, 161–172, 174–255) map to themselves.
    /// The 68 invisible bytes map to codepoints 256–323 (Ā, ā, Ă, …).
    fn build_unicode_to_byte() -> std::collections::HashMap<char, u8> {
        // 94 + 12 + 82 = 188 "visible" bytes; the remaining 68 are remapped.
        let visible: Vec<u8> = (b'!'..=b'~')
            .chain(b'\xa1'..=b'\xac')
            .chain(b'\xae'..=b'\xff')
            .collect();
        let visible_set: std::collections::HashSet<u8> = visible.iter().copied().collect();
        let mut map = std::collections::HashMap::with_capacity(256);
        // Visible bytes: Unicode codepoint == byte value
        for &b in &visible {
            map.insert(b as char, b);
        }
        // Invisible bytes: codepoints 256, 257, … in ascending byte order
        let mut cp = 256u32;
        for b in 0u8..=255 {
            if !visible_set.contains(&b) {
                if let Some(c) = char::from_u32(cp) {
                    map.insert(c, b);
                }
                cp += 1;
            }
        }
        map
    }
}
// ============================================================================
// CANDLE WHISPER STT — Speech-to-Text (feature = "voice-stt")
// Pure Rust — no C++ deps. Uses candle-transformers already compiled in binary.
// Model dir: LIVE/MODELS/whisper-tiny/
// Required: config.json vocab.json model.safetensors melfilters.bytes
// ============================================================================
#[cfg(feature = "voice-stt")]
pub struct CandleWhisperSTT {
    /// Whisper encoder/decoder from candle-transformers (runs on CPU here)
    model: candle_transformers::models::whisper::model::Whisper,
    /// GPT2 byte-level BPE tokenizer loaded from vocab.json
    tokenizer: VoiceTokenizer,
    /// Flattened mel filterbank read from melfilters.bytes (f32 LE)
    mel_filters: Vec<f32>,
    /// Parsed Whisper config.json (mel bins, max target positions, …)
    config: candle_transformers::models::whisper::Config,
    /// Inference device — always Device::Cpu in new()
    device: candle_core::Device,
    // Special-token IDs resolved once at load time (see new()):
    eot_token: u32,
    sot_token: u32,
    transcribe_token: u32,
    no_timestamps_token: u32,
}
// SAFETY: CandleWhisperSTT is owned exclusively by one thread at a time.
// The Whisper model's internal KV cache (RefCell) is never accessed concurrently.
#[cfg(feature = "voice-stt")]
unsafe impl Send for CandleWhisperSTT {}
#[cfg(feature = "voice-stt")]
impl CandleWhisperSTT {
    /// Load from model directory.
    /// Expects: config.json, vocab.json, model.safetensors, melfilters.bytes
    ///
    /// # Errors
    /// ConfigError for parse failures / missing special tokens;
    /// DeviceError for file I/O or model-load failures.
    pub fn new(model_dir: &std::path::Path) -> Result<Self, VoiceError> {
        // ── Config ──────────────────────────────────────────────────────────
        let config_str = std::fs::read_to_string(model_dir.join("config.json"))
            .map_err(|e| VoiceError::DeviceError(format!("Whisper config.json: {}", e)))?;
        let config: candle_transformers::models::whisper::Config =
            serde_json::from_str(&config_str)
                .map_err(|e| VoiceError::ConfigError(format!("Whisper config parse: {}", e)))?;
        // ── Tokenizer ───────────────────────────────────────────────────────
        let tokenizer = VoiceTokenizer::from_file(&model_dir.join("vocab.json"))?;
        // Resolve the four special tokens the decoder loop needs up front —
        // failing here gives a clearer error than failing mid-decode.
        let eot_token = tokenizer
            .token_to_id(candle_transformers::models::whisper::EOT_TOKEN)
            .ok_or_else(|| VoiceError::ConfigError("EOT token missing".into()))?;
        let sot_token = tokenizer
            .token_to_id(candle_transformers::models::whisper::SOT_TOKEN)
            .ok_or_else(|| VoiceError::ConfigError("SOT token missing".into()))?;
        let transcribe_token = tokenizer
            .token_to_id(candle_transformers::models::whisper::TRANSCRIBE_TOKEN)
            .ok_or_else(|| VoiceError::ConfigError("TRANSCRIBE token missing".into()))?;
        let no_timestamps_token = tokenizer
            .token_to_id(candle_transformers::models::whisper::NO_TIMESTAMPS_TOKEN)
            .ok_or_else(|| VoiceError::ConfigError("NO_TIMESTAMPS token missing".into()))?;
        // ── Mel filters ─────────────────────────────────────────────────────
        // melfilters.bytes: little-endian f32, 80 × 201 = 16 080 values = 64 320 bytes
        // (chunks_exact(4) drops any trailing partial value)
        let mel_bytes = std::fs::read(model_dir.join("melfilters.bytes"))
            .map_err(|e| VoiceError::DeviceError(format!("melfilters.bytes: {}", e)))?;
        let mel_filters: Vec<f32> = mel_bytes
            .chunks_exact(4)
            .map(|b| f32::from_le_bytes(b.try_into().unwrap()))
            .collect();
        // ── Weights ─────────────────────────────────────────────────────────
        let device = candle_core::Device::Cpu;
        // SAFETY (per candle's from_mmaped_safetensors contract): the weights
        // file is memory-mapped and must not be modified while mapped.
        let vb = unsafe {
            candle_nn::VarBuilder::from_mmaped_safetensors(
                &[model_dir.join("model.safetensors")],
                candle_transformers::models::whisper::DTYPE,
                &device,
            )
            .map_err(|e| VoiceError::DeviceError(format!("Weights load: {}", e)))?
        };
        let model =
            candle_transformers::models::whisper::model::Whisper::load(&vb, config.clone())
                .map_err(|e| VoiceError::DeviceError(format!("Model init: {}", e)))?;
        eprintln!("[SPF-STT] Candle Whisper loaded from {:?}", model_dir);
        Ok(Self {
            model,
            tokenizer,
            mel_filters,
            config,
            device,
            eot_token,
            sot_token,
            transcribe_token,
            no_timestamps_token,
        })
    }
    /// Convert f32 PCM (16 kHz, mono, [-1.0, 1.0]) to mel spectrogram Tensor.
    /// Output shape is (1, num_mel_bins, mel_len / num_mel_bins).
    pub fn audio_to_mel(&self, samples: &[f32]) -> Result<candle_core::Tensor, VoiceError> {
        let mel = candle_transformers::models::whisper::audio::pcm_to_mel(
            &self.config,
            samples,
            &self.mel_filters,
        );
        let mel_len = mel.len();
        let n_mel = self.config.num_mel_bins;
        candle_core::Tensor::from_vec(mel, (1, n_mel, mel_len / n_mel), &self.device)
            .map_err(|e| VoiceError::DeviceError(format!("Mel tensor: {}", e)))
    }
    /// Greedy decoder loop: mel Tensor → token sequence → text.
    /// Argmax at each step (no sampling/beam); stops on EOT or max length.
    pub fn decode_segment(&mut self, mel: &candle_core::Tensor) -> Result<String, VoiceError> {
        let audio_features = self.model.encoder.forward(mel, true)
            .map_err(|e| VoiceError::DeviceError(format!("Encoder: {}", e)))?;
        // Seed: <|startoftranscript|> <|transcribe|> <|notimestamps|>
        let mut tokens: Vec<u32> =
            vec![self.sot_token, self.transcribe_token, self.no_timestamps_token];
        let max_tokens = self.config.max_target_positions;
        for i in 0..max_tokens {
            // The full token prefix is re-fed each step; the decoder KV cache
            // is flushed only on the first iteration (i == 0).
            let tokens_t = candle_core::Tensor::new(tokens.as_slice(), &self.device)
                .and_then(|t| t.unsqueeze(0))
                .map_err(|e| VoiceError::DeviceError(format!("Token tensor: {}", e)))?;
            let ys = self.model.decoder.forward(&tokens_t, &audio_features, i == 0)
                .map_err(|e| VoiceError::DeviceError(format!("Decoder step {}: {}", i, e)))?;
            let (_, seq_len, _) = ys.dims3()
                .map_err(|e| VoiceError::DeviceError(format!("dims3: {}", e)))?;
            if seq_len == 0 { break; }
            // Project only the last position's hidden state to vocab logits.
            let logits = self.model.decoder
                .final_linear(&ys.i((..1, seq_len - 1..))
                    .map_err(|e| VoiceError::DeviceError(format!("Slice: {}", e)))?)
                .and_then(|t| t.i(0))
                .and_then(|t| t.i(0))
                .map_err(|e| VoiceError::DeviceError(format!("Logits: {}", e)))?;
            let logits_v: Vec<f32> = logits.to_vec1()
                .map_err(|e| VoiceError::DeviceError(format!("to_vec1: {}", e)))?;
            // Greedy argmax; empty logits fall back to EOT (ends the loop).
            let next_token = logits_v
                .iter()
                .enumerate()
                .max_by(|(_, a), (_, b)| a.total_cmp(b))
                .map(|(idx, _)| idx as u32)
                .unwrap_or(self.eot_token);
            if next_token == self.eot_token || tokens.len() >= max_tokens {
                break;
            }
            tokens.push(next_token);
        }
        // skip_special = true: the seed tokens never appear in the output text.
        let text = self.tokenizer.decode(&tokens, true);
        Ok(text)
    }
}
#[cfg(feature = "voice-stt")]
impl SpeechToText for CandleWhisperSTT {
    /// Transcribe mesh voice frames (payloads are i16 LE PCM) into text.
    /// Empty input yields an empty string without touching the model.
    fn transcribe(&mut self, frames: &[VoiceFrame]) -> Result<String, VoiceError> {
        if frames.is_empty() {
            return Ok(String::new());
        }
        // Frames carry i16 LE PCM bytes — convert to f32 [-1.0, 1.0] for Whisper.
        // chunks_exact(2) silently drops a trailing odd byte, if any.
        let samples: Vec<f32> = frames
            .iter()
            .flat_map(|f| f.data.chunks_exact(2))
            .map(|pair| i16::from_le_bytes([pair[0], pair[1]]) as f32 / 32768.0)
            .collect();
        let mel = self.audio_to_mel(&samples)?;
        self.decode_segment(&mel)
    }
    fn is_available(&self) -> bool { true }
    /// Hard-coded list of common Whisper languages — informational only,
    /// not probed from the loaded model.
    fn supported_languages(&self) -> Vec<String> {
        vec![
            "en".into(), "es".into(), "fr".into(), "de".into(), "it".into(),
            "pt".into(), "nl".into(), "ja".into(), "ko".into(), "zh".into(),
            "ru".into(), "ar".into(), "hi".into(), "pl".into(), "sv".into(),
        ]
    }
}
// ============================================================================
// STREAMING VOICE-TO-TEXT — Continuous live transcription
//
// stream_on → opens mic, transcribes continuously, no fixed duration
// stream_off → signals thread to exit (or say "end stream" / "stop stream")
// stream_read → drains all transcribed text accumulated since last call
// ============================================================================
/// Transcribed text segments from the streaming pipeline.
/// Drained by stream_read(). Thread-safe across all tool calls.
/// Besides plain text it may contain the markers "[REPLY]" and "\n" pushed
/// by the voice commands in stream_on().
pub static TRANSCRIPT_QUEUE: std::sync::Mutex<Vec<String>> =
    std::sync::Mutex::new(Vec::new());
/// Controls the streaming thread lifecycle.
/// true = running. false = thread exits after current chunk.
pub static STREAM_ACTIVE: std::sync::atomic::AtomicBool =
    std::sync::atomic::AtomicBool::new(false);
/// Start continuous voice-to-text via spf-voice (in-process cpal) — independent of VOICE_SESSION.
/// Each ~10 s chunk is captured via spf_voice::listen(), transcribed, pushed to TRANSCRIPT_QUEUE.
/// Recognises "end stream" or "stop stream" in transcript to self-terminate.
///
/// # Errors
/// DeviceError when a stream is already active; any CandleWhisperSTT::new error.
#[cfg(feature = "voice-stt")]
pub fn stream_on(model_dir: std::path::PathBuf) -> Result<(), VoiceError> {
    // Single-instance guard — only one streaming thread may run at a time.
    if STREAM_ACTIVE.load(std::sync::atomic::Ordering::SeqCst) {
        return Err(VoiceError::DeviceError(
            "Stream already active — call stream_off first".into(),
        ));
    }
    // Load model on calling thread: fail fast with a clear error message
    let mut stt = CandleWhisperSTT::new(&model_dir)?;
    STREAM_ACTIVE.store(true, std::sync::atomic::Ordering::SeqCst);
    // Clear stale transcripts from any previous stream
    if let Ok(mut q) = TRANSCRIPT_QUEUE.lock() { q.clear(); }
    std::thread::spawn(move || {
        eprintln!("[SPF-STREAM] Streaming started — via spf-voice, 16 kHz mono, continuous");
        // ~10 s chunks: Whisper works best with 10-30 s segments
        const CHUNK_MS: u64 = 10_000;
        loop {
            if !STREAM_ACTIVE.load(std::sync::atomic::Ordering::SeqCst) { break; }
            // Capture audio via spf-voice (in-process cpal, blocks for CHUNK_MS)
            match spf_voice::listen(CHUNK_MS) {
                Ok(pcm_i16) => {
                    // Empty capture — loop back around (STREAM_ACTIVE re-checked at top)
                    if pcm_i16.is_empty() { continue; }
                    // Convert i16 → f32 [-1.0, 1.0] for Whisper mel spectrogram
                    let f32_samples: Vec<f32> = pcm_i16.iter()
                        .map(|&s| s as f32 / 32768.0)
                        .collect();
                    match stt.audio_to_mel(&f32_samples) {
                        Ok(mel) => match stt.decode_segment(&mel) {
                            Ok(text) if !text.is_empty() => {
                                eprintln!("[SPF-STREAM] → {}", text);
                                // Normalise for command matching: lowercase and
                                // strip surrounding punctuation (keeps inner spaces).
                                let lower = text.to_lowercase();
                                let trimmed = lower.trim().trim_matches(|c: char| !c.is_alphanumeric() && c != ' ');
                                // Voice command detection — specific phrases only
                                let is_end = trimmed.contains("end stream") || trimmed.contains("stop stream");
                                let is_enter = trimmed == "enter" || trimmed.ends_with(" enter")
                                    || trimmed == "new line" || trimmed == "newline";
                                let is_reply = trimmed == "reply" || trimmed.ends_with(" reply");
                                let is_clear = trimmed == "clear" || trimmed.starts_with("clear ");
                                let is_read_back = trimmed.contains("read back") || trimmed.contains("read it back");
                                let is_start = trimmed.contains("start stream");
                                let is_command = is_end || is_enter || is_reply || is_clear || is_read_back || is_start;
                                if is_command {
                                    // Commands don't go into transcript — handle action directly.
                                    // Precedence when flags overlap: clear > read back > reply > enter > start;
                                    // "end stream" is handled last and also terminates the loop.
                                    if is_clear {
                                        if let Ok(mut q) = TRANSCRIPT_QUEUE.lock() { q.clear(); }
                                        eprintln!("[SPF-STREAM] CMD: clear — buffer cleared");
                                    } else if is_read_back {
                                        let current = TRANSCRIPT_QUEUE.lock()
                                            .map(|q| q.join(" ")).unwrap_or_default();
                                        if !current.is_empty() {
                                            // Best-effort TTS — a stub/unavailable backend is ignored.
                                            let _ = spf_voice::speak(&format!("You said: {}", current));
                                        } else {
                                            let _ = spf_voice::speak("Nothing to read back");
                                        }
                                        eprintln!("[SPF-STREAM] CMD: read back");
                                    } else if is_reply {
                                        if let Ok(mut q) = TRANSCRIPT_QUEUE.lock() {
                                            q.push("[REPLY]".to_string());
                                        }
                                        eprintln!("[SPF-STREAM] CMD: reply — marker pushed");
                                    } else if is_enter {
                                        if let Ok(mut q) = TRANSCRIPT_QUEUE.lock() {
                                            q.push("\n".to_string());
                                        }
                                        eprintln!("[SPF-STREAM] CMD: enter — newline pushed");
                                    } else if is_start {
                                        eprintln!("[SPF-STREAM] CMD: start stream — already running");
                                    }
                                    if is_end {
                                        eprintln!("[SPF-STREAM] CMD: end stream — stopping");
                                        STREAM_ACTIVE.store(false, std::sync::atomic::Ordering::SeqCst);
                                        break;
                                    }
                                } else {
                                    // Normal transcript text — push to queue
                                    if let Ok(mut q) = TRANSCRIPT_QUEUE.lock() {
                                        q.push(text);
                                    }
                                }
                            }
                            Ok(_) => {} // Silence or blank — Whisper returned nothing, continue
                            Err(e) => eprintln!("[SPF-STREAM] Transcribe error: {}", e),
                        },
                        Err(e) => eprintln!("[SPF-STREAM] Mel error: {}", e),
                    }
                }
                // Capture failure is fatal for the stream — flag down and exit.
                Err(e) => {
                    eprintln!("[SPF-STREAM] Listen error: {} — stopping stream", e);
                    STREAM_ACTIVE.store(false, std::sync::atomic::Ordering::SeqCst);
                    break;
                }
            }
        }
        // Always leave the flag false on exit, whatever path got us here.
        STREAM_ACTIVE.store(false, std::sync::atomic::Ordering::SeqCst);
        eprintln!("[SPF-STREAM] Streaming stopped");
    });
    Ok(())
}
/// Ask the streaming thread to shut down; it exits after finishing the chunk
/// it is currently capturing/transcribing (no forced interruption).
pub fn stream_off() {
    use std::sync::atomic::Ordering;
    STREAM_ACTIVE.store(false, Ordering::SeqCst);
    eprintln!("[SPF-STREAM] Stop signal sent");
}
/// Drain and return every transcript segment accumulated since the last call.
/// Yields an empty vec when nothing is ready, no stream is active, or the
/// queue mutex is poisoned.
pub fn stream_read() -> Vec<String> {
    match TRANSCRIPT_QUEUE.lock() {
        // Swap the queue for a fresh empty Vec and hand back its contents.
        Ok(mut queue) => std::mem::take(&mut *queue),
        Err(_) => Vec::new(),
    }
}
// ============================================================================
// PIPER TTS — Neural Text-to-Speech via ONNX Runtime (feature = "voice-tts")
// ============================================================================
#[cfg(feature = "voice-tts")]
pub struct PiperTTS {
    /// ONNX Runtime session holding the Piper voice model.
    /// NOTE(review): the `OrtSession` alias is not imported in this chunk —
    /// presumably a feature-gated `use` elsewhere in the file; confirm.
    session: OrtSession,
    /// Framing parameters (channels / frame_duration_ms used when chunking output)
    config: VoiceConfig,
    /// Output sample rate read from the Piper voice config JSON (fallback 22050)
    model_sample_rate: u32,
}
#[cfg(feature = "voice-tts")]
impl PiperTTS {
    /// Load a Piper voice: the ONNX model plus its JSON config (sample rate).
    ///
    /// # Errors
    /// DeviceError if the ONNX model fails to load;
    /// ConfigError if the JSON config cannot be read or parsed.
    pub fn new(model_path: &str, config_path: &str) -> Result<Self, VoiceError> {
        let session = OrtSession::builder()
            .and_then(|b| b.commit_from_file(model_path))
            .map_err(|e| VoiceError::DeviceError(format!("ONNX model load: {}", e)))?;
        // Read Piper config JSON for sample rate
        let config_data = std::fs::read_to_string(config_path)
            .map_err(|e| VoiceError::ConfigError(format!("Piper config read: {}", e)))?;
        let config_json: serde_json::Value = serde_json::from_str(&config_data)
            .map_err(|e| VoiceError::ConfigError(format!("Piper config parse: {}", e)))?;
        // Missing/invalid "audio.sample_rate" falls back to 22 050 Hz.
        let model_sample_rate = config_json["audio"]["sample_rate"].as_u64().unwrap_or(22050) as u32;
        Ok(Self {
            session,
            config: VoiceConfig::default(),
            model_sample_rate,
        })
    }
}
#[cfg(feature = "voice-tts")]
impl TextToSpeech for PiperTTS {
    /// Synthesize `text` into "pcm" VoiceFrames at the model's native sample rate.
    fn synthesize(&mut self, text: &str) -> Result<Vec<VoiceFrame>, VoiceError> {
        // Piper expects phoneme IDs as i64 tensor
        // Simple ASCII-to-phoneme mapping (Piper's phonemizer handles the rest)
        // NOTE(review): this feeds raw Unicode codepoints as IDs — standard Piper
        // models expect eSpeak phoneme IDs; verify the deployed model accepts this.
        let phoneme_ids: Vec<i64> = text.chars()
            .map(|c| c as i64)
            .collect();
        let input_len = phoneme_ids.len();
        // Shape (1, input_len): batch of one utterance.
        let input_tensor = ort::Value::from_array(
            ort::ArrayExtensions::into_dyn(
                ndarray::Array2::from_shape_vec((1, input_len), phoneme_ids)
                    .map_err(|e| VoiceError::CodecError(format!("Tensor shape: {}", e)))?
            )
        ).map_err(|e| VoiceError::CodecError(format!("Input tensor: {}", e)))?;
        let input_lengths = ort::Value::from_array(
            ndarray::Array1::from_vec(vec![input_len as i64]).into_dyn()
        ).map_err(|e| VoiceError::CodecError(format!("Length tensor: {}", e)))?;
        // Inference scales (presumably noise/length/noise_w — confirm against
        // the Piper model card for this voice).
        let scales = ort::Value::from_array(
            ndarray::Array1::from_vec(vec![0.667f32, 1.0, 0.8]).into_dyn()
        ).map_err(|e| VoiceError::CodecError(format!("Scales tensor: {}", e)))?;
        let outputs = self.session.run(ort::inputs![input_tensor, input_lengths, scales]
            .map_err(|e| VoiceError::CodecError(format!("Run inputs: {}", e)))?)
            .map_err(|e| VoiceError::CodecError(format!("ONNX inference: {}", e)))?;
        // Output is audio waveform as f32
        let audio_f32: Vec<f32> = outputs[0]
            .try_extract_tensor::<f32>()
            .map_err(|e| VoiceError::CodecError(format!("Extract audio: {}", e)))?
            .view()
            .iter()
            .copied()
            .collect();
        // Convert f32 audio to i16 PCM bytes (clamp before cast to avoid wrap)
        let pcm_bytes: Vec<u8> = audio_f32.iter()
            .flat_map(|&s| {
                let clamped = (s * 32767.0).clamp(-32768.0, 32767.0) as i16;
                clamped.to_le_bytes()
            })
            .collect();
        // Frame the audio at the MODEL's sample rate (not config.sample_rate);
        // frames are raw "pcm", not opus. Last chunk may be shorter.
        let frame_bytes = (self.model_sample_rate as usize * self.config.channels as usize * 2
            * self.config.frame_duration_ms as usize) / 1000;
        if frame_bytes == 0 {
            return Err(VoiceError::ConfigError("Frame size zero".into()));
        }
        let timestamp = chrono::Utc::now().to_rfc3339();
        let frames: Vec<VoiceFrame> = pcm_bytes.chunks(frame_bytes)
            .enumerate()
            .map(|(i, chunk)| VoiceFrame {
                codec: "pcm".into(),
                sample_rate: self.model_sample_rate,
                channels: self.config.channels,
                frame_duration_ms: self.config.frame_duration_ms,
                data: chunk.to_vec(),
                seq: i as u64,
                from: String::new(),      // caller fills sender identity
                timestamp: timestamp.clone(),
                team_id: None,            // TTS output is never a team-channel frame
            })
            .collect();
        Ok(frames)
    }
    fn is_available(&self) -> bool {
        true
    }
    fn available_voices(&self) -> Vec<String> {
        vec!["piper-default".into()]
    }
}
// ============================================================================
// VOICE SESSION — Pipeline state management (always compiles)
// ============================================================================
/// Manages the active voice pipeline state.
/// Uses trait objects to abstract over stub/real implementations.
/// Construction selects the best available backend for each component.
pub struct VoiceSession {
    /// Audio configuration (codec, sample rate, channels, frame duration).
    pub config: VoiceConfig,
    // Capture source — stubs in start(); real capture routes via spf_voice::listen.
    input: Option<Box<dyn AudioInput>>,
    // Playback sink — stubs in start(); real playback routes via spf_voice::play.
    output: Option<Box<dyn AudioOutput>>,
    // Speech-to-text backend (CandleWhisperSTT under the voice-stt feature).
    stt: Option<Box<dyn SpeechToText>>,
    // Text-to-speech fallback (PiperTTS under voice-tts); espeak-ng via
    // spf_voice takes priority in speak().
    tts: Option<Box<dyn TextToSpeech>>,
    // True between start() and stop().
    active: bool,
    // Counter used as the outgoing frame sequence number in capture_frame().
    frames_sent: u64,
    // Incremented by play_frame() for stats reporting.
    frames_received: u64,
}
impl VoiceSession {
    /// Create an inactive session with the given config.
    /// No hardware or FFI is touched until start() is called.
    pub fn new(config: VoiceConfig) -> Self {
        Self {
            config,
            input: None,
            output: None,
            stt: None,
            tts: None,
            active: false,
            frames_sent: 0,
            frames_received: 0,
        }
    }
    /// Start the voice pipeline.
    /// Audio I/O and TTS delegate to spf-voice crate (in-process FFI).
    /// STT: CandleWhisperSTT if voice-stt feature enabled.
    ///
    /// # Errors
    /// Returns `DeviceError` if spf_voice::open() fails. Optional STT/TTS
    /// backends failing to load is non-fatal — logged and skipped.
    pub fn start(&mut self) -> Result<(), VoiceError> {
        // Open persistent audio pipeline — cpal streams + espeak-ng + opus codec
        // Streams stay alive until stop() is called. All spf_voice:: functions
        // auto-route through the persistent pipeline once open.
        spf_voice::open()
            .map_err(|e| VoiceError::DeviceError(format!("spf_voice::open failed: {}", e)))?;
        // Audio I/O: placeholder stubs — real I/O goes through spf_voice:: calls
        self.input = Some(Box::new(StubAudioInput::new()));
        self.output = Some(Box::new(StubAudioOutput::new()));
        // STT: Candle Whisper if feature enabled
        #[cfg(feature = "voice-stt")]
        {
            let model_dir = crate::paths::spf_root().join("LIVE/MODELS/whisper-tiny");
            match CandleWhisperSTT::new(&model_dir) {
                Ok(stt) => {
                    eprintln!("[SPF-VOICE] STT: Candle Whisper active");
                    self.stt = Some(Box::new(stt));
                }
                Err(e) => { eprintln!("[SPF-VOICE] Whisper STT unavailable: {}", e); }
            }
        }
        // TTS + audio I/O: handled by spf-voice crate — report status
        let svs = spf_voice::status();
        eprintln!("[SPF-VOICE] spf-voice: tts={} input={} output={} codec={}",
            svs.tts_available, svs.input_available, svs.output_available, svs.codec_available);
        // Piper TTS (neural, optional — voice-tts feature)
        #[cfg(feature = "voice-tts")]
        {
            let model_dir = crate::paths::spf_root().join("LIVE/MODELS/piper");
            let model = model_dir.join("en_US-lessac-medium.onnx");
            let cfg = model_dir.join("en_US-lessac-medium.onnx.json");
            if model.exists() && cfg.exists() {
                match PiperTTS::new(&model.to_string_lossy(), &cfg.to_string_lossy()) {
                    Ok(piper) => {
                        self.tts = Some(Box::new(piper));
                        eprintln!("[SPF-VOICE] TTS: Piper neural TTS active");
                    }
                    Err(e) => { eprintln!("[SPF-VOICE] Piper TTS unavailable: {}", e); }
                }
            }
        }
        self.active = true;
        eprintln!("[SPF-VOICE] Session started");
        Ok(())
    }
    /// Stop all voice pipeline components.
    /// Also resets the sent/received frame counters to zero.
    pub fn stop(&mut self) {
        // Close persistent audio pipeline — drops cpal streams + opus codec
        spf_voice::close();
        // Drop trait objects — stubs release cleanly.
        self.input = None;
        self.output = None;
        self.stt = None;
        self.tts = None;
        self.active = false;
        self.frames_sent = 0;
        self.frames_received = 0;
        eprintln!("[SPF-VOICE] Session stopped");
    }
    /// True between a successful start() and the next stop().
    pub fn is_active(&self) -> bool {
        self.active
    }
    /// Get current voice pipeline status — queries spf-voice crate for real hardware state
    pub fn status(&self) -> VoiceStatus {
        let svs = spf_voice::status();
        VoiceStatus {
            audio_input_available: svs.input_available,
            audio_output_available: svs.output_available,
            stt_available: self.stt.as_ref().map_or(false, |s| s.is_available()),
            // tts: spf-voice espeak-ng FFI takes priority; fall back to Piper trait object if loaded
            tts_available: svs.tts_available
                || self.tts.as_ref().map_or(false, |t| t.is_available()),
            pipeline_open: svs.pipeline_open,
            codec: self.config.codec.clone(),
            sample_rate: self.config.sample_rate,
            channels: self.config.channels,
        }
    }
    /// Capture one audio frame via spf-voice (in-process cpal), encode with opus, return VoiceFrame for mesh
    ///
    /// # Errors
    /// `DeviceError` if capture fails. An opus encode failure is NOT an
    /// error — the frame falls back to raw little-endian PCM ("pcm" codec).
    pub fn capture_frame(&mut self, from_identity: &str) -> Result<VoiceFrame, VoiceError> {
        let pcm_i16 = spf_voice::listen(self.config.frame_duration_ms as u64)
            .map_err(|e| VoiceError::DeviceError(e.to_string()))?;
        let (codec, encoded) = match spf_voice::encode(&pcm_i16) {
            Ok(data) => ("opus".to_string(), data),
            Err(_) => {
                // Fall back to raw PCM bytes if encode fails
                let pcm_bytes: Vec<u8> = pcm_i16.iter()
                    .flat_map(|s| s.to_le_bytes())
                    .collect();
                ("pcm".to_string(), pcm_bytes)
            }
        };
        // Increment first so seq starts at 1 (0 is never emitted by this path).
        self.frames_sent += 1;
        Ok(VoiceFrame {
            codec,
            sample_rate: self.config.sample_rate,
            channels: self.config.channels,
            frame_duration_ms: self.config.frame_duration_ms,
            data: encoded,
            seq: self.frames_sent,
            from: from_identity.to_string(),
            timestamp: chrono::Utc::now().to_rfc3339(),
            team_id: None,
        })
    }
    /// Receive a VoiceFrame from mesh, decode with spf-voice opus, play through spf-voice cpal
    ///
    /// Any codec tag other than "opus" is treated as raw little-endian
    /// i16 PCM. # Errors: `CodecError` on opus decode failure,
    /// `DeviceError` on playback failure.
    pub fn play_frame(&mut self, frame: &VoiceFrame) -> Result<(), VoiceError> {
        let pcm_i16: Vec<i16> = if frame.codec == "opus" {
            spf_voice::decode(&frame.data)
                .map_err(|e| VoiceError::CodecError(e.to_string()))?
        } else {
            // Raw PCM bytes → i16 (little-endian passthrough)
            frame.data.chunks_exact(2)
                .map(|pair| i16::from_le_bytes([pair[0], pair[1]]))
                .collect()
        };
        spf_voice::play(&pcm_i16)
            .map_err(|e| VoiceError::DeviceError(e.to_string()))?;
        self.frames_received += 1;
        Ok(())
    }
    /// Transcribe accumulated audio frames to text (STT)
    ///
    /// # Errors
    /// `NotAvailable` when no STT engine is loaded (voice-stt feature off
    /// or model failed to load in start()).
    pub fn transcribe(&mut self, frames: &[VoiceFrame]) -> Result<String, VoiceError> {
        let stt = self.stt.as_mut()
            .ok_or_else(|| VoiceError::NotAvailable("No STT engine".into()))?;
        stt.transcribe(frames)
    }
    /// Synthesize text and play audio via spf-voice (in-process espeak-ng FFI).
    /// Falls back to Piper neural TTS trait object if loaded (voice-tts feature).
    /// Returns empty vec — audio is played in-process, not transmitted as mesh frames.
    /// (The Piper fallback path may return synthesized frames instead.)
    pub fn speak(&mut self, text: &str) -> Result<Vec<VoiceFrame>, VoiceError> {
        // Primary: spf-voice in-process TTS (espeak-ng FFI)
        match spf_voice::speak(text) {
            Ok(()) => return Ok(vec![]),
            Err(e) => {
                eprintln!("[SPF-VOICE] spf-voice speak failed: {} — trying fallback", e);
            }
        }
        // Fallback: Piper neural TTS if loaded (voice-tts feature)
        if let Some(ref mut tts) = self.tts {
            tts.synthesize(text)
        } else {
            Err(VoiceError::NotAvailable("No TTS engine available".into()))
        }
    }
    /// Get pipeline statistics as a JSON object (counters + nested status).
    pub fn stats(&self) -> serde_json::Value {
        serde_json::json!({
            "active": self.active,
            "frames_sent": self.frames_sent,
            "frames_received": self.frames_received,
            "status": self.status().to_json_value(),
        })
    }
}
// ============================================================================
// VOICE STATUS (for MCP reporting)
// ============================================================================
/// Voice system status snapshot
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VoiceStatus {
    /// Microphone capture available (from spf_voice::status()).
    pub audio_input_available: bool,
    /// Speaker playback available (from spf_voice::status()).
    pub audio_output_available: bool,
    /// A speech-to-text backend is loaded and usable.
    pub stt_available: bool,
    /// A text-to-speech backend (espeak-ng or Piper) is usable.
    pub tts_available: bool,
    /// Persistent spf-voice audio pipeline is currently open.
    pub pipeline_open: bool,
    /// Configured codec name (e.g. "opus").
    pub codec: String,
    /// Configured sample rate in Hz.
    pub sample_rate: u32,
    /// Configured channel count (1 = mono).
    pub channels: u8,
}
impl VoiceStatus {
/// Build status from current stub state — queries spf-voice for real pipeline state
pub fn from_stubs() -> Self {
let config = VoiceConfig::default();
let svs = spf_voice::status();
Self {
audio_input_available: svs.input_available,
audio_output_available: svs.output_available,
stt_available: false,
tts_available: svs.tts_available,
pipeline_open: svs.pipeline_open,
codec: config.codec,
sample_rate: config.sample_rate,
channels: config.channels,
}
}
/// Build status from a VoiceSession
pub fn from_session(session: &VoiceSession) -> Self {
session.status()
}
pub fn to_json_value(&self) -> serde_json::Value {
let mode = if self.pipeline_open && (self.audio_input_available || self.audio_output_available) {
"pipeline_active"
} else if self.audio_input_available || self.audio_output_available {
"active"
} else if self.tts_available {
"tts_only"
} else {
"stubs_only"
};
serde_json::json!({
"status": mode,
"pipeline_open": self.pipeline_open,
"audio_input": self.audio_input_available,
"audio_output": self.audio_output_available,
"stt": self.stt_available,
"tts": self.tts_available,
"config": {
"codec": self.codec,
"sample_rate": self.sample_rate,
"channels": self.channels,
}
})
}
}
// ============================================================================
// MESH TRANSPORT HELPERS
// ============================================================================
/// Serialize a voice frame for mesh transport (StreamType::VoiceAudio = 0x03)
pub fn frame_to_bytes(frame: &VoiceFrame) -> Result<Vec<u8>, String> {
    match serde_json::to_vec(frame) {
        Ok(bytes) => Ok(bytes),
        Err(e) => Err(format!("Serialize error: {}", e)),
    }
}
/// Deserialize a voice frame from mesh transport
pub fn frame_from_bytes(data: &[u8]) -> Result<VoiceFrame, String> {
    match serde_json::from_slice(data) {
        Ok(frame) => Ok(frame),
        Err(e) => Err(format!("Deserialize error: {}", e)),
    }
}
// ============================================================================
// TEAM VOICE CHANNEL SYSTEM (Block TM-V)
// ============================================================================
/// A member currently active (joined) in a team voice channel.
/// Serialized into status reports; cheap to clone.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ActiveMember {
    /// Full mesh peer key of this member
    pub peer_key: String,
    /// Display name (short key prefix if unknown)
    pub name: String,
    /// RFC3339 timestamp when this member joined
    pub joined_at: String,
}
/// A team voice channel — persistent channel with invited members and active participants.
/// Invariant maintained by join(): every active member also appears in `members`.
#[derive(Debug, Clone)]
pub struct TeamChannel {
    /// Unique identifier for this channel (slugified name + short id)
    pub team_id: String,
    /// Human-readable channel name
    pub name: String,
    /// All invited peer keys (may or may not be currently active)
    pub members: Vec<String>,
    /// Currently joined members — present and receiving audio
    pub active_members: Vec<ActiveMember>,
    /// Audio configuration for this channel
    pub config: VoiceConfig,
    /// RFC3339 timestamp when channel was created
    pub created_at: String,
    /// Whether this channel is open (false = archived/closed)
    pub is_open: bool,
}
impl TeamChannel {
    /// Create a new, open team channel with default audio config and no members.
    pub fn new(team_id: &str, name: &str) -> Self {
        Self {
            team_id: team_id.to_string(),
            name: name.to_string(),
            members: Vec::new(),
            active_members: Vec::new(),
            config: VoiceConfig::default(),
            created_at: chrono::Utc::now().to_rfc3339(),
            is_open: true,
        }
    }
    /// Add a peer key to the invited members list (idempotent).
    pub fn add_member(&mut self, peer_key: &str) {
        // Compare as &str — avoids allocating a String per lookup
        // (previously `contains(&peer_key.to_string())`, clippy cmp_owned).
        if !self.members.iter().any(|m| m == peer_key) {
            self.members.push(peer_key.to_string());
        }
    }
    /// Join the channel — add to active_members if not already present.
    /// Also guarantees the peer appears in the invited members list.
    pub fn join(&mut self, peer_key: &str, display_name: &str) {
        let already_active = self.active_members.iter().any(|m| m.peer_key == peer_key);
        if !already_active {
            self.active_members.push(ActiveMember {
                peer_key: peer_key.to_string(),
                name: display_name.to_string(),
                joined_at: chrono::Utc::now().to_rfc3339(),
            });
        }
        // Ensure they are also in the members list
        self.add_member(peer_key);
    }
    /// Leave the channel — remove from active_members. Returns true if they were present.
    pub fn leave(&mut self, peer_key: &str) -> bool {
        let before = self.active_members.len();
        self.active_members.retain(|m| m.peer_key != peer_key);
        self.active_members.len() < before
    }
    /// Number of currently active (joined) members.
    pub fn active_count(&self) -> usize {
        self.active_members.len()
    }
    /// Number of invited members (may include inactive).
    pub fn member_count(&self) -> usize {
        self.members.len()
    }
    /// Peer keys of all currently active members.
    pub fn active_peer_keys(&self) -> Vec<String> {
        self.active_members.iter().map(|m| m.peer_key.clone()).collect()
    }
    /// One-line summary for list display.
    pub fn summary_line(&self) -> String {
        format!("[{}] {} — {} invited, {} active, {}",
            self.team_id, self.name,
            self.member_count(), self.active_count(),
            if self.is_open { "open" } else { "closed" })
    }
}
/// Config entry for a single team in voice_teams.json.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VoiceTeamEntry {
    /// Unique channel identifier — used as the registry key in VOICE_TEAMS.
    pub team_id: String,
    /// Human-readable channel name.
    pub name: String,
    /// Pre-invited peer keys; defaults to empty when absent from JSON.
    #[serde(default)]
    pub members: Vec<String>,
}
/// Top-level structure of LIVE/CONFIG/voice_teams.json.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VoiceTeamsConfig {
    /// team_id of the channel that always exists (created at startup)
    pub default_team: String,
    /// If true, incoming calls are automatically accepted without user action
    pub auto_accept_calls: bool,
    /// Preconfigured team channels loaded at startup
    /// (defaults to empty when absent from JSON)
    #[serde(default)]
    pub teams: Vec<VoiceTeamEntry>,
}
impl Default for VoiceTeamsConfig {
fn default() -> Self {
Self {
default_team: "default".to_string(),
auto_accept_calls: false,
teams: vec![VoiceTeamEntry {
team_id: "default".to_string(),
name: "Default Channel".to_string(),
members: Vec::new(),
}],
}
}
}
/// Global team channel registry — persists across tool calls within a session.
/// Key: team_id  Value: TeamChannel
/// Lockers should recover from poisoning (`unwrap_or_else(|e| e.into_inner())`)
/// since the registry data remains valid after a panicking holder.
pub static VOICE_TEAMS: std::sync::LazyLock<std::sync::Mutex<std::collections::HashMap<String, TeamChannel>>> =
    std::sync::LazyLock::new(|| std::sync::Mutex::new(std::collections::HashMap::new()));
/// Initialise the team channel registry from voice_teams.json.
/// Called once at startup from mcp::run() and mcp::run_worker().
/// Safe to call multiple times — idempotent (existing channels are not overwritten).
pub fn init_voice_teams() {
    let config_path = crate::paths::spf_root()
        .join("LIVE/CONFIG/voice_teams.json");
    // Load config; any read or parse failure falls back to defaults.
    let cfg: VoiceTeamsConfig = if config_path.exists() {
        match std::fs::read_to_string(&config_path) {
            // Parse errors are logged explicitly (previously swallowed by
            // unwrap_or_default) so a malformed config is diagnosable
            // instead of silently replaced with defaults.
            Ok(s) => match serde_json::from_str(&s) {
                Ok(parsed) => parsed,
                Err(e) => {
                    eprintln!("[SPF-VOICE] voice_teams.json parse error: {} — using defaults", e);
                    VoiceTeamsConfig::default()
                }
            },
            Err(e) => {
                eprintln!("[SPF-VOICE] voice_teams.json read error: {} — using defaults", e);
                VoiceTeamsConfig::default()
            }
        }
    } else {
        eprintln!("[SPF-VOICE] voice_teams.json not found at {:?} — using defaults", config_path);
        VoiceTeamsConfig::default()
    };
    // Recover from a poisoned lock — registry contents remain valid.
    let mut teams = VOICE_TEAMS.lock().unwrap_or_else(|e| e.into_inner());
    for entry in &cfg.teams {
        // Idempotent: only insert if not already present
        teams.entry(entry.team_id.clone()).or_insert_with(|| {
            let mut ch = TeamChannel::new(&entry.team_id, &entry.name);
            for m in &entry.members {
                ch.add_member(m);
            }
            ch
        });
    }
    // Always ensure the default channel exists even if not in config
    teams.entry(cfg.default_team.clone()).or_insert_with(|| {
        TeamChannel::new(&cfg.default_team, "Default Channel")
    });
    eprintln!("[SPF-VOICE] Team channels ready: {} channel(s) loaded", teams.len());
}
// ============================================================================
// MESH STREAM HANDLER
// ============================================================================
//
// SECURITY CONTRACT — enforced unconditionally:
// - VoiceAudio stream carries AUDIO FRAMES ONLY (VoiceFrame structs)
// - ALL call/team signaling travels via ToolRpc stream → gate → spf_voice_mode/spf_voice_team
// - This function NEVER calls dispatch::call(), handle_tool_call(), or any tool executor
// - Non-audio payloads (acks, status responses) are logged and silently dropped
// - Unknown payload types are ALWAYS silently dropped — no error propagation
// - No code path from here can execute tools or bypass the gate
//
// ============================================================================
/// Wrap a VoiceFrame into a framing::Frame for sending over the mesh VoiceAudio stream.
/// This is the ONLY frame type that should be written to a VoiceAudio stream.
pub fn voice_audio_frame(voice_frame: &VoiceFrame) -> Result<crate::framing::Frame, String> {
    frame_to_bytes(voice_frame)
        .map(|payload| crate::framing::Frame::new(crate::framing::StreamType::VoiceAudio, payload))
}
/// Handle an incoming VoiceAudio mesh frame.
///
/// SECURITY: This function carries audio data ONLY.
/// All call signaling (ring/accept/reject/end) and team signaling (join/leave/invite)
/// travels via the ToolRpc mesh stream → gate → spf_voice_mode / spf_voice_team handlers.
/// This function CANNOT and WILL NOT execute tools, call dispatch, or bypass the gate.
///
/// Routing:
/// VoiceFrame with team_id = Some(id) → routed to VOICE_TEAMS[id] session
/// VoiceFrame with team_id = None → routed to VOICE_SESSION (peer-to-peer)
/// Non-VoiceFrame JSON (acks/status) → logged, silently dropped, no response
/// Unknown/malformed payload → silently dropped, no response
///
/// Called from: mesh.rs stream_router() for StreamType::VoiceAudio (0x03)
pub fn handle_mesh_voice(frame: &crate::framing::Frame, peer_key: &str) -> Option<crate::framing::Frame> {
    // Char-boundary-safe prefix for logging: `&peer_key[..8]` would panic if
    // byte 8 fell inside a multi-byte character. Keys are expected to be hex,
    // but a malicious/corrupt peer key must not be able to panic this handler.
    let peer_short = peer_key.get(..8.min(peer_key.len())).unwrap_or(peer_key);
    // Attempt to deserialize payload as a VoiceFrame (audio data)
    match frame_from_bytes(&frame.payload) {
        Ok(voice_frame) => {
            eprintln!("[SPF-VOICE] Audio frame from {} seq={} codec={} {} bytes{}",
                peer_short, voice_frame.seq, voice_frame.codec, voice_frame.data.len(),
                voice_frame.team_id.as_deref().map(|id| format!(" team={}", id)).unwrap_or_default());
            // Route to the correct audio session based on team_id
            if let Some(ref team_id) = voice_frame.team_id {
                // Team channel frame — route to that team's session.
                // Recover from a poisoned lock: previously `if let Ok(..)` on a
                // poisoned VOICE_TEAMS skipped membership verification entirely
                // and still acked the frame. The registry data stays valid, so
                // take the inner guard and ALWAYS enforce the check.
                let teams = VOICE_TEAMS.lock().unwrap_or_else(|e| e.into_inner());
                match teams.get(team_id) {
                    Some(channel) => {
                        // Verify the sender is an active member of this team
                        let is_member = channel.active_members.iter()
                            .any(|m| m.peer_key == peer_key);
                        if !is_member {
                            // Sender not in active members — reject silently
                            eprintln!("[SPF-VOICE] Rejected team audio from non-member {} team={}",
                                peer_short, team_id);
                            return None;
                        }
                        // Team channels share the global VOICE_SESSION for audio output
                        // (one speaker output, multiple senders in the same channel)
                        let mut lock = VOICE_SESSION.lock().unwrap_or_else(|e| e.into_inner());
                        if let Some(ref mut vs) = *lock {
                            if vs.is_active() {
                                if let Err(e) = vs.play_frame(&voice_frame) {
                                    eprintln!("[SPF-VOICE] Team playback error team={} seq={}: {}",
                                        team_id, voice_frame.seq, e);
                                }
                            }
                        }
                    }
                    None => {
                        // Unknown team — reject silently
                        eprintln!("[SPF-VOICE] Rejected audio for unknown team={} from {}",
                            team_id, peer_short);
                        return None;
                    }
                }
            } else {
                // Peer-to-peer frame — route to global VOICE_SESSION
                // Only play if there is an active call with this specific peer
                let in_active_call = {
                    let cs = CALL_STATE.lock().unwrap_or_else(|e| e.into_inner());
                    cs.as_ref().map_or(false, |c| c.peer_key == peer_key && c.is_active())
                };
                if !in_active_call {
                    // No active call with this peer — reject silently
                    eprintln!("[SPF-VOICE] Rejected audio from {} — no active call", peer_short);
                    return None;
                }
                // Poison-recovering lock, consistent with CALL_STATE above.
                let mut lock = VOICE_SESSION.lock().unwrap_or_else(|e| e.into_inner());
                if let Some(ref mut vs) = *lock {
                    if vs.is_active() {
                        if let Err(e) = vs.play_frame(&voice_frame) {
                            eprintln!("[SPF-VOICE] Peer playback error seq={}: {}", voice_frame.seq, e);
                        }
                    }
                }
            }
            // Send audio acknowledgement
            let ack = serde_json::json!({
                "type": "voice_ack",
                "status": "received",
                "seq": voice_frame.seq,
                "codec": voice_frame.codec,
                "bytes_received": voice_frame.data.len(),
                "voice_available": spf_voice::status().output_available,
            });
            Some(crate::framing::Frame::new(
                crate::framing::StreamType::VoiceAudio,
                serde_json::to_vec(&ack).unwrap_or_default(),
            ))
        }
        Err(_parse_err) => {
            // Payload is not a VoiceFrame — check if it is a known response type
            // (acks and status responses are expected when we send audio to peers)
            // SECURITY: We only READ the type field. No action is taken. No tools executed.
            if let Ok(json_val) = serde_json::from_slice::<serde_json::Value>(&frame.payload) {
                match json_val.get("type").and_then(|t| t.as_str()) {
                    Some("voice_ack") => {
                        // Expected: acknowledgement of audio we sent to this peer
                        let seq = json_val.get("seq").and_then(|s| s.as_u64()).unwrap_or(0);
                        eprintln!("[SPF-VOICE] Ack from {} seq={}", peer_short, seq);
                        // No response needed — drop cleanly
                        None
                    }
                    Some("voice_status") => {
                        // Expected: status response (e.g., peer reports no voice feature)
                        let status = json_val.get("status").and_then(|s| s.as_str()).unwrap_or("unknown");
                        eprintln!("[SPF-VOICE] Status from {}: {}", peer_short, status);
                        // No response needed — drop cleanly
                        None
                    }
                    Some(other_type) => {
                        // Unknown type on VoiceAudio stream — silent drop
                        // SECURITY: unknown control types are always rejected here.
                        // All legitimate control (ring/accept/team/join) travels via ToolRpc+gate.
                        eprintln!("[SPF-VOICE] Rejected unknown type '{}' on VoiceAudio stream from {} — must use ToolRpc for signaling",
                            other_type, peer_short);
                        None
                    }
                    None => {
                        // JSON but no type field — silent drop
                        eprintln!("[SPF-VOICE] Untyped JSON on VoiceAudio stream from {} — dropped",
                            peer_short);
                        None
                    }
                }
            } else {
                // Not JSON, not VoiceFrame — binary garbage or corrupted frame — silent drop
                eprintln!("[SPF-VOICE] Non-JSON non-VoiceFrame payload from {} ({} bytes) — dropped",
                    peer_short, frame.payload.len());
                None
            }
        }
    }
}
// ============================================================================
// TESTS
// ============================================================================
#[cfg(test)]
mod tests {
    use super::*;
    // ---- Config and stub sanity checks -----------------------------------
    #[test]
    fn test_voice_config_defaults() {
        let config = VoiceConfig::default();
        assert_eq!(config.codec, "opus");
        assert_eq!(config.sample_rate, 16000);
        assert_eq!(config.channels, 1);
        assert_eq!(config.frame_duration_ms, 20);
        assert!(config.vad_enabled);
    }
    #[test]
    fn test_pcm_frame_bytes() {
        let config = VoiceConfig::default();
        // 16000 Hz × 1 ch × 2 bytes × 20ms / 1000 = 640 bytes
        assert_eq!(config.pcm_frame_bytes(), 640);
    }
    #[test]
    fn test_stub_audio_input() {
        let mut input = StubAudioInput::new();
        assert!(!input.is_available());
        assert!(input.read_frame().is_err());
    }
    #[test]
    fn test_stub_audio_output() {
        let mut output = StubAudioOutput::new();
        assert!(!output.is_available());
        assert!(output.write_frame(&[0u8; 640]).is_err());
    }
    #[test]
    fn test_stub_stt() {
        let mut stt = StubSTT;
        assert!(!stt.is_available());
        assert!(stt.transcribe(&[]).is_err());
        assert!(stt.supported_languages().is_empty());
    }
    #[test]
    fn test_stub_tts() {
        let mut tts = StubTTS;
        assert!(!tts.is_available());
        assert!(tts.synthesize("hello").is_err());
        assert!(tts.available_voices().is_empty());
    }
    #[test]
    fn test_voice_status_stubs() {
        // NOTE(review): from_stubs() queries real spf_voice::status() — on a
        // machine with working audio hardware these negative assertions may
        // not hold. Confirm expected CI hardware state.
        let status = VoiceStatus::from_stubs();
        assert!(!status.audio_input_available);
        assert!(!status.stt_available);
        assert_eq!(status.codec, "opus");
        let json = status.to_json_value();
        assert_eq!(json["status"], "stubs_only");
    }
    #[test]
    fn test_voice_error_display() {
        let err = VoiceError::NotAvailable("test".into());
        assert_eq!(format!("{}", err), "Not available: test");
        let err = VoiceError::CodecError("bad".into());
        assert_eq!(format!("{}", err), "Codec error: bad");
    }
    #[test]
    fn test_voice_frame_serialize_roundtrip() {
        let frame = VoiceFrame {
            codec: "opus".into(),
            sample_rate: 16000,
            channels: 1,
            frame_duration_ms: 20,
            data: vec![0xAA, 0xBB, 0xCC],
            seq: 42,
            from: "531d83fa".into(),
            timestamp: "2026-02-28T12:00:00Z".into(),
            team_id: None,
        };
        let bytes = frame_to_bytes(&frame).unwrap();
        let loaded = frame_from_bytes(&bytes).unwrap();
        assert_eq!(loaded.codec, "opus");
        assert_eq!(loaded.seq, 42);
        assert_eq!(loaded.data, vec![0xAA, 0xBB, 0xCC]);
        assert_eq!(loaded.from, "531d83fa");
    }
    // ================================================================
    // spf-voice integration tests (replaces EspeakTTS subprocess tests)
    // ================================================================
    #[test]
    fn test_spf_voice_status_no_panic() {
        // spf_voice::status() must not panic regardless of hardware state
        let svs = spf_voice::status();
        // Fields are bool — just verify they exist and are readable
        let _ = svs.tts_available;
        let _ = svs.input_available;
        let _ = svs.output_available;
        let _ = svs.codec_available;
    }
    #[test]
    fn test_voice_session_new() {
        let config = VoiceConfig::default();
        let session = VoiceSession::new(config);
        assert!(!session.is_active());
        assert_eq!(session.frames_sent, 0);
        assert_eq!(session.frames_received, 0);
        assert!(session.input.is_none());
        assert!(session.output.is_none());
        assert!(session.stt.is_none());
        assert!(session.tts.is_none());
    }
    #[test]
    fn test_voice_session_start_stop() {
        let config = VoiceConfig::default();
        let mut session = VoiceSession::new(config);
        // Without voice features, start() uses stubs — always succeeds
        // NOTE(review): start() now requires spf_voice::open() to succeed;
        // confirm this cannot fail on headless CI hosts.
        let result = session.start();
        assert!(result.is_ok());
        assert!(session.is_active());
        // Input/output set to stubs (not available but present)
        assert!(session.input.is_some());
        assert!(session.output.is_some());
        session.stop();
        assert!(!session.is_active());
        assert!(session.input.is_none());
        assert!(session.output.is_none());
        assert!(session.stt.is_none());
        assert!(session.tts.is_none());
        assert_eq!(session.frames_sent, 0);
        assert_eq!(session.frames_received, 0);
    }
    #[test]
    fn test_voice_session_status_after_start() {
        let config = VoiceConfig::default();
        let mut session = VoiceSession::new(config);
        session.start().unwrap();
        let status = session.status();
        // Audio I/O availability depends on spf-voice hardware state — just verify fields exist
        let _ = status.audio_input_available;
        let _ = status.audio_output_available;
        let _ = status.stt_available;
        assert_eq!(status.codec, "opus");
        assert_eq!(status.sample_rate, 16000);
        assert_eq!(status.channels, 1);
        session.stop();
    }
    #[test]
    fn test_voice_session_stats_json() {
        let config = VoiceConfig::default();
        let session = VoiceSession::new(config);
        let stats = session.stats();
        assert_eq!(stats["active"], false);
        assert_eq!(stats["frames_sent"], 0);
        assert_eq!(stats["frames_received"], 0);
        assert!(stats["status"].is_object());
    }
    #[test]
    fn test_voice_session_capture_without_input() {
        let config = VoiceConfig::default();
        let mut session = VoiceSession::new(config);
        // No input initialized (not started) → NotAvailable error
        // NOTE(review): capture_frame maps spf_voice::listen errors to
        // DeviceError — this NotAvailable expectation looks stale; confirm
        // against spf_voice::listen's behavior when the pipeline is closed.
        let result = session.capture_frame("test_identity");
        assert!(result.is_err());
        match result {
            Err(VoiceError::NotAvailable(msg)) => assert!(msg.contains("input")),
            other => panic!("Expected NotAvailable, got: {:?}", other),
        }
    }
    #[test]
    fn test_voice_session_capture_with_stubs() {
        let config = VoiceConfig::default();
        let mut session = VoiceSession::new(config);
        session.start().unwrap();
        // Stub input → read_frame returns NotAvailable
        let result = session.capture_frame("test_identity");
        assert!(result.is_err());
        session.stop();
    }
    #[test]
    fn test_voice_session_play_without_output() {
        let config = VoiceConfig::default();
        let mut session = VoiceSession::new(config);
        let frame = VoiceFrame {
            codec: "pcm".into(),
            sample_rate: 16000,
            channels: 1,
            frame_duration_ms: 20,
            data: vec![0u8; 640],
            seq: 1,
            from: "peer".into(),
            timestamp: "2026-03-01T00:00:00Z".into(),
            team_id: None,
        };
        // No output (not started) → NotAvailable
        // NOTE(review): play_frame delegates to spf_voice::play regardless of
        // the session's output field — verify it errors with the pipeline closed.
        let result = session.play_frame(&frame);
        assert!(result.is_err());
    }
    #[test]
    fn test_voice_session_speak_without_tts() {
        let config = VoiceConfig::default();
        let mut session = VoiceSession::new(config);
        // No TTS initialized → NotAvailable
        // NOTE(review): speak() tries spf_voice::speak first — if in-process
        // espeak-ng succeeds, this returns Ok(vec![]) and the assertion fails.
        let result = session.speak("hello world");
        assert!(result.is_err());
        match result {
            Err(VoiceError::NotAvailable(msg)) => assert!(msg.contains("TTS")),
            other => panic!("Expected NotAvailable, got: {:?}", other),
        }
    }
    #[test]
    fn test_voice_session_transcribe_without_stt() {
        let config = VoiceConfig::default();
        let mut session = VoiceSession::new(config);
        let result = session.transcribe(&[]);
        assert!(result.is_err());
        match result {
            Err(VoiceError::NotAvailable(msg)) => assert!(msg.contains("STT")),
            other => panic!("Expected NotAvailable, got: {:?}", other),
        }
    }
    #[test]
    fn test_voice_status_from_session() {
        // NOTE(review): session.status() queries spf_voice::status() — the
        // negative audio assertions depend on CI hardware state.
        let config = VoiceConfig::default();
        let session = VoiceSession::new(config);
        let status = VoiceStatus::from_session(&session);
        assert!(!status.audio_input_available);
        assert!(!status.audio_output_available);
        assert!(!status.stt_available);
        assert!(!status.tts_available);
        assert_eq!(status.codec, "opus");
    }
    // ---- VoiceStatus mode derivation --------------------------------------
    #[test]
    fn test_voice_status_mode_active() {
        let status = VoiceStatus {
            audio_input_available: true,
            audio_output_available: true,
            stt_available: false,
            tts_available: false,
            pipeline_open: false,
            codec: "opus".into(),
            sample_rate: 16000,
            channels: 1,
        };
        let json = status.to_json_value();
        assert_eq!(json["status"], "active");
        assert_eq!(json["audio_input"], true);
        assert_eq!(json["audio_output"], true);
    }
    #[test]
    fn test_voice_status_mode_tts_only() {
        let status = VoiceStatus {
            audio_input_available: false,
            audio_output_available: false,
            stt_available: false,
            tts_available: true,
            pipeline_open: false,
            codec: "opus".into(),
            sample_rate: 16000,
            channels: 1,
        };
        let json = status.to_json_value();
        assert_eq!(json["status"], "tts_only");
        assert_eq!(json["tts"], true);
    }
    #[test]
    fn test_voice_status_mode_stubs_only() {
        let status = VoiceStatus {
            audio_input_available: false,
            audio_output_available: false,
            stt_available: false,
            tts_available: false,
            pipeline_open: false,
            codec: "opus".into(),
            sample_rate: 16000,
            channels: 1,
        };
        let json = status.to_json_value();
        assert_eq!(json["status"], "stubs_only");
    }
    #[test]
    fn test_voice_status_input_only_counts_as_active() {
        // Even one audio direction = "active" mode
        let status = VoiceStatus {
            audio_input_available: true,
            audio_output_available: false,
            stt_available: false,
            tts_available: false,
            pipeline_open: false,
            codec: "opus".into(),
            sample_rate: 16000,
            channels: 1,
        };
        let json = status.to_json_value();
        assert_eq!(json["status"], "active");
    }
    // ---- Mesh handler -----------------------------------------------------
    #[test]
    fn test_handle_mesh_voice_valid_frame() {
        // NOTE(review): handle_mesh_voice rejects peer-to-peer audio (returns
        // None) when CALL_STATE has no active call with this peer — with a
        // fresh CALL_STATE this ack expectation looks stale; confirm test
        // setup establishes an active call first.
        let frame = VoiceFrame {
            codec: "pcm".into(),
            sample_rate: 16000,
            channels: 1,
            frame_duration_ms: 20,
            data: vec![0x00; 640],
            seq: 7,
            from: "test_peer".into(),
            timestamp: "2026-03-01T00:00:00Z".into(),
            team_id: None,
        };
        let serialized = frame_to_bytes(&frame).unwrap();
        let mesh_frame = crate::framing::Frame::new(
            crate::framing::StreamType::VoiceAudio,
            serialized,
        );
        let response = handle_mesh_voice(&mesh_frame, "abcdef1234567890");
        assert!(response.is_some());
        let resp = response.unwrap();
        let payload: serde_json::Value = serde_json::from_slice(&resp.payload).unwrap();
        assert_eq!(payload["type"], "voice_ack");
        assert_eq!(payload["status"], "received");
        assert_eq!(payload["seq"], 7);
        assert_eq!(payload["codec"], "pcm");
        assert_eq!(payload["bytes_received"], 640);
    }
    #[test]
    fn test_handle_mesh_voice_invalid_payload() {
        let mesh_frame = crate::framing::Frame::new(
            crate::framing::StreamType::VoiceAudio,
            vec![0xFF, 0xFE, 0xFD], // Not valid JSON
        );
        // Non-JSON payload is silently dropped — no response (security: no info disclosure)
        let response = handle_mesh_voice(&mesh_frame, "deadbeef12345678");
        assert!(response.is_none());
    }
    #[test]
    fn test_handle_mesh_voice_empty_payload() {
        let mesh_frame = crate::framing::Frame::new(
            crate::framing::StreamType::VoiceAudio,
            vec![],
        );
        // Empty payload is silently dropped — returns None
        let response = handle_mesh_voice(&mesh_frame, "1234abcd");
        assert!(response.is_none());
    }
    #[test]
    fn test_handle_mesh_voice_short_peer_key() {
        // Peer key shorter than 8 chars — should not panic
        // NOTE(review): same CALL_STATE gating caveat as
        // test_handle_mesh_voice_valid_frame applies to the is_some() below.
        let frame = VoiceFrame {
            codec: "pcm".into(),
            sample_rate: 16000,
            channels: 1,
            frame_duration_ms: 20,
            data: vec![0x01],
            seq: 0,
            from: "short".into(),
            timestamp: "2026-03-01T00:00:00Z".into(),
            team_id: None,
        };
        let serialized = frame_to_bytes(&frame).unwrap();
        let mesh_frame = crate::framing::Frame::new(
            crate::framing::StreamType::VoiceAudio,
            serialized,
        );
        // 4-char key — tests the min() guard in peer_short
        let response = handle_mesh_voice(&mesh_frame, "abcd");
        assert!(response.is_some());
    }
    // ---- Misc config / error / frame coverage -----------------------------
    #[test]
    fn test_voice_config_custom_stereo() {
        let config = VoiceConfig {
            codec: "pcm".into(),
            sample_rate: 48000,
            channels: 2,
            frame_duration_ms: 10,
            bitrate: 64000,
            vad_enabled: false,
        };
        // 48000 × 2 × 2 × 10 / 1000 = 1920 bytes per frame
        assert_eq!(config.pcm_frame_bytes(), 1920);
        assert!(!config.vad_enabled);
    }
    #[test]
    fn test_voice_config_8khz_mono() {
        let config = VoiceConfig {
            codec: "opus".into(),
            sample_rate: 8000,
            channels: 1,
            frame_duration_ms: 20,
            bitrate: 12000,
            vad_enabled: true,
        };
        // 8000 × 1 × 2 × 20 / 1000 = 320 bytes
        assert_eq!(config.pcm_frame_bytes(), 320);
    }
    #[test]
    fn test_voice_error_all_variants() {
        let errors = vec![
            VoiceError::NotAvailable("na".into()),
            VoiceError::CodecError("ce".into()),
            VoiceError::DeviceError("de".into()),
            VoiceError::ConfigError("cfg".into()),
        ];
        let expected = vec![
            "Not available: na",
            "Codec error: ce",
            "Device error: de",
            "Config error: cfg",
        ];
        for (err, exp) in errors.iter().zip(expected.iter()) {
            assert_eq!(format!("{}", err), *exp);
        }
    }
    #[test]
    fn test_frame_from_bytes_error() {
        let result = frame_from_bytes(b"not json");
        assert!(result.is_err());
        let err_msg = result.unwrap_err();
        assert!(err_msg.contains("Deserialize"));
    }
    #[test]
    fn test_voice_frame_clone() {
        let frame = VoiceFrame {
            codec: "opus".into(),
            sample_rate: 16000,
            channels: 1,
            frame_duration_ms: 20,
            data: vec![1, 2, 3],
            seq: 99,
            from: "sender".into(),
            timestamp: "2026-03-01T00:00:00Z".into(),
            team_id: None,
        };
        let cloned = frame.clone();
        assert_eq!(cloned.seq, 99);
        assert_eq!(cloned.data, vec![1, 2, 3]);
        assert_eq!(cloned.from, "sender");
    }
}