// SPF Smart Gateway - Tensor Engine
// Copyright 2026 Joseph Stone - All Rights Reserved
//
// N-dimensional tensor operations for SPF Transformer.
// Pure Rust. No external ML dependencies.
// ARM NEON SIMD acceleration with scalar fallback.
//
// This is the mathematical foundation — all transformer layers build on this.
// Depends on: nothing (Layer 0)
use std::fmt;
// ============================================================================
// TENSOR STRUCT
// ============================================================================
/// N-dimensional tensor with f32 data and shape metadata.
/// Storage is row-major (C-order): last dimension varies fastest.
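///
/// A quick sketch of the layout (not compiled as a doctest):
/// ```ignore
/// // Shape [2, 3]: element (r, c) lives at flat index r * 3 + c.
/// let t = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![2, 3]).unwrap();
/// assert_eq!(t.data[1 * 3 + 2], 6.0); // row 1, col 2
/// ```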
#[derive(Clone)]
pub struct Tensor {
/// Flat data storage
pub data: Vec<f32>,
/// Shape dimensions (e.g., [batch, seq_len, d_model])
pub shape: Vec<usize>,
}
impl Tensor {
/// Create a tensor with given shape, initialized to zeros
pub fn zeros(shape: &[usize]) -> Self {
let size = shape.iter().product::<usize>();
Self {
data: vec![0.0; size],
shape: shape.to_vec(),
}
}
/// Create a tensor with given shape, initialized to ones
pub fn ones(shape: &[usize]) -> Self {
let size = shape.iter().product::<usize>();
Self {
data: vec![1.0; size],
shape: shape.to_vec(),
}
}
/// Create a tensor from raw data and shape
pub fn from_data(data: Vec<f32>, shape: Vec<usize>) -> Result<Self, String> {
let expected = shape.iter().product::<usize>();
if data.len() != expected {
return Err(format!(
"Data length {} doesn't match shape {:?} (expected {})",
data.len(), shape, expected
));
}
Ok(Self { data, shape })
}
/// Create a tensor with random uniform values in [0, 1)
/// Uses simple xorshift for reproducibility without external deps
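    ///
    /// Sketch (not compiled as a doctest):
    /// ```ignore
    /// let a = Tensor::rand(&[4, 4], 7);
    /// let b = Tensor::rand(&[4, 4], 7);
    /// assert_eq!(a.data, b.data); // same seed, same sequence
    /// assert!(a.data.iter().all(|&v| (0.0..1.0).contains(&v)));
    /// ```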
pub fn rand(shape: &[usize], seed: u64) -> Self {
let size = shape.iter().product::<usize>();
let mut data = Vec::with_capacity(size);
        // xorshift64 maps 0 to 0, so remap a zero seed to an arbitrary
        // nonzero constant to keep the stream from collapsing.
        let mut state = if seed == 0 { 0x9E37_79B9_7F4A_7C15 } else { seed };
        for _ in 0..size {
            state = xorshift64(state);
            // Top 24 bits divided by 2^24: exactly representable in f32 and
            // strictly less than 1.0, so the range really is [0, 1).
            data.push((state >> 40) as f32 / (1u64 << 24) as f32);
}
Self {
data,
shape: shape.to_vec(),
}
}
/// Create a tensor with random normal values (mean=0, std=1)
/// Box-Muller transform from uniform random pairs
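    ///
    /// Sketch (not compiled as a doctest): a typical small-std weight init
    /// built from the unit normal.
    /// ```ignore
    /// let w = Tensor::randn(&[256, 64], 1).scale(0.02); // ~N(0, 0.02²) weights
    /// ```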
pub fn randn(shape: &[usize], seed: u64) -> Self {
let size = shape.iter().product::<usize>();
let mut data = Vec::with_capacity(size);
        // Same zero-seed remap as `rand`: xorshift64 is stuck at 0.
        let mut state = if seed == 0 { 0x9E37_79B9_7F4A_7C15 } else { seed };
for i in 0..size {
state = xorshift64(state);
let u1 = (state as f64) / (u64::MAX as f64);
state = xorshift64(state);
let u2 = (state as f64) / (u64::MAX as f64);
// Box-Muller: avoid log(0) by clamping
let u1 = u1.max(1e-10);
let val = if i % 2 == 0 {
(-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos()
} else {
(-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).sin()
};
data.push(val as f32);
}
Self {
data,
shape: shape.to_vec(),
}
}
/// Total number of elements
pub fn numel(&self) -> usize {
self.data.len()
}
/// Number of dimensions
pub fn ndim(&self) -> usize {
self.shape.len()
}
// ========================================================================
// SHAPE OPERATIONS
// ========================================================================
/// Reshape tensor (total elements must match)
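    ///
    /// Sketch (not compiled as a doctest):
    /// ```ignore
    /// let t = Tensor::zeros(&[2, 3, 4]);       // 24 elements
    /// let m = t.reshape(&[6, 4]).unwrap();     // still 24 elements, same data
    /// assert!(t.reshape(&[5, 5]).is_err());    // 24 != 25
    /// ```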
pub fn reshape(&self, new_shape: &[usize]) -> Result<Self, String> {
let new_size: usize = new_shape.iter().product();
if new_size != self.numel() {
return Err(format!(
"Cannot reshape {:?} ({}) to {:?} ({})",
self.shape, self.numel(), new_shape, new_size
));
}
Ok(Self {
data: self.data.clone(),
shape: new_shape.to_vec(),
})
}
/// Transpose a 2D tensor
pub fn transpose_2d(&self) -> Result<Self, String> {
if self.ndim() != 2 {
return Err(format!("transpose_2d requires 2D tensor, got {}D", self.ndim()));
}
let (rows, cols) = (self.shape[0], self.shape[1]);
let mut out = vec![0.0f32; rows * cols];
for r in 0..rows {
for c in 0..cols {
out[c * rows + r] = self.data[r * cols + c];
}
}
Tensor::from_data(out, vec![cols, rows])
}
/// Slice along first dimension: tensor[start..end]
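    ///
    /// Sketch (not compiled as a doctest):
    /// ```ignore
    /// let t = Tensor::zeros(&[10, 64]);       // e.g. 10 rows of 64 features
    /// let tail = t.slice(8, 10).unwrap();     // rows 8 and 9
    /// assert_eq!(tail.shape, vec![2, 64]);
    /// ```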
pub fn slice(&self, start: usize, end: usize) -> Result<Self, String> {
if self.ndim() == 0 {
return Err("Cannot slice scalar tensor".to_string());
}
if start >= end || end > self.shape[0] {
return Err(format!(
"Invalid slice [{}..{}) for dim 0 size {}",
start, end, self.shape[0]
));
}
let inner_size: usize = self.shape[1..].iter().product();
let data = self.data[start * inner_size..end * inner_size].to_vec();
let mut new_shape = self.shape.clone();
new_shape[0] = end - start;
Tensor::from_data(data, new_shape)
}
/// Concatenate tensors along first dimension
pub fn concat(tensors: &[&Tensor]) -> Result<Self, String> {
if tensors.is_empty() {
return Err("Cannot concat empty list".to_string());
}
let base_shape = &tensors[0].shape[1..];
for (i, t) in tensors.iter().enumerate().skip(1) {
if t.shape[1..] != *base_shape {
return Err(format!(
"Shape mismatch at index {}: {:?} vs {:?}",
i, &t.shape[1..], base_shape
));
}
}
let total_first: usize = tensors.iter().map(|t| t.shape[0]).sum();
let mut data = Vec::with_capacity(total_first * base_shape.iter().product::<usize>());
for t in tensors {
data.extend_from_slice(&t.data);
}
let mut new_shape = vec![total_first];
new_shape.extend_from_slice(base_shape);
Tensor::from_data(data, new_shape)
}
// ========================================================================
// MATRIX OPERATIONS
// ========================================================================
/// Matrix multiply: [M, K] × [K, N] → [M, N]
/// Uses SIMD-accelerated inner loop on aarch64, scalar fallback otherwise.
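    ///
    /// Sketch of a linear layer on top of this (hypothetical `x`, `w`, `bias`;
    /// not compiled as a doctest):
    /// ```ignore
    /// // y = xW + b: x [seq, d_in] × w [d_in, d_out]; bias [d_out] is
    /// // broadcast across rows by add().
    /// let y = x.matmul(&w)?.add(&bias)?;
    /// ```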
pub fn matmul(&self, other: &Tensor) -> Result<Self, String> {
if self.ndim() != 2 || other.ndim() != 2 {
return Err(format!(
"matmul requires 2D tensors, got {}D × {}D",
self.ndim(), other.ndim()
));
}
let (m, k1) = (self.shape[0], self.shape[1]);
let (k2, n) = (other.shape[0], other.shape[1]);
if k1 != k2 {
return Err(format!(
"matmul inner dims mismatch: [{}, {}] × [{}, {}]",
m, k1, k2, n
));
}
let mut out = vec![0.0f32; m * n];
matmul_inner(&self.data, &other.data, &mut out, m, k1, n);
Tensor::from_data(out, vec![m, n])
}
/// Batched matrix multiply: [B, M, K] × [B, K, N] → [B, M, N]
/// Supports broadcasting: [B, M, K] × [K, N] broadcasts the right operand.
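    ///
    /// Sketch (not compiled as a doctest):
    /// ```ignore
    /// // Shared projection across the batch: w is 2D and broadcast over B.
    /// let x = Tensor::rand(&[8, 16, 32], 1);  // [B, M, K]
    /// let w = Tensor::rand(&[32, 4], 2);      // [K, N]
    /// assert_eq!(x.bmm(&w).unwrap().shape, vec![8, 16, 4]);
    /// ```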
pub fn bmm(&self, other: &Tensor) -> Result<Self, String> {
if self.ndim() != 3 {
return Err(format!("bmm requires 3D left tensor, got {}D", self.ndim()));
}
let b = self.shape[0];
let m = self.shape[1];
let k = self.shape[2];
let (other_k, n, broadcast) = if other.ndim() == 2 {
(other.shape[0], other.shape[1], true)
} else if other.ndim() == 3 {
if other.shape[0] != b {
return Err(format!("bmm batch mismatch: {} vs {}", b, other.shape[0]));
}
(other.shape[1], other.shape[2], false)
} else {
return Err(format!("bmm requires 2D or 3D right tensor, got {}D", other.ndim()));
};
if k != other_k {
return Err(format!("bmm inner dims mismatch: {} vs {}", k, other_k));
}
let mut out = vec![0.0f32; b * m * n];
let batch_a = m * k;
let batch_b = if broadcast { 0 } else { other_k * n };
let batch_out = m * n;
for bi in 0..b {
let a_off = bi * batch_a;
let b_off = bi * batch_b;
let o_off = bi * batch_out;
matmul_inner(
&self.data[a_off..a_off + batch_a],
&other.data[b_off..b_off + other_k * n],
&mut out[o_off..o_off + batch_out],
m, k, n,
);
}
Tensor::from_data(out, vec![b, m, n])
}
// ========================================================================
// ELEMENT-WISE OPERATIONS
// ========================================================================
/// Element-wise addition (shapes must match or broadcast along last dim)
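    ///
    /// Sketch (not compiled as a doctest): a per-feature bias added to every row.
    /// ```ignore
    /// let x = Tensor::zeros(&[4, 3]);
    /// let bias = Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3]).unwrap();
    /// assert_eq!(x.add(&bias).unwrap().data[..3], [1.0, 2.0, 3.0]);
    /// ```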
pub fn add(&self, other: &Tensor) -> Result<Self, String> {
if self.shape == other.shape {
let data: Vec<f32> = self.data.iter().zip(&other.data).map(|(a, b)| a + b).collect();
return Tensor::from_data(data, self.shape.clone());
}
// Broadcast: other has shape matching last dims of self
if other.numel() == *self.shape.last().unwrap_or(&0) {
let last = *self.shape.last().unwrap();
let data: Vec<f32> = self.data.iter().enumerate()
.map(|(i, &v)| v + other.data[i % last])
.collect();
return Tensor::from_data(data, self.shape.clone());
}
Err(format!("Cannot add shapes {:?} and {:?}", self.shape, other.shape))
}
/// Element-wise subtraction
pub fn sub(&self, other: &Tensor) -> Result<Self, String> {
if self.shape != other.shape {
return Err(format!("Cannot sub shapes {:?} and {:?}", self.shape, other.shape));
}
let data: Vec<f32> = self.data.iter().zip(&other.data).map(|(a, b)| a - b).collect();
Tensor::from_data(data, self.shape.clone())
}
/// Element-wise multiplication (Hadamard product)
pub fn mul(&self, other: &Tensor) -> Result<Self, String> {
if self.shape == other.shape {
let data: Vec<f32> = self.data.iter().zip(&other.data).map(|(a, b)| a * b).collect();
return Tensor::from_data(data, self.shape.clone());
}
// Broadcast along last dim
if other.numel() == *self.shape.last().unwrap_or(&0) {
let last = *self.shape.last().unwrap();
let data: Vec<f32> = self.data.iter().enumerate()
.map(|(i, &v)| v * other.data[i % last])
.collect();
return Tensor::from_data(data, self.shape.clone());
}
Err(format!("Cannot mul shapes {:?} and {:?}", self.shape, other.shape))
}
/// Scalar multiply
pub fn scale(&self, s: f32) -> Self {
Self {
data: self.data.iter().map(|&v| v * s).collect(),
shape: self.shape.clone(),
}
}
/// Scalar add
pub fn add_scalar(&self, s: f32) -> Self {
Self {
data: self.data.iter().map(|&v| v + s).collect(),
shape: self.shape.clone(),
}
}
// ========================================================================
// ACTIVATION FUNCTIONS
// ========================================================================
/// ReLU activation: max(0, x)
pub fn relu(&self) -> Self {
Self {
data: self.data.iter().map(|&v| v.max(0.0)).collect(),
shape: self.shape.clone(),
}
}
/// GELU activation: x × Φ(x) ≈ 0.5x(1 + tanh(√(2/π)(x + 0.044715x³)))
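    ///
    /// Sketch (not compiled as a doctest): GELU(0) = 0 and GELU(1) ≈ 0.8412
    /// under this tanh approximation.
    /// ```ignore
    /// let g = Tensor::from_data(vec![0.0, 1.0], vec![2]).unwrap().gelu();
    /// assert!((g.data[1] - 0.8412).abs() < 1e-3);
    /// ```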
pub fn gelu(&self) -> Self {
let sqrt_2_over_pi = (2.0_f32 / std::f32::consts::PI).sqrt();
Self {
data: self.data.iter().map(|&x| {
let inner = sqrt_2_over_pi * (x + 0.044715 * x * x * x);
0.5 * x * (1.0 + inner.tanh())
}).collect(),
shape: self.shape.clone(),
}
}
/// Softmax along last dimension
/// Numerically stable: subtract max before exp
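    ///
    /// Shift invariance makes this safe: softmax(x) == softmax(x - m) for any
    /// constant m, so m = max(x) keeps every exponent ≤ 0 without changing the
    /// result. Sketch (not compiled as a doctest):
    /// ```ignore
    /// let s = Tensor::from_data(vec![1000.0, 1001.0], vec![2]).unwrap().softmax().unwrap();
    /// assert!(s.data.iter().all(|v| v.is_finite()));
    /// ```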
pub fn softmax(&self) -> Result<Self, String> {
if self.shape.is_empty() {
return Err("Cannot softmax scalar".to_string());
}
let last_dim = *self.shape.last().unwrap();
let num_rows = self.numel() / last_dim;
let mut out = vec![0.0f32; self.numel()];
for row in 0..num_rows {
let offset = row * last_dim;
let slice = &self.data[offset..offset + last_dim];
// Find max for numerical stability
let max_val = slice.iter().cloned().fold(f32::NEG_INFINITY, f32::max);
// Exp and sum
let mut sum = 0.0f32;
for i in 0..last_dim {
let exp_val = (slice[i] - max_val).exp();
out[offset + i] = exp_val;
sum += exp_val;
}
// Normalize
if sum > 0.0 {
for i in 0..last_dim {
out[offset + i] /= sum;
}
}
}
Tensor::from_data(out, self.shape.clone())
}
// ========================================================================
// NORMALIZATION
// ========================================================================
/// Layer normalization along last dimension
/// output = (x - mean) / sqrt(var + eps) * gamma + beta
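    ///
    /// Sketch (not compiled as a doctest): identity affine (gamma = 1, beta = 0)
    /// just standardizes each row; `x` and the feature width `d` are hypothetical.
    /// ```ignore
    /// let y = x.layer_norm(&Tensor::ones(&[d]), &Tensor::zeros(&[d]), 1e-5)?;
    /// ```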
pub fn layer_norm(&self, gamma: &Tensor, beta: &Tensor, eps: f32) -> Result<Self, String> {
let last_dim = *self.shape.last().ok_or("Cannot layer_norm scalar")?;
if gamma.numel() != last_dim || beta.numel() != last_dim {
return Err(format!(
"layer_norm: gamma/beta size {} doesn't match last dim {}",
gamma.numel(), last_dim
));
}
let num_rows = self.numel() / last_dim;
let mut out = vec![0.0f32; self.numel()];
for row in 0..num_rows {
let offset = row * last_dim;
let slice = &self.data[offset..offset + last_dim];
// Compute mean
let mean: f32 = slice.iter().sum::<f32>() / last_dim as f32;
// Compute variance
let var: f32 = slice.iter()
.map(|&x| (x - mean) * (x - mean))
.sum::<f32>() / last_dim as f32;
let inv_std = 1.0 / (var + eps).sqrt();
// Normalize and apply affine
for i in 0..last_dim {
out[offset + i] = (slice[i] - mean) * inv_std * gamma.data[i] + beta.data[i];
}
}
Tensor::from_data(out, self.shape.clone())
}
// ========================================================================
// REDUCTION OPERATIONS
// ========================================================================
/// Sum all elements
pub fn sum(&self) -> f32 {
self.data.iter().sum()
}
/// Mean of all elements
pub fn mean(&self) -> f32 {
if self.data.is_empty() { return 0.0; }
self.sum() / self.numel() as f32
}
/// Max of all elements
pub fn max(&self) -> f32 {
self.data.iter().cloned().fold(f32::NEG_INFINITY, f32::max)
}
/// Argmax along last dimension
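    ///
    /// Sketch (hypothetical `logits`, not compiled as a doctest): greedy decoding
    /// takes the argmax of each row of logits (softmax preserves the argmax, so
    /// it can be skipped here).
    /// ```ignore
    /// let next_ids = logits.argmax(); // one index per row
    /// ```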
pub fn argmax(&self) -> Vec<usize> {
let last_dim = *self.shape.last().unwrap_or(&1);
let num_rows = self.numel() / last_dim;
let mut indices = Vec::with_capacity(num_rows);
for row in 0..num_rows {
let offset = row * last_dim;
let slice = &self.data[offset..offset + last_dim];
let (idx, _) = slice.iter().enumerate()
.fold((0, f32::NEG_INFINITY), |(best_i, best_v), (i, &v)| {
if v > best_v { (i, v) } else { (best_i, best_v) }
});
indices.push(idx);
}
indices
}
}
// ============================================================================
// DISPLAY
// ============================================================================
impl fmt::Debug for Tensor {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Tensor(shape={:?}, numel={})", self.shape, self.numel())
}
}
impl fmt::Display for Tensor {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.numel() <= 20 {
write!(f, "Tensor({:?}, {:?})", self.shape, self.data)
} else {
write!(f, "Tensor({:?}, [{:.4}, {:.4}, ... {:.4}, {:.4}])",
self.shape,
self.data[0], self.data[1],
self.data[self.numel() - 2], self.data[self.numel() - 1])
}
}
}
// ============================================================================
// MATRIX MULTIPLY INNER — SIMD-accelerated on aarch64, scalar fallback
// ============================================================================
/// Core matmul kernel: C[m×n] = A[m×k] × B[k×n]
/// Dispatches to SIMD on aarch64, scalar otherwise.
/// Small matrices (m×n < 16) use scalar even on aarch64 — NEON setup overhead
/// exceeds SIMD benefit for tiny dimensions.
fn matmul_inner(a: &[f32], b: &[f32], c: &mut [f32], m: usize, k: usize, n: usize) {
#[cfg(target_arch = "aarch64")]
{
if m * n < 16 {
matmul_scalar(a, b, c, m, k, n);
} else {
matmul_neon(a, b, c, m, k, n);
}
}
#[cfg(not(target_arch = "aarch64"))]
{
matmul_scalar(a, b, c, m, k, n);
}
}
/// Scalar matmul — portable fallback
fn matmul_scalar(a: &[f32], b: &[f32], c: &mut [f32], m: usize, k: usize, n: usize) {
for i in 0..m {
for j in 0..n {
let mut sum = 0.0f32;
for p in 0..k {
sum += a[i * k + p] * b[p * n + j];
}
c[i * n + j] = sum;
}
}
}
/// ARM NEON SIMD matmul — processes 4 floats at a time
#[cfg(target_arch = "aarch64")]
fn matmul_neon(a: &[f32], b: &[f32], c: &mut [f32], m: usize, k: usize, n: usize) {
use std::arch::aarch64::*;
// Process 4 columns at a time using NEON
for i in 0..m {
let mut j = 0;
// SIMD path: 4 columns at a time
while j + 4 <= n {
let mut acc = unsafe { vdupq_n_f32(0.0) };
for p in 0..k {
let a_val = a[i * k + p];
let a_vec = unsafe { vdupq_n_f32(a_val) };
let b_vec = unsafe { vld1q_f32(b.as_ptr().add(p * n + j)) };
acc = unsafe { vfmaq_f32(acc, a_vec, b_vec) };
}
unsafe {
vst1q_f32(c.as_mut_ptr().add(i * n + j), acc);
}
j += 4;
}
// Scalar remainder for columns not divisible by 4
while j < n {
let mut sum = 0.0f32;
for p in 0..k {
sum += a[i * k + p] * b[p * n + j];
}
c[i * n + j] = sum;
j += 1;
}
}
}
// ============================================================================
// UTILITIES
// ============================================================================
/// xorshift64 PRNG — fast, deterministic, no dependencies.
/// Zero is a fixed point (xorshift64(0) == 0), so callers must never pass a
/// zero state; `rand` and `randn` remap a zero seed before using it.
fn xorshift64(mut state: u64) -> u64 {
state ^= state << 13;
state ^= state >> 7;
state ^= state << 17;
state
}
// ============================================================================
// TESTS
// ============================================================================
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_zeros_and_ones() {
let z = Tensor::zeros(&[2, 3]);
assert_eq!(z.numel(), 6);
assert_eq!(z.shape, vec![2, 3]);
assert!(z.data.iter().all(|&v| v == 0.0));
let o = Tensor::ones(&[3, 4]);
assert_eq!(o.numel(), 12);
assert!(o.data.iter().all(|&v| v == 1.0));
}
#[test]
fn test_from_data_validation() {
assert!(Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3]).is_ok());
assert!(Tensor::from_data(vec![1.0, 2.0], vec![3]).is_err());
}
#[test]
fn test_reshape() {
let t = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![2, 3]).unwrap();
let r = t.reshape(&[3, 2]).unwrap();
assert_eq!(r.shape, vec![3, 2]);
assert_eq!(r.data, t.data);
assert!(t.reshape(&[4, 2]).is_err());
}
#[test]
fn test_transpose_2d() {
let t = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![2, 3]).unwrap();
let tr = t.transpose_2d().unwrap();
assert_eq!(tr.shape, vec![3, 2]);
assert_eq!(tr.data, vec![1.0, 4.0, 2.0, 5.0, 3.0, 6.0]);
}
#[test]
fn test_slice() {
let t = Tensor::from_data(
vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![3, 2]
).unwrap();
let s = t.slice(1, 3).unwrap();
assert_eq!(s.shape, vec![2, 2]);
assert_eq!(s.data, vec![3.0, 4.0, 5.0, 6.0]);
}
#[test]
fn test_concat() {
let a = Tensor::from_data(vec![1.0, 2.0], vec![1, 2]).unwrap();
let b = Tensor::from_data(vec![3.0, 4.0, 5.0, 6.0], vec![2, 2]).unwrap();
let c = Tensor::concat(&[&a, &b]).unwrap();
assert_eq!(c.shape, vec![3, 2]);
assert_eq!(c.data, vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
}
#[test]
fn test_matmul_identity() {
// [2,2] × [2,2] identity = same matrix
let a = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0], vec![2, 2]).unwrap();
let eye = Tensor::from_data(vec![1.0, 0.0, 0.0, 1.0], vec![2, 2]).unwrap();
let c = a.matmul(&eye).unwrap();
assert_eq!(c.data, vec![1.0, 2.0, 3.0, 4.0]);
}
#[test]
fn test_matmul_known() {
// [1,3] × [3,1] = [1,1] = dot product
let a = Tensor::from_data(vec![1.0, 2.0, 3.0], vec![1, 3]).unwrap();
let b = Tensor::from_data(vec![4.0, 5.0, 6.0], vec![3, 1]).unwrap();
let c = a.matmul(&b).unwrap();
assert_eq!(c.shape, vec![1, 1]);
assert!((c.data[0] - 32.0).abs() < 1e-5); // 1×4 + 2×5 + 3×6 = 32
}
#[test]
fn test_matmul_dimension_mismatch() {
let a = Tensor::zeros(&[2, 3]);
let b = Tensor::zeros(&[4, 5]);
assert!(a.matmul(&b).is_err());
}
#[test]
fn test_bmm() {
// Batch of 2: [2, 1, 2] × [2, 2, 1] = [2, 1, 1]
let a = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0], vec![2, 1, 2]).unwrap();
let b = Tensor::from_data(vec![5.0, 6.0, 7.0, 8.0], vec![2, 2, 1]).unwrap();
let c = a.bmm(&b).unwrap();
assert_eq!(c.shape, vec![2, 1, 1]);
assert!((c.data[0] - 17.0).abs() < 1e-5); // 1×5 + 2×6 = 17
assert!((c.data[1] - 53.0).abs() < 1e-5); // 3×7 + 4×8 = 53
}
#[test]
fn test_add_same_shape() {
let a = Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3]).unwrap();
let b = Tensor::from_data(vec![10.0, 20.0, 30.0], vec![3]).unwrap();
let c = a.add(&b).unwrap();
assert_eq!(c.data, vec![11.0, 22.0, 33.0]);
}
#[test]
fn test_add_broadcast() {
let a = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![2, 3]).unwrap();
let bias = Tensor::from_data(vec![10.0, 20.0, 30.0], vec![3]).unwrap();
let c = a.add(&bias).unwrap();
assert_eq!(c.data, vec![11.0, 22.0, 33.0, 14.0, 25.0, 36.0]);
}
#[test]
fn test_scale() {
let a = Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3]).unwrap();
let b = a.scale(2.0);
assert_eq!(b.data, vec![2.0, 4.0, 6.0]);
}
#[test]
fn test_relu() {
let a = Tensor::from_data(vec![-2.0, -1.0, 0.0, 1.0, 2.0], vec![5]).unwrap();
let b = a.relu();
assert_eq!(b.data, vec![0.0, 0.0, 0.0, 1.0, 2.0]);
}
#[test]
fn test_gelu() {
let a = Tensor::from_data(vec![0.0, 1.0, -1.0], vec![3]).unwrap();
let b = a.gelu();
assert!((b.data[0] - 0.0).abs() < 1e-4); // GELU(0) ≈ 0
assert!((b.data[1] - 0.8412).abs() < 1e-3); // GELU(1) ≈ 0.8412
assert!((b.data[2] - (-0.1588)).abs() < 1e-3); // GELU(-1) ≈ -0.1588
}
#[test]
fn test_softmax_sum_to_one() {
let a = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0], vec![2, 2]).unwrap();
let s = a.softmax().unwrap();
// Each row should sum to 1.0
assert!((s.data[0] + s.data[1] - 1.0).abs() < 1e-5);
assert!((s.data[2] + s.data[3] - 1.0).abs() < 1e-5);
// All values non-negative
assert!(s.data.iter().all(|&v| v >= 0.0));
}
#[test]
fn test_softmax_large_values() {
// Numerical stability: large values shouldn't overflow
let a = Tensor::from_data(vec![1000.0, 1001.0, 1002.0], vec![3]).unwrap();
let s = a.softmax().unwrap();
assert!((s.sum() - 1.0).abs() < 1e-5);
assert!(s.data.iter().all(|v| v.is_finite()));
}
#[test]
fn test_layer_norm() {
let x = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0], vec![2, 2]).unwrap();
let gamma = Tensor::ones(&[2]);
let beta = Tensor::zeros(&[2]);
let out = x.layer_norm(&gamma, &beta, 1e-5).unwrap();
// Each row should be normalized (mean ≈ 0, std ≈ 1)
let row1_mean = (out.data[0] + out.data[1]) / 2.0;
assert!(row1_mean.abs() < 1e-5);
}
#[test]
fn test_argmax() {
let a = Tensor::from_data(vec![0.1, 0.9, 0.5, 0.8, 0.2, 0.7], vec![2, 3]).unwrap();
let idx = a.argmax();
assert_eq!(idx, vec![1, 0]); // max at index 1 in row 0, index 0 in row 1
}
#[test]
fn test_rand_deterministic() {
let a = Tensor::rand(&[100], 42);
let b = Tensor::rand(&[100], 42);
assert_eq!(a.data, b.data); // Same seed = same values
}
#[test]
fn test_randn_distribution() {
let t = Tensor::randn(&[10000], 42);
let mean = t.mean();
let var: f32 = t.data.iter().map(|&x| (x - mean) * (x - mean)).sum::<f32>() / t.numel() as f32;
assert!(mean.abs() < 0.1); // Mean ≈ 0
assert!((var - 1.0).abs() < 0.15); // Variance ≈ 1
}
#[cfg(target_arch = "aarch64")]
#[test]
fn test_simd_matches_scalar() {
// Verify NEON produces same results as scalar
let a = Tensor::rand(&[8, 16], 42);
let b = Tensor::rand(&[16, 12], 43);
let mut c_simd = vec![0.0f32; 8 * 12];
let mut c_scalar = vec![0.0f32; 8 * 12];
matmul_neon(&a.data, &b.data, &mut c_simd, 8, 16, 12);
matmul_scalar(&a.data, &b.data, &mut c_scalar, 8, 16, 12);
for i in 0..c_simd.len() {
assert!(
(c_simd[i] - c_scalar[i]).abs() < 1e-3,
"Mismatch at {}: SIMD={}, scalar={}", i, c_simd[i], c_scalar[i]
);
}
}
}