| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| use std::fmt; |
|
|
| |
| |
| |
|
|
| |
| |
/// A dense tensor of `f32` values stored flat in row-major (C) order.
///
/// An empty `shape` denotes a scalar holding a single element.
#[derive(Clone)]
pub struct Tensor {
    /// Flat element storage; length always equals the product of `shape`.
    pub data: Vec<f32>,
    /// Extent of each dimension, outermost first.
    pub shape: Vec<usize>,
}
|
|
| impl Tensor { |
| |
| pub fn zeros(shape: &[usize]) -> Self { |
| let size = shape.iter().product::<usize>(); |
| Self { |
| data: vec![0.0; size], |
| shape: shape.to_vec(), |
| } |
| } |
|
|
| |
| pub fn ones(shape: &[usize]) -> Self { |
| let size = shape.iter().product::<usize>(); |
| Self { |
| data: vec![1.0; size], |
| shape: shape.to_vec(), |
| } |
| } |
|
|
| |
| pub fn from_data(data: Vec<f32>, shape: Vec<usize>) -> Result<Self, String> { |
| let expected = shape.iter().product::<usize>(); |
| if data.len() != expected { |
| return Err(format!( |
| "Data length {} doesn't match shape {:?} (expected {})", |
| data.len(), shape, expected |
| )); |
| } |
| Ok(Self { data, shape }) |
| } |
|
|
| |
| |
| pub fn rand(shape: &[usize], seed: u64) -> Self { |
| let size = shape.iter().product::<usize>(); |
| let mut data = Vec::with_capacity(size); |
| let mut state = seed; |
| for _ in 0..size { |
| state = xorshift64(state); |
| data.push((state as f32) / (u64::MAX as f32)); |
| } |
| Self { |
| data, |
| shape: shape.to_vec(), |
| } |
| } |
|
|
| |
| |
| pub fn randn(shape: &[usize], seed: u64) -> Self { |
| let size = shape.iter().product::<usize>(); |
| let mut data = Vec::with_capacity(size); |
| let mut state = seed; |
| for i in 0..size { |
| state = xorshift64(state); |
| let u1 = (state as f64) / (u64::MAX as f64); |
| state = xorshift64(state); |
| let u2 = (state as f64) / (u64::MAX as f64); |
| |
| let u1 = u1.max(1e-10); |
| let val = if i % 2 == 0 { |
| (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos() |
| } else { |
| (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).sin() |
| }; |
| data.push(val as f32); |
| } |
| Self { |
| data, |
| shape: shape.to_vec(), |
| } |
| } |
|
|
| |
| pub fn numel(&self) -> usize { |
| self.data.len() |
| } |
|
|
| |
| pub fn ndim(&self) -> usize { |
| self.shape.len() |
| } |
|
|
| |
| |
| |
|
|
| |
| pub fn reshape(&self, new_shape: &[usize]) -> Result<Self, String> { |
| let new_size: usize = new_shape.iter().product(); |
| if new_size != self.numel() { |
| return Err(format!( |
| "Cannot reshape {:?} ({}) to {:?} ({})", |
| self.shape, self.numel(), new_shape, new_size |
| )); |
| } |
| Ok(Self { |
| data: self.data.clone(), |
| shape: new_shape.to_vec(), |
| }) |
| } |
|
|
| |
| pub fn transpose_2d(&self) -> Result<Self, String> { |
| if self.ndim() != 2 { |
| return Err(format!("transpose_2d requires 2D tensor, got {}D", self.ndim())); |
| } |
| let (rows, cols) = (self.shape[0], self.shape[1]); |
| let mut out = vec![0.0f32; rows * cols]; |
| for r in 0..rows { |
| for c in 0..cols { |
| out[c * rows + r] = self.data[r * cols + c]; |
| } |
| } |
| Tensor::from_data(out, vec![cols, rows]) |
| } |
|
|
| |
| pub fn slice(&self, start: usize, end: usize) -> Result<Self, String> { |
| if self.ndim() == 0 { |
| return Err("Cannot slice scalar tensor".to_string()); |
| } |
| if start >= end || end > self.shape[0] { |
| return Err(format!( |
| "Invalid slice [{}..{}) for dim 0 size {}", |
| start, end, self.shape[0] |
| )); |
| } |
| let inner_size: usize = self.shape[1..].iter().product(); |
| let data = self.data[start * inner_size..end * inner_size].to_vec(); |
| let mut new_shape = self.shape.clone(); |
| new_shape[0] = end - start; |
| Tensor::from_data(data, new_shape) |
| } |
|
|
| |
| pub fn concat(tensors: &[&Tensor]) -> Result<Self, String> { |
| if tensors.is_empty() { |
| return Err("Cannot concat empty list".to_string()); |
| } |
| let base_shape = &tensors[0].shape[1..]; |
| for (i, t) in tensors.iter().enumerate().skip(1) { |
| if t.shape[1..] != *base_shape { |
| return Err(format!( |
| "Shape mismatch at index {}: {:?} vs {:?}", |
| i, &t.shape[1..], base_shape |
| )); |
| } |
| } |
| let total_first: usize = tensors.iter().map(|t| t.shape[0]).sum(); |
| let mut data = Vec::with_capacity(total_first * base_shape.iter().product::<usize>()); |
| for t in tensors { |
| data.extend_from_slice(&t.data); |
| } |
| let mut new_shape = vec![total_first]; |
| new_shape.extend_from_slice(base_shape); |
| Tensor::from_data(data, new_shape) |
| } |
|
|
| |
| |
| |
|
|
| |
| |
| pub fn matmul(&self, other: &Tensor) -> Result<Self, String> { |
| if self.ndim() != 2 || other.ndim() != 2 { |
| return Err(format!( |
| "matmul requires 2D tensors, got {}D × {}D", |
| self.ndim(), other.ndim() |
| )); |
| } |
| let (m, k1) = (self.shape[0], self.shape[1]); |
| let (k2, n) = (other.shape[0], other.shape[1]); |
| if k1 != k2 { |
| return Err(format!( |
| "matmul inner dims mismatch: [{}, {}] × [{}, {}]", |
| m, k1, k2, n |
| )); |
| } |
| let mut out = vec![0.0f32; m * n]; |
| matmul_inner(&self.data, &other.data, &mut out, m, k1, n); |
| Tensor::from_data(out, vec![m, n]) |
| } |
|
|
| |
| |
| pub fn bmm(&self, other: &Tensor) -> Result<Self, String> { |
| if self.ndim() != 3 { |
| return Err(format!("bmm requires 3D left tensor, got {}D", self.ndim())); |
| } |
| let b = self.shape[0]; |
| let m = self.shape[1]; |
| let k = self.shape[2]; |
|
|
| let (other_k, n, broadcast) = if other.ndim() == 2 { |
| (other.shape[0], other.shape[1], true) |
| } else if other.ndim() == 3 { |
| if other.shape[0] != b { |
| return Err(format!("bmm batch mismatch: {} vs {}", b, other.shape[0])); |
| } |
| (other.shape[1], other.shape[2], false) |
| } else { |
| return Err(format!("bmm requires 2D or 3D right tensor, got {}D", other.ndim())); |
| }; |
|
|
| if k != other_k { |
| return Err(format!("bmm inner dims mismatch: {} vs {}", k, other_k)); |
| } |
|
|
| let mut out = vec![0.0f32; b * m * n]; |
| let batch_a = m * k; |
| let batch_b = if broadcast { 0 } else { other_k * n }; |
| let batch_out = m * n; |
|
|
| for bi in 0..b { |
| let a_off = bi * batch_a; |
| let b_off = bi * batch_b; |
| let o_off = bi * batch_out; |
| matmul_inner( |
| &self.data[a_off..a_off + batch_a], |
| &other.data[b_off..b_off + other_k * n], |
| &mut out[o_off..o_off + batch_out], |
| m, k, n, |
| ); |
| } |
| Tensor::from_data(out, vec![b, m, n]) |
| } |
|
|
| |
| |
| |
|
|
| |
| pub fn add(&self, other: &Tensor) -> Result<Self, String> { |
| if self.shape == other.shape { |
| let data: Vec<f32> = self.data.iter().zip(&other.data).map(|(a, b)| a + b).collect(); |
| return Tensor::from_data(data, self.shape.clone()); |
| } |
| |
| if other.numel() == *self.shape.last().unwrap_or(&0) { |
| let last = *self.shape.last().unwrap(); |
| let data: Vec<f32> = self.data.iter().enumerate() |
| .map(|(i, &v)| v + other.data[i % last]) |
| .collect(); |
| return Tensor::from_data(data, self.shape.clone()); |
| } |
| Err(format!("Cannot add shapes {:?} and {:?}", self.shape, other.shape)) |
| } |
|
|
| |
| pub fn sub(&self, other: &Tensor) -> Result<Self, String> { |
| if self.shape != other.shape { |
| return Err(format!("Cannot sub shapes {:?} and {:?}", self.shape, other.shape)); |
| } |
| let data: Vec<f32> = self.data.iter().zip(&other.data).map(|(a, b)| a - b).collect(); |
| Tensor::from_data(data, self.shape.clone()) |
| } |
|
|
| |
| pub fn mul(&self, other: &Tensor) -> Result<Self, String> { |
| if self.shape == other.shape { |
| let data: Vec<f32> = self.data.iter().zip(&other.data).map(|(a, b)| a * b).collect(); |
| return Tensor::from_data(data, self.shape.clone()); |
| } |
| |
| if other.numel() == *self.shape.last().unwrap_or(&0) { |
| let last = *self.shape.last().unwrap(); |
| let data: Vec<f32> = self.data.iter().enumerate() |
| .map(|(i, &v)| v * other.data[i % last]) |
| .collect(); |
| return Tensor::from_data(data, self.shape.clone()); |
| } |
| Err(format!("Cannot mul shapes {:?} and {:?}", self.shape, other.shape)) |
| } |
|
|
| |
| pub fn scale(&self, s: f32) -> Self { |
| Self { |
| data: self.data.iter().map(|&v| v * s).collect(), |
| shape: self.shape.clone(), |
| } |
| } |
|
|
| |
| pub fn add_scalar(&self, s: f32) -> Self { |
| Self { |
| data: self.data.iter().map(|&v| v + s).collect(), |
| shape: self.shape.clone(), |
| } |
| } |
|
|
| |
| |
| |
|
|
| |
| pub fn relu(&self) -> Self { |
| Self { |
| data: self.data.iter().map(|&v| v.max(0.0)).collect(), |
| shape: self.shape.clone(), |
| } |
| } |
|
|
| |
| pub fn gelu(&self) -> Self { |
| let sqrt_2_over_pi = (2.0_f32 / std::f32::consts::PI).sqrt(); |
| Self { |
| data: self.data.iter().map(|&x| { |
| let inner = sqrt_2_over_pi * (x + 0.044715 * x * x * x); |
| 0.5 * x * (1.0 + inner.tanh()) |
| }).collect(), |
| shape: self.shape.clone(), |
| } |
| } |
|
|
| |
| |
| pub fn softmax(&self) -> Result<Self, String> { |
| if self.shape.is_empty() { |
| return Err("Cannot softmax scalar".to_string()); |
| } |
| let last_dim = *self.shape.last().unwrap(); |
| let num_rows = self.numel() / last_dim; |
| let mut out = vec![0.0f32; self.numel()]; |
|
|
| for row in 0..num_rows { |
| let offset = row * last_dim; |
| let slice = &self.data[offset..offset + last_dim]; |
|
|
| |
| let max_val = slice.iter().cloned().fold(f32::NEG_INFINITY, f32::max); |
|
|
| |
| let mut sum = 0.0f32; |
| for i in 0..last_dim { |
| let exp_val = (slice[i] - max_val).exp(); |
| out[offset + i] = exp_val; |
| sum += exp_val; |
| } |
|
|
| |
| if sum > 0.0 { |
| for i in 0..last_dim { |
| out[offset + i] /= sum; |
| } |
| } |
| } |
| Tensor::from_data(out, self.shape.clone()) |
| } |
|
|
| |
| |
| |
|
|
| |
| |
| pub fn layer_norm(&self, gamma: &Tensor, beta: &Tensor, eps: f32) -> Result<Self, String> { |
| let last_dim = *self.shape.last().ok_or("Cannot layer_norm scalar")?; |
| if gamma.numel() != last_dim || beta.numel() != last_dim { |
| return Err(format!( |
| "layer_norm: gamma/beta size {} doesn't match last dim {}", |
| gamma.numel(), last_dim |
| )); |
| } |
| let num_rows = self.numel() / last_dim; |
| let mut out = vec![0.0f32; self.numel()]; |
|
|
| for row in 0..num_rows { |
| let offset = row * last_dim; |
| let slice = &self.data[offset..offset + last_dim]; |
|
|
| |
| let mean: f32 = slice.iter().sum::<f32>() / last_dim as f32; |
|
|
| |
| let var: f32 = slice.iter() |
| .map(|&x| (x - mean) * (x - mean)) |
| .sum::<f32>() / last_dim as f32; |
|
|
| let inv_std = 1.0 / (var + eps).sqrt(); |
|
|
| |
| for i in 0..last_dim { |
| out[offset + i] = (slice[i] - mean) * inv_std * gamma.data[i] + beta.data[i]; |
| } |
| } |
| Tensor::from_data(out, self.shape.clone()) |
| } |
|
|
| |
| |
| |
|
|
| |
| pub fn sum(&self) -> f32 { |
| self.data.iter().sum() |
| } |
|
|
| |
| pub fn mean(&self) -> f32 { |
| if self.data.is_empty() { return 0.0; } |
| self.sum() / self.numel() as f32 |
| } |
|
|
| |
| pub fn max(&self) -> f32 { |
| self.data.iter().cloned().fold(f32::NEG_INFINITY, f32::max) |
| } |
|
|
| |
| pub fn argmax(&self) -> Vec<usize> { |
| let last_dim = *self.shape.last().unwrap_or(&1); |
| let num_rows = self.numel() / last_dim; |
| let mut indices = Vec::with_capacity(num_rows); |
| for row in 0..num_rows { |
| let offset = row * last_dim; |
| let slice = &self.data[offset..offset + last_dim]; |
| let (idx, _) = slice.iter().enumerate() |
| .fold((0, f32::NEG_INFINITY), |(best_i, best_v), (i, &v)| { |
| if v > best_v { (i, v) } else { (best_i, best_v) } |
| }); |
| indices.push(idx); |
| } |
| indices |
| } |
| } |
|
|
| |
| |
| |
|
|
| impl fmt::Debug for Tensor { |
| fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
| write!(f, "Tensor(shape={:?}, numel={})", self.shape, self.numel()) |
| } |
| } |
|
|
| impl fmt::Display for Tensor { |
| fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
| if self.numel() <= 20 { |
| write!(f, "Tensor({:?}, {:?})", self.shape, self.data) |
| } else { |
| write!(f, "Tensor({:?}, [{:.4}, {:.4}, ... {:.4}, {:.4}])", |
| self.shape, |
| self.data[0], self.data[1], |
| self.data[self.numel() - 2], self.data[self.numel() - 1]) |
| } |
| } |
| } |
|
|
| |
| |
| |
|
|
| |
| |
| |
| |
| fn matmul_inner(a: &[f32], b: &[f32], c: &mut [f32], m: usize, k: usize, n: usize) { |
| #[cfg(target_arch = "aarch64")] |
| { |
| if m * n < 16 { |
| matmul_scalar(a, b, c, m, k, n); |
| } else { |
| matmul_neon(a, b, c, m, k, n); |
| } |
| } |
| #[cfg(not(target_arch = "aarch64"))] |
| { |
| matmul_scalar(a, b, c, m, k, n); |
| } |
| } |
|
|
| |
/// Portable scalar matmul: `C[m×n] = A[m×k] · B[k×n]`, all row-major.
///
/// Rewritten from the naive i-j-k index loop to the cache-friendly i-k-j
/// order using `chunks_exact`/`zip`: the inner loop streams contiguously
/// over a row of B and a row of C, carries no per-element bounds checks,
/// and can auto-vectorize. Results are identical (each C element is the
/// same sum of products, accumulated in a different order — exact for
/// these f32 products in practice since both orders sum the same terms).
fn matmul_scalar(a: &[f32], b: &[f32], c: &mut [f32], m: usize, k: usize, n: usize) {
    debug_assert!(a.len() >= m * k && b.len() >= k * n && c.len() >= m * n);
    if m == 0 || n == 0 {
        return;
    }
    if k == 0 {
        // Empty inner dimension: every dot product is the empty sum.
        c[..m * n].fill(0.0);
        return;
    }
    for (a_row, c_row) in a.chunks_exact(k).zip(c.chunks_exact_mut(n)).take(m) {
        c_row.fill(0.0);
        for (&a_val, b_row) in a_row.iter().zip(b.chunks_exact(n)) {
            for (c_val, &b_val) in c_row.iter_mut().zip(b_row) {
                *c_val += a_val * b_val;
            }
        }
    }
}
|
|
| |
/// NEON-accelerated matmul for aarch64: `C[m×n] = A[m×k] · B[k×n]`,
/// row-major. Processes 4 output columns per 128-bit accumulator, with a
/// scalar fallback loop for the `n % 4` tail columns.
/// Assumes `a.len() >= m*k`, `b.len() >= k*n`, `c.len() >= m*n` —
/// TODO confirm all callers uphold this (the safe slice indexing below
/// only checks `a`).
#[cfg(target_arch = "aarch64")]
fn matmul_neon(a: &[f32], b: &[f32], c: &mut [f32], m: usize, k: usize, n: usize) {
    use std::arch::aarch64::*;

    for i in 0..m {
        let mut j = 0;
        // Vectorized main loop: compute c[i, j..j+4] in one accumulator.
        while j + 4 <= n {
            let mut acc = unsafe { vdupq_n_f32(0.0) };
            for p in 0..k {
                let a_val = a[i * k + p];
                // Broadcast a[i, p] across all four lanes.
                let a_vec = unsafe { vdupq_n_f32(a_val) };
                // SAFETY: j + 4 <= n and p < k, so the 4 contiguous floats
                // read at b[p*n + j ..] stay within the k*n matrix.
                let b_vec = unsafe { vld1q_f32(b.as_ptr().add(p * n + j)) };
                // Fused multiply-add: acc += a_vec * b_vec per lane.
                acc = unsafe { vfmaq_f32(acc, a_vec, b_vec) };
            }
            // SAFETY: i < m and j + 4 <= n, so the 4-float store at
            // c[i*n + j ..] stays within the m*n output buffer.
            unsafe {
                vst1q_f32(c.as_mut_ptr().add(i * n + j), acc);
            }
            j += 4;
        }
        // Scalar tail: remaining columns when n is not a multiple of 4.
        while j < n {
            let mut sum = 0.0f32;
            for p in 0..k {
                sum += a[i * k + p] * b[p * n + j];
            }
            c[i * n + j] = sum;
            j += 1;
        }
    }
}
|
|
| |
| |
| |
|
|
| |
/// One step of the xorshift64 PRNG (Marsaglia's 13/7/17 shift triple).
///
/// FIX: 0 is the fixed point of every xorshift generator — with the
/// original code, seeding `Tensor::rand`/`randn` with 0 produced a
/// degenerate all-zero state stream forever. Remap a zero state to an
/// arbitrary nonzero constant (the 64-bit golden-ratio value) so seed 0
/// still yields a usable sequence. Nonzero seeds are unaffected.
fn xorshift64(state: u64) -> u64 {
    let mut s = if state == 0 { 0x9E3779B97F4A7C15 } else { state };
    s ^= s << 13;
    s ^= s >> 7;
    s ^= s << 17;
    s
}
|
|
| |
| |
| |
|
|
/// Unit tests covering construction, shape ops, matmul kernels,
/// element-wise math, activations, normalization, and the PRNG.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_zeros_and_ones() {
        let z = Tensor::zeros(&[2, 3]);
        assert_eq!(z.numel(), 6);
        assert_eq!(z.shape, vec![2, 3]);
        assert!(z.data.iter().all(|&v| v == 0.0));

        let o = Tensor::ones(&[3, 4]);
        assert_eq!(o.numel(), 12);
        assert!(o.data.iter().all(|&v| v == 1.0));
    }

    #[test]
    fn test_from_data_validation() {
        assert!(Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3]).is_ok());
        assert!(Tensor::from_data(vec![1.0, 2.0], vec![3]).is_err());
    }

    #[test]
    fn test_reshape() {
        let t = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![2, 3]).unwrap();
        let r = t.reshape(&[3, 2]).unwrap();
        assert_eq!(r.shape, vec![3, 2]);
        // Reshape reinterprets the same flat data; element order is kept.
        assert_eq!(r.data, t.data);
        assert!(t.reshape(&[4, 2]).is_err());
    }

    #[test]
    fn test_transpose_2d() {
        let t = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![2, 3]).unwrap();
        let tr = t.transpose_2d().unwrap();
        assert_eq!(tr.shape, vec![3, 2]);
        assert_eq!(tr.data, vec![1.0, 4.0, 2.0, 5.0, 3.0, 6.0]);
    }

    #[test]
    fn test_slice() {
        let t = Tensor::from_data(
            vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![3, 2]
        ).unwrap();
        // Rows 1 and 2 of a [3, 2] tensor.
        let s = t.slice(1, 3).unwrap();
        assert_eq!(s.shape, vec![2, 2]);
        assert_eq!(s.data, vec![3.0, 4.0, 5.0, 6.0]);
    }

    #[test]
    fn test_concat() {
        let a = Tensor::from_data(vec![1.0, 2.0], vec![1, 2]).unwrap();
        let b = Tensor::from_data(vec![3.0, 4.0, 5.0, 6.0], vec![2, 2]).unwrap();
        let c = Tensor::concat(&[&a, &b]).unwrap();
        assert_eq!(c.shape, vec![3, 2]);
        assert_eq!(c.data, vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
    }

    #[test]
    fn test_matmul_identity() {
        // Multiplying by the 2x2 identity must leave A unchanged.
        let a = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0], vec![2, 2]).unwrap();
        let eye = Tensor::from_data(vec![1.0, 0.0, 0.0, 1.0], vec![2, 2]).unwrap();
        let c = a.matmul(&eye).unwrap();
        assert_eq!(c.data, vec![1.0, 2.0, 3.0, 4.0]);
    }

    #[test]
    fn test_matmul_known() {
        // Row · column dot product: 1*4 + 2*5 + 3*6 = 32.
        let a = Tensor::from_data(vec![1.0, 2.0, 3.0], vec![1, 3]).unwrap();
        let b = Tensor::from_data(vec![4.0, 5.0, 6.0], vec![3, 1]).unwrap();
        let c = a.matmul(&b).unwrap();
        assert_eq!(c.shape, vec![1, 1]);
        assert!((c.data[0] - 32.0).abs() < 1e-5);
    }

    #[test]
    fn test_matmul_dimension_mismatch() {
        let a = Tensor::zeros(&[2, 3]);
        let b = Tensor::zeros(&[4, 5]);
        assert!(a.matmul(&b).is_err());
    }

    #[test]
    fn test_bmm() {
        // Batch 0: [1,2]·[5,6] = 17; batch 1: [3,4]·[7,8] = 53.
        let a = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0], vec![2, 1, 2]).unwrap();
        let b = Tensor::from_data(vec![5.0, 6.0, 7.0, 8.0], vec![2, 2, 1]).unwrap();
        let c = a.bmm(&b).unwrap();
        assert_eq!(c.shape, vec![2, 1, 1]);
        assert!((c.data[0] - 17.0).abs() < 1e-5);
        assert!((c.data[1] - 53.0).abs() < 1e-5);
    }

    #[test]
    fn test_add_same_shape() {
        let a = Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3]).unwrap();
        let b = Tensor::from_data(vec![10.0, 20.0, 30.0], vec![3]).unwrap();
        let c = a.add(&b).unwrap();
        assert_eq!(c.data, vec![11.0, 22.0, 33.0]);
    }

    #[test]
    fn test_add_broadcast() {
        // 1-D bias is added to every row of the [2, 3] tensor.
        let a = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![2, 3]).unwrap();
        let bias = Tensor::from_data(vec![10.0, 20.0, 30.0], vec![3]).unwrap();
        let c = a.add(&bias).unwrap();
        assert_eq!(c.data, vec![11.0, 22.0, 33.0, 14.0, 25.0, 36.0]);
    }

    #[test]
    fn test_scale() {
        let a = Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3]).unwrap();
        let b = a.scale(2.0);
        assert_eq!(b.data, vec![2.0, 4.0, 6.0]);
    }

    #[test]
    fn test_relu() {
        let a = Tensor::from_data(vec![-2.0, -1.0, 0.0, 1.0, 2.0], vec![5]).unwrap();
        let b = a.relu();
        assert_eq!(b.data, vec![0.0, 0.0, 0.0, 1.0, 2.0]);
    }

    #[test]
    fn test_gelu() {
        // Reference values for the tanh approximation of GELU.
        let a = Tensor::from_data(vec![0.0, 1.0, -1.0], vec![3]).unwrap();
        let b = a.gelu();
        assert!((b.data[0] - 0.0).abs() < 1e-4);
        assert!((b.data[1] - 0.8412).abs() < 1e-3);
        assert!((b.data[2] - (-0.1588)).abs() < 1e-3);
    }

    #[test]
    fn test_softmax_sum_to_one() {
        let a = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0], vec![2, 2]).unwrap();
        let s = a.softmax().unwrap();
        // Each row is an independent probability distribution: sums to 1.
        assert!((s.data[0] + s.data[1] - 1.0).abs() < 1e-5);
        assert!((s.data[2] + s.data[3] - 1.0).abs() < 1e-5);
        // All probabilities are non-negative.
        assert!(s.data.iter().all(|&v| v >= 0.0));
    }

    #[test]
    fn test_softmax_large_values() {
        // Max-subtraction keeps exp() finite even for large logits.
        let a = Tensor::from_data(vec![1000.0, 1001.0, 1002.0], vec![3]).unwrap();
        let s = a.softmax().unwrap();
        assert!((s.sum() - 1.0).abs() < 1e-5);
        assert!(s.data.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_layer_norm() {
        let x = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0], vec![2, 2]).unwrap();
        let gamma = Tensor::ones(&[2]);
        let beta = Tensor::zeros(&[2]);
        let out = x.layer_norm(&gamma, &beta, 1e-5).unwrap();
        // With unit gamma and zero beta, each normalized row has ~0 mean.
        let row1_mean = (out.data[0] + out.data[1]) / 2.0;
        assert!(row1_mean.abs() < 1e-5);
    }

    #[test]
    fn test_argmax() {
        let a = Tensor::from_data(vec![0.1, 0.9, 0.5, 0.8, 0.2, 0.7], vec![2, 3]).unwrap();
        let idx = a.argmax();
        assert_eq!(idx, vec![1, 0]);
    }

    #[test]
    fn test_rand_deterministic() {
        let a = Tensor::rand(&[100], 42);
        let b = Tensor::rand(&[100], 42);
        assert_eq!(a.data, b.data);
    }

    #[test]
    fn test_randn_distribution() {
        // Sample statistics of 10k draws should be near N(0, 1).
        let t = Tensor::randn(&[10000], 42);
        let mean = t.mean();
        let var: f32 = t.data.iter().map(|&x| (x - mean) * (x - mean)).sum::<f32>() / t.numel() as f32;
        assert!(mean.abs() < 0.1);
        assert!((var - 1.0).abs() < 0.15);
    }

    #[cfg(target_arch = "aarch64")]
    #[test]
    fn test_simd_matches_scalar() {
        // NEON and scalar kernels must agree within float tolerance.
        let a = Tensor::rand(&[8, 16], 42);
        let b = Tensor::rand(&[16, 12], 43);

        let mut c_simd = vec![0.0f32; 8 * 12];
        let mut c_scalar = vec![0.0f32; 8 * 12];

        matmul_neon(&a.data, &b.data, &mut c_simd, 8, 16, 12);
        matmul_scalar(&a.data, &b.data, &mut c_scalar, 8, 16, 12);

        for i in 0..c_simd.len() {
            assert!(
                (c_simd[i] - c_scalar[i]).abs() < 1e-3,
                "Mismatch at {}: SIMD={}, scalar={}", i, c_simd[i], c_scalar[i]
            );
        }
    }
}
|
|