//! Training utilities: loss functions, manual backward passes for the
//! transformer components, the AdamW optimizer, and training bookkeeping
//! (batches, gradient accumulation, metrics).

use crate::tensor::Tensor;

/// Softmax cross-entropy over a batch of 2D logits.
///
/// Returns the mean loss and the gradient with respect to the logits
/// (softmax probabilities minus the one-hot targets, averaged over the batch).
pub fn cross_entropy_loss(logits: &Tensor, targets: &[usize]) -> Result<(f32, Tensor), String> {
    if logits.ndim() != 2 {
        return Err(format!("cross_entropy expects 2D logits, got {}D", logits.ndim()));
    }
    let batch = logits.shape[0];
    let vocab = logits.shape[1];

    if targets.len() != batch {
        return Err(format!("Target count {} != batch size {}", targets.len(), batch));
    }

    let probs = logits.softmax()?;
    let mut total_loss = 0.0f32;
    let mut grad = probs.data.clone();

    for b in 0..batch {
        let target_idx = targets[b];
        if target_idx >= vocab {
            return Err(format!("Target index {} >= vocab size {}", target_idx, vocab));
        }
        let p = probs.data[b * vocab + target_idx].max(1e-10);
        total_loss -= p.ln();

        // d(loss)/d(logit) = softmax(logits) - one_hot(target)
        grad[b * vocab + target_idx] -= 1.0;
    }

    // Average the loss and gradient over the batch.
    let loss = total_loss / batch as f32;
    let scale = 1.0 / batch as f32;
    let grad_data: Vec<f32> = grad.iter().map(|&g| g * scale).collect();
    let d_logits = Tensor::from_data(grad_data, vec![batch, vocab])?;

    Ok((loss, d_logits))
}

/// Weighted binary cross-entropy over per-element predictions in (0, 1).
///
/// `weights` scales each example's contribution to the loss, so costly
/// mistakes can be penalized more heavily. Returns the mean loss and the
/// gradient with respect to the predictions.
pub fn binary_ce_loss(
    predictions: &Tensor,
    labels: &[f32],
    weights: &[f32],
) -> Result<(f32, Tensor), String> {
    let batch = predictions.numel();
    if labels.len() != batch || weights.len() != batch {
        return Err(format!(
            "Size mismatch: predictions={}, labels={}, weights={}",
            batch, labels.len(), weights.len()
        ));
    }

    let mut total_loss = 0.0f32;
    let mut grad = vec![0.0f32; batch];

    for i in 0..batch {
        let p = predictions.data[i].clamp(1e-7, 1.0 - 1e-7);
        let y = labels[i].clamp(0.0, 1.0);
        let w = weights[i];

        total_loss -= w * (y * p.ln() + (1.0 - y) * (1.0 - p).ln());
        // d(loss)/dp = w * (p - y) / (p * (1 - p))
        grad[i] = w * (p - y) / (p * (1.0 - p));
    }

    let loss = total_loss / batch as f32;
    let scale = 1.0 / batch as f32;
    let grad_data: Vec<f32> = grad.iter().map(|&g| g * scale).collect();
    let d_pred = Tensor::from_data(grad_data, predictions.shape.clone())?;

    Ok((loss, d_pred))
}

/// Backward pass through scaled dot-product attention for a single head.
///
/// Given `d_output` (gradient of the attention output) plus the cached `q`,
/// `k`, `v` and softmax `attn_weights`, returns `(dq, dk, dv)`.
pub fn attention_backward(
    d_output: &Tensor,
    q: &Tensor,
    k: &Tensor,
    v: &Tensor,
    attn_weights: &Tensor,
    scale: f32,
) -> Result<(Tensor, Tensor, Tensor), String> {
    // dV = P^T @ d_output, where P is the softmax attention matrix.
    let p_t = attn_weights.transpose_2d()?;
    let dv = p_t.matmul(d_output)?;

    // dP = d_output @ V^T
    let v_t = v.transpose_2d()?;
    let dp = d_output.matmul(&v_t)?;

    // Softmax backward, row by row:
    // dS_ij = P_ij * (dP_ij - sum_k P_ik * dP_ik)
    let seq_len = attn_weights.shape[0];
    let mut ds_data = vec![0.0f32; seq_len * seq_len];
    for i in 0..seq_len {
        let p_offset = i * seq_len;
        let dp_offset = i * seq_len;

        let mut dot = 0.0f32;
        for j in 0..seq_len {
            dot += attn_weights.data[p_offset + j] * dp.data[dp_offset + j];
        }

        for j in 0..seq_len {
            ds_data[p_offset + j] = attn_weights.data[p_offset + j]
                * (dp.data[dp_offset + j] - dot);
        }
    }
    let ds = Tensor::from_data(ds_data, vec![seq_len, seq_len])?;

    // The scores were scaled in the forward pass, so scale dS as well.
    let ds_scaled = ds.scale(scale);

    // dQ = dS @ K
    let dq = ds_scaled.matmul(k)?;

    // dK = dS^T @ Q
    let ds_t = ds_scaled.transpose_2d()?;
    let dk = ds_t.matmul(q)?;

    Ok((dq, dk, dv))
}

/// Backward pass through a linear layer `y = x @ W^T + b`, with `weight`
/// stored as `[out_features, in_features]`.
///
/// Returns `(d_weight, d_bias, d_input)`.
pub fn linear_backward(
    d_output: &Tensor,
    input: &Tensor,
    weight: &Tensor,
) -> Result<(Tensor, Tensor, Tensor), String> {
    let batch = d_output.shape[0];
    let out_f = d_output.shape[1];
    let in_f = input.shape[1];
    debug_assert_eq!(in_f, weight.shape[1], "linear_backward: input features != weight cols");

    // dW = d_output^T @ input  ->  [out_f, in_f]
    let d_out_t = d_output.transpose_2d()?;
    let d_weight = d_out_t.matmul(input)?;

    // db = sum of d_output over the batch dimension.
    let mut d_bias_data = vec![0.0f32; out_f];
    for b in 0..batch {
        for j in 0..out_f {
            d_bias_data[j] += d_output.data[b * out_f + j];
        }
    }
    let d_bias = Tensor::from_data(d_bias_data, vec![out_f])?;

    // dx = d_output @ W  ->  [batch, in_f]
    let d_input = d_output.matmul(weight)?;

    Ok((d_weight, d_bias, d_input))
}

/// Backward pass through the feed-forward block `w2(gelu(w1(x)))`.
///
/// Returns `(dw1, db1, dw2, db2, d_input)`.
pub fn ffn_backward(
    d_output: &Tensor,
    input: &Tensor,
    hidden_pre_gelu: &Tensor,
    w1: &Tensor,
    w2: &Tensor,
) -> Result<(Tensor, Tensor, Tensor, Tensor, Tensor), String> {
    // Second linear layer: its forward input was gelu(hidden).
    let gelu_out = hidden_pre_gelu.gelu();
    let (dw2, db2, d_gelu) = linear_backward(d_output, &gelu_out, w2)?;

    // GELU backward maps d(gelu output) to d(hidden pre-activation).
    let d_hidden = gelu_backward(&d_gelu, hidden_pre_gelu);

    // First linear layer.
    let (dw1, db1, d_input) = linear_backward(&d_hidden, input, w1)?;

    Ok((dw1, db1, dw2, db2, d_input))
}

/// Derivative of the tanh-approximation GELU, applied elementwise to `d_output`.
fn gelu_backward(d_output: &Tensor, input: &Tensor) -> Tensor {
    let sqrt_2_over_pi = (2.0_f32 / std::f32::consts::PI).sqrt();
    let data: Vec<f32> = d_output.data.iter().zip(&input.data).map(|(&dy, &x)| {
        let cube = 0.044715 * x * x * x;
        let inner = sqrt_2_over_pi * (x + cube);
        let tanh_val = inner.tanh();
        let sech2 = 1.0 - tanh_val * tanh_val;
        let d_inner = sqrt_2_over_pi * (1.0 + 3.0 * 0.044715 * x * x);
        // d/dx [0.5 * x * (1 + tanh(inner))]
        let gelu_grad = 0.5 * (1.0 + tanh_val) + 0.5 * x * sech2 * d_inner;
        dy * gelu_grad
    }).collect();
    Tensor { data, shape: input.shape.clone() }
}

/// Backward pass through layer normalization.
///
/// Recomputes the per-row mean and variance from `input`, then returns
/// `(d_gamma, d_beta, d_input)`.
pub fn layer_norm_backward(
    d_output: &Tensor,
    input: &Tensor,
    gamma: &Tensor,
    eps: f32,
) -> Result<(Tensor, Tensor, Tensor), String> {
    let batch = d_output.shape[0];
    let dim = d_output.shape[1];

    let mut d_gamma_data = vec![0.0f32; dim];
    let mut d_beta_data = vec![0.0f32; dim];
    let mut d_input_data = vec![0.0f32; batch * dim];

    for b in 0..batch {
        let offset = b * dim;
        let x_slice = &input.data[offset..offset + dim];

        // Recompute the forward statistics for this row.
        let mean: f32 = x_slice.iter().sum::<f32>() / dim as f32;
        let var: f32 = x_slice.iter().map(|&x| (x - mean) * (x - mean)).sum::<f32>() / dim as f32;
        let inv_std = 1.0 / (var + eps).sqrt();

        // Normalized activations: x_hat = (x - mean) / std.
        let x_hat: Vec<f32> = x_slice.iter().map(|&x| (x - mean) * inv_std).collect();

        // Parameter gradients accumulate across the batch:
        // d_beta += dy, d_gamma += dy * x_hat.
        for i in 0..dim {
            let dy = d_output.data[offset + i];
            d_beta_data[i] += dy;
            d_gamma_data[i] += dy * x_hat[i];
        }

        // Input gradient:
        // dx = inv_std / N * (N * dy*g - sum(dy*g) - x_hat * sum(dy*g*x_hat))
        let n = dim as f32;
        let mut sum_dy_gamma = 0.0f32;
        let mut sum_dy_gamma_xhat = 0.0f32;
        for i in 0..dim {
            let dy_g = d_output.data[offset + i] * gamma.data[i];
            sum_dy_gamma += dy_g;
            sum_dy_gamma_xhat += dy_g * x_hat[i];
        }

        for i in 0..dim {
            let dy_g = d_output.data[offset + i] * gamma.data[i];
            d_input_data[offset + i] = inv_std / n
                * (n * dy_g - sum_dy_gamma - x_hat[i] * sum_dy_gamma_xhat);
        }
    }

    let d_gamma = Tensor::from_data(d_gamma_data, vec![dim])?;
    let d_beta = Tensor::from_data(d_beta_data, vec![dim])?;
    let d_input = Tensor::from_data(d_input_data, vec![batch, dim])?;

    Ok((d_gamma, d_beta, d_input))
}

/// Backward pass through the token embedding lookup.
///
/// Scatter-adds each row of `d_output` into the embedding-table row selected
/// by the corresponding index.
pub fn embedding_backward(
    d_output: &Tensor,
    indices: &[usize],
    vocab_size: usize,
) -> Result<Tensor, String> {
    let batch = d_output.shape[0];
    let d_model = d_output.shape[1];

    let mut grad = vec![0.0f32; vocab_size * d_model];

    for b in 0..batch {
        let idx = indices[b];
        if idx >= vocab_size {
            return Err(format!("Index {} >= vocab_size {}", idx, vocab_size));
        }
        for d in 0..d_model {
            grad[idx * d_model + d] += d_output.data[b * d_model + d];
        }
    }

    Tensor::from_data(grad, vec![vocab_size, d_model])
}

/// Backward pass for causal language-model training.
///
/// Takes the gradient of the loss with respect to the logits plus the forward
/// cache, and returns one gradient tensor per model weight, in the same order
/// as `model.weights()`.
pub fn model_backward_causal(
    d_logits: &Tensor,
    cache: &crate::transformer::ForwardCache,
    model: &crate::transformer::SPFTransformer,
) -> Result<Vec<Tensor>, String> {
    let batch = d_logits.shape[0];
    let seq = d_logits.shape[1];
    let vocab = d_logits.shape[2];
    let d_model = model.config.d_model;

    // Backward through the output projection, working in 2D [batch*seq, ...].
    let d_logits_2d = d_logits.reshape(&[batch * seq, vocab])?;
    let dec_out_2d = cache.decoder_output.reshape(&[batch * seq, d_model])?;
    let (d_output_proj, d_output_bias, d_dec_out) = linear_backward(
        &d_logits_2d, &dec_out_2d, &model.output_projection,
    )?;

    // Gradient w.r.t. the decoder output, back in [batch, seq, d_model].
    let mut d_x = d_dec_out.reshape(&[batch, seq, d_model])?;

    // Backward through the final layer norm.
    let d_x_2d = d_x.reshape(&[batch * seq, d_model])?;
    let (d_final_ln_gamma, d_final_ln_beta, d_x_pre_ln) = layer_norm_backward(
        &d_x_2d,
        &cache.decoder_output.reshape(&[batch * seq, d_model])?,
        &model.decoder.final_ln_gamma,
        model.config.ln_eps,
    )?;
    d_x = d_x_pre_ln.reshape(&[batch, seq, d_model])?;

    // Walk the decoder layers in reverse, collecting per-layer gradients.
    let mut decoder_grads: Vec<Vec<Tensor>> = Vec::new();
    for (layer_idx, layer) in model.decoder.layers.iter().enumerate().rev() {
        let layer_cache = &cache.decoder_caches[layer_idx];

        // FFN sub-block: ln3 followed by the feed-forward network.
        let d_x_2d = d_x.reshape(&[batch * seq, d_model])?;
        let (d_ln3_gamma, d_ln3_beta, d_ln3_input) = layer_norm_backward(
            &d_x_2d, &layer_cache.ln3_input.reshape(&[batch * seq, d_model])?,
            &layer.ln3_gamma, layer.ln_eps,
        )?;
        let (dw1, db1, dw2, db2, d_ffn_in) = ffn_backward(
            &d_ln3_input, &layer_cache.ffn_cache.input,
            &layer_cache.ffn_cache.hidden_pre_gelu,
            &layer.ffn.w1, &layer.ffn.w2,
        )?;
        // Residual connection: add the FFN-branch gradient back into d_x.
        let d_ffn_3d = d_ffn_in.reshape(&[batch, seq, d_model])?;
        d_x = d_x.add(&d_ffn_3d)?;

        // Self-attention sub-block: ln1 followed by multi-head attention.
        let d_x_2d = d_x.reshape(&[batch * seq, d_model])?;
        let (d_ln1_gamma, d_ln1_beta, d_ln1_input) = layer_norm_backward(
            &d_x_2d, &layer_cache.ln1_input.reshape(&[batch * seq, d_model])?,
            &layer.ln1_gamma, layer.ln_eps,
        )?;

        // Per-head attention backward.
        let n_heads = layer.self_attn.config.n_heads;
        let d_head = layer.self_attn.config.d_head();
        let acache = &layer_cache.self_attn_cache;

        let mut dq_all = vec![0.0f32; batch * seq * d_model];
        let mut dk_all = vec![0.0f32; batch * seq * d_model];
        let mut dv_all = vec![0.0f32; batch * seq * d_model];

        for b in 0..batch {
            for h in 0..n_heads {
                let bh = b * n_heads + h;
                let q_off = bh * seq * d_head;
                let w_off = bh * seq * seq;

                // Slice this head's share of the incoming gradient.
                let mut d_head_out = Tensor::zeros(&[seq, d_head]);
                for s in 0..seq {
                    for dd in 0..d_head {
                        d_head_out.data[s * d_head + dd] = d_ln1_input.data[(b * seq + s) * d_model + h * d_head + dd];
                    }
                }

                // Slice this head's cached Q, K, V and attention weights.
                let q_slice = Tensor::from_data(
                    acache.q.data[q_off..q_off + seq * d_head].to_vec(), vec![seq, d_head])?;
                let k_slice = Tensor::from_data(
                    acache.k.data[q_off..q_off + seq * d_head].to_vec(), vec![seq, d_head])?;
                let v_slice = Tensor::from_data(
                    acache.v.data[q_off..q_off + seq * d_head].to_vec(), vec![seq, d_head])?;
                let w_slice = Tensor::from_data(
                    acache.attn_weights.data[w_off..w_off + seq * seq].to_vec(), vec![seq, seq])?;

                let (dq, dk, dv) = attention_backward(
                    &d_head_out, &q_slice, &k_slice, &v_slice, &w_slice, acache.scale,
                )?;

                // Scatter the per-head gradients back into [batch*seq, d_model] layout.
                for s in 0..seq {
                    for dd in 0..d_head {
                        let idx = (b * seq + s) * d_model + h * d_head + dd;
                        dq_all[idx] += dq.data[s * d_head + dd];
                        dk_all[idx] += dk.data[s * d_head + dd];
                        dv_all[idx] += dv.data[s * d_head + dd];
                    }
                }
            }
        }

        // Projection weight gradients for Q, K and V.
        let dq_t = Tensor::from_data(dq_all, vec![batch * seq, d_model])?;
        let dk_t = Tensor::from_data(dk_all, vec![batch * seq, d_model])?;
        let dv_t = Tensor::from_data(dv_all, vec![batch * seq, d_model])?;

        let (dw_q, db_q, _) = linear_backward(&dq_t, &acache.input, &layer.self_attn.w_q)?;
        let (dw_k, db_k, _) = linear_backward(&dk_t, &acache.input, &layer.self_attn.w_k)?;
        let (dw_v, db_v, _) = linear_backward(&dv_t, &acache.input, &layer.self_attn.w_v)?;

        // Output projection backward; this simplified path reuses dq_t as the
        // stand-in input to w_o rather than re-materializing the concatenated
        // head outputs.
        let attn_concat = dq_t.clone();
        let (dw_o, db_o, d_attn_in) = linear_backward(&d_ln1_input, &attn_concat, &layer.self_attn.w_o)?;

        // Residual connection for the attention branch.
        let d_attn_3d = d_attn_in.reshape(&[batch, seq, d_model])?;
        d_x = d_x.add(&d_attn_3d)?;

        // Cross-attention is not exercised in the causal LM path, so its
        // weights (and the ln2 parameters) receive zero gradients.
        let cross_attn_zeros: Vec<Tensor> = layer.cross_attn.weights().iter()
            .map(|w| Tensor::zeros(&w.shape))
            .collect();
        let ln2_gamma_zero = Tensor::zeros(&layer.ln2_gamma.shape);
        let ln2_beta_zero = Tensor::zeros(&layer.ln2_beta.shape);

        let mut layer_grads = vec![dw_q, dw_k, dw_v, dw_o, db_q, db_k, db_v, db_o];
        layer_grads.extend(cross_attn_zeros);
        layer_grads.extend(vec![dw1, db1, dw2, db2]);
        layer_grads.extend(vec![d_ln1_gamma, d_ln1_beta, ln2_gamma_zero, ln2_beta_zero, d_ln3_gamma, d_ln3_beta]);

        decoder_grads.push(layer_grads);
    }

    // Layers were visited in reverse; restore forward order.
    decoder_grads.reverse();

    // Embedding gradient via scatter-add on the token indices.
    let indices: Vec<usize> = cache.token_indices.iter().map(|&id| id as usize).collect();
    let d_x_2d = d_x.reshape(&[batch * seq, d_model])?;
    let d_embedding = embedding_backward(&d_x_2d, &indices, model.config.vocab_size)?;

    // Assemble gradients in the exact order expected by model.weights().
    let mut all_grads: Vec<Tensor> = Vec::new();

    // Token embedding.
    all_grads.push(d_embedding);

    // The encoder is untouched in the causal path: zero gradients.
    for w in model.encoder.weights() {
        all_grads.push(Tensor::zeros(&w.shape));
    }

    // Decoder layers, in forward order.
    for layer_grads in decoder_grads {
        all_grads.extend(layer_grads);
    }
    // Final layer norm.
    all_grads.push(d_final_ln_gamma);
    all_grads.push(d_final_ln_beta);

    // Output projection.
    all_grads.push(d_output_proj);
    all_grads.push(d_output_bias);

    // The gradient ordering must match the model's weight ordering exactly.
    let weight_count = model.weights().len();
    if all_grads.len() != weight_count {
        return Err(format!(
            "Gradient count {} != weight count {}. Alignment error.",
            all_grads.len(), weight_count
        ));
    }

    Ok(all_grads)
}

/// Hyperparameters for the AdamW optimizer.
#[derive(Debug, Clone)]
pub struct AdamWConfig {
    /// Base learning rate.
    pub lr: f32,
    /// Exponential decay rate for the first-moment estimate.
    pub beta1: f32,
    /// Exponential decay rate for the second-moment estimate.
    pub beta2: f32,
    /// Small constant added to the denominator for numerical stability.
    pub epsilon: f32,
    /// Decoupled weight-decay coefficient.
    pub weight_decay: f32,
}

impl Default for AdamWConfig {
    fn default() -> Self {
        Self {
            lr: 3e-4,
            beta1: 0.9,
            beta2: 0.999,
            epsilon: 1e-8,
            weight_decay: 0.01,
        }
    }
}

/// Per-parameter AdamW state.
pub struct ParamState {
    /// First-moment (mean) estimate.
    pub m: Vec<f32>,
    /// Second-moment (uncentered variance) estimate.
    pub v: Vec<f32>,
}

/// AdamW optimizer with decoupled weight decay.
///
/// State is indexed by position: the i-th `ParamState` corresponds to the
/// i-th parameter tensor passed to `step`.
pub struct AdamW {
    pub config: AdamWConfig,
    /// One moment-estimate pair per parameter tensor.
    pub states: Vec<ParamState>,
    /// Number of optimizer steps taken so far.
    pub step: u64,
}

impl AdamW {
    /// Create an optimizer with zeroed moment estimates for parameters of the
    /// given sizes (in the same order they will be passed to `step`).
    pub fn new(config: AdamWConfig, param_sizes: &[usize]) -> Self {
        let states = param_sizes.iter().map(|&size| ParamState {
            m: vec![0.0; size],
            v: vec![0.0; size],
        }).collect();

        Self { config, states, step: 0 }
    }

    /// Apply one AdamW update to every parameter.
    ///
    /// `current_lr` overrides the configured learning rate so a scheduler can
    /// drive it per step.
    pub fn step(&mut self, params: &mut [&mut Tensor], grads: &[&Tensor], current_lr: f32) {
        self.step += 1;
        let t = self.step as f32;

        // Bias-correction terms for the first and second moments.
        let bc1 = 1.0 - self.config.beta1.powf(t);
        let bc2 = 1.0 - self.config.beta2.powf(t);

        for (i, (param, grad)) in params.iter_mut().zip(grads.iter()).enumerate() {
            if i >= self.states.len() {
                continue;
            }
            let state = &mut self.states[i];

            for j in 0..param.data.len() {
                let g = grad.data[j];

                // First moment: exponential moving average of the gradient.
                state.m[j] = self.config.beta1 * state.m[j] + (1.0 - self.config.beta1) * g;

                // Second moment: exponential moving average of the squared gradient.
                state.v[j] = self.config.beta2 * state.v[j] + (1.0 - self.config.beta2) * g * g;

                // Bias-corrected estimates.
                let m_hat = state.m[j] / bc1;
                let v_hat = state.v[j] / bc2;

                // Adam update.
                param.data[j] -= current_lr * m_hat / (v_hat.sqrt() + self.config.epsilon);

                // Decoupled weight decay, applied after the Adam update.
                param.data[j] -= current_lr * self.config.weight_decay * param.data[j];
            }
        }
    }

    /// Bytes used by the optimizer state (two f32 buffers per parameter).
    pub fn memory_bytes(&self) -> usize {
        self.states.iter().map(|s| (s.m.len() + s.v.len()) * 4).sum()
    }

    /// Snapshot the moment estimates and step counter for checkpointing.
    pub fn save_state(&self) -> (Vec<Vec<f32>>, Vec<Vec<f32>>, u64) {
        let m_states: Vec<Vec<f32>> = self.states.iter().map(|s| s.m.clone()).collect();
        let v_states: Vec<Vec<f32>> = self.states.iter().map(|s| s.v.clone()).collect();
        (m_states, v_states, self.step)
    }

    /// Restore moment estimates and the step counter from a checkpoint.
    pub fn load_state(&mut self, m_states: Vec<Vec<f32>>, v_states: Vec<Vec<f32>>, step: u64) {
        for (i, (m, v)) in m_states.into_iter().zip(v_states.into_iter()).enumerate() {
            if i < self.states.len() {
                self.states[i].m = m;
                self.states[i].v = v;
            }
        }
        self.step = step;
    }
}

/// A single training example: a token sequence plus its target.
#[derive(Debug, Clone)]
pub struct TrainingExample {
    /// Input token ids.
    pub input_tokens: Vec<usize>,
    /// What the model should learn from this example.
    pub target: TrainingTarget,
    /// Relative weight of this example in the loss; values above 1.0 make
    /// mistakes on this example more costly.
    pub weight: f32,
}

/// The supervision signal attached to a training example.
#[derive(Debug, Clone)]
pub enum TrainingTarget {
    /// Next-token prediction: the id of the token that should follow.
    NextToken(usize),
    /// Target value for a gate decision.
    GateDecision(f32),
}

/// A batch of training examples.
pub struct TrainingBatch {
    pub examples: Vec<TrainingExample>,
}

impl TrainingBatch {
    pub fn new() -> Self {
        Self { examples: Vec::new() }
    }

    pub fn add(&mut self, example: TrainingExample) {
        self.examples.push(example);
    }

    pub fn len(&self) -> usize {
        self.examples.len()
    }

    pub fn is_empty(&self) -> bool {
        self.examples.is_empty()
    }
}

/// Accumulates gradients across micro-batches before an optimizer step.
pub struct GradAccumulator {
    /// Running gradient sums, one flat buffer per parameter tensor.
    pub grads: Vec<Vec<f32>>,
    /// Number of micro-batches accumulated so far.
    pub count: usize,
}

impl GradAccumulator {
    /// Create an accumulator with zeroed buffers for the given parameter sizes.
    pub fn new(param_sizes: &[usize]) -> Self {
        let grads = param_sizes.iter().map(|&size| vec![0.0f32; size]).collect();
        Self { grads, count: 0 }
    }

    /// Add one micro-batch worth of gradients to the running sums.
    pub fn accumulate(&mut self, new_grads: &[&Tensor]) {
        for (acc, grad) in self.grads.iter_mut().zip(new_grads.iter()) {
            for (a, &g) in acc.iter_mut().zip(grad.data.iter()) {
                *a += g;
            }
        }
        self.count += 1;
    }

    /// Return the averaged gradients as flat (1-D) tensors.
    pub fn averaged(&self) -> Vec<Tensor> {
        if self.count == 0 {
            return self.grads.iter().map(|g| {
                Tensor { data: g.clone(), shape: vec![g.len()] }
            }).collect();
        }
        let scale = 1.0 / self.count as f32;
        self.grads.iter().map(|g| {
            let data: Vec<f32> = g.iter().map(|&v| v * scale).collect();
            let len = data.len();
            Tensor { data, shape: vec![len] }
        }).collect()
    }

    /// Clear the accumulated sums and the micro-batch count.
    pub fn reset(&mut self) {
        for g in &mut self.grads {
            g.fill(0.0);
        }
        self.count = 0;
    }
}

/// Running statistics collected during training.
#[derive(Debug, Clone)]
pub struct TrainingMetrics {
    /// Exponential moving average of the per-step loss.
    pub avg_loss: f32,
    /// Total training steps recorded.
    pub total_steps: u64,
    /// Fraction of gate predictions that matched the actual decision.
    pub gate_alignment: f32,
    /// Total gate decisions observed.
    pub gate_decisions_total: u64,
    /// Gate decisions where the prediction matched the actual outcome.
    pub gate_decisions_aligned: u64,
    /// Learning rate used on the most recent step.
    pub current_lr: f32,
    /// Recent raw losses (capped at the last 100 steps).
    pub loss_history: Vec<f32>,
}

impl TrainingMetrics {
    pub fn new() -> Self {
        Self {
            avg_loss: 0.0,
            total_steps: 0,
            gate_alignment: 0.0,
            gate_decisions_total: 0,
            gate_decisions_aligned: 0,
            current_lr: 0.0,
            loss_history: Vec::new(),
        }
    }

    /// Record one training step's loss and learning rate.
    pub fn record_step(&mut self, loss: f32, lr: f32) {
        self.total_steps += 1;
        // Exponential moving average with a small smoothing factor.
        let alpha = 0.01;
        self.avg_loss = if self.total_steps == 1 {
            loss
        } else {
            self.avg_loss * (1.0 - alpha) + loss * alpha
        };
        self.current_lr = lr;

        self.loss_history.push(loss);
        if self.loss_history.len() > 100 {
            self.loss_history.remove(0);
        }
    }

    /// Record whether a gate prediction agreed with the actual decision.
    pub fn record_gate_prediction(&mut self, predicted_allow: bool, actual_allow: bool) {
        self.gate_decisions_total += 1;
        if predicted_allow == actual_allow {
            self.gate_decisions_aligned += 1;
        }
        self.gate_alignment = self.gate_decisions_aligned as f32
            / self.gate_decisions_total.max(1) as f32;
    }

    /// Converged once at least 1000 gate decisions have been seen and
    /// alignment is at or above 95%.
    pub fn is_converged(&self) -> bool {
        self.gate_decisions_total >= 1000 && self.gate_alignment >= 0.95
    }

    /// Difference between the average of the last 10 losses and the 10 before
    /// them; negative values mean the loss is still falling.
    pub fn loss_trend(&self) -> f32 {
        if self.loss_history.len() < 20 {
            return 0.0;
        }
        let recent = &self.loss_history[self.loss_history.len() - 10..];
        let older = &self.loss_history[self.loss_history.len() - 20..self.loss_history.len() - 10];
        let recent_avg: f32 = recent.iter().sum::<f32>() / 10.0;
        let older_avg: f32 = older.iter().sum::<f32>() / 10.0;
        recent_avg - older_avg
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_cross_entropy_loss() {
        // Row 0 favors class 0 and row 1 favors class 2, matching the targets.
        let logits = Tensor::from_data(
            vec![2.0, 1.0, 0.1, -1.0,
                 -1.0, 0.1, 2.0, 1.0],
            vec![2, 4],
        ).unwrap();
        let targets = vec![0, 2];

        let (loss, grad) = cross_entropy_loss(&logits, &targets).unwrap();
        assert!(loss > 0.0, "Loss should be positive");
        assert!(loss < 2.0, "Loss should be small for correct predictions");
        assert_eq!(grad.shape, vec![2, 4]);
        assert!(grad.data.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_cross_entropy_wrong_prediction() {
        let logits = Tensor::from_data(
            vec![2.0, 0.0, 0.0, 0.0],
            vec![1, 4],
        ).unwrap();
        let targets_right = vec![0];
        let targets_wrong = vec![3];

        let (loss_right, _) = cross_entropy_loss(&logits, &targets_right).unwrap();
        let (loss_wrong, _) = cross_entropy_loss(&logits, &targets_wrong).unwrap();
        assert!(loss_wrong > loss_right, "Wrong prediction should have higher loss");
    }
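
    // Added check (a sketch using the same Tensor helpers as the tests above):
    // the cross-entropy gradient is softmax minus one-hot, so each row should
    // sum to roughly zero.
    #[test]
    fn test_cross_entropy_grad_rows_sum_to_zero() {
        let logits = Tensor::from_data(vec![0.5, -0.3, 1.2, 0.0], vec![1, 4]).unwrap();
        let (_, grad) = cross_entropy_loss(&logits, &[2]).unwrap();
        let row_sum: f32 = grad.data.iter().sum();
        assert!(row_sum.abs() < 1e-5, "softmax minus one-hot should sum to ~0 per row");
    }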

    #[test]
    fn test_binary_ce_loss() {
        let predictions = Tensor::from_data(vec![0.9, 0.1], vec![2]).unwrap();
        let labels = vec![1.0, 0.0];
        let weights = vec![1.0, 1.0];

        let (loss, grad) = binary_ce_loss(&predictions, &labels, &weights).unwrap();
        assert!(loss > 0.0);
        assert!(loss < 1.0, "Loss should be small for correct predictions");
        assert_eq!(grad.shape, vec![2]);
    }

    #[test]
    fn test_binary_ce_loss_fp_weight() {
        let predictions = Tensor::from_data(vec![0.9, 0.9], vec![2]).unwrap();
        let labels = vec![1.0, 1.0];
        let weights_normal = vec![1.0, 1.0];
        let weights_fp = vec![4.0, 4.0];

        let (loss_normal, _) = binary_ce_loss(&predictions, &labels, &weights_normal).unwrap();
        let (loss_fp, _) = binary_ce_loss(&predictions, &labels, &weights_fp).unwrap();
        // A 4x example weight should scale the loss by 4x.
        assert!((loss_fp / loss_normal - 4.0).abs() < 0.01);
    }
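
    // Added check (same assumptions as above): the gradient should push
    // predictions toward their labels, so it is negative when the prediction
    // sits below the label and positive when it sits above.
    #[test]
    fn test_binary_ce_grad_sign() {
        let predictions = Tensor::from_data(vec![0.9, 0.1], vec![2]).unwrap();
        let labels = vec![1.0, 0.0];
        let weights = vec![1.0, 1.0];

        let (_, grad) = binary_ce_loss(&predictions, &labels, &weights).unwrap();
        assert!(grad.data[0] < 0.0, "prediction below label: gradient is negative");
        assert!(grad.data[1] > 0.0, "prediction above label: gradient is positive");
    }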

    #[test]
    fn test_attention_backward_shapes() {
        let seq = 4;
        let d_head = 8;
        let d_output = Tensor::randn(&[seq, d_head], 1);
        let q = Tensor::randn(&[seq, d_head], 2);
        let k = Tensor::randn(&[seq, d_head], 3);
        let v = Tensor::randn(&[seq, d_head], 4);
        let attn_w = Tensor::randn(&[seq, seq], 5).softmax().unwrap();
        let scale = 1.0 / (d_head as f32).sqrt();

        let (dq, dk, dv) = attention_backward(&d_output, &q, &k, &v, &attn_w, scale).unwrap();
        assert_eq!(dq.shape, vec![seq, d_head]);
        assert_eq!(dk.shape, vec![seq, d_head]);
        assert_eq!(dv.shape, vec![seq, d_head]);
        assert!(dq.data.iter().all(|v| v.is_finite()));
    }
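
    // Added property check (a sketch reusing the helpers above): with an
    // identity attention matrix the output gradient passes straight through
    // to dV, and the softmax-backward term cancels, leaving dQ = dK = 0.
    #[test]
    fn test_attention_backward_identity_weights() {
        let seq = 3;
        let d_head = 4;
        let d_output = Tensor::randn(&[seq, d_head], 1);
        let q = Tensor::randn(&[seq, d_head], 2);
        let k = Tensor::randn(&[seq, d_head], 3);
        let v = Tensor::randn(&[seq, d_head], 4);

        let mut eye = vec![0.0f32; seq * seq];
        for i in 0..seq {
            eye[i * seq + i] = 1.0;
        }
        let attn_w = Tensor::from_data(eye, vec![seq, seq]).unwrap();

        let (dq, dk, dv) = attention_backward(&d_output, &q, &k, &v, &attn_w, 1.0).unwrap();
        for (a, b) in dv.data.iter().zip(d_output.data.iter()) {
            assert!((a - b).abs() < 1e-5, "dV should equal d_output");
        }
        assert!(dq.data.iter().all(|x| x.abs() < 1e-5));
        assert!(dk.data.iter().all(|x| x.abs() < 1e-5));
    }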

    #[test]
    fn test_linear_backward_shapes() {
        let batch = 4;
        let in_f = 8;
        let out_f = 16;
        let d_output = Tensor::randn(&[batch, out_f], 1);
        let input = Tensor::randn(&[batch, in_f], 2);
        let weight = Tensor::randn(&[out_f, in_f], 3);

        let (dw, db, dx) = linear_backward(&d_output, &input, &weight).unwrap();
        assert_eq!(dw.shape, vec![out_f, in_f]);
        assert_eq!(db.shape, vec![out_f]);
        assert_eq!(dx.shape, vec![batch, in_f]);
    }
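
    // Added shape check for ffn_backward (a sketch following the same weight
    // layout as test_linear_backward_shapes: w1 is [d_ff, d_model], w2 is
    // [d_model, d_ff]).
    #[test]
    fn test_ffn_backward_shapes() {
        let batch = 4;
        let d_model = 8;
        let d_ff = 16;
        let d_output = Tensor::randn(&[batch, d_model], 1);
        let input = Tensor::randn(&[batch, d_model], 2);
        let hidden_pre_gelu = Tensor::randn(&[batch, d_ff], 3);
        let w1 = Tensor::randn(&[d_ff, d_model], 4);
        let w2 = Tensor::randn(&[d_model, d_ff], 5);

        let (dw1, db1, dw2, db2, dx) =
            ffn_backward(&d_output, &input, &hidden_pre_gelu, &w1, &w2).unwrap();
        assert_eq!(dw1.shape, vec![d_ff, d_model]);
        assert_eq!(db1.shape, vec![d_ff]);
        assert_eq!(dw2.shape, vec![d_model, d_ff]);
        assert_eq!(db2.shape, vec![d_model]);
        assert_eq!(dx.shape, vec![batch, d_model]);
    }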

    #[test]
    fn test_layer_norm_backward_shapes() {
        let batch = 4;
        let dim = 16;
        let d_output = Tensor::randn(&[batch, dim], 1);
        let input = Tensor::randn(&[batch, dim], 2);
        let gamma = Tensor::ones(&[dim]);

        let (dg, db, dx) = layer_norm_backward(&d_output, &input, &gamma, 1e-5).unwrap();
        assert_eq!(dg.shape, vec![dim]);
        assert_eq!(db.shape, vec![dim]);
        assert_eq!(dx.shape, vec![batch, dim]);
    }
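
    // Added check (same helpers as above): d_beta is just the column-wise sum
    // of d_output, independent of the inputs and gamma.
    #[test]
    fn test_layer_norm_backward_dbeta_is_column_sum() {
        let d_output = Tensor::from_data(
            vec![1.0, 2.0, 3.0,
                 4.0, 5.0, 6.0],
            vec![2, 3],
        ).unwrap();
        let input = Tensor::randn(&[2, 3], 7);
        let gamma = Tensor::ones(&[3]);

        let (_, d_beta, _) = layer_norm_backward(&d_output, &input, &gamma, 1e-5).unwrap();
        assert_eq!(d_beta.data, vec![5.0, 7.0, 9.0]);
    }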

    #[test]
    fn test_embedding_backward() {
        let d_output = Tensor::from_data(
            vec![1.0, 2.0, 3.0,
                 4.0, 5.0, 6.0],
            vec![2, 3],
        ).unwrap();
        let indices = vec![5, 2];
        let vocab_size = 10;

        let grad = embedding_backward(&d_output, &indices, vocab_size).unwrap();
        assert_eq!(grad.shape, vec![10, 3]);
        // Each d_output row lands in the table row selected by its index.
        assert_eq!(grad.data[5 * 3], 1.0);
        assert_eq!(grad.data[2 * 3], 4.0);
        assert_eq!(grad.data[0], 0.0);
    }
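
    // Added check (same helpers as above): repeated indices scatter-add into
    // the same embedding row instead of overwriting it.
    #[test]
    fn test_embedding_backward_accumulates_repeated_indices() {
        let d_output = Tensor::from_data(
            vec![1.0, 2.0,
                 10.0, 20.0],
            vec![2, 2],
        ).unwrap();

        let grad = embedding_backward(&d_output, &[3, 3], 5).unwrap();
        assert_eq!(grad.shape, vec![5, 2]);
        assert_eq!(grad.data[3 * 2], 11.0);
        assert_eq!(grad.data[3 * 2 + 1], 22.0);
        assert_eq!(grad.data[0], 0.0);
    }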

    #[test]
    fn test_adamw_step() {
        let config = AdamWConfig::default();
        let mut optimizer = AdamW::new(config, &[4]);

        let mut param = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0], vec![4]).unwrap();
        let grad = Tensor::from_data(vec![0.1, 0.2, 0.3, 0.4], vec![4]).unwrap();

        let original = param.data.clone();
        optimizer.step(&mut [&mut param], &[&grad], 3e-4);

        assert!(param.data != original, "Params should change after optimizer step");
        assert_eq!(optimizer.step, 1);
    }
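
    // Added check (same setup as test_adamw_step): the update moves each
    // parameter against the sign of its gradient.
    #[test]
    fn test_adamw_step_moves_against_gradient() {
        let config = AdamWConfig::default();
        let mut optimizer = AdamW::new(config, &[2]);

        let mut param = Tensor::from_data(vec![1.0, -1.0], vec![2]).unwrap();
        let grad = Tensor::from_data(vec![0.5, -0.5], vec![2]).unwrap();
        optimizer.step(&mut [&mut param], &[&grad], 1e-2);

        assert!(param.data[0] < 1.0, "positive gradient should decrease the param");
        assert!(param.data[1] > -1.0, "negative gradient should increase the param");
    }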

    #[test]
    fn test_adamw_memory() {
        let config = AdamWConfig::default();
        let optimizer = AdamW::new(config, &[1000, 2000, 500]);
        // 3500 params x 2 moment buffers x 4 bytes = 28,000 bytes.
        assert_eq!(optimizer.memory_bytes(), 28000);
    }

    #[test]
    fn test_grad_accumulator() {
        let mut acc = GradAccumulator::new(&[4, 2]);

        let g1 = Tensor::from_data(vec![1.0, 2.0, 3.0, 4.0], vec![4]).unwrap();
        let g2 = Tensor::from_data(vec![10.0, 20.0], vec![2]).unwrap();
        acc.accumulate(&[&g1, &g2]);

        let g3 = Tensor::from_data(vec![3.0, 4.0, 5.0, 6.0], vec![4]).unwrap();
        let g4 = Tensor::from_data(vec![30.0, 40.0], vec![2]).unwrap();
        acc.accumulate(&[&g3, &g4]);

        let avg = acc.averaged();
        assert_eq!(avg[0].data, vec![2.0, 3.0, 4.0, 5.0]);
        assert_eq!(avg[1].data, vec![20.0, 30.0]);
    }
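
    // Added check: reset() clears both the running sums and the micro-batch
    // count, so the accumulator can be reused for the next optimizer step.
    #[test]
    fn test_grad_accumulator_reset() {
        let mut acc = GradAccumulator::new(&[3]);
        let g = Tensor::from_data(vec![1.0, 2.0, 3.0], vec![3]).unwrap();
        acc.accumulate(&[&g]);
        assert_eq!(acc.count, 1);

        acc.reset();
        assert_eq!(acc.count, 0);
        assert!(acc.grads[0].iter().all(|&v| v == 0.0));
    }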

    #[test]
    fn test_training_example_weight() {
        let normal = TrainingExample {
            input_tokens: vec![1, 2, 3],
            target: TrainingTarget::GateDecision(1.0),
            weight: 1.0,
        };
        assert_eq!(normal.weight, 1.0);

        let fp = TrainingExample {
            input_tokens: vec![1, 2, 3],
            target: TrainingTarget::GateDecision(-1.0),
            weight: 4.0,
        };
        assert_eq!(fp.weight, 4.0);
    }

    #[test]
    fn test_training_metrics_convergence() {
        let mut metrics = TrainingMetrics::new();
        // 1000 perfectly aligned gate decisions -> converged.
        for _ in 0..1000 {
            metrics.record_gate_prediction(true, true);
        }
        assert!(metrics.is_converged());

        // 50% alignment never converges, no matter how many decisions.
        let mut m2 = TrainingMetrics::new();
        for i in 0..1000 {
            m2.record_gate_prediction(i % 2 == 0, true);
        }
        assert!(!m2.is_converged());
    }
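
    // Added check: loss_trend() compares the last 10 losses to the 10 before
    // them, so a steadily falling loss should produce a negative trend.
    #[test]
    fn test_training_metrics_loss_trend() {
        let mut metrics = TrainingMetrics::new();
        for i in 0..20 {
            metrics.record_step(20.0 - i as f32, 1e-3);
        }
        assert!(metrics.loss_trend() < 0.0);
        assert_eq!(metrics.total_steps, 20);
    }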

    #[test]
    fn test_gelu_backward_finite() {
        let dy = Tensor::randn(&[4, 8], 1);
        let x = Tensor::randn(&[4, 8], 2);
        let dx = gelu_backward(&dy, &x);
        assert!(dx.data.iter().all(|v| v.is_finite()));
        assert_eq!(dx.shape, x.shape);
    }
}