name: 04_kahan_softmax
display_name: "Kahan-corrected Softmax"
precision: fp32
regime: memory # softmax is bandwidth-bound: one read pass over x plus one write pass for y
# Softmax FLOPs: per-element exp + 2 reductions + divide. Roughly 5 flops/elt.
flops_formula: "5 * batch * vocab"
# Bytes moved (ideal, fused kernel): read x once, write y once. Both fp32.
bytes_formula: "batch * vocab * 4 + batch * vocab * 4"
hardware: [RTX_PRO_6000]
peak_tflops_key: fp32
peak_bandwidth_key: dram
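# Worked roofline check (a sketch, not part of the config schema): with the formulas
# above, arithmetic intensity is ~0.6 flop/byte, far below any GPU's fp32 machine
# balance, which is why regime is "memory". The peak numbers below are hypothetical
# placeholders for illustration, not the actual RTX_PRO_6000 spec.
#
#   batch, vocab = 8, 32768                           # example shapes
#   flops = 5 * batch * vocab                         # flops_formula
#   bytes = batch * vocab * 4 + batch * vocab * 4     # bytes_formula
#   ai = flops / bytes                                # = 5 / 8 = 0.625 flop/byte
#   peak_tflops, peak_bw_gbps = 90.0, 1500.0          # hypothetical peaks
#   balance = peak_tflops * 1e12 / (peak_bw_gbps * 1e9)  # flops/byte at the roofline knee
#   assert ai < balance                               # => kernel is bandwidth-bound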
# TIGHTER than default (fp32 default is 1e-4). This problem exists
# specifically to test whether the agent uses compensated summation, so
# we squeeze the tolerance to 1e-5: a naive running fp32 sum across 256K elements
# typically drifts past this; fp64 accumulation passes; Kahan-compensated fp32
# always passes.
tolerance:
  "torch.float32": {"atol": 1.0e-5, "rtol": 1.0e-5}
# Forbidden ops: block the obvious "just call the library" cheats. The
# agent must implement softmax itself, with explicit (compensated)
# summation logic (see the checker sketch after this list).
forbidden:
  - "torch.nn.functional.softmax"
  - "torch.softmax"
  - "F.softmax"
  - "liger_kernel.softmax"
  - "liger_kernel.transformers.softmax"
  - ".softmax("
sota:
  name: "Liger-Kernel Softmax (Triton)"
  url: "https://github.com/linkedin/Liger-Kernel"
  function: "liger_kernel.ops.softmax.LigerSoftmaxFunction"
  deps:
    - "liger-kernel>=0.5.0"
  reference_throughput_gbps_h100: 2800
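# How the SOTA baseline might be invoked (a sketch assuming the function path above
# follows the standard torch.autograd.Function interface; not verified against a
# specific liger-kernel release):
#
#   import torch
#   from liger_kernel.ops.softmax import LigerSoftmaxFunction
#
#   x = torch.randn(8, 32768, device="cuda", dtype=torch.float32)
#   y = LigerSoftmaxFunction.apply(x)   # row-wise softmax over the last dimension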
num_correct_trials: 3
num_perf_trials: 30
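# One way the perf trials could be timed (a sketch under assumptions: CUDA-event
# timing and a median over num_perf_trials; the benchmark's real harness may differ):
#
#   import statistics, torch
#
#   def time_kernel(fn, x, trials=30):
#       times_ms = []
#       for _ in range(trials):
#           start = torch.cuda.Event(enable_timing=True)
#           end = torch.cuda.Event(enable_timing=True)
#           start.record(); fn(x); end.record()
#           torch.cuda.synchronize()
#           times_ms.append(start.elapsed_time(end))
#       return statistics.median(times_ms)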