name: 01_fp8_gemm
display_name: "FP8 e4m3 GEMM"
precision: fp8_e4m3
regime: compute
# Dense-equivalent FLOPs per invocation. Evaluated per-shape; the benchmark
# substitutes M/N/K from each shapes.py entry.
flops_formula: "2 * M * N * K"
# Bytes moved (approximate): two fp8 inputs read (1 byte/element), one bf16
# output written (2 bytes/element).
bytes_formula: "M*K + K*N + M*N*2"
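# A minimal sketch of how a harness could evaluate the two formula strings
# per shapes.py entry; eval_formula is a hypothetical helper, not something
# this repo is known to define:
#
#   def eval_formula(formula: str, M: int, N: int, K: int) -> int:
#       # Empty __builtins__ restricts eval() to the M/N/K bindings given here.
#       return int(eval(formula, {"__builtins__": {}}, {"M": M, "N": N, "K": K}))
#
#   flops = eval_formula("2 * M * N * K", M=4096, N=4096, K=4096)           # ~137.4 GFLOP
#   bytes_moved = eval_formula("M*K + K*N + M*N*2", M=4096, N=4096, K=4096) # ~67 MB
#   intensity = flops / bytes_moved  # ~2048 FLOP/byte: squarely compute-bound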
hardware: [RTX_PRO_6000]
peak_tflops_key: fp8   # index into src/hardware/rtx_pro_6000.py peak_tflops_dense
peak_bandwidth_key: dram
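# Sketch of how the two peak_* keys could select roofline ceilings; the dict
# layout and the numbers below are placeholders, not the card's real spec sheet:
#
#   peaks = {
#       "peak_tflops_dense": {"fp8": 1000.0},   # TFLOP/s, placeholder value
#       "peak_bandwidth":    {"dram": 1500.0},  # GB/s, placeholder value
#   }
#   elapsed_s, flops, bytes_moved = 1e-3, 2 * 4096**3, 4 * 4096**2
#   compute_pct   = (flops / elapsed_s / 1e12) / peaks["peak_tflops_dense"]["fp8"]
#   bandwidth_pct = (bytes_moved / elapsed_s / 1e9) / peaks["peak_bandwidth"]["dram"]
#   # regime: compute, so the graded ceiling is compute_pct, not bandwidth_pct.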
# Per-dtype tolerance override (optional). Defaults are in src/eval/correctness.py.
tolerance:
  fp8_e4m3fn: 0.15
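# What a 0.15 tolerance plausibly means at check time (a sketch only; the
# authoritative check lives in src/eval/correctness.py and may differ):
#
#   import torch
#
#   def close_enough(out: torch.Tensor, ref: torch.Tensor, tol: float = 0.15) -> bool:
#       # Compare in fp32. e4m3 keeps only 3 mantissa bits (~0.125 relative
#       # spacing), so a loose relative tolerance is expected.
#       rel_err = (out.float() - ref.float()).abs() / ref.float().abs().clamp_min(1e-3)
#       return bool(rel_err.max() <= tol)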
# Forbidden ops: referencing any of these in solution.py fails correctness
# post-hoc. torch._scaled_mm is Stanford-style cheating: it just calls the
# vendor library instead of implementing the GEMM.
forbidden:
  - "torch._scaled_mm"
  - "torch.ops.aten._scaled_mm"
sota:
  name: "FlashInfer FP8 GEMM"
  url: "https://github.com/flashinfer-ai/flashinfer"
  function: "flashinfer.gemm.fp8_gemm"
  deps:
    - "flashinfer>=0.6.8"
  # Documented H100 throughput for this shape (informational, not graded):
  reference_throughput_tflops_h100: 550
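# The dotted `function` string above could be resolved at run time roughly
# like this (a sketch; it only assumes the final segment names the callable):
#
#   import importlib
#
#   def resolve(dotted: str):
#       module_path, attr = dotted.rsplit(".", 1)
#       return getattr(importlib.import_module(module_path), attr)
#
#   # sota_fn = resolve("flashinfer.gemm.fp8_gemm")  # needs flashinfer>=0.6.8 installed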
num_correct_trials: 3
num_perf_trials: 30
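# A typical shape for the two trial loops (a sketch; the real harness may
# handle warmup, cache flushing, and synchronization differently):
#
#   import torch
#
#   def bench(fn, num_correct_trials: int = 3, num_perf_trials: int = 30) -> float:
#       for _ in range(num_correct_trials):  # repeated correctness runs
#           fn()
#       start = torch.cuda.Event(enable_timing=True)
#       end = torch.cuda.Event(enable_timing=True)
#       torch.cuda.synchronize()
#       start.record()
#       for _ in range(num_perf_trials):     # timed region
#           fn()
#       end.record()
#       torch.cuda.synchronize()
#       return start.elapsed_time(end) / num_perf_trials / 1e3  # seconds per call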