---
name: 01_fp8_gemm
display_name: "FP8 e4m3 GEMM"
precision: fp8_e4m3
regime: compute

# Dense-equivalent FLOPs per invocation. Evaluated per-shape; the benchmark
# substitutes M/N/K from each shapes.py entry.
flops_formula: "2 * M * N * K"

# Bytes moved (approximate): fp8 inputs in, bf16 output.
bytes_formula: "M*K + K*N + M*N*2"

hardware: [RTX_PRO_6000]
peak_tflops_key: fp8  # index into src/hardware/rtx_pro_6000.py peak_tflops_dense
peak_bandwidth_key: dram

# Per-dtype tolerance override (optional). Defaults are in src/eval/correctness.py.
# NOTE(review): keyed as fp8_e4m3fn while `precision` above is fp8_e4m3 — presumably
# this is the torch dtype name (torch.float8_e4m3fn); confirm the correctness checker
# looks tolerances up by dtype name.
tolerance:
  fp8_e4m3fn: 0.15

# Forbidden ops — using any of these in solution.py fails correctness post-hoc.
# torch._scaled_mm is forbidden because it simply delegates to the vendor FP8
# library, which defeats the purpose of writing the kernel yourself.
forbidden:
  - "torch._scaled_mm"
  - "torch.ops.aten._scaled_mm"

sota:
  name: "FlashInfer FP8 GEMM"
  url: "https://github.com/flashinfer-ai/flashinfer"
  function: "flashinfer.gemm.fp8_gemm"
  deps:
    - "flashinfer>=0.6.8"
  # Documented H100 throughput for this shape (informational, not graded):
  reference_throughput_tflops_h100: 550

num_correct_trials: 3
num_perf_trials: 30