name: 05_topk_bitonic
display_name: "TopK via Bitonic Sort"
precision: fp32
regime: memory
# Top-k is dominated by the input read (small output, no reduction over k).
# Comparator-network FLOPs are not the bottleneck on real hardware, so we
# track them but score on bandwidth.
# ~O(n log^2 n) compares in total, but the bitonic network is so cheap
# relative to memory traffic that a coarse 4*n estimate is fine for
# telemetry only.
flops_formula: "batch * n * 4"
# fp32 input read + (fp32 value + int64 idx) output.
bytes_formula: "batch * n * 4 + batch * k * (4 + 8)"
hardware: [RTX_PRO_6000]
peak_tflops_key: fp32
peak_bandwidth_key: dram

# Top-k correctness:
# - VALUES must match within fp32 atol/rtol (the kth largest value is
#   well-defined modulo float-equal ties, so we use a loose-ish tolerance).
# - INDICES are checked leniently: for each row, the multiset of returned
#   indices must select values that match the reference values within tol.
#   Direct index equality is NOT required (ties in x can yield different
#   valid index sets).
tolerance:
  float32: 1.0e-4

# Forbidden ops: using any of these in solution.py fails correctness post-hoc.
# This problem is about IMPLEMENTING the selection, not dispatching to
# PyTorch's tuned top-k. torch.sort is also banned: a full sort trivially
# yields top-k, and torch.topk itself falls back to a sort kernel for some
# shapes.
forbidden:
  - "torch.topk"
  - "torch.kthvalue"
  - "torch.sort"
  - "torch.argsort"
  - "Tensor.topk"
  - "Tensor.kthvalue"
  - "Tensor.sort"
  - "Tensor.argsort"
  - "torch.ops.aten.topk"
  - "torch.ops.aten.sort"
  - "torch.ops.aten.kthvalue"

sota:
  name: "torch.topk (cuTOPK / CUB internals)"
  url: "https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/cuda/TensorTopK.cu"
  function: "torch.topk"

deps: []

# Informational: torch.topk dispatches to a radix-select kernel for moderate
# k and to a bitonic sort kernel for small n. Beating it on the
# (1, 131072, 64) decoder shape requires saturating DRAM bandwidth on the
# input read.
reference_throughput_gbps_h100: 2400

num_correct_trials: 3
num_perf_trials: 50
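
# Worked byte count for the (1, 131072, 64) decoder shape named above
# (batch=1, n=131072, k=64), straight from bytes_formula:
#   input  = 1 * 131072 * 4   = 524288 B  (fp32 read)
#   output = 1 * 64 * (4 + 8) =    768 B  (fp32 values + int64 indices)
#   total  = 525056 B; over 99.8% of that is the input read, which is why
#   the problem sits in the memory regime and is scored on bandwidth.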
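
# A minimal sketch of the lenient index check described in the correctness
# notes above. The function name `check_topk` and its exact signature are
# assumptions for illustration; the real harness is not shown here. Note
# that the forbidden-op list applies to solution.py only, so the checker
# itself is free to call torch.sort.
#
#   import torch
#
#   def check_topk(x, out_vals, out_idx, ref_vals, atol=1e-4, rtol=1e-4):
#       # Values: must match the reference top-k values within tolerance.
#       if not torch.allclose(out_vals, ref_vals, atol=atol, rtol=rtol):
#           return False
#       # Indices (lenient): gather x at the returned indices and compare
#       # the selected values, sorted descending so that any valid index
#       # set for tied entries passes. Exact index equality is not required.
#       gathered = torch.gather(x, -1, out_idx)
#       g, _ = torch.sort(gathered, dim=-1, descending=True)
#       r, _ = torch.sort(ref_vals, dim=-1, descending=True)
#       return torch.allclose(g, r, atol=atol, rtol=rtol)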
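
# For orientation, a pure-PyTorch sketch of the technique this problem
# names: top-k via a full bitonic sort network, built only from elementwise
# ops (none of the forbidden sort/topk calls). This is a correctness sketch,
# not a performant solution; a real submission would fuse the network into a
# kernel to approach the DRAM-bandwidth bound. `bitonic_topk` is an
# illustrative name, not part of the harness.
#
#   import torch
#
#   def bitonic_topk(x: torch.Tensor, k: int):
#       # x: (batch, n) fp32, n a power of two.
#       # Returns (values, indices), both (batch, k), values descending.
#       batch, n = x.shape
#       assert n & (n - 1) == 0, "bitonic network needs a power-of-two n"
#       pos = torch.arange(n, device=x.device)
#       vals = x.clone()
#       idx = pos.unsqueeze(0).expand(batch, n).clone()  # int64 indices
#       size = 2
#       while size <= n:                # stage: merge runs of length `size`
#           stride = size // 2
#           while stride >= 1:          # substage: compare-exchange step
#               partner = pos ^ stride  # each lane's comparator partner
#               p_vals = vals[:, partner]
#               p_idx = idx[:, partner]
#               partner_larger = p_vals > vals
#               hi_v = torch.where(partner_larger, p_vals, vals)
#               hi_i = torch.where(partner_larger, p_idx, idx)
#               lo_v = torch.where(partner_larger, vals, p_vals)
#               lo_i = torch.where(partner_larger, idx, p_idx)
#               desc = (pos & size) == 0  # lanes sorting descending now
#               first = pos < partner     # left element of its pair
#               keep_hi = desc == first   # descending runs keep max on left
#               vals = torch.where(keep_hi, hi_v, lo_v)
#               idx = torch.where(keep_hi, hi_i, lo_i)
#               stride //= 2
#           size *= 2
#       return vals[:, :k], idx[:, :k]
#
# Sanity check against the reference (fine outside solution.py):
#   x = torch.randn(4, 1024)
#   v, i = bitonic_topk(x, 64)
#   rv, _ = torch.topk(x, 64)
#   assert torch.allclose(v, rv)
#   assert torch.allclose(x.gather(-1, i), rv)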