import json, uuid, time, tempfile
import os
import traceback
import numpy as np
import sys
import re
from pathlib import Path
from pydantic import BaseModel, Field
class KernelProperties(BaseModel):
    """
    Single Kernel Execution

    Result record for one compile/run/verify/benchmark cycle of an NKI kernel.
    """
    # Set True once nki.baremetal compiles the kernel without raising.
    compiled: bool = False
    # Set True when outputs match the NumPy reference within tolerance.
    correct: bool = False
    # Set True once the kernel executes on device without raising.
    runnable: bool = False
    # Free-form details: error strings/tracebacks, latency stats, profiler fields.
    metadata: dict = Field(default_factory=dict)
# Function to load module from file path
def load_module_from_path(file_path):
    """Import the Python file at *file_path* and return the resulting module.

    The file's parent directory is appended to sys.path (once) so the
    loaded module can resolve its own sibling imports.
    """
    import importlib.util

    module_dir = str(Path(file_path).parent)
    if module_dir not in sys.path:
        sys.path.append(module_dir)

    module_spec = importlib.util.spec_from_file_location("module", file_path)
    if module_spec is None or module_spec.loader is None:
        raise ImportError(f"Could not load module from {file_path}")

    loaded = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(loaded)
    return loaded
def l2norm_allclose(v_k, v_r, rel_tol=1e-5):
    """Return True when ||v_k - v_r||_2 < rel_tol * ||v_r||_2.

    Fixes vs. the original:
    - np.asarray(...) instead of .astype(...): callers also pass plain
      Python floats (scalar outputs), which have no .astype method.
    - Zero-norm reference: the strict `<` comparison against 0 could never
      succeed, so identical all-zero outputs were reported as mismatches;
      now a zero reference is "close" exactly when the diff norm is zero.
    """
    diff_norm = np.linalg.norm(np.asarray(v_k - v_r, dtype=np.float64))
    ref_norm = np.linalg.norm(np.asarray(v_r, dtype=np.float64))
    if ref_norm == 0.0:
        return diff_norm == 0.0
    return diff_norm < rel_tol * ref_norm
def check_correctness_numpy(output_nki, output_task, res, rel_tol=2e-5):
    """Compare kernel outputs against the reference and record mismatches.

    output_nki: list of kernel outputs.
    output_task: tuple of reference outputs, or a single array/scalar.
    res: result object; sets res.correct and appends human-readable
         diagnostics to res.metadata["correctness_error"].
    rel_tol: relative L2-norm tolerance forwarded to l2norm_allclose.
    """
    # output_nki is a list
    # output_task is a tuple or a single array
    if not isinstance(output_task, tuple):
        output_task_tuple = (output_task,)
    else:
        output_task_tuple = output_task
    is_correct = True
    if len(output_nki) != len(output_task_tuple):
        res.metadata.setdefault("correctness_error", []).append(
            f"Num outputs mismatch: nki={len(output_nki)} vs ref={len(output_task_tuple)}"
        )
        res.correct = False
        return
    for i, (v_k, v_r) in enumerate(zip(output_nki, output_task_tuple)):
        if hasattr(v_r, "shape") and hasattr(v_k, "shape"):
            if v_k.shape != v_r.shape:
                res.metadata.setdefault("correctness_error", []).append(f"Output {i} shape mismatch, expected {v_r.shape}, got {v_k.shape}; ")
                is_correct = False
                # BUG FIX: with mismatched shapes the elementwise diff below
                # would raise (or silently broadcast), losing this diagnostic.
                continue
            if not l2norm_allclose(v_k, v_r, rel_tol=rel_tol):
                # Suppress divide-by-zero warnings when the reference contains
                # zeros; max_rel_diff may then be inf/nan in the message.
                with np.errstate(divide="ignore", invalid="ignore"):
                    max_diff = np.amax(np.abs(v_k - v_r))
                    avg_diff = np.mean(np.abs(v_k - v_r))
                    max_rel_diff = np.amax(np.abs(v_k - v_r) / np.abs(v_r))
                    l2norm_diff = np.linalg.norm((v_k - v_r).astype(np.float64))
                    l2norm_ref = np.linalg.norm(v_r.astype(np.float64))
                    l2norm_rel_diff = l2norm_diff / l2norm_ref
                res.metadata.setdefault("correctness_error", []).append(f"Output {i} value mismatch, max diff {max_diff:.6f}, avg diff {avg_diff:.6f}, max rel diff {max_rel_diff:.6f}, l2norm diff {l2norm_diff:.6f}, l2norm ref {l2norm_ref:.6f}, l2norm rel diff {l2norm_rel_diff:.6f}")
                is_correct = False
        else:
            # Scalar outputs: relative compare for floats, exact equality otherwise.
            if np.issubdtype(type(v_r), np.floating) or np.issubdtype(type(v_k), np.floating):
                if not l2norm_allclose(v_k, v_r, rel_tol=rel_tol):
                    res.metadata.setdefault("correctness_error", []).append(f"Output {i} value mismatch, expected {v_r}, got {v_k};")
                    is_correct = False
            else:
                if v_k != v_r:
                    res.metadata.setdefault("correctness_error", []).append(f"Output {i} value mismatch, expected {v_r}, got {v_k}; ")
                    is_correct = False
    res.correct = is_correct
def check_precision_and_correctness(program_path, output_nki, output_task, res, rel_tol):
    """Reject kernels that use half precision, then run the numeric comparison.

    Reads the kernel source at program_path; if it mentions float16 (the
    substring also matches "bfloat16") outside of comments, the result is
    marked incorrect without comparing values.
    """
    with open(program_path, 'r') as src:
        code_text = src.read()
    # Strip comments first so commented-out float16 usage is not flagged.
    code_text = re.sub(r'#.*', '', code_text)
    if "float16" in code_text:
        res.metadata["correctness_error"] = "Float16 is used in the program."
        res.correct = False
        return
    check_correctness_numpy(output_nki, output_task, res, rel_tol=rel_tol)
import neuronxcc.nki as nki
def get_latency(nki_kernel_fn, nki_inputs, artifact_dir):
    """Run the kernel once under nki.baremetal with tracing and return latency in ms.

    Writes a uniquely named NEFF/NTFF pair into artifact_dir, invokes
    neuron-profile to summarize the trace as JSON, and converts the
    reported total_time (seconds) to milliseconds.
    """
    kernel_id = uuid.uuid4()
    neff_path = os.path.join(artifact_dir, f"neff_{kernel_id}.neff")
    ntff_path = os.path.join(artifact_dir, f"ntff_{kernel_id}.ntff")
    nki.baremetal(
        nki_kernel_fn,
        save_neff_name=neff_path,
        save_trace_name=ntff_path,
        additional_compile_opt="--disable-dge --logical-nc-config=1"
    )(*nki_inputs)
    summary_profile_path = os.path.join(artifact_dir, f"profile_{kernel_id}.json")
    summary_profile_cmd = f"neuron-profile view --output-format summary-json -n {neff_path} -s {ntff_path} > {summary_profile_path}"
    os.system(summary_profile_cmd)
    # BUG FIX: close the summary file instead of leaking the handle
    # (json.load(open(...)) left the descriptor open).
    with open(summary_profile_path, 'r') as fh:
        summary = json.load(fh)
    # The summary JSON has a single top-level entry; take its total_time (s) -> ms.
    latency_ms = summary[next(iter(summary))]["total_time"] * 1e3
    return latency_ms
def benchmark_latency(warmpup_iterations, benchmark_iterations, nki_kernel_fn, nki_inputs, artifact_dir):
    """Warm the kernel up, then measure latency over repeated traced runs.

    Returns a dict with mean/min/max latency in ms and "rel_diffs", the
    (max - min) / min spread used as a stability signal by callers.
    NOTE(review): the "warmpup" typo is kept — renaming the parameter
    would break any keyword-argument callers.
    """
    # Untraced warm-up executions so compilation/caching effects settle.
    for _ in range(warmpup_iterations):
        nki.baremetal(
            nki_kernel_fn,
            additional_compile_opt="--disable-dge --logical-nc-config=1"
        )(*nki_inputs)

    samples = [
        get_latency(nki_kernel_fn, nki_inputs, artifact_dir)
        for _ in range(benchmark_iterations)
    ]
    fastest = np.min(samples)
    slowest = np.max(samples)
    return {
        "mean_ms": np.mean(samples),
        "min_ms": fastest,
        "max_ms": slowest,
        "rel_diffs": (slowest - fastest) / fastest,
    }
class NKIKernel:
    """Compile, run, verify, and benchmark one NKI kernel against a NumPy reference task."""

    def __init__(self, program_path: str, base_numpy_path: str):
        self.program_path = program_path        # path to the NKI kernel source
        self.base_numpy_path = base_numpy_path  # path to the NumPy reference task module
        self.res = KernelProperties()
        self.rel_tol = 2e-5   # relative L2-norm tolerance for correctness checks
        self.perf_tol = 0.01  # accepted (max-min)/min latency spread before retrying

    def profile(self, save_fields: "list[str] | None" = None):
        """Run the full pipeline: compile+trace, correctness over 5 seeds, latency benchmark.

        save_fields: profiler-summary fields to copy into res.metadata.
        Returns the populated KernelProperties; stops early (flags unset,
        error recorded in metadata) at the first failing stage.
        """
        # BUG FIX: mutable default argument ([]) replaced with a None sentinel.
        if save_fields is None:
            save_fields = []
        os.environ["NEURON_CC_FLAGS"] = "--auto-cast=none"
        os.environ['NEURON_RT_NUM_CORES'] = '1'
        np.random.seed(42)
        task_module = load_module_from_path(self.base_numpy_path)
        task_fn = task_module.forward
        task_np_input_fn = task_module.get_inputs
        task_np_inputs = task_np_input_fn()
        task_nki_output_fn = task_module.transform_nki_outputs
        self.res = KernelProperties()
        new_profile_name = f"nki_{uuid.uuid4()}"
        with tempfile.TemporaryDirectory(dir="/tmp", prefix=f"{new_profile_name}_") as artifact_dir:
            neff_path = os.path.join(artifact_dir, "kernel_file.neff")
            ntff_path = os.path.join(artifact_dir, "kernel_profile.ntff")
            # Stage 1: compile and execute once with tracing enabled.
            try:
                nki_kernel_module = load_module_from_path(self.program_path)
                # Accept either entry-point name used by generated kernels.
                if hasattr(nki_kernel_module, "kernel"):
                    nki_kernel_fn = nki_kernel_module.kernel
                elif hasattr(nki_kernel_module, "optimized_kernel"):
                    nki_kernel_fn = nki_kernel_module.optimized_kernel
                else:
                    raise ValueError(f"No kernel function found in {self.program_path}")
                # Get the transform_to_nki_inputs function
                if hasattr(task_module, "transform_to_nki_inputs"):
                    task_nki_input_fn = task_module.transform_to_nki_inputs
                else:
                    raise ValueError(f"No transform_to_nki_inputs function found in {self.program_path} or {self.base_numpy_path}")
                nki_inputs = task_nki_input_fn(task_np_inputs)
                output_nki = nki.baremetal(
                    nki_kernel_fn,
                    save_neff_name=neff_path,
                    save_trace_name=ntff_path,
                    additional_compile_opt="--disable-dge --logical-nc-config=1"
                )(*nki_inputs)
                self.res.compiled = True
                self.res.runnable = True
            except Exception as e:
                print(f"Compilation failure. Error: {e}")
                self.res.metadata["compilation_error"] = str(e)
                self.res.metadata["compilation_traceback"] = traceback.format_exc()
                return self.res
            # Stage 2: correctness over several fixed random seeds.
            try:
                for rnd_seed in [0, 21, 42, 63, 84]:
                    np.random.seed(rnd_seed)
                    task_np_inputs = task_np_input_fn()
                    nki_inputs = task_nki_input_fn(task_np_inputs)
                    output_task = task_fn(*task_np_inputs)
                    output_nki_raw = nki.baremetal(
                        nki_kernel_fn,
                        additional_compile_opt="--disable-dge --logical-nc-config=1"
                    )(*nki_inputs)
                    output_nki = task_nki_output_fn(output_nki_raw, output_task)
                    check_precision_and_correctness(self.program_path, output_nki, output_task, self.res, self.rel_tol)
                    if not self.res.correct:
                        break
            except Exception as e:
                print(f"Correct checking failure. Error: {e}")
                self.res.metadata["correctness_error"] = str(e)
                return self.res
            if not self.res.correct:
                return self.res
            # Stage 3: latency benchmark, retried while the spread is too high.
            try:
                runtime_stats = benchmark_latency(2, 10, nki_kernel_fn, nki_inputs, artifact_dir)
                rel_diff = runtime_stats["rel_diffs"]
                rel_diff_list = [rel_diff]
                runtime_stats_list = [runtime_stats]
                while rel_diff > self.perf_tol:
                    print(f"Retry: {self.program_path} at {len(rel_diff_list)}; rel_diffs: {rel_diff_list}")
                    time.sleep(1)
                    # BUG FIX: the original loop never re-ran the benchmark, so it
                    # just re-appended the same noisy measurement; re-measure here.
                    runtime_stats = benchmark_latency(2, 10, nki_kernel_fn, nki_inputs, artifact_dir)
                    rel_diff = runtime_stats["rel_diffs"]
                    rel_diff_list.append(rel_diff)
                    runtime_stats_list.append(runtime_stats)
                    if len(rel_diff_list) > 2:  # Just retry twice. In paper, we did 10 times.
                        break
                # Keep the most stable (lowest-spread) measurement.
                runtime_stats = runtime_stats_list[np.argmin(rel_diff_list)]
                self.res.metadata["latency"] = runtime_stats["mean_ms"]
                self.res.metadata["min_ms"] = runtime_stats["min_ms"]
                self.res.metadata["max_ms"] = runtime_stats["max_ms"]
                self.res.metadata["rel_diffs"] = runtime_stats["rel_diffs"]
                summary_profile_path = os.path.join(artifact_dir, f"{new_profile_name}_summary_profile.json")
                summary_profile_cmd = f"neuron-profile view --output-format summary-json -n {neff_path} -s {ntff_path} > {summary_profile_path}"
                os.system(summary_profile_cmd)
                # BUG FIX: close the profile file instead of leaking the handle.
                with open(summary_profile_path, 'r') as fh:
                    summary = json.load(fh)
                profile_result = summary[next(iter(summary))]
                for field in save_fields:
                    if field in profile_result.keys():
                        self.res.metadata[field] = profile_result[field]
            except Exception as e:
                print(f"Benchmarking failure. Error: {e}")
                self.res.metadata["benchmarking_error"] = traceback.format_exc()
                return self.res
            return self.res