repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/python/tvm/meta_schedule/testing/tune_onnx.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import json
import logging
from distutils.util import strtobool
import onnx # type: ignore
import tvm
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
from tvm.relay.frontend import from_onnx
from tvm.support import describe
from .tune_utils import create_timer, generate_input_data
def _parse_args():
    """Parse command-line arguments for ONNX model tuning.

    Returns
    -------
    parsed : argparse.Namespace
        Parsed arguments, with ``target`` converted to ``tvm.target.Target``,
        ``input_shape`` decoded from its JSON string, and ``rpc_config``
        assembled from the ``--rpc-*`` options.
    """

    def _str2bool(value: str) -> bool:
        # Local replacement for distutils.util.strtobool: distutils is
        # deprecated and removed in Python 3.12 (PEP 632).  Accepts the same
        # spellings; raises ValueError otherwise (argparse reports it as a
        # bad argument value).
        val = value.lower()
        if val in ("y", "yes", "t", "true", "on", "1"):
            return True
        if val in ("n", "no", "f", "false", "off", "0"):
            return False
        raise ValueError(f"invalid truth value {value!r}")

    args = argparse.ArgumentParser()
    args.add_argument("--model-name", type=str, required=True)
    args.add_argument("--onnx-path", type=str, required=True)
    args.add_argument(
        "--input-shape",
        type=str,
        required=True,
        help='example: `[{"name": "input1", "dtype": "int64", "shape": [1, 1, 8]}]',
    )
    args.add_argument("--target", type=str, required=True)
    args.add_argument("--num-trials", type=int, required=True)
    args.add_argument("--rpc-host", type=str, required=True)
    args.add_argument("--rpc-port", type=int, required=True)
    args.add_argument("--rpc-key", type=str, required=True)
    args.add_argument("--work-dir", type=str, required=True)
    # Evaluator knobs: measurements per repeat, repeats, and minimum wall
    # time per measurement.
    args.add_argument("--number", type=int, default=3)
    args.add_argument("--repeat", type=int, default=1)
    args.add_argument("--min-repeat-ms", type=int, default=100)
    args.add_argument(
        "--adaptive-training",
        type=_str2bool,
        help="example: True / False",
        default=True,
    )
    args.add_argument(
        "--cpu-flush",
        type=_str2bool,
        help="example: True / False",
        required=True,
    )
    args.add_argument(
        "--backend",
        type=str,
        choices=["graph", "vm"],
        help="example: graph / vm",
        required=True,
    )
    parsed = args.parse_args()
    # Post-process raw strings into structured objects.
    parsed.target = tvm.target.Target(parsed.target)
    parsed.input_shape = json.loads(parsed.input_shape)
    parsed.rpc_config = ms.runner.RPCConfig(
        tracker_host=parsed.rpc_host,
        tracker_port=parsed.rpc_port,
        tracker_key=parsed.rpc_key,
        session_timeout_sec=600,
    )
    return parsed
# Module-level setup: configure logging and parse CLI arguments once at
# import time; ARGS is read by main() below.
logging.basicConfig(
    format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
ARGS = _parse_args()
def main():
    """End-to-end driver: load the ONNX model, tune it with MetaSchedule over
    RPC, compile the best schedules, and benchmark the compiled module on the
    remote device.  All configuration comes from the module-level ``ARGS``.
    """
    describe()
    print(f"Workload: {ARGS.model_name}")
    onnx_model = onnx.load(ARGS.onnx_path)
    shape_dict = {}
    for item in ARGS.input_shape:
        print(f"  input_name : {item['name']}")
        print(f"  input_shape: {item['shape']}")
        print(f"  input_dtype: {item['dtype']}")
        shape_dict[item["name"]] = item["shape"]
    # freeze_params folds constant weights into the module for tuning.
    mod, params = from_onnx(onnx_model, shape_dict, freeze_params=True)
    # Random inputs matching the declared shapes/dtypes, used for benchmarking.
    input_data = {
        item["name"]: generate_input_data(item["shape"], item["dtype"]) for item in ARGS.input_shape
    }
    with ms.Profiler() as profiler:
        database = ms.relay_integration.tune_relay(
            mod=mod,
            target=ARGS.target,
            params=params,
            work_dir=ARGS.work_dir,
            max_trials_global=ARGS.num_trials,
            num_trials_per_iter=64,
            runner=ms.runner.RPCRunner(  # type: ignore
                rpc_config=ARGS.rpc_config,
                evaluator_config=ms.runner.EvaluatorConfig(
                    number=ARGS.number,
                    repeat=ARGS.repeat,
                    min_repeat_ms=ARGS.min_repeat_ms,
                    enable_cpu_cache_flush=ARGS.cpu_flush,
                ),
                alloc_repeat=1,
            ),
            cost_model=ms.cost_model.XGBModel(  # type: ignore
                extractor=ms.feature_extractor.PerStoreFeature(),
                adaptive_training=ARGS.adaptive_training,
            ),
            strategy=ms.search_strategy.EvolutionarySearch(),
        )
        # Compile using the best tuning records collected in the database.
        lib = ms.relay_integration.compile_relay(
            database=database,
            mod=mod,
            target=ARGS.target,
            params=params,
            backend=ARGS.backend,
        )
    print("Tuning Time:")
    print(profiler.table())
    # Benchmark the compiled module on the remote device via RPC.
    run_module_via_rpc(
        rpc_config=ARGS.rpc_config,
        lib=lib,
        dev_type=ARGS.target.kind.name,
        args=input_data,
        continuation=create_timer(ARGS.backend),
        backend=ARGS.backend,
    )
if __name__ == "__main__":
    main()
| 5,537 | 27.111675 | 100 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/custom_builder_runner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Customized builder and runner methods"""
# pylint: disable=import-outside-toplevel
from typing import TYPE_CHECKING, Dict, List, Optional, Union, Callable
if TYPE_CHECKING:
import numpy as np # type: ignore
from tvm.ir import IRModule
from tvm.meta_schedule.runner import EvaluatorConfig, RPCConfig
from tvm.runtime import Device, Module, NDArray
from tvm.target import Target
from tvm.runtime.vm import Executable
def build_relay(
    mod: "IRModule",
    target: "Target",
    params: Dict[str, "NDArray"],
) -> "Module":
    """Compile a Relay IRModule into a runtime module.

    Parameters
    ----------
    mod : IRModule
        The Relay IRModule to compile.
    target : Target
        The compilation target.
    params : Dict[str, NDArray]
        Parameters bound into the module at build time.

    Returns
    -------
    mod : runtime.Module
        The compiled runtime module.
    """
    # Deferred imports keep heavyweight TVM build machinery off the module
    # import path (matches the file-level pylint directive).
    from tvm.relay.build_module import _build_module_no_factory as relay_build
    from tvm.runtime import Module

    built = relay_build(mod, target=target, target_host=None, params=params)
    assert isinstance(built, Module)
    return built
def build_relay_with_tensorrt(
    mod: "IRModule",
    target: "Target",
    params: Dict[str, "NDArray"],
) -> "Module":
    """Compile a Relay IRModule through the TensorRT BYOC flow.

    Parameters
    ----------
    mod : IRModule
        The Relay IRModule to compile.
    target : Target
        The compilation target.
    params : Dict[str, NDArray]
        Parameters bound into the module at build time.

    Returns
    -------
    mod : runtime.Module
        The compiled runtime module.
    """
    from tvm.ir.transform import PassContext
    from tvm.relay.build_module import _build_module_no_factory as relay_build
    from tvm.relay.op.contrib import tensorrt
    from tvm.runtime import Module

    # Offload TensorRT-supported subgraphs, then run the regular build at
    # full optimization level.
    partitioned = tensorrt.partition_for_tensorrt(mod, params)
    with PassContext(opt_level=3):
        built = relay_build(partitioned, target=target, target_host=None, params=params)
    assert isinstance(built, Module)
    return built
def run_with_graph_executor(
    rt_mod: "Module",
    device: "Device",
    evaluator_config: "EvaluatorConfig",
    repeated_args: List["NDArray"],
) -> List[float]:
    """Benchmark a compiled Relay module through the GraphExecutor.

    Parameters
    ----------
    rt_mod : Module
        The compiled Relay module to measure.
    device : Device
        The device to run the module on.
    evaluator_config : EvaluatorConfig
        Timing configuration (number, repeat, min_repeat_ms, cache flush).
    repeated_args : List[NDArray]
        One argument list per repeated allocation.

    Returns
    -------
    results : List[float]
        The flattened per-run costs in seconds.
    """
    import itertools
    from tvm.contrib.graph_executor import GraphModule

    graph_mod = GraphModule(rt_mod["default"](device))
    preproc = "cache_flush_cpu_non_first_arg" if evaluator_config.enable_cpu_cache_flush else ""
    evaluator = graph_mod.module.time_evaluator(
        func_name="run",
        dev=device,
        number=evaluator_config.number,
        repeat=evaluator_config.repeat,
        min_repeat_ms=evaluator_config.min_repeat_ms,
        f_preproc=preproc,
    )
    # One ProfileResult per argument set; flatten all runs into a flat list.
    repeated_costs = [evaluator(*args).results for args in repeated_args]
    return [float(cost) for cost in itertools.chain.from_iterable(repeated_costs)]
def run_module_via_rpc(
    rpc_config: "RPCConfig",
    lib: Union["Module", "Executable"],
    dev_type: str,
    args: Union[Dict[int, "np.ndarray"], Dict[str, "np.ndarray"]],
    continuation: Callable,
    backend: Optional[str] = "graph",
):
    """Execute a tvm.runtime.Module on RPC remote

    Exports ``lib`` to a tar archive, uploads it through the RPC tracker,
    reloads it on the remote, converts ``args`` to device arrays, and hands
    everything to ``continuation(rt_mod, dev, nd_args)``, whose return value
    is returned to the caller.  ``backend`` must be "graph" or "vm".
    """
    # pylint: disable=import-outside-toplevel
    import os
    import tempfile
    from tvm.contrib.tar import tar
    from tvm.runtime import ndarray
    # pylint: enable=import-outside-toplevel
    with tempfile.TemporaryDirectory() as tmp_dir:
        filename = os.path.join(tmp_dir, "tvm_tmp_mod." + tar.output_format)
        if backend == "vm":
            # A VM Executable splits into bytecode (`code`) and a runtime
            # library; only the library goes into the exported archive.
            code, lib = lib.save()
        lib.export_library(filename, tar)
        session = rpc_config.connect_server()
        session.upload(filename)
        _, filename = os.path.split(filename)
        rt_mod = session.load_module(filename)
        if backend == "vm":
            # Reassemble the executable on the remote from bytecode + library.
            rt_mod = session.get_function("runtime.Load_Executable")(code, rt_mod)
        dev = session.device(dev_type=dev_type, dev_id=0)
        # Copy host numpy inputs onto the remote device.
        nd_args = {k: ndarray.array(v, dev) for k, v in args.items()}
        return continuation(rt_mod, dev, nd_args)
| 5,448 | 30.316092 | 83 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/dummy_object.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Dummy objects for testing."""
import random
from typing import List, Optional
from tvm.tir.schedule import Trace
from ..builder import BuilderInput, BuilderResult, PyBuilder
from ..mutator import PyMutator
from ..runner import PyRunner, PyRunnerFuture, RunnerFuture, RunnerInput, RunnerResult
from ..tune_context import TuneContext # pylint: disable=unused-import
from ..utils import derived_object
@derived_object
class DummyRunnerFuture(PyRunnerFuture):
    """Runner-future stub: always finished, reports fabricated timings."""

    def done(self) -> bool:
        # The dummy run completes immediately.
        return True

    def result(self) -> RunnerResult:
        # Fabricate 1-10 random run times, each in the 5-30 second range.
        num_measurements = random.randint(1, 10)
        run_secs = [random.uniform(5, 30) for _ in range(num_measurements)]
        return RunnerResult(run_secs, None)
@derived_object
class DummyBuilder(PyBuilder):
    """Builder stub: reports success with a fixed artifact path, builds nothing."""

    def build(self, build_inputs: List[BuilderInput]) -> List[BuilderResult]:
        results = []
        for _ in build_inputs:
            results.append(BuilderResult("test_path", None))
        return results
@derived_object
class DummyRunner(PyRunner):
    """Runner stub: yields one immediately-done DummyRunnerFuture per input."""

    def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
        futures: List[RunnerFuture] = []
        for _ in runner_inputs:
            futures.append(DummyRunnerFuture())  # type: ignore
        return futures
@derived_object
class DummyMutator(PyMutator):
    """Dummy Mutator for testing: re-emits the input trace with all sampling
    decisions dropped."""
    def _initialize_with_tune_context(self, context: "TuneContext") -> None:
        # Stateless: nothing to initialize per tuning task.
        pass
    def apply(self, trace: Trace, _) -> Optional[Trace]:
        # Rebuild the trace from its instructions with an empty decision map.
        return Trace(trace.insts, {})
    def clone(self):
        # Stateless, so a fresh instance is an exact clone.
        return DummyMutator()
| 2,182 | 33.109375 | 86 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/validate_database.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""JSON Database validation script"""
import argparse
import logging
import warnings
import itertools
from statistics import mean
from distutils.util import strtobool
from typing import Callable, Tuple, Union, List, Any
import numpy as np # type: ignore
import tvm
from tvm import meta_schedule as ms
from tvm._ffi import get_global_func, register_func
from tvm.ir import IRModule
from tvm.support import describe
from tvm.target import Target
from tvm.tir import Schedule
from tvm.tir.schedule import Trace
from tvm.meta_schedule.utils import remove_build_dir
from tvm.meta_schedule.testing.tune_utils import generate_input_data
from tvm.tir.tensor_intrin import * # type: ignore # pylint: disable=wildcard-import,unused-wildcard-import
# Visual separator printed between sections of a validation report.
# NOTE(review): "DELIMITOR" is a misspelling of "DELIMITER"; renaming would
# break any external references, so the name is kept as-is.
DELIMITOR = "\n" + "-" * 30 + "\n"
def _parse_args():
    """Parse command-line arguments for database validation.

    Returns
    -------
    parsed : argparse.Namespace
        Parsed arguments, with ``target`` converted to ``tvm.target.Target``
        and ``rpc_config`` set when all three ``--rpc-*`` options are given
        (otherwise None, selecting a local runner).
    """
    args = argparse.ArgumentParser()
    args.add_argument(
        "--work-dir",
        type=str,
        required=True,
        help="The path to the work directory containing database files.",
    )
    args.add_argument(
        "--target",
        type=Target,
        required=True,
    )
    args.add_argument(
        "--baseline-target",
        type=Target,
        default="llvm -num-cores=1",
        required=False,
        help="The baseline target to compile the original module.",
    )
    args.add_argument(
        "--top-k",
        type=int,
        default=10**9,
        required=False,
        help="The number of top-k tuning records to validate for each unique original workload.",
    )
    args.add_argument(
        "--rpc-host",
        type=str,
    )
    args.add_argument(
        "--rpc-port",
        type=int,
    )
    args.add_argument(
        "--rpc-key",
        type=str,
    )
    args.add_argument(
        "--number",
        type=int,
        default=3,
    )
    args.add_argument(
        "--repeat",
        type=int,
        default=1,
    )
    args.add_argument(
        "--min-repeat-ms",
        type=int,
        default=100,
    )
    args.add_argument(
        "--cpu-flush",
        # NOTE(review): distutils.util.strtobool is deprecated and removed in
        # Python 3.12 (PEP 632); a local replacement will be needed there.
        type=lambda x: bool(strtobool(x)),
        help="example: True / False",
        required=True,
    )
    args.add_argument(
        "--input-generator-func",
        type=str,
        default="tvm.meta_schedule.testing.default_input_generator",
    )
    args.add_argument(
        "--check-metric-func",
        type=str,
        default="tvm.meta_schedule.testing.default_check_metric",
    )
    parsed = args.parse_args()
    parsed.target = tvm.target.Target(parsed.target)
    # RPC is optional here (unlike tune_onnx): fall back to a local runner
    # when any of host/port/key is missing.
    if parsed.rpc_host is not None and parsed.rpc_port is not None and parsed.rpc_key is not None:
        parsed.rpc_config = ms.runner.RPCConfig(
            tracker_host=parsed.rpc_host,
            tracker_port=parsed.rpc_port,
            tracker_key=parsed.rpc_key,
            session_timeout_sec=600,
        )
    else:
        parsed.rpc_config = None
        warnings.warn("RPC config is not provided, will use local runner.")
    if parsed.cpu_flush and parsed.target.kind.name != "llvm":
        warnings.warn("cpu_flush is only supported on llvm target")
    return parsed
# Module-level setup: parse CLI arguments and configure logging once at
# import time; ARGS is read throughout the module.
# arg parser
ARGS = _parse_args()
# logging
logging.basicConfig(
    format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
# Silence per-run runner chatter; only warnings and above are shown.
logging.getLogger("tvm.meta_schedule.runner").setLevel(logging.WARN)
def get_device_type(target: Target) -> str:
    """Map a compilation target onto its device-type string.

    Parameters
    ----------
    target : Target
        The target whose kind is inspected.

    Returns
    -------
    device_type : str
        "cpu" for llvm targets, "cuda" for cuda targets.

    Raises
    ------
    RuntimeError
        If the target kind is neither llvm nor cuda.
    """
    kind = target.kind.name
    if kind == "llvm":
        return "cpu"
    if kind == "cuda":
        return "cuda"
    raise RuntimeError(f"Unsupported target kind for device type: {target.kind.name}")
def get_runtime_device(target: Target) -> tvm.runtime.Device:
    """Map a compilation target onto the local runtime device handle.

    Parameters
    ----------
    target : Target
        The target whose kind is inspected.

    Returns
    -------
    device : tvm.runtime.Device
        ``tvm.cpu()`` for llvm targets, ``tvm.cuda()`` for cuda targets.

    Raises
    ------
    RuntimeError
        If the target kind is neither llvm nor cuda.
    """
    kind = target.kind.name
    if kind == "llvm":
        return tvm.cpu()
    if kind == "cuda":
        return tvm.cuda()
    raise RuntimeError(f"Unsupported target kind for runtime device: {target.kind.name}")
def check_and_run(func: Union[str, Callable], *args, **kwargs) -> Any:
    """Resolve ``func`` — a registered global-function name or a plain
    callable — and invoke it with the given arguments."""
    resolved = get_global_func(func) if isinstance(func, str) else func
    return resolved(*args, **kwargs)  # type: ignore
class OriginalModule:
    """Hashable IRModule wrapper keyed on structural equality.

    Used to deduplicate tuning-record workloads in a ``set``: two wrappers
    compare equal iff their modules are structurally equal.
    """
    def __init__(self, mod: IRModule):
        # The wrapped module; treated as immutable while hashed.
        self.mod = mod
    def __eq__(self, __o: "OriginalModule") -> bool:  # type: ignore
        # NOTE(review): assumes the other operand is also an OriginalModule;
        # comparing against arbitrary objects would fail inside structural_equal.
        return tvm.ir.structural_equal(self.mod, __o.mod)
    def __hash__(self) -> int:
        return tvm.ir.structural_hash(self.mod)
def initializer() -> None:
    """Initializer function to register the functions on PopenWorker.

    Runner subprocesses do not inherit this process's registered globals, so
    the default check-metric and input-generator functions are (re)registered
    here by name; they are later resolved via ``check_and_run``.
    """
    @register_func("tvm.meta_schedule.testing.default_check_metric")
    def default_check_metric(  # pylint: disable=unused-variable,unreachable-code
        lhs: List[tvm.nd.NDArray], rhs: List[tvm.nd.NDArray]
    ) -> bool:
        """Check if the outputs are equal

        Parameters
        ----------
        lhs : List[tvm.nd.NDArray]
            The first list of NDArrays to compare.
        rhs : List[tvm.nd.NDArray]
            The second list of NDArrays to compare.

        Returns
        -------
        is_equal : bool
            Whether the two lists of NDArrays are elementwise close
            (rtol=1e-3, atol=2e-3).
        """
        assert len(lhs) == len(rhs), "Different number of outputs from two modules"
        for i in range(len(lhs)):  # pylint: disable=consider-using-enumerate
            if not np.allclose(lhs[i].numpy(), rhs[i].numpy(), rtol=1e-3, atol=2e-3):
                return False
        return True
    @register_func("tvm.meta_schedule.testing.default_input_generator")
    def default_input_generator(  # pylint: disable=unused-variable
        mod: IRModule,
    ) -> List[tvm.nd.NDArray]:
        """Default input generator function

        Parameters
        ----------
        mod : IRModule
            The IRModule to generate the input data for; argument shapes and
            dtypes are taken from its "main" PrimFunc.

        Returns
        -------
        inputs : List[tvm.nd.NDArray]
            The generated input data.
        """
        args_info = ms.arg_info.TensorInfo.from_prim_func(mod["main"])
        inputs = [
            tvm.nd.array(generate_input_data(input_shape=arg_info.shape, input_dtype=arg_info.dtype))
            for arg_info in args_info
        ]
        return inputs
def to_numpy(a: List[tvm.nd.NDArray]) -> List[np.ndarray]:
    """Copy a list of TVM NDArrays into host numpy arrays.

    Parameters
    ----------
    a : List[tvm.nd.NDArray]
        The arrays to convert.

    Returns
    -------
    b : List[np.ndarray]
        The converted arrays.
    """
    assert a is not None, "Empty result cannot be converted to numpy"
    return [tensor.numpy() for tensor in a]
def to_tvm_ndarray(a: List[np.ndarray]) -> List[tvm.nd.NDArray]:
    """Wrap a list of numpy arrays as TVM NDArrays.

    Parameters
    ----------
    a : List[np.ndarray]
        The arrays to convert.

    Returns
    -------
    b : List[tvm.nd.NDArray]
        The converted arrays.
    """
    assert a is not None, "Empty result cannot be converted to TVM NDArray"
    return [tvm.nd.array(arr) for arr in a]
def is_failed_record(record: ms.database.TuningRecord) -> bool:
    """Report whether a tuning record is a failed-measurement placeholder.

    Failed records are stored with exactly one run time of 1e9 seconds; they
    exist only as negative samples for the cost model.

    Parameters
    ----------
    record : TuningRecord
        The tuning record to inspect.

    Returns
    -------
    is_failed : bool
    """
    run_secs = record.run_secs
    return len(run_secs) == 1 and run_secs[0] == 1e9
def print_with_counter_func(counter: int, total: int) -> Callable:
    """Build a progress-aware result printer for one validation step.

    Parameters
    ----------
    counter : int
        The 1-based index of the current record.
    total : int
        The (estimated) total number of records to validate.

    Returns
    -------
    print_result : Callable
        A closure that prints a one-line status plus, for failures, the
        offending modules, trace, inputs, and outputs.
    """
    def print_result(
        result: str,
        *,
        original_mod: IRModule = None,
        scheduled_mod: IRModule = None,
        inputs: List[np.ndarray] = None,
        original_res: List[np.ndarray] = None,
        scheduled_res: List[np.ndarray] = None,
        original_run_secs: List[float] = None,
        scheduled_run_secs: List[float] = None,
        exception: Exception = None,
        trace: str = None,
    ) -> None:
        """Print the validation result.

        ``result`` must be one of "pass", "skip", "wrong answer", or
        "exception"; any other value raises ValueError.  Keyword arguments
        are only required for the result kinds that print them.
        """
        status = f"Progress {counter: 6d} / {total: 6d} (estimated) checked, result: {result:>10}, "
        # Timings are only meaningful when both modules actually ran.
        if result in ["pass", "wrong answer"]:
            status += (
                f"original: {mean(original_run_secs) * 1e3: 10.3f} ms, "
                f"scheduled: {mean(scheduled_run_secs) * 1e3: 10.3f} ms"
            )
        output = [status]
        # For any failure kind, dump both modules and the trace for triage.
        if result not in ["pass", "skip"]:
            output.extend(
                [
                    "Original IRModule:" + DELIMITOR + original_mod.script(),
                    "Scheduled IRModule:" + DELIMITOR + scheduled_mod.script(),
                    "Trace" + DELIMITOR + str(trace),
                ]
            )
            if result == "wrong answer":
                output.extend(
                    [
                        "Input:" + DELIMITOR + str(inputs),
                        "Original Result:" + DELIMITOR + str(original_res),
                        "Scheduled Result:" + DELIMITOR + str(scheduled_res),
                        "Max Diff:"
                        + DELIMITOR
                        + str(
                            [
                                np.max(np.abs(original_res[i] - scheduled_res[i]))
                                for i in range(len(original_res))
                            ]
                        )
                        + "\n",
                    ]
                )
            elif result == "exception":
                output.extend(["Exception:" + DELIMITOR + str(exception) + "\n"])
            else:
                raise ValueError(f"Unknown result: {result}")
        print("\n\n".join(output))
    return print_result
def make_alloc_arg_and_check(
    inputs: List[np.ndarray],
    original_mod: IRModule,
    scheduled_mod: IRModule,
    trace: str,
    original_res: List[np.ndarray],
    original_run_secs: List[float],
    print_result: Callable,
) -> Tuple[Callable, Callable]:
    """Make alloc_arg and check functions for the given inputs and collect results.

    The returned pair plugs into a MetaSchedule runner: the first callback
    allocates the fixed ``inputs`` on the device (ignoring the runner's own
    argument info), and the second times the scheduled module, compares its
    outputs against ``original_res`` via ``ARGS.check_metric_func``, and
    reports through ``print_result``.

    Parameters
    ----------
    inputs : List[np.ndarray]
        The inputs to the two modules.
    original_mod : IRModule
        The original IRModule.
    scheduled_mod : IRModule
        The scheduled IRModule.
    trace : str
        The trace of the scheduled IRModule.
    original_res : List[np.ndarray]
        The original results.
    original_run_secs : List[float]
        The original run times.
    print_result : Callable
        The print result function.

    Returns
    -------
    f_with_args_alloc_argument : Callable
        The function to allocate arguments (local or RPC flavor, depending
        on whether ARGS.rpc_config is set).
    f_with_args_run_evaluator : Callable
        The function to run the evaluator (same flavor).
    """
    def f_with_args_alloc_argument_common(
        device: tvm.runtime.Device,
        args_info: ms.runner.rpc_runner.T_ARG_INFO_JSON_OBJ_LIST,  # pylint: disable=unused-argument
        alloc_repeat: int,
    ) -> List[ms.runner.rpc_runner.T_ARGUMENT_LIST]:
        """Allocate the captured ``inputs`` on ``device``, once per repeat;
        the runner-provided ``args_info`` is deliberately ignored."""
        return [[tvm.nd.array(arg, device=device) for arg in inputs] for _ in range(alloc_repeat)]
    def f_with_args_run_evaluator_common(
        rt_mod: tvm.runtime.Module,
        device: tvm.runtime.Device,
        evaluator_config: ms.runner.EvaluatorConfig,
        repeated_args: List[ms.runner.rpc_runner.T_ARGUMENT_LIST],
    ) -> List[float]:
        """Time ``rt_mod``, then compare its outputs with the original's and
        report via ``print_result``.  Returns the flattened per-run costs."""
        evaluator = rt_mod.time_evaluator(
            func_name=rt_mod.entry_name,
            dev=device,
            number=evaluator_config.number,
            repeat=evaluator_config.repeat,
            min_repeat_ms=evaluator_config.min_repeat_ms,
            f_preproc="cache_flush_cpu_non_first_arg"
            if evaluator_config.enable_cpu_cache_flush
            else "",
        )
        repeated_costs: List[List[float]] = []
        for args in repeated_args:
            device.sync()
            profile_result = evaluator(*args)
            repeated_costs.append(profile_result.results)
        costs = [float(cost) for cost in itertools.chain.from_iterable(repeated_costs)]
        # Output comparison only supports a single allocated argument set
        # (alloc_repeat == 1 everywhere in this script).
        assert len(repeated_args) == 1, "Only support one set of arguments"
        scheduled_res = [arg.numpy() for arg in repeated_args[0]]  # type: ignore
        # fetch comparison function
        passed = check_and_run(
            ARGS.check_metric_func,
            to_tvm_ndarray(original_res),
            to_tvm_ndarray(scheduled_res),
        )
        print_result(
            result="pass" if passed else "wrong answer",
            original_mod=original_mod,
            scheduled_mod=scheduled_mod,
            trace=trace,
            inputs=inputs,
            original_res=original_res,
            scheduled_res=scheduled_res,
            original_run_secs=original_run_secs,
            scheduled_run_secs=costs,
        )
        return costs
    # RPC variants: identical behavior, but accept (and ignore) the extra
    # leading RPCSession argument the RPC runner passes.
    def f_with_args_alloc_argument_rpc(
        rpc_session: ms.runner.rpc_runner.RPCSession,  # pylint: disable=unused-argument
        device: tvm.runtime.Device,
        args_info: ms.runner.rpc_runner.T_ARG_INFO_JSON_OBJ_LIST,
        alloc_repeat: int,
    ) -> List[ms.runner.rpc_runner.T_ARGUMENT_LIST]:
        return f_with_args_alloc_argument_common(device, args_info, alloc_repeat)
    def f_with_args_run_evaluator_rpc(
        rpc_session: ms.runner.rpc_runner.RPCSession,  # pylint: disable=unused-argument
        rt_mod: tvm.runtime.Module,
        device: tvm.runtime.Device,
        evaluator_config: ms.runner.EvaluatorConfig,
        repeated_args: List[ms.runner.rpc_runner.T_ARGUMENT_LIST],
    ) -> List[float]:
        return f_with_args_run_evaluator_common(rt_mod, device, evaluator_config, repeated_args)
    if ARGS.rpc_config is None:
        return f_with_args_alloc_argument_common, f_with_args_run_evaluator_common
    else:
        return f_with_args_alloc_argument_rpc, f_with_args_run_evaluator_rpc
def local_build_and_run(
    mod: IRModule,
    target: Target,
    device: tvm.runtime.Device,
    inputs: List[np.ndarray],
) -> Tuple[List[np.ndarray], List[float]]:
    """Build and run the module locally.

    Parameters
    ----------
    mod: IRModule
        The module to build and run
    target: Target
        The target to build the module
    device: Device
        The device to run the module
    inputs: List[np.ndarray]
        The inputs to run the module

    Returns
    -------
    res: List[np.ndarray]
        The contents of all arguments after the run (outputs are written
        in-place into the argument buffers)
    run_secs: List[float]
        The measured run times, using the module-level ARGS.number/repeat
    """
    # potential memory leak https://github.com/apache/tvm/issues/11096
    lib = tvm.build(mod, target=target)
    tvm_inputs = [tvm.nd.array(inp, device=device) for inp in inputs]
    device.sync()
    func = lib.time_evaluator(lib.entry_name, dev=device, number=ARGS.number, repeat=ARGS.repeat)
    benchmark_res = func(*tvm_inputs)
    device.sync()
    return [arg.numpy() for arg in tvm_inputs], list(benchmark_res.results)
def _check_builder_result(builder_result: ms.builder.BuilderResult) -> None:
    """Assert that a build succeeded, surfacing its error message otherwise.

    Parameters
    ----------
    builder_result: BuilderResult
        The builder result
    """
    err = builder_result.error_msg
    assert err is None, "Builder failed: " + str(err if err else "Empty error message")
def _apply_trace(mod: IRModule, trace: Trace) -> IRModule:
    """Replay a tuning trace on a fresh schedule of the module.

    Parameters
    ----------
    mod: IRModule
        The module to schedule
    trace: Trace
        The trace to replay, postprocessors included

    Returns
    -------
    mod: IRModule
        The scheduled module
    """
    schedule = Schedule(mod)
    trace.apply_to_schedule(schedule, remove_postproc=False)
    return schedule.mod
def _build_all_mods(
    mods: List[IRModule], builder: ms.builder.Builder, target: Target
) -> List[ms.builder.BuilderResult]:
    """Build every module in ``mods`` for ``target`` using ``builder``.

    Parameters
    ----------
    mods: List[IRModule]
        The modules to build
    builder: Builder
        The builder to build the modules
    target: Target
        The target to build the modules for

    Returns
    -------
    builder_results: List[BuilderResult]
        One build result per input module, in order
    """
    build_inputs = [ms.builder.BuilderInput(mod, target) for mod in mods]
    builder_results = builder.build(build_inputs)
    assert len(builder_results) == len(
        mods
    ), f"Unexpected number of build results, expected {len(mods)} got {len(builder_results)}"
    return builder_results
def _run_single_mod(
    builder_result: ms.builder.BuilderResult,
    runner: ms.runner.Runner,
    dev_type: str,
) -> None:
    """Measure one built artifact with the runner and assert it succeeded.

    Parameters
    ----------
    builder_result: BuilderResult
        The build artifact to run
    runner: Runner
        The runner used for measurement
    dev_type: str
        The device type string
    """
    # arginfo is not used in this case so we can pass an empty list
    runner_input = ms.runner.RunnerInput(
        builder_result.artifact_path, device_type=dev_type, args_info=[]
    )
    runner_futures = runner.run([runner_input])
    assert (
        len(runner_futures) == 1
    ), f"Unexpected number of runner futures, expected 1 got {len(runner_futures)}"
    (runner_future,) = runner_futures  # pylint: disable=unbalanced-tuple-unpacking
    runner_res = runner_future.result()
    assert runner_res.error_msg is None, "Runner failed: " + (
        runner_res.error_msg if runner_res.error_msg else "Empty error message"
    )
def main():
    """Main function: validate every (deduplicated) workload in the database.

    For each unique original TIR workload: build and run it on the baseline
    target to get reference outputs, then build the top-k scheduled variants
    and compare their outputs and timings against the reference, printing a
    per-record verdict (pass / skip / wrong answer / exception).
    """
    describe()
    with ms.Profiler() as profiler:
        # initialize
        target = ARGS.target
        dev_type = get_device_type(target)
        builder = ms.builder.LocalBuilder()
        database = ms.database.create(work_dir=ARGS.work_dir)
        # collect records
        with profiler.timeit("collect records"):
            records = database.get_all_tuning_records()
        total = len(records)
        print(
            f"Total {total} records to be validated. "
            f"Collected in {float(profiler.get()['collect records']): 3.3f} sec."
        )
        # collect unique original TIR
        with profiler.timeit("deduplicate records"):
            workloads = set()
            for record in records:
                workloads.add(OriginalModule(record.workload.mod))
        print(
            f"Total {len(workloads)} unique original TIR to validate. "
            f"Deduplicated in {float(profiler.get()['deduplicate records']): 3.3f} sec."
        )
        if ARGS.top_k < 10**9:
            print(f"Top {ARGS.top_k} records for each original TIR will be validated.")
            # "total" becomes an estimate; corrected below when a workload
            # has fewer than top_k records.
            total = len(workloads) * ARGS.top_k
        print()
        # validate correctness
        counter = 0
        for item in workloads:
            original_mod = item.mod
            records = database.get_top_k(
                workload=database.commit_workload(original_mod), top_k=ARGS.top_k
            )
            if len(records) < ARGS.top_k:
                total -= ARGS.top_k - len(records)
            # Reference run on the baseline target produces the ground truth.
            inputs = to_numpy(check_and_run(ARGS.input_generator_func, original_mod))
            original_res, original_run_secs = local_build_and_run(
                original_mod,
                target=ARGS.baseline_target,
                inputs=inputs,
                device=get_runtime_device(ARGS.baseline_target),
            )
            scheduled_mods = [_apply_trace(original_mod, record.trace) for record in records]
            builder_results = _build_all_mods(scheduled_mods, builder, target)  # type: ignore
            for i, record in enumerate(records):
                counter += 1
                print_result = print_with_counter_func(counter=counter, total=total)
                if is_failed_record(record):
                    # skip failed records where run_secs is 1e9
                    # these records are only negative samples for cost model
                    # NOTE(review): skipped records bypass the cleanup below,
                    # so their build artifacts are not removed — confirm intended.
                    print_result(result="skip")
                    continue
                try:
                    # prepare scheduled module
                    scheduled_mod = scheduled_mods[i]
                    # check build result
                    builder_result = builder_results[i]
                    _check_builder_result(builder_result)
                    # fetch functions
                    (
                        f_with_args_alloc_argument,
                        f_with_args_run_evaluator,
                    ) = make_alloc_arg_and_check(
                        inputs,
                        original_mod,
                        scheduled_mod,
                        str(record.trace),
                        original_res=original_res,
                        original_run_secs=original_run_secs,
                        print_result=print_result,
                    )
                    # create runner
                    evaluator_config = ms.runner.EvaluatorConfig(
                        number=ARGS.number,
                        repeat=ARGS.repeat,
                        min_repeat_ms=ARGS.min_repeat_ms,
                        enable_cpu_cache_flush=ARGS.cpu_flush,
                    )
                    if ARGS.rpc_config is not None:
                        runner: ms.Runner = ms.runner.RPCRunner(  # type: ignore
                            ARGS.rpc_config,
                            evaluator_config=evaluator_config,
                            alloc_repeat=1,
                            f_alloc_argument=f_with_args_alloc_argument,
                            f_run_evaluator=f_with_args_run_evaluator,
                            initializer=initializer,
                        )
                    else:
                        runner: ms.Runner = ms.runner.LocalRunner(  # type: ignore
                            evaluator_config=evaluator_config,
                            alloc_repeat=1,
                            f_alloc_argument=f_with_args_alloc_argument,
                            f_run_evaluator=f_with_args_run_evaluator,
                            initializer=initializer,
                        )
                    # run and validate
                    _run_single_mod(builder_result, runner, dev_type)  # type: ignore
                except Exception as e:  # pylint: disable=broad-except, invalid-name
                    # validation failed with exception
                    print_result(
                        result="exception",
                        original_mod=original_mod,
                        scheduled_mod=scheduled_mod,
                        trace=str(record.trace),
                        exception=e,
                    )
                # clean up
                remove_build_dir(builder_result.artifact_path)
    print(f"Validation finished! Total time spent: {float(profiler.get()['Total']): 3.3f} sec.")
if __name__ == "__main__":
main()
| 25,351 | 31.670103 | 108 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/dataset_sample_candidates.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import glob
import json
import os
from typing import List
from tqdm import tqdm # type: ignore
import tvm
from tvm import meta_schedule as ms
from tvm.ir import load_json
from tvm.target import Target
def _parse_args():
    """Build and parse the command-line options for candidate sampling."""
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "--task_cache_dir", type=str, help="Please provide the full path to the extracted tasks."
    )
    cli.add_argument(
        "--candidate_cache_dir",
        type=str,
        help="Please provide the full path to save the sampled candidates.",
    )
    cli.add_argument(
        "--target",
        type=str,
        default="nvidia/geforce-rtx-3070",
        help="Please specify the target hardware for tuning.\
            Note: for generating dataset, the hardware does not need to be present.",
    )
    cli.add_argument(
        "--init_population_size",
        type=int,
        default=256,
        help="The initial population size used in evolutionary search.",
    )
    cli.add_argument(
        "--num_samples_per_task",
        type=int,
        default=400,
        help="The number of samples to gather per tuning task.",
    )
    cli.add_argument(
        "--num_trials_per_iter",
        type=int,
        default=64,
        help="The number of trials per iteration in evolutionary search.",
    )
    cli.add_argument(
        "--max_trials_per_task",
        type=int,
        default=400,
        help="The maximum number of trials per task in evolutionary search.",
    )
    cli.add_argument(
        "--max_retry_per_task",
        type=int,
        default=10,
        help="The maximum number of retry attempts allowed.",
    )
    cli.add_argument(
        "--file_group",
        type=int,
        default=0,
        help="To enable running multiple scripts in parallel, files [idx * 10 : (idx + 1) * 10]\
            in the sorted file list from the given directory will be run.",
    )
    return cli.parse_args()
# pylint: disable=too-many-locals
def sample_candidates(task, task_name, model_name):
    """Randomly sample candidate schedules for a task and persist them to disk.

    Candidates are produced by running the evolutionary-search sample/evolve
    primitives directly (with a random cost model), then committed as tuning
    records into a per-task ``JSONDatabase`` under
    ``args.candidate_cache_dir/<model_name>/``.

    Parameters
    ----------
    task : IRModule
        The initial ir module used for generating the search space.
    task_name : str
        The name of the task.
    model_name : str
        The name of the model.

    Returns
    -------
    None
    """
    # Output files: one workload JSON and one tuning-record JSON per task.
    candidate_path = os.path.join(
        args.candidate_cache_dir, model_name, task_name + "_candidates.json"
    )
    workload_path = os.path.join(args.candidate_cache_dir, model_name, task_name + "_workload.json")
    database = ms.database.JSONDatabase(
        path_workload=workload_path,
        path_tuning_record=candidate_path,
    )
    # Call the packed C++ evolutionary-search primitives directly so we can
    # drive sampling/evolution manually instead of running a full tuning loop.
    sample_init_population = tvm.get_global_func(
        "meta_schedule.SearchStrategyEvolutionarySearchSampleInitPopulation"
    )
    evolve_with_cost_model = tvm.get_global_func(
        "meta_schedule.SearchStrategyEvolutionarySearchEvolveWithCostModel"
    )
    # init_measured_ratio=0.0: do not seed the population from measured records.
    strategy = ms.search_strategy.EvolutionarySearch(init_measured_ratio=0.0)
    target = Target(args.target)
    context = ms.TuneContext(
        mod=task,
        target=target,
        space_generator="post-order-apply",
        search_strategy=strategy,
        task_name=task_name,
    )
    context.initialize()
    # RandomModel makes evolution a random walk — we want diverse samples,
    # not converged ones, for dataset generation.
    context.pre_tuning(
        max_trials=args.max_trials_per_task,
        num_trials_per_iter=args.num_trials_per_iter,
        design_spaces=context.generate_design_space(),
        database=database,
        cost_model=ms.cost_model.RandomModel(),  # type: ignore
    )
    all_states: List[tvm.tir.Schedule] = []
    num_retry, itr = 0, 0
    states = sample_init_population(strategy, args.init_population_size)
    # Keep evolving until enough states are collected; re-seed the population
    # whenever evolution yields nothing, bounded by max_retry_per_task.
    while len(all_states) < args.num_samples_per_task and num_retry < args.max_retry_per_task:
        states = evolve_with_cost_model(strategy, states, len(states))
        all_states += states
        if len(states) == 0:
            states = sample_init_population(strategy, args.init_population_size)
            num_retry += 1
        else:
            num_retry = 0
        print(f"iter: {itr}, number of states sampled: {len(all_states)}")
        itr += 1
    all_states = all_states[: args.num_samples_per_task]
    # NOTE(review): a fresh Workload is built here while commit_workload is
    # called separately with the same mod — presumably both resolve to the
    # same structural hash in the JSON database; confirm if records fail to
    # round-trip.
    workload = ms.database.Workload(context.mod)
    database.commit_workload(context.mod)
    for state in all_states:
        database.commit_tuning_record(ms.database.TuningRecord(state.trace, workload))
args = _parse_args() # pylint: disable=invalid-name
def main():
    """Sample candidates for every task file in this process's file group."""
    if not os.path.isdir(args.task_cache_dir):
        raise Exception("Please provide a correct task cache dir.")
    try:
        os.makedirs(args.candidate_cache_dir, exist_ok=True)
    except OSError:
        print(f"Directory {args.candidate_cache_dir} cannot be created successfully.")
    # Each invocation handles a slice of 10 files so that several script
    # instances (different --file_group values) can run in parallel.
    task_paths = sorted(glob.glob(os.path.join(args.task_cache_dir, "*.json")))[
        args.file_group * 10 : (args.file_group + 1) * 10
    ]
    print(f"Selected models: {task_paths}")
    for num, task_path in enumerate(task_paths):
        print(f"Processing model {num} ...")
        with open(task_path, "rb") as file:
            tasks = file.readlines()
        # Recover <model> from a filename shaped "relay-<model>_extracted_tasks.json".
        model_name = task_path.split("/")[-1][len("relay-") :][: -len("_extracted_tasks.json")]
        os.makedirs(os.path.join(args.candidate_cache_dir, model_name), exist_ok=True)
        # One JSON object per line: [task_name, serialized IRModule].
        for task_str in tqdm(tasks):
            task_name, task_mod = json.loads(task_str)
            task_mod = load_json(json.dumps(task_mod))
            sample_candidates(task_mod, task_name, model_name)
if __name__ == "__main__":
main()
| 6,498 | 33.569149 | 100 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/tlcbench.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,import-outside-toplevel
# type: ignore
"""Model loader for TLCBench."""
import logging
import multiprocessing
import os
import tvm
from tvm import relay
from tvm.contrib.download import download_testdata
log = logging.getLogger(__name__)
def _convert(args):
    """Worker: import an ONNX model into Relay, run FQ2I, and serialize to disk.

    ``args`` is a single tuple ``(onnx_model, shape_dict, json_path,
    params_path)`` so the function can be dispatched via
    ``multiprocessing.Pool.map`` (which passes one argument per item).
    """
    onnx_model, shape_dict, json_path, params_path = args
    mod, params = relay.frontend.from_onnx(onnx_model, shape_dict, freeze_params=True)
    # FakeQuantizationToInteger requires inferred types; use_qat=True handles
    # quantize-aware-training graphs.
    seq = tvm.transform.Sequential(
        [relay.transform.InferType(), relay.transform.FakeQuantizationToInteger(use_qat=True)]
    )
    mod = seq(mod)
    # Persist the module as JSON and the params as a binary param dict.
    with open(json_path, "w") as fo:
        fo.write(tvm.ir.save_json(mod))
    with open(params_path, "wb") as fo:
        fo.write(relay.save_param_dict(params))
def convert_to_qnn(onnx_path, json_path, params_path, input_info):
    """Run the ONNX frontend and the FQ2I pass. The output is serialized to disk.

    Parameters
    ----------
    onnx_path : str
        Path of the ONNX model to import.
    json_path : str
        Destination path for the serialized Relay module (JSON).
    params_path : str
        Destination path for the serialized parameter dict (binary).
    input_info : sequence of (name, shape)
        Input names and shapes, converted to the frontend's shape dict.
    """
    import onnx

    onnx_model = onnx.load(onnx_path)
    shape_dict = dict(input_info)
    # Fix: log message read "Converting te ONNX model" — typo for "the".
    log.info("Converting the ONNX model to Relay and running the FQ2I pass, it may take a while...")
    # NOTE(review): the conversion runs in a one-process pool — presumably to
    # release the importer/pass memory when the worker exits; confirm.
    with multiprocessing.Pool(processes=1) as pool:
        pool.map(_convert, [(onnx_model, shape_dict, json_path, params_path)])
def deserialize_relay(json_path, params_path):
    """Load a serialized Relay module and its parameter dict back from disk."""
    with open(json_path, "r") as module_file:
        module = tvm.ir.load_json(module_file.read())
    with open(params_path, "rb") as params_file:
        parameters = relay.load_param_dict(params_file.read())
    return module, parameters
def load_quantized_bert_base(batch_size=1, seq_len=384):
    """
    Load the quantized bert-base model from TLCBench, possibly downloading it from github
    and caching the converted int8 QNN module to disk.

    In addition to returing the relay module and its parameters, it also returns input name
    and shape information, which can be used at the deployment time as follows:

    ```
    mod, params, input_info = load_quantized_bert_base()
    ...
    runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))

    for name, shape in input_info:
        arr = np.random.uniform(1, 10, size=shape).astype("int64")
        runtime.set_input(name, arr)

    runtime.run()
    ```
    """
    url = "https://github.com/tlc-pack/TLCBench/raw/main/models/bert-base-qat.onnx"
    log.info("Downloading quantized bert-base model.")
    onnx_path = download_testdata(url, "bert-base-qat.onnx", module="tlcbench")
    cache_dir = os.path.dirname(onnx_path)
    json_path = os.path.join(cache_dir, "bert_base_int8_b%d_s%d.json" % (batch_size, seq_len))
    params_path = os.path.join(cache_dir, "bert_base_int8_b%d_s%d.params" % (batch_size, seq_len))

    # Input names and order encoded in the ONNX model
    input_info = [
        ("input_ids", (batch_size, seq_len)),
        ("segment_ids", (batch_size, seq_len)),
        ("input_mask", (batch_size, seq_len)),
    ]

    if not os.path.exists(json_path) or not os.path.exists(params_path):
        convert_to_qnn(onnx_path, json_path, params_path, input_info)

    try:
        mod, params = deserialize_relay(json_path, params_path)
    except ValueError:
        # A serialized Relay json file may become invalid after a TVM bump:
        # regenerate the cached artifacts, then load them again.
        convert_to_qnn(onnx_path, json_path, params_path, input_info)
        mod, params = deserialize_relay(json_path, params_path)

    return mod, params, input_info
| 4,266 | 33.691057 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/dataset_extract_tasks.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import glob
import json
import os
from tqdm import tqdm # type: ignore
import tvm
from tvm import meta_schedule as ms
from tvm.ir import save_json
from tvm.meta_schedule.testing.relay_workload import _load_cache
from tvm.runtime import load_param_dict
def _parse_args():
    """Build and parse the CLI options for task extraction."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--model_cache_dir", type=str, help="Please provide the full path to the model cache dir."
    )
    arg_parser.add_argument(
        "--task_cache_dir", type=str, help="Please provide the full path to save extracted tasks."
    )
    arg_parser.add_argument(
        "--target", type=str, default="cuda", help="Please specify the target hardware for tuning."
    )
    return arg_parser.parse_args()
# pylint: disable=too-many-locals
def extract_and_save_tasks(cache_file):
    """Extract tuning tasks and cache the nonspatial ones in the given directory.

    Each cached task is written as one JSON line: ``[task_name, module_json]``.

    Parameters
    ----------
    cache_file : str
        The filename of the cached model.

    Returns
    -------
    None
    """
    mod, params_bytearray, _ = _load_cache(args.model_cache_dir, cache_file)
    params = load_param_dict(params_bytearray)
    try:
        extracted_tasks = ms.relay_integration.extract_tasks(mod, target=args.target, params=params)
    except tvm.error.TVMError as error:
        # Extraction can fail for unsupported models; report and skip.
        print(str(error))
        return
    task_cache_path = os.path.join(
        args.task_cache_dir, cache_file.split(".")[0] + "_extracted_tasks.json"
    )
    is_spatial = tvm.get_global_func("tir.schedule.IsSpatialPrimFunc")
    # Collect all lines first, then join: the original emitted the final
    # newline based on the index in the *full* task list, which left a stray
    # trailing newline whenever the last extracted task was spatial (skipped).
    lines = []
    for task in extracted_tasks:
        subgraph = task.dispatched[0]
        prim_func = subgraph[subgraph.get_global_vars()[0]]
        # Only keep non-spatial (reduction-bearing) tasks for the dataset.
        if not is_spatial(prim_func):
            subgraph_str = save_json(subgraph)
            json_obj = [task.task_name, json.loads(subgraph_str)]
            json_str = json.dumps(json_obj)
            assert "\n" not in json_str, "Failed to generate single line string."
            lines.append(json_str)
    with open(task_cache_path, "w", encoding="utf8") as file:
        file.write("\n".join(lines))
args = _parse_args() # pylint: disable=invalid-name
def main():
    """Extract tasks for every cached model found under args.model_cache_dir."""
    if not os.path.isdir(args.model_cache_dir):
        raise Exception("Please provide a correct model cache dir.")
    try:
        os.makedirs(args.task_cache_dir, exist_ok=True)
    except OSError:
        print(f"Directory {args.task_cache_dir} cannot be created successfully.")
    model_paths = glob.glob(os.path.join(args.model_cache_dir, "*.json"))  # pylint: disable=invalid-name
    for model_path in tqdm(model_paths):
        # Process each cached model by its bare filename.
        extract_and_save_tasks(model_path.split("/")[-1])
if __name__ == "__main__":
main()
| 3,657 | 33.838095 | 100 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/tune_te.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import logging
from distutils.util import strtobool
from typing import Optional
import tvm
from tvm import meta_schedule as ms
from tvm import tir
from tvm.meta_schedule.testing.te_workload import create_te_workload
from tvm.support import describe
def _parse_args():
    """Parse the CLI options for TE-workload tuning.

    Besides the raw CLI values, the returned namespace carries two derived
    fields: ``target`` is normalized into a ``tvm.target.Target`` and
    ``rpc_config`` is an ``ms.runner.RPCConfig`` built from the ``--rpc-*``
    options.
    """
    args = argparse.ArgumentParser()
    args.add_argument(
        "--workload",
        type=str,
        required=True,
    )
    args.add_argument(
        "--target",
        type=str,
        required=True,
    )
    args.add_argument(
        "--num-trials",
        type=int,
        required=True,
    )
    args.add_argument(
        "--rpc-host",
        type=str,
        required=True,
    )
    args.add_argument(
        "--rpc-port",
        type=int,
        required=True,
    )
    args.add_argument(
        "--rpc-key",
        type=str,
        required=True,
    )
    args.add_argument(
        "--work-dir",
        type=str,
        required=True,
    )
    # Evaluator knobs (see ms.runner.EvaluatorConfig).
    args.add_argument(
        "--number",
        type=int,
        default=3,
    )
    args.add_argument(
        "--repeat",
        type=int,
        default=1,
    )
    args.add_argument(
        "--min-repeat-ms",
        type=int,
        default=100,
    )
    # strtobool accepts "true"/"false"/"1"/"0" etc. on the command line.
    args.add_argument(
        "--adaptive-training",
        type=lambda x: bool(strtobool(x)),
        required=False,
        help="example: True / False",
        default=True,
    )
    args.add_argument(
        "--cpu-flush",
        type=lambda x: bool(strtobool(x)),
        help="example: True / False",
        required=True,
    )
    parsed = args.parse_args()
    parsed.target = tvm.target.Target(parsed.target)
    parsed.rpc_config = ms.runner.RPCConfig(
        tracker_host=parsed.rpc_host,
        tracker_port=parsed.rpc_port,
        tracker_key=parsed.rpc_key,
        session_timeout_sec=60,
    )
    return parsed
logging.basicConfig(
format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
ARGS = _parse_args()
def main():
    """Tune a single TE workload over RPC and print the best schedule found."""
    describe()
    print(f"Workload: {ARGS.workload}")
    with ms.Profiler() as profiler:
        # tune_tir returns the best schedule, or None if nothing valid was found.
        sch: Optional[tir.Schedule] = ms.tir_integration.tune_tir(
            mod=create_te_workload(ARGS.workload, 0),
            target=ARGS.target,
            work_dir=ARGS.work_dir,
            max_trials_global=ARGS.num_trials,
            num_trials_per_iter=64,
            runner=ms.runner.RPCRunner(  # type: ignore
                rpc_config=ARGS.rpc_config,
                evaluator_config=ms.runner.EvaluatorConfig(
                    number=ARGS.number,
                    repeat=ARGS.repeat,
                    min_repeat_ms=ARGS.min_repeat_ms,
                    enable_cpu_cache_flush=ARGS.cpu_flush,
                ),
                alloc_repeat=1,
            ),
            cost_model=ms.cost_model.XGBModel(  # type: ignore
                extractor=ms.feature_extractor.PerStoreFeature(),
                adaptive_training=ARGS.adaptive_training,
            ),
            strategy=ms.search_strategy.EvolutionarySearch(),
        )
        print("Tuning Time:")
        print(profiler.table())
    if sch is None:
        print("No valid schedule found!")
    else:
        # Dump both the scheduled module and the trace that produced it.
        print(sch.mod.script())
        print(sch.trace)
if __name__ == "__main__":
main()
| 4,191 | 26.578947 | 91 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/te_workload.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Workloads in TE"""
# pylint: disable=missing-docstring
from typing import Tuple
from tvm import te, tir, topi
from tvm.target import Target
def batch_matmul_nkkm(  # pylint: disable=invalid-name
    B: int,
    N: int,
    M: int,
    K: int,
    in_dtype: str = "float32",
    out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
    """Batched matmul: Z[b] = X[b] @ Y[b] with X (B, N, K) and Y (B, K, M)."""
    lhs = te.placeholder((B, N, K), name="X", dtype=in_dtype)
    rhs = te.placeholder((B, K, M), name="Y", dtype=in_dtype)
    red = te.reduce_axis((0, K), name="k")
    out = te.compute(  # pylint: disable=invalid-name
        (B, N, M),
        lambda b, i, j: te.sum(
            lhs[b][i][red].astype(out_dtype) * rhs[b][red][j].astype(out_dtype),
            axis=[red],
        ),
        name="Z",
    )
    return (lhs, rhs, out)
def conv1d_nlc(  # pylint: disable=invalid-name
    N: int,
    L: int,
    CI: int,
    CO: int,
    kernel_size: int,
    stride: int = 1,
    padding: int = 0,
    dilation: int = 1,
    groups: int = 1,
    in_dtype: str = "float32",
    out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
    """Grouped 1D convolution in NLC layout.

    Returns ``(inputs, weight, output)`` where inputs is (N, L, CI), weight is
    (kernel_size, CI // groups, CO) and output is (N, out_len, CO).
    """
    inputs = te.placeholder((N, L, CI), name="inputs", dtype=in_dtype)
    weight = te.placeholder((kernel_size, CI // groups, CO), name="weight", dtype=in_dtype)
    batch_size, in_len, _ = inputs.shape
    k_len, channel_per_group, out_channel = weight.shape
    out_channel_per_group = out_channel // groups
    # Standard dilated-convolution output-length formula.
    out_len = (in_len + 2 * padding - dilation * (k_len - 1) - 1) // stride + 1
    rc = te.reduce_axis((0, channel_per_group), name="rc")
    rl = te.reduce_axis((0, k_len), name="rl")
    padded = topi.nn.pad(inputs, [0, padding, 0])
    output = te.compute(
        (batch_size, out_len, out_channel),
        lambda n, l, co: te.sum(
            (
                padded[
                    n,
                    l * stride + rl * dilation,
                    # Map output channel `co` to its group's input-channel slice.
                    co // out_channel_per_group * channel_per_group + rc,
                ].astype(out_dtype)
                * weight[rl, rc, co].astype(out_dtype)
            ),
            axis=[rl, rc],
        ),
        name="conv1d_nlc",
    )
    return (inputs, weight, output)
def conv2d_nhwc(  # pylint: disable=invalid-name
    N: int,
    H: int,
    W: int,
    CI: int,
    CO: int,
    kernel_size: int,
    stride: int = 1,
    padding: int = 0,
    dilation: int = 1,
    groups: int = 1,
    in_dtype: str = "float32",
    out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
    """Grouped 2D convolution in NHWC layout with HWIO weights.

    Returns ``(inputs, weight, output)``; output is (N, out_h, out_w, CO).
    """
    inputs = te.placeholder((N, H, W, CI), name="inputs", dtype=in_dtype)
    weight = te.placeholder(
        (kernel_size, kernel_size, CI // groups, CO), name="weight", dtype=in_dtype
    )
    batch_size, in_h, in_w, _ = inputs.shape
    k_h, k_w, channel_per_group, out_channel = weight.shape
    out_channel_per_group = out_channel // groups
    # Dilated-convolution output spatial sizes.
    out_h = (in_h + 2 * padding - dilation * (k_h - 1) - 1) // stride + 1
    out_w = (in_w + 2 * padding - dilation * (k_w - 1) - 1) // stride + 1
    rh = te.reduce_axis((0, k_h), name="rh")
    rw = te.reduce_axis((0, k_w), name="rw")
    rc = te.reduce_axis((0, channel_per_group), name="rc")
    padded = topi.nn.pad(inputs, [0, padding, padding, 0])
    output = te.compute(
        (batch_size, out_h, out_w, out_channel),
        lambda n, h, w, co: te.sum(
            (
                padded[
                    n,
                    h * stride + rh * dilation,
                    w * stride + rw * dilation,
                    # Select the input-channel slice belonging to co's group.
                    co // out_channel_per_group * channel_per_group + rc,
                ].astype(out_dtype)
                * weight[rh, rw, rc, co].astype(out_dtype)
            ),
            axis=[rh, rw, rc],
        ),
        name="conv2d_nhwc",
    )
    return (inputs, weight, output)
def conv3d_ndhwc(  # pylint: disable=invalid-name
    N: int,
    D: int,
    H: int,
    W: int,
    CI: int,
    CO: int,
    kernel_size: int,
    stride: int = 1,
    padding: int = 0,
    dilation: int = 1,
    groups: int = 1,
    in_dtype: str = "float32",
    out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
    """Grouped 3D convolution in NDHWC layout (cubic kernel).

    Returns ``(inputs, weight, output)``; output is (N, out_d, out_h, out_w, CO).
    """
    inputs = te.placeholder((N, D, H, W, CI), name="inputs", dtype=in_dtype)
    weight = te.placeholder(
        (kernel_size, kernel_size, kernel_size, CI // groups, CO), name="weight", dtype=in_dtype
    )
    batch_size, in_d, in_h, in_w, _ = inputs.shape
    k_d, k_h, k_w, channel_per_group, out_channel = weight.shape
    out_channel_per_group = out_channel // groups
    # Same dilated-conv output-size formula applied on each spatial axis.
    out_d = (in_d + 2 * padding - dilation * (k_d - 1) - 1) // stride + 1
    out_h = (in_h + 2 * padding - dilation * (k_h - 1) - 1) // stride + 1
    out_w = (in_w + 2 * padding - dilation * (k_w - 1) - 1) // stride + 1
    rd = te.reduce_axis((0, k_d), name="rd")
    rh = te.reduce_axis((0, k_h), name="rh")
    rw = te.reduce_axis((0, k_w), name="rw")
    rc = te.reduce_axis((0, channel_per_group), name="rc")
    padded = topi.nn.pad(inputs, [0, padding, padding, padding, 0])
    output = te.compute(
        (batch_size, out_d, out_h, out_w, out_channel),
        lambda n, d, h, w, co: te.sum(
            (
                padded[
                    n,
                    d * stride + rd * dilation,
                    h * stride + rh * dilation,
                    w * stride + rw * dilation,
                    # Select the input-channel slice belonging to co's group.
                    co // out_channel_per_group * channel_per_group + rc,
                ].astype(out_dtype)
                * weight[rd, rh, rw, rc, co].astype(out_dtype)
            ),
            axis=[rd, rh, rw, rc],
        ),
        name="conv3d_ndhwc",
    )
    return (inputs, weight, output)
def depthwise_conv2d_nhwc(  # pylint: disable=invalid-name
    N: int,
    H: int,
    W: int,
    C: int,
    kernel_size: int,
    stride: int = 1,
    padding: int = 0,
    dilation: int = 1,
    factor: int = 1,
    in_dtype: str = "float32",
    out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
    """Depthwise 2D convolution in NHWC layout with channel multiplier `factor`.

    Only ``factor == 1`` is supported (enforced below).
    """
    inputs = te.placeholder((N, H, W, C), dtype=in_dtype)
    weight = te.placeholder((factor, kernel_size, kernel_size, C), dtype=in_dtype)
    batch_size, in_h, in_w, in_channel = inputs.shape
    # NOTE: rebinds `factor` and `in_channel` to the symbolic placeholder shape.
    factor, k_h, k_w, in_channel = weight.shape
    out_channel = in_channel * factor
    assert int(factor) == 1, "Not optimized for factor != 1"
    out_h = (in_h + 2 * padding - dilation * (k_h - 1) - 1) // stride + 1
    out_w = (in_w + 2 * padding - dilation * (k_w - 1) - 1) // stride + 1
    rh = te.reduce_axis((0, k_h), name="rh")
    rw = te.reduce_axis((0, k_w), name="rw")
    padded = topi.nn.pad(inputs, [0, padding, padding, 0])
    output = te.compute(
        (batch_size, out_h, out_w, out_channel),
        lambda n, h, w, c: te.sum(
            (
                padded[
                    n,
                    h * stride + rh * dilation,
                    w * stride + rw * dilation,
                    # Each output channel reads exactly one input channel.
                    c // factor,
                ].astype(out_dtype)
                * weight[c % factor, rh, rw, c // factor].astype(out_dtype)
            ),
            axis=[rh, rw],
        ),
        name="depth_conv2d_nhwc",
    )
    return (inputs, weight, output)
def conv2d_transpose_nhwc(  # pylint: disable=invalid-name
    N: int,
    H: int,
    W: int,
    CI: int,
    CO: int,
    kernel_size: int,
    stride: int = 1,
    padding: int = 0,
    in_dtype: str = "float32",
    out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
    """Transposed 2D convolution in NHWC layout.

    Implemented as pad + (implicit) dilate + convolution with a flipped
    kernel; the dilation-by-stride is folded into the compute via `_dilate`
    so zero terms can later be eliminated by unrolling.
    """
    inputs = te.placeholder((N, H, W, CI), name="inputs", dtype=in_dtype)
    weight = te.placeholder((kernel_size, kernel_size, CI, CO), name="weight", dtype=in_dtype)
    batch, in_h, in_w, in_c = inputs.shape
    filter_h, filter_w, in_c, out_c = weight.shape
    stride_h, stride_w = (stride, stride)
    # compute padding: forward-conv padding, then the equivalent backward pad
    fpad_top, fpad_left, fpad_bottom, fpad_right = topi.nn.get_pad_tuple(
        padding, (filter_h, filter_w)
    )
    bpad_top = filter_h - 1 - fpad_top
    bpad_bottom = filter_h - 1 - fpad_bottom
    bpad_left = filter_w - 1 - fpad_left
    bpad_right = filter_w - 1 - fpad_right
    # padding stage (in units of the stride-dilated grid, hence the ceil-div)
    padded = topi.nn.pad(
        inputs,
        [
            0,
            (bpad_top + stride_h - 1) // stride_h,
            (bpad_left + stride_w - 1) // stride_w,
            0,
        ],
        [
            0,
            (bpad_bottom + stride_h - 1) // stride_h,
            (bpad_right + stride_w - 1) // stride_w,
            0,
        ],
    )
    # remove extra padding introduced by dilatation
    idx_div = te.indexdiv
    idx_mod = te.indexmod
    border_h = idx_mod(stride_h - idx_mod(bpad_top, stride_h), stride_h)
    border_w = idx_mod(stride_w - idx_mod(bpad_left, stride_w), stride_w)
    # dilation stage
    strides = [1, stride_h, stride_w, 1]
    n = len(padded.shape)
    # We should embed this dilation directly into te.compute rather than creating a new te.compute.
    # Only in this way can we use unroll to eliminate the multiplication of zeros.
    def _dilate(*indices):
        # Returns padded(...) at stride-divided indices, or 0 where the index
        # falls between original samples (i.e. on an inserted zero).
        not_zero = []
        index_tuple = []
        for i in range(n):
            if not strides[i] == 1:
                index_tuple.append(idx_div(indices[i], strides[i]))
                not_zero.append(idx_mod(indices[i], strides[i]).equal(0))
            else:
                index_tuple.append(indices[i])
        if not_zero:
            not_zero = te.all(*not_zero)
            return te.if_then_else(not_zero, padded(*index_tuple), tir.const(0.0, padded.dtype))
        return padded(*index_tuple)

    # convolution stage: note the spatially flipped kernel indexing below
    out_h = (in_h - 1) * stride_h - fpad_top - fpad_bottom + filter_h
    out_w = (in_w - 1) * stride_w - fpad_left - fpad_right + filter_w
    rc = te.reduce_axis((0, in_c), name="rc")
    rh = te.reduce_axis((0, filter_h), name="rh")
    rw = te.reduce_axis((0, filter_w), name="rw")
    output = te.compute(
        (batch, out_h, out_w, out_c),
        lambda n, h, w, co: te.sum(
            _dilate(n, h + rh + border_h, w + rw + border_w, rc).astype(out_dtype)
            * weight[filter_h - 1 - rh, filter_w - 1 - rw, rc, co].astype(out_dtype),
            axis=[rh, rw, rc],
        ),
        name="conv2d_transpose_nhwc",
    )
    return (inputs, weight, output)
def conv2d_capsule_nhwijc(  # pylint: disable=invalid-name
    N: int,
    H: int,
    W: int,
    CI: int,
    CO: int,
    kernel_size: int,
    stride: int = 1,
    padding: int = 0,
    capsule_size: int = 4,
    in_dtype: str = "float32",
    out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
    """Capsule 2D convolution in NHWIJC layout.

    Each spatial position carries a (capsule_size x capsule_size) matrix; the
    reduction contracts over the kernel window, the inner capsule dimension
    ``cap_k`` and the input channels.
    """
    inputs = te.placeholder(
        (N, H, W, capsule_size, capsule_size, CI), name="inputs", dtype=in_dtype
    )
    weight = te.placeholder(
        (kernel_size, kernel_size, capsule_size, capsule_size, CI, CO),
        name="weight",
        dtype=in_dtype,
    )
    batch_size, in_h, in_w, _, _, in_channel = inputs.shape
    k_h, k_w, _, _, _, out_channel = weight.shape
    out_h = (in_h + 2 * padding - kernel_size) // stride + 1
    out_w = (in_w + 2 * padding - kernel_size) // stride + 1
    rh = te.reduce_axis((0, k_h), name="rh")
    rw = te.reduce_axis((0, k_w), name="rw")
    cap_k = te.reduce_axis((0, capsule_size), name="cap_k")
    rc = te.reduce_axis((0, in_channel), name="rc")
    # Pad only the spatial axes; capsule and channel axes stay untouched.
    padded = topi.nn.pad(inputs, [0, padding, padding, 0, 0, 0])
    output = te.compute(
        (batch_size, out_h, out_w, capsule_size, capsule_size, out_channel),
        lambda n, h, w, cap_i, cap_j, co: te.sum(
            (
                padded[n, h * stride + rh, w * stride + rw, cap_i, cap_k, rc].astype(out_dtype)
                * weight[rh, rw, cap_k, cap_j, rc, co].astype(out_dtype)
            ),
            axis=[rh, rw, cap_k, rc],
        ),
        name="conv2d_capsule_nhwijc",
    )
    return (inputs, weight, output)
def norm_bmn(  # pylint: disable=invalid-name
    B: int,
    M: int,
    N: int,
) -> Tuple[te.Tensor, te.Tensor]:
    """Per-batch Frobenius norm: D[b] = sqrt(sum(A[b] * A[b]))."""
    mat = te.placeholder((B, M, N), name="A")
    row = te.reduce_axis((0, M), name="i")
    col = te.reduce_axis((0, N), name="j")
    # Sum of squares over each (M, N) slice.
    sq_sum = te.compute(
        (B,),
        lambda b: te.sum(mat[b][row][col] * mat[b][row][col], axis=[row, col]),
        name="C",
    )
    result = te.compute((B,), lambda b: te.sqrt(sq_sum[b]), name="D")
    return (mat, result)
def conv2d_nhwc_without_layout_rewrite(  # pylint: disable=invalid-name
    Input: te.Tensor,
    Filter: te.Tensor,
    stride: int,
    padding: int,
    dilation: int,
    out_dtype="float32",
):
    """A copy of `topi.nn.conv2d_nhwc` but without the 'layout_free` attribute.
    We use this in single op and subgraph evaluation
    because we don't want to introduce graph level optimization.

    Input is NHWC, Filter is HWIO; stride/dilation may be an int or a pair.
    """
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilation, int) or len(dilation) == 2

    # Accept either a scalar (same on both axes) or an (h, w) pair.
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride

    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation

    batch, in_height, in_width, in_channel = Input.shape  # type: ignore
    kernel_h, kernel_w, _channel, num_filter = Filter.shape  # type: ignore

    # compute the output shape from the dilated kernel extent
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = topi.nn.get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    out_channel = num_filter
    out_height = topi.utils.simplify(
        (in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1
    )
    out_width = topi.utils.simplify(
        (in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1
    )
    pad_before = [0, pad_top, pad_left, 0]
    pad_after = [0, pad_down, pad_right, 0]
    PaddedInput = topi.nn.pad(Input, pad_before, pad_after, name="PaddedInput")
    rc = te.reduce_axis((0, in_channel), name="rc")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    Output = te.compute(
        (batch, out_height, out_width, out_channel),
        lambda nn, yy, xx, ff: te.sum(
            PaddedInput[
                nn, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rc
            ].astype(out_dtype)
            * Filter[ry, rx, rc, ff].astype(out_dtype),  # type: ignore
            axis=[ry, rx, rc],
        ),
        name="Conv2dOutput",
        tag="conv2d_nhwc",
    )
    return Output
def conv2d_nhwc_bn_relu(  # pylint: disable=invalid-name
    N: int,
    H: int,
    W: int,
    CI: int,
    CO: int,
    kernel_size: int,
    strides: int,
    padding: int,
    dilation: int = 1,
    in_dtype: str = "float32",
    out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor, te.Tensor, te.Tensor, te.Tensor]:
    """Fused conv2d (NHWC) + bias + folded batch-norm (scale, offset) + ReLU.

    Returns ``(data, kernel, bias, bn_offset, bn_scale, out)``.
    """
    data = te.placeholder((N, H, W, CI), name="data", dtype=in_dtype)
    kernel = te.placeholder((kernel_size, kernel_size, CI, CO), name="kernel", dtype=in_dtype)
    bias = te.placeholder((CO,), name="bias")
    bn_scale = te.placeholder((CO,), name="bn_scale")
    bn_offset = te.placeholder((CO,), name="bn_offset")
    OH = (H + 2 * padding - (kernel_size - 1) * dilation - 1) // strides + 1
    OW = (W + 2 * padding - (kernel_size - 1) * dilation - 1) // strides + 1
    # Use the layout-rewrite-free conv so graph-level optimization stays out
    # of single-op / subgraph measurements.
    conv = conv2d_nhwc_without_layout_rewrite(data, kernel, strides, padding, dilation, out_dtype)
    # The elementwise epilogue: + bias, * bn_scale, + bn_offset, then ReLU.
    conv = te.compute(
        (N, OH, OW, CO), lambda i, j, k, l: conv[i, j, k, l] + bias[l], name="bias_add"
    )
    conv = te.compute(
        (N, OH, OW, CO), lambda i, j, k, l: conv[i, j, k, l] * bn_scale[l], name="bn_mul"
    )
    conv = te.compute(
        (N, OH, OW, CO), lambda i, j, k, l: conv[i, j, k, l] + bn_offset[l], name="bn_add"
    )
    out = topi.nn.relu(conv)
    return (data, kernel, bias, bn_offset, bn_scale, out)
def transpose_batch_matmul(  # pylint: disable=invalid-name
    batch: int,
    seq_len: int,
    n_head: int,
    n_dim: int,
    in_dtype: str = "float32",
    out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
    """Attention-style scores: transpose (B, L, H, D) operands to head-major
    layouts, then contract over the feature dimension to get (B, H, L, L)."""
    query = te.placeholder((batch, seq_len, n_head, n_dim), name="query", dtype=in_dtype)
    value = te.placeholder((batch, seq_len, n_head, n_dim), name="value", dtype=in_dtype)
    # Rearrange both operands so the batched matmul is over the last two axes.
    q_t = te.compute(
        (batch, n_head, seq_len, n_dim),
        lambda b, h, l, d: query[b, l, h, d],
        name="query_T",
    )
    v_t = te.compute(
        (batch, n_head, n_dim, seq_len),
        lambda b, h, d, l: value[b, l, h, d],
        name="value_T",
    )
    red = te.reduce_axis((0, n_dim), name="k")
    score = te.compute(
        (batch, n_head, seq_len, seq_len),
        lambda b, h, i, j: te.sum(
            q_t[b, h, i, red].astype(out_dtype) * v_t[b, h, red, j].astype(out_dtype), axis=[red]
        ),
        name="C",
    )
    return (query, value, score)
def conv2d_winograd_nhwc(  # pylint: disable=invalid-name
    N: int,
    H: int,
    W: int,
    CI: int,
    CO: int,
    kernel_size: int,
    stride: int = 1,
    padding: int = 0,
    dilation: int = 1,
    tile_size: int = 4,
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
    """Winograd conv2d in NHWC layout with a pre-transformed kernel.

    Note the weight placeholder is (k, k, CO, CI) — the layout expected by
    the ``pre_computed=True`` Winograd implementation.
    """
    from tvm.topi.nn.conv2d import (  # pylint: disable=import-outside-toplevel
        _conv2d_winograd_nhwc_impl,
    )

    # On CUDA an extra cache level is used for the output write-back.
    target = Target.current(allow_none=True)
    if target is not None and target.kind.name == "cuda":
        write_cache_level = 3
    else:
        write_cache_level = 2
    data = te.placeholder((N, H, W, CI), "float32", name="data")
    weight = te.placeholder((kernel_size, kernel_size, CO, CI), "float32", name="weight")
    out = _conv2d_winograd_nhwc_impl(
        data,
        weight,
        stride,
        padding,
        dilation,
        "float32",
        pre_computed=True,
        auto_scheduler_rewritten_layout="",
        meta_schedule_original_shape=None,
        tile_size=tile_size,
        write_cache_level=write_cache_level,
    )
    return (data, weight, out)
def conv2d_winograd_nchw(  # pylint: disable=invalid-name
    N: int,
    H: int,
    W: int,
    CI: int,
    CO: int,
    kernel_size: int,
    stride: int = 1,
    padding: int = 1,
    dilation: int = 1,
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
    """Winograd conv2d in NCHW layout with a pre-transformed kernel.

    The tile size is inferred from the data/weight shapes via the CUDA
    Winograd helper.
    """
    from tvm.topi.cuda.conv2d_winograd import (  # pylint: disable=import-outside-toplevel
        _infer_tile_size,
    )
    from tvm.topi.nn.conv2d import (  # pylint: disable=import-outside-toplevel
        _conv2d_winograd_nchw_impl,
    )

    data = te.placeholder((N, CI, H, W), "float32", name="data")
    weight = te.placeholder((kernel_size, kernel_size, CI, CO), "float32", name="weight")
    out = _conv2d_winograd_nchw_impl(
        data,
        weight,
        stride,
        padding,
        dilation,
        "float32",
        pre_computed=True,
        auto_scheduler_rewritten_layout="",
        meta_schedule_original_shape=None,
        tile_size=_infer_tile_size(data, weight),
    )
    return (data, weight, out)
def matmul(
    n: int, m: int, k: int, in_dtype: str = "float32", out_dtype: str = "float32"
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
    """Dense matmul C[n, m] = sum_k A[n, k] * B[k, m] expressed in TE."""
    lhs = te.placeholder((n, k), name="A", dtype=in_dtype)
    rhs = te.placeholder((k, m), name="B", dtype=in_dtype)
    red = te.reduce_axis((0, k), name="k")
    prod = te.compute(
        (n, m),
        lambda i, j: te.sum(
            lhs[i, red].astype(out_dtype) * rhs[red, j].astype(out_dtype), axis=[red]
        ),
        name="C",
    )
    return (lhs, rhs, prod)
def matmul_relu(
    n: int, m: int, k: int, in_dtype: str = "float32", out_dtype: str = "float32"
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
    """Dense matmul followed by a ReLU activation: relu(A @ B)."""
    lhs = te.placeholder((n, k), name="A", dtype=in_dtype)
    rhs = te.placeholder((k, m), name="B", dtype=in_dtype)
    red = te.reduce_axis((0, k), name="k")
    matmul_out = te.compute(
        (n, m),
        lambda i, j: te.sum(
            lhs[i, red].astype(out_dtype) * rhs[red, j].astype(out_dtype), axis=[red]
        ),
        name="C",
    )
    activated = topi.nn.relu(matmul_out)
    return (lhs, rhs, activated)
def conv2d_nchw(  # pylint: disable=invalid-name
    n: int,
    h: int,
    w: int,
    ci: int,
    co: int,
    kh: int,
    kw: int,
    stride: int,
    padding: int,
    dilation: int = 1,
    in_dtype: str = "float32",
    out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor]:
    """2D convolution in NCHW layout, delegated to topi."""
    data = te.placeholder((n, ci, h, w), name="X", dtype=in_dtype)
    kernel = te.placeholder((co, ci, kh, kw), name="W", dtype=in_dtype)
    conv = topi.nn.conv2d_nchw(
        Input=data,
        Filter=kernel,
        stride=stride,
        padding=padding,
        dilation=dilation,
        out_dtype=out_dtype,
    )
    return (data, kernel, conv)
def conv2d_nchw_bias_bn_relu(  # pylint: disable=invalid-name
    n: int,
    h: int,
    w: int,
    ci: int,
    co: int,
    kh: int,
    kw: int,
    stride: int,
    padding: int,
    dilation: int = 1,
    in_dtype: str = "float32",
    out_dtype: str = "float32",
) -> Tuple[te.Tensor, te.Tensor, te.Tensor, te.Tensor, te.Tensor, te.Tensor]:
    """Conv2d (NCHW) followed by bias-add, batch-norm scale/offset, and ReLU."""
    # Output spatial extents of the convolution.
    oh = (h + 2 * padding - (kh - 1) * dilation - 1) // stride + 1  # pylint: disable=invalid-name
    ow = (w + 2 * padding - (kw - 1) * dilation - 1) // stride + 1  # pylint: disable=invalid-name
    data = te.placeholder((n, ci, h, w), name="X", dtype=in_dtype)
    kernel = te.placeholder((co, ci, kh, kw), name="W", dtype=in_dtype)
    bias = te.placeholder((co, 1, 1), name="B", dtype=out_dtype)
    scale = te.placeholder((co, 1, 1), name="bn_scale", dtype=out_dtype)
    offset = te.placeholder((co, 1, 1), name="bn_offset", dtype=out_dtype)
    conv = topi.nn.conv2d_nchw(
        Input=data, Filter=kernel, stride=stride, padding=padding, dilation=dilation, out_dtype=out_dtype
    )
    biased = te.compute(
        (n, co, oh, ow), lambda i, j, k, l: conv[i, j, k, l] + bias[j, 0, 0], name="bias_add"
    )
    scaled = te.compute(
        (n, co, oh, ow), lambda i, j, k, l: biased[i, j, k, l] * scale[j, 0, 0], name="bn_mul"
    )
    shifted = te.compute(
        (n, co, oh, ow), lambda i, j, k, l: scaled[i, j, k, l] + offset[j, 0, 0], name="bn_add"
    )
    out = topi.nn.relu(shifted)
    return (data, kernel, bias, scale, offset, out)
def max_pool2d_nchw(  # pylint: disable=invalid-name
    n: int,
    h: int,
    w: int,
    ci: int,
    padding: int,
) -> Tuple[te.Tensor, te.Tensor]:  # pylint: disable=invalid-name
    """2x2 max pooling with stride 1 over an NCHW input."""
    data = te.placeholder((n, ci, h, w), name="X")
    pooled = topi.nn.pool2d(
        data, [2, 2], [1, 1], [1, 1], [padding, padding, padding, padding], "max"
    )
    return (data, pooled)
def softmax_mn(m, n) -> Tuple[te.Tensor, te.Tensor]:  # pylint: disable=invalid-name
    """Row-wise softmax over an (m, n) matrix."""
    inp = te.placeholder((m, n), name="A")
    return (inp, topi.nn.softmax(inp, axis=1))
def create_te_workload(name: str, idx: int) -> tir.PrimFunc:
    """Instantiate workload `name` with its `idx`-th parameter tuple as a PrimFunc."""
    builder, param_sets = CONFIGS[name]
    return te.create_prim_func(builder(*param_sets[idx]))  # type: ignore
# Workload registry: maps a short workload name to a pair of
# (TE workload builder function, list of positional-argument tuples).
# Entries are consumed by create_te_workload(name, idx).
CONFIGS = {
    "C1D": (
        conv1d_nlc,
        [
            # derived from conv2d_shapes
            (1, 256, 64, 128, 3, 2, 1),
            # (1, 256, 64, 128, 1, 2, 0),
            # (1, 256, 64, 64, 1, 1, 0),
            # (1, 128, 128, 256, 3, 2, 1),
            (1, 128, 128, 256, 1, 2, 0),
            # (1, 128, 128, 128, 3, 1, 1),
            # (1, 64, 256, 512, 3, 2, 1),
            # (1, 64, 256, 512, 1, 2, 0),
            (1, 64, 256, 256, 5, 1, 2),
            (1, 32, 512, 512, 3, 1, 1),
        ],
    ),
    "C2D": (
        conv2d_nhwc,
        [
            # all conv2d layers in resnet-18
            (1, 224, 224, 3, 64, 7, 2, 3),
            # (1, 56, 56, 64, 128, 3, 2, 1),
            # (1, 56, 56, 64, 128, 1, 2, 0),
            # (1, 56, 56, 64, 64, 3, 1, 1),
            (1, 56, 56, 64, 64, 1, 1, 0),
            # (1, 28, 28, 128, 256, 3, 2, 1),
            # (1, 28, 28, 128, 256, 1, 2, 0),
            # (1, 28, 28, 128, 128, 3, 1, 1),
            # (1, 14, 14, 256, 512, 3, 2, 1),
            # (1, 14, 14, 256, 512, 1, 2, 0),
            (1, 14, 14, 256, 256, 3, 1, 1),
            (1, 7, 7, 512, 512, 3, 1, 1),
        ],
    ),
    "C3D": (
        conv3d_ndhwc,
        [
            # Derived from conv2d_shapes. Use depth=16 for all configurations
            (1, 16, 224, 224, 3, 64, 7, 2, 3),
            # (1, 16, 56, 56, 64, 128, 3, 2, 1),
            # (1, 16, 56, 56, 64, 128, 1, 2, 0),
            # (1, 16, 56, 56, 64, 64, 3, 1, 1),
            (1, 16, 56, 56, 64, 64, 1, 1, 0),
            # (1, 16, 28, 28, 128, 256, 3, 2, 1),
            # (1, 16, 28, 28, 128, 256, 1, 2, 0),
            # (1, 16, 28, 28, 128, 128, 3, 1, 1),
            # (1, 16, 14, 14, 256, 512, 3, 2, 1),
            # (1, 16, 14, 14, 256, 512, 1, 2, 0),
            (1, 16, 14, 14, 256, 256, 3, 1, 1),
            (1, 16, 7, 7, 512, 512, 3, 1, 1),
        ],
    ),
    "GMM": (
        batch_matmul_nkkm,
        [
            (1, 128, 128, 128),
            (1, 512, 32, 512),
            (1, 512, 512, 512),
            (1, 1024, 1024, 1024),
        ],
    ),
    "GRP": (
        conv2d_nhwc,
        [
            # Derived from conv2d_shapes. Use group=4 for all configurations
            (1, 56, 56, 64, 128, 3, 2, 1, 1, 4),
            # (1, 56, 56, 64, 128, 1, 2, 0 , 1, 4),
            # (1, 56, 56, 64, 64, 3, 1, 1 , 1, 4),
            (1, 56, 56, 64, 64, 1, 1, 0, 1, 4),
            # (1, 28, 28, 128, 256, 3, 2, 1, 1, 4),
            # (1, 28, 28, 128, 256, 1, 2, 0, 1, 4),
            # (1, 28, 28, 128, 128, 3, 1, 1, 1, 4),
            # (1, 14, 14, 256, 512, 3, 2, 1, 1, 4),
            # (1, 14, 14, 256, 512, 1, 2, 0, 1, 4),
            (1, 14, 14, 256, 256, 3, 1, 1, 1, 4),
            (1, 7, 7, 512, 512, 3, 1, 1, 1, 4),
        ],
    ),
    "DIL": (
        conv2d_nhwc,
        [
            # Derived from conv2d_shapes. Use dilation=2 for all configurations
            (1, 224, 224, 3, 64, 7, 2, 3, 2),
            # (1, 56, 56, 64, 128, 3, 2, 1 , 2),
            # (1, 56, 56, 64, 128, 1, 2, 0 , 2),
            # (1, 56, 56, 64, 64, 3, 1, 1 , 2),
            (1, 56, 56, 64, 64, 1, 1, 0, 2),
            # (1, 28, 28, 128, 256, 3, 2, 1, 2),
            # (1, 28, 28, 128, 256, 1, 2, 0, 2),
            # (1, 28, 28, 128, 128, 3, 1, 1, 2),
            # (1, 14, 14, 256, 512, 3, 2, 1, 2),
            # (1, 14, 14, 256, 512, 1, 2, 0, 2),
            (1, 14, 14, 256, 256, 3, 1, 1, 2),
            (1, 7, 7, 512, 512, 3, 1, 1, 2),
        ],
    ),
    "DEP": (
        depthwise_conv2d_nhwc,
        [
            # all depthwise conv2d layers in mobilenet
            (1, 112, 112, 32, 3, 1, 1),
            (1, 112, 112, 64, 3, 2, 1),
            # (1, 56, 56, 128, 3, 1, 1),
            # (1, 56, 56, 128, 3, 2, 1),
            # (1, 28, 28, 256, 3, 1, 1),
            # (1, 28, 28, 256, 3, 2, 1),
            # (1, 14, 14, 512, 3, 1, 1),
            (1, 14, 14, 512, 3, 2, 1),
            (1, 7, 7, 1024, 3, 1, 1),
        ],
    ),
    "T2D": (
        conv2d_transpose_nhwc,
        [
            # all conv2d transpose layers in DCGAN
            (1, 4, 4, 512, 256, 4, 2, 1),
            (1, 8, 8, 256, 128, 4, 2, 1),
            (1, 16, 16, 128, 64, 4, 2, 1),
            (1, 32, 32, 64, 3, 4, 2, 1),
        ],
    ),
    "CAP": (
        conv2d_capsule_nhwijc,
        [
            # all conv2d capsule layers in matrix capsules with EM routing (ICLR 2018)
            (1, 16, 16, 32, 32, 3, 2, 1),
            (1, 8, 8, 32, 32, 3, 1, 1),
            (1, 16, 16, 8, 16, 3, 2, 1),
            (1, 8, 8, 16, 16, 3, 1, 1),
        ],
    ),
    "NRM": (
        norm_bmn,
        [
            (1, 256, 256),
            (1, 512, 512),
            (1, 1024, 1024),
            (1, 4096, 1024),
        ],
    ),
    "SFM": (
        softmax_mn,
        [
            (256, 256),
            (512, 512),
            (1024, 1024),
            (2048, 2048),
        ],
    ),
    "CBR": (
        conv2d_nhwc_bn_relu,
        [
            (1, 224, 224, 3, 64, 7, 2, 3),
            (1, 56, 56, 64, 128, 3, 2, 1),
            (1, 28, 28, 128, 256, 1, 2, 0),
            (1, 7, 7, 512, 512, 3, 1, 1),
        ],
    ),
    "TBG": (
        transpose_batch_matmul,
        [
            (1, 128, 12, 64),
            (1, 128, 16, 64),
            (1, 64, 12, 128),
            (1, 128, 12, 128),
        ],
    ),
    "C2D_WIN_NHWC": (
        conv2d_winograd_nhwc,
        [
            (1, 14, 14, 128, 128, 6),
        ],
    ),
    "C2D_WIN_NCHW": (
        conv2d_winograd_nchw,
        [
            (1, 56, 56, 64, 64, 6),
        ],
    ),
}
| 29,336 | 32.41344 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/tune_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing utility functions in meta schedule"""
from typing import Callable, Optional, Union, List, Dict
from statistics import median
import json
import warnings
import numpy as np # type: ignore
import tvm
from tvm.runtime import NDArray
def generate_input_data(
    input_shape: List[int],
    input_dtype: str,
    *,
    low: Optional[int] = None,
    high: Optional[int] = None,
) -> np.ndarray:
    """Generate random input data with the given shape and data type.

    Parameters
    ----------
    input_shape : List[int]
        The shape of the input data.
    input_dtype : str
        The data type of the input data, e.g. "float32" or "int64".
    low : Optional[int]
        Inclusive lower bound for integer dtypes; ignored for floats.
        Defaults to a dtype-specific range when omitted.
    high : Optional[int]
        Exclusive upper bound for integer dtypes; ignored for floats.
        Defaults to a dtype-specific range when omitted.

    Returns
    -------
    input_data : np.ndarray
        The generated input data with given shape and data type in numpy ndarray.

    Raises
    ------
    ValueError
        If `input_dtype` is neither a float dtype nor a supported integer dtype.
    """
    # Float inputs are drawn uniformly from [0, 1); low/high are not used.
    if input_dtype.startswith("float"):
        return np.random.uniform(size=input_shape).astype(input_dtype)
    if low is None or high is None:
        warnings.warn(
            f"Model input value range for shape {input_shape} of {input_dtype} is not set!"
        )
    # Fallback value ranges per integer dtype, used when bounds are not given.
    range_map = {
        "uint8": (0, 255),
        "int8": (-128, 127),
        "int32": (0, 10000),
        "int64": (0, 10000),
    }
    if input_dtype in range_map:
        _low, _high = range_map[input_dtype]
        return np.random.randint(
            low=_low if low is None else low,
            high=_high if high is None else high,
            size=input_shape,
            dtype=input_dtype,
        )
    # Name the offending dtype so callers can diagnose the failure.
    raise ValueError(f"Unsupported input datatype: {input_dtype}")
def create_timer(backend: str) -> Callable:
    """Create a function to run and benchmark the performance of whole given runtime module,
    or Executable in relay vm.
    Parameters
    ----------
    backend : str
        The backend to use, graph / vm.
    Returns
    -------
    func : Callable
        The function to benchmark the workload.
    """
    def f_timer(
        rt_mod: Union[tvm.runtime.Module, tvm.runtime.vm.Executable],
        dev: tvm.runtime.Device,
        input_data: Dict[str, NDArray],
    ) -> None:
        """Run and benchmark the given runtime module, print out the result.
        Parameters
        ----------
        rt_mod : Union[tvm.runtime.Module, tvm.runtime.vm.Executable]
            The runtime module or vm executable.
        dev : tvm.runtime.Device
            The device type to run workload.
        input_data : Dict[str, np.ndarray]
            The input data as a dictionary.
        """
        # Imported lazily so merely creating the timer does not require these modules.
        from tvm.contrib.graph_executor import GraphModule # pylint:disable=import-outside-toplevel
        from tvm.runtime.vm import VirtualMachine # pylint:disable=import-outside-toplevel
        try:
            if backend == "vm":
                vm = VirtualMachine(rt_mod, dev) # pylint: disable=invalid-name
                ftimer = vm.benchmark(
                    dev, min_repeat_ms=500, repeat=5, number=1, end_to_end=False, **input_data
                )
            elif backend == "graph":
                mod = GraphModule(rt_mod["default"](dev))
                for input_name, input_value in input_data.items():
                    mod.set_input(input_name, input_value)
                ftimer = mod.module.time_evaluator(
                    "run", dev, min_repeat_ms=500, repeat=5, number=1
                )()
            else:
                raise ValueError(f"Backend {backend} not supported in f_timer!")
            # ftimer.results is in seconds; convert to milliseconds for the report.
            results = list(np.array(ftimer.results) * 1000.0) # type: ignore
            print("Running time in time_evaluator: ", results)
            print("-------------------------------")
            print(f" Min (ms) : {min(results)}")
            print(f" Max (ms) : {max(results)}")
            print(f" Median (ms) : {median(results)}")
            print(f"Average (ms) : {sum(results) / len(results)}")
        except Exception as exc: # pylint: disable=broad-except
            # Best effort: report the failure instead of raising, since this is
            # typically invoked on a remote RPC worker.
            print(
                f"Run module f_timer via RPC failed, exception: {exc}",
            )
    return f_timer
def create_time_per_layer(graph: str) -> Callable:
    """Create a function to run and benchmark the per-layer performance of given runtime module,
    given the graph output of the module from graph compiler.
    Parameters
    ----------
    graph : str
        The json format graph output of the module from graph compiler.
    Returns
    -------
    func : Callable
        The function using the json format graph.
    """
    def f_time_per_layer(
        rt_mod: tvm.runtime.Module,
        dev: tvm.runtime.Device,
        input_data: Dict[str, NDArray],
    ) -> None:
        """Run and benchmark the per-layer performance of given runtime module,
        print out the result.
        Parameters
        ----------
        rt_mod : tvm.runtime.Module
            The runtime module.
        dev : tvm.runtime.Device
            The device type to run workload.
        input_data : Dict[str, np.ndarray]
            The input data as a dictionary.
        """
        # pylint:disable=import-outside-toplevel
        from tvm.contrib.debugger.debug_executor import create
        # pylint:enable=import-outside-toplevel
        try:
            # Wrap the module in the debug executor, which can time each node.
            mod = create(graph, rt_mod, dev)
            for input_name, input_value in input_data.items():
                mod.set_input(input_name, input_value)
            graph_nodes = [n["name"] for n in json.loads(graph)["nodes"]]
            graph_time = mod.run_individual(number=10, repeat=1, min_repeat_ms=5000)
            print("Running time of each layer:")
            print("---------------------------")
            # The two lists may differ in length; zip below stops at the shorter one.
            print("|graph_nodes| = ", len(graph_nodes))
            print("|graph_time| = ", len(graph_time))
            for k, v in zip(graph_nodes, graph_time):
                # Times reported in microseconds.
                print(k, float(v) * 1e6, "us")
        except Exception as exc: # pylint: disable=broad-except
            # Best effort: report the failure instead of raising (runs on RPC workers).
            print(
                f"Run module f_time_per_layer via RPC failed, exception: {exc}",
            )
    return f_time_per_layer
def create_calculator(backend: str) -> Callable:
    """Create a function to fetch the computing result of running the given runtime module.

    Parameters
    ----------
    backend : str
        The backend to use, only tir is supported for now.

    Returns
    -------
    func : Callable
        The function to fetch the computing result.
    """

    def f_calculator(
        rt_mod: tvm.runtime.Module,
        dev: tvm.runtime.Device,  # pylint: disable=unused-argument
        input_data: Dict[str, NDArray],
    ) -> List[NDArray]:
        """Run `rt_mod` on `input_data` in place and return the argument buffers."""
        try:
            if backend != "tir":
                raise ValueError(f"Backend {backend} not supported in f_calculator!")
            # TIR kernels mutate their argument buffers; pass them sorted by name
            # so the positional order is deterministic.
            buffers = [buf for _, buf in sorted(input_data.items(), key=lambda kv: kv[0])]
            rt_mod(*buffers)
            return buffers
        except Exception as exc:  # pylint: disable=broad-except
            print(
                f"Run module f_calculator via RPC failed, exception: {exc}",
            )
            return None

    return f_calculator
| 8,241 | 33.057851 | 100 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/dataset_collect_models.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import os
from typing import List, Tuple
from tqdm import tqdm # type: ignore
from tvm.meta_schedule.testing.relay_workload import get_network
# pylint: disable=too-many-branches
def _build_dataset() -> List[Tuple[str, List[int]]]:
network_keys = []
for name in [
"resnet_18",
"resnet_50",
"mobilenet_v2",
"mobilenet_v3",
"wide_resnet_50",
"resnext_50",
"densenet_121",
"vgg_16",
]:
for batch_size in [1, 4, 8]:
for image_size in [224, 240, 256]:
network_keys.append((name, [batch_size, 3, image_size, image_size]))
# inception-v3
for name in ["inception_v3"]:
for batch_size in [1, 2, 4]:
for image_size in [299]:
network_keys.append((name, [batch_size, 3, image_size, image_size]))
# resnet3d
for name in ["resnet3d_18"]:
for batch_size in [1, 2, 4]:
for image_size in [112, 128, 144]:
network_keys.append((name, [batch_size, 3, image_size, image_size, 16]))
# bert
for name in ["bert_tiny", "bert_base", "bert_medium", "bert_large"]:
for batch_size in [1, 2, 4]:
for seq_length in [64, 128, 256]:
network_keys.append((name, [batch_size, seq_length]))
# dcgan
for name in ["dcgan"]:
for batch_size in [1, 4, 8]:
for image_size in [64]:
network_keys.append((name, [batch_size, 3, image_size, image_size]))
return network_keys
def main():
    """Pre-populate the on-disk model cache with every network in the dataset.

    Relies on the module-level ``args`` namespace parsed in the ``__main__`` block.
    """
    model_cache_dir = args.model_cache_dir
    try:
        os.makedirs(model_cache_dir, exist_ok=True)
    except OSError:
        # Best effort: report the failure instead of raising.
        print(f"Directory {model_cache_dir} cannot be created successfully.")
    keys = _build_dataset()
    for name, input_shape in tqdm(keys):
        # get_network writes the built model into model_cache_dir via its cache_dir arg.
        get_network(name=name, input_shape=input_shape, cache_dir=model_cache_dir)
if __name__ == "__main__":
    # Parse the CLI arguments into the module-level `args` consumed by main().
    parser = argparse.ArgumentParser() # pylint: disable=invalid-name
    parser.add_argument(
        "--model_cache_dir",
        type=str,
        help="Please provide the full path to the model cache dir.",
    )
    args = parser.parse_args() # pylint: disable=invalid-name
    main()
| 3,050 | 34.476744 | 88 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing utilities in meta schedule"""
# NOTE: Do not import any module here by default
| 876 | 42.85 | 62 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/local_rpc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC tracker and server running locally"""
from tvm.rpc.tracker import Tracker
from tvm.rpc.server import Server
class LocalRPC:
    """A pair of RPC tracker/server running locally.

    Intended for use as a context manager: the tracker and server are started
    on construction and torn down on exit.

    Parameters
    ----------
    tracker_key : str
        The key used in the tracker to refer to a worker.
    silent : bool
        Whether to suppress tracker/server log output.
    no_fork : bool
        Forwarded to the RPC server; controls its process-forking behavior.

    Attributes
    ----------
    tracker_host : str
        The host URL of the tracker.
    tracker_port : int
        The port of the tracker.
    tracker_key : str
        The key used in the tracker to refer to a worker.
    """
    tracker_host: str
    tracker_port: int
    tracker_key: str
    def __init__(
        self,
        tracker_key: str = "key",
        silent: bool = False,
        no_fork: bool = False,
    ) -> None:
        # Tracker and server share the same candidate port range (9190-12345).
        self.tracker = Tracker(
            silent=silent,
            port=9190,
            port_end=12345,
        )
        self.server = Server(
            host="0.0.0.0",
            is_proxy=False,
            # Register the server with the local tracker started above.
            tracker_addr=(self.tracker.host, self.tracker.port),
            key=tracker_key,
            silent=silent,
            no_fork=no_fork,
            port=9190,
            port_end=12345,
        )
        self.tracker_host = self.tracker.host
        self.tracker_port = self.tracker.port
        self.tracker_key = tracker_key
    def __enter__(self):
        return self
    def __exit__(self, _type, _value, _traceback):
        # NOTE(review): dropping the references appears to rely on Server/Tracker
        # finalizers for shutdown -- confirm against tvm.rpc implementation.
        if hasattr(self, "server"):
            del self.server
        if hasattr(self, "tracker"):
            del self.tracker
| 2,186 | 29.375 | 64 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/space_generation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
from typing import List, Optional, Tuple, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm import meta_schedule as ms
from tvm.ir import IRModule, structural_equal
from tvm.target import Target
from tvm.tir import Schedule
from tvm.tir.schedule import Trace
from tvm.tir.schedule.testing import verify_trace_roundtrip
def get_rules(
    kind: Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"],
    types: Union[type, Tuple[type, ...]],
) -> List[ms.ScheduleRule]:
    """Get the default schedule rules of `kind`, keeping only instances of `types`."""
    return [rule for rule in ms.ScheduleRule.create(kind) if isinstance(rule, types)]
def generate_design_space(
    kind: Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"],
    mod: IRModule,
    target: Target,
    types: Union[type, Tuple[type, ...]],
    sch_rules: Optional[List[ms.ScheduleRule]] = None,
) -> List[Schedule]:
    """Generate design-space schedules for `mod` via PostOrderApply."""
    if sch_rules is not None:
        # Passing explicit rules and a type filter at the same time is not allowed.
        assert types is None
    else:
        sch_rules = get_rules(kind, types)
    context = ms.TuneContext(
        mod=mod,
        target=target,
        space_generator=ms.space_generator.PostOrderApply(
            sch_rules=sch_rules,
            postprocs=[],
            mutator_probs={},
        ),
        task_name="test",
    )
    return context.generate_design_space()
def _find_match_sketch_id(
    mod: IRModule,
    sketches: List[Schedule],
    expected_mod: IRModule,
    expected_decision: List[Tuple[str, List[int]]],
    *,
    debug_mask="all",
) -> Optional[int]:
    """Find the index of the sketch that matches `expected_mod` under `expected_decision`.

    For each sketch, the expected decisions are substituted into its sampling
    instructions (in order) and the resulting module is compared structurally
    against `expected_mod`. Returns the matching sketch's index, or None.
    """
    for sketch_id, sketch in enumerate(sketches):
        i = 0
        new_decisions = {}
        # Walk the trace, pairing each sampling instruction with the next
        # expected (instruction name, decision) entry in order.
        for inst in sketch.trace.insts:
            if not inst.kind.name.startswith("Sample"):
                continue
            assert i < len(expected_decision)
            if inst.kind.name == expected_decision[i][0]:
                new_decisions[inst] = expected_decision[i][1]
            i += 1
        # A name mismatch above leaves a gap; such a sketch cannot match.
        if len(new_decisions) != len(expected_decision):
            continue
        # Re-run the trace on a fresh schedule with the substituted decisions.
        sch = Schedule(mod, debug_mask=debug_mask)
        Trace(
            insts=sketch.trace.insts,
            decisions=new_decisions,
        ).apply_to_schedule(sch, remove_postproc=True)
        if structural_equal(sch.mod, expected_mod):
            # Also check the trace survives a serialization round-trip.
            verify_trace_roundtrip(sch=sch, mod=mod, debug_mask=debug_mask, text_format="json")
            return sketch_id
    return None
def check_sketches(
    mod: IRModule,
    sketches: List[Schedule],
    expected_mods: List[IRModule],
    expected_decisions: List[List[Tuple[str, List[int]]]],
    *,
    debug_mask="all",
):
    """Assert that `sketches` matches `expected_mods`/`expected_decisions` one-to-one.

    Raises AssertionError when any expected sketch has no match.
    """
    assert len(expected_mods) == len(expected_decisions)
    assert len(sketches) == len(expected_mods)
    # Allow bare PrimFuncs in `expected_mods` by wrapping them as a "main" module.
    expected_mods = [
        IRModule({"main": m}) if not isinstance(m, IRModule) else m for m in expected_mods
    ]
    # Copy so matched sketches can be removed without mutating the caller's list.
    sketches = list(sketches)
    for expected_id, (expected_mod, expected_decision) in enumerate(
        zip(expected_mods, expected_decisions)
    ):
        sketch_id = _find_match_sketch_id(
            mod,
            sketches,
            expected_mod,
            expected_decision,
            debug_mask=debug_mask,
        )
        if sketch_id is None:
            raise AssertionError(
                f"Expected sketch #{expected_id} doesn't exist in the generated sketches."
            )
        # Remove the match so each sketch can satisfy at most one expectation.
        sketches.pop(sketch_id)
def print_sketches(sketches: List[Schedule]):
    """Debug helper: print each sketch's module and its sampled decisions."""
    for index, sketch in enumerate(sketches):
        print(f"###### {index}")
        sketch.mod.show(black_format=False)
        for inst in sketch.trace.insts:
            if inst in sketch.trace.decisions:
                print(f'("{inst.kind.name}", {sketch.trace.decisions[inst]}),')
| 4,511 | 32.671642 | 95 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/relay_workload.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Workloads in Relay IR"""
# pylint: disable=import-outside-toplevel
import logging
import multiprocessing
import os
import pickle
from typing import Any, Dict, List, Optional, Tuple
import tvm
import tvm.relay.testing
from tvm import meta_schedule as ms
from tvm import relay
from tvm.ir import IRModule
from tvm.runtime import NDArray, load_param_dict, save_param_dict
from tvm.target import Target
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def _get_network(
    args: Tuple[str, List[int], Optional[str]]
) -> Tuple[IRModule, bytearray, Tuple[str, List[int], str]]:
    """Build a single network as Relay IR.

    Runs in a worker process (see get_network). Returns the Relay module, the
    parameters serialized with save_param_dict (picklable bytearray), and the
    (input name, input shape, input dtype) triple.
    """
    name: str
    input_shape: List[int]
    layout: Optional[str]
    name, input_shape, layout = args
    if layout == "None":
        layout = None
    mod: IRModule
    # --- Vision models: trace a torchvision model through the PyTorch frontend.
    if name in [
        "resnet_18",
        "resnet_50",
        "wide_resnet_50",
        "resnext_50",
        "mobilenet_v2",
        "mobilenet_v3",
        "inception_v3",
        "densenet_121",
        "resnet3d_18",
        "vgg_16",
    ]:
        import torch # type: ignore
        from torchvision import models # type: ignore
        assert layout is None or layout in ["NCHW", "NHWC"]
        params: Dict[str, Any] = {}
        # Map our dataset names to torchvision constructors.
        if name in ["resnet_18", "resnet_50"]:
            model = getattr(models, name.replace("_", ""))
        elif name == "wide_resnet_50":
            model = getattr(models, "wide_resnet50_2")
        elif name == "resnext_50":
            model = getattr(models, "resnext50_32x4d")
        elif name == "mobilenet_v2":
            model = getattr(models, name)
        elif name == "mobilenet_v3":
            model = getattr(models, name + "_large")
        elif name == "inception_v3":
            model = getattr(models, name)
            params["aux_logits"] = False
        elif name == "densenet_121":
            model = getattr(models, name.replace("_", ""))
        elif name == "resnet3d_18":
            model = models.video.r3d_18
        elif name == "vgg_16":
            model = getattr(models, name.replace("_", ""))
        # Newer torchvision takes `weights=`; fall back to `pretrained=` on
        # older versions that raise TypeError for the new keyword.
        try:
            model = model(**params, weights=None)
        except TypeError:
            model = model(**params, pretrained=False)
        dtype = "float32"
        input_data = torch.randn(input_shape).type( # pylint: disable=no-member
            {
                "float32": torch.float32, # pylint: disable=no-member
            }[dtype]
        )
        scripted_model = torch.jit.trace(model, input_data).eval() # type: ignore
        input_name = "input0"
        shape_list = [(input_name, input_shape)]
        mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
        passes = [relay.transform.RemoveUnusedFunctions()]
        if layout is None or layout == "NHWC":
            # PyTorch is imported as NCHW by default
            passes.append(
                relay.transform.ConvertLayout(
                    {
                        "nn.conv2d": ["NHWC", "default"],
                        "nn.conv3d": ["NDHWC", "default"],
                        "nn.max_pool2d": ["NHWC", "default"],
                        "nn.avg_pool2d": ["NHWC", "default"],
                    }
                )
            )
        with tvm.transform.PassContext(opt_level=3):
            mod = tvm.transform.Sequential(passes)(mod)
        inputs = (input_name, input_shape, dtype)
    # --- BERT variants: build from a HuggingFace config and trace with TorchScript.
    elif name in ["bert_tiny", "bert_base", "bert_medium", "bert_large"]:
        os.environ["TOKENIZERS_PARALLELISM"] = "false"
        # pip3 install transformers==3.5 torch==1.7
        import torch # type: ignore
        import transformers # type: ignore
        assert layout is None
        config_dict = {
            "bert_tiny": transformers.BertConfig(
                num_hidden_layers=6,
                hidden_size=512,
                intermediate_size=2048,
                num_attention_heads=8,
                return_dict=False,
            ),
            "bert_base": transformers.BertConfig(
                num_hidden_layers=12,
                hidden_size=768,
                intermediate_size=3072,
                num_attention_heads=12,
                return_dict=False,
            ),
            "bert_medium": transformers.BertConfig(
                num_hidden_layers=12,
                hidden_size=1024,
                intermediate_size=4096,
                num_attention_heads=16,
                return_dict=False,
            ),
            "bert_large": transformers.BertConfig(
                num_hidden_layers=24,
                hidden_size=1024,
                intermediate_size=4096,
                num_attention_heads=16,
                return_dict=False,
            ),
        }
        configuration = config_dict[name]
        model = transformers.BertModel(configuration)
        input_name = "input_ids"
        input_dtype = "int64"
        a = torch.randint(10000, input_shape) # pylint: disable=no-member
        model.eval()
        scripted_model = torch.jit.trace(model, [a], strict=False) # type: ignore
        input_name = "input_ids"
        shape_list = [(input_name, input_shape)]
        mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
        mod = relay.transform.FastMath()(mod)
        mod = relay.transform.CombineParallelBatchMatmul()(mod)
        inputs = (input_name, input_shape, input_dtype)
    # --- DCGAN: built from relay.testing; `input_shape` is the generator OUTPUT shape.
    elif name == "dcgan":
        assert layout is None
        output_shape = input_shape
        batch_size = output_shape[0]
        oshape = output_shape[1:]
        mod, params = relay.testing.dcgan.get_workload(
            batch_size=batch_size,
            oshape=oshape,
            layout="NHWC",
        )
        # The actual model input is a length-100 vector named "data".
        inputs = ("data", [100], "float32")
    else:
        raise ValueError("Invalid name: " + name)
    # Serialize params so the result can be pickled back to the parent process.
    params_bytearray: bytearray = save_param_dict(params)
    return mod, params_bytearray, inputs
def _load_cache(cache_dir: Optional[str], filename: str) -> Optional[List[Any]]:
if cache_dir is None:
return None
path = os.path.join(os.path.expanduser(cache_dir), filename)
if not os.path.exists(path):
return None
logger.info("Loaded from cached: %s", path)
with open(path, "rb") as i_f:
return pickle.load(i_f)
def _save_cache(cache_dir: Optional[str], filename: str, objects: List[Any]) -> None:
if cache_dir is None:
return
path = os.path.join(os.path.expanduser(cache_dir), filename)
with open(path, "wb") as o_f:
pickle.dump(objects, o_f)
def get_network(
    name: str,
    input_shape: List[int],
    *,
    layout: Optional[str] = None,
    cache_dir: Optional[str] = None,
) -> Tuple[IRModule, Dict[str, NDArray], Tuple[str, List[int], str]]:
    """Get the symbol definition and random weight of a network
    Parameters
    ----------
    name : str
        The name of the network.
    input_shape : List[int]
        The shape of the input tensor.
    layout : Optional[str]
        The layout of the input tensor. For vision models, the layout is by default NHWC.
    cache_dir : Optional[str], optional
        The directory to cache the generated network.
        If not specified, the cache will be disabled.
    Returns
    -------
    mod : IRModule
        The IRModule representing the network.
    params : Dict[str, NDArray]
        The parameters of the networks.
    inputs : Tuple[str, List[int], str]
        The name, shape and dtype of the input tensor.
    """
    mod: IRModule
    params: Dict[str, NDArray]
    inputs: Tuple[str, List[int], str]
    params_bytearray: bytearray
    # Cache key encodes the network name, layout, and input shape.
    filename = f'relay-{name}-{layout}-{",".join(str(i) for i in input_shape)}.json'
    cached = _load_cache(cache_dir, filename)
    if cached is None:
        # Build in a single-worker subprocess. NOTE(review): presumably this
        # isolates the heavyweight torch/transformers imports from the caller's
        # process -- confirm.
        with multiprocessing.Pool(processes=1) as pool:
            result = pool.map(_get_network, [(name, input_shape, layout)])
        ((mod, params_bytearray, inputs),) = result
        cached = [mod, params_bytearray, inputs]
        _save_cache(cache_dir, filename, cached)
    mod, params_bytearray, inputs = cached
    # Params are cached/transferred in serialized form; deserialize for the caller.
    params = load_param_dict(params_bytearray)
    return mod, params, inputs
def extract_from_relay(
    mod: IRModule,
    target: Target,
    params: Optional[Dict[str, NDArray]],
    name: str,
    input_shape: List[int],
    *,
    cache_dir: Optional[str] = None,
) -> List[ms.ExtractedTask]:
    """Extract the tuning tasks of a network, with optional on-disk caching.

    Parameters
    ----------
    mod : IRModule
        The IRModule representing the network.
    target : Target
        The target that the network will be deployed to.
    params : Optional[Dict[str, NDArray]]
        The parameters of the network.
    name : str
        The name of the network (used only to key the cache file).
    input_shape : List[int]
        The shape of the input tensor (used only to key the cache file).
    cache_dir : Optional[str]
        The directory used to cache the extracted tasks.
        If not specified, caching is disabled.

    Returns
    -------
    extracted_tasks : List[ExtractedTask]
        The extracted tasks.
    """
    filename = f'tasks-{target.kind.name}-{name}-{",".join(str(i) for i in input_shape)}.json'
    tasks = _load_cache(cache_dir, filename)
    if tasks is not None:
        return tasks
    tasks = list(
        ms.relay_integration.extract_tasks(
            mod=mod,
            target=target,
            params=params,
        )
    )
    _save_cache(cache_dir, filename, tasks)
    return tasks
# Workload names accepted by `get_network`, grouped by the upstream source
# that provides the model definition.
SUPPORTED = [
    # TorchVision
    "resnet_18",
    "resnet_50",
    "mobilenet_v2",
    "mobilenet_v3",
    "wide_resnet_50",
    "resnext_50",
    "resnet3d_18",
    "inception_v3",
    "densenet_121",
    "vgg_16",
    # Transformer
    "bert_tiny",
    "bert_base",
    "bert_medium",
    "bert_large",
    # Relay testing
    "dcgan",
]
| 10,611 | 32.371069 | 94 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/distributed_measure_candidates.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import glob
import os
from tqdm import tqdm # type: ignore
from tvm import meta_schedule as ms
from tvm.target import Target
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--candidate_cache_dir", type=str, help="Please provide the full path to the candidates."
)
parser.add_argument(
"--result_cache_dir", type=str, help="Please provide the full path to the result database."
)
parser.add_argument(
"--target",
type=str,
default="nvidia/nvidia-v100",
help="Please specify the target hardware for tuning context.",
)
parser.add_argument(
"--rpc_host", type=str, help="Please provide the private IPv4 address for the tracker."
)
parser.add_argument(
"--rpc_port", type=int, default=4445, help="Please provide the port for the tracker."
)
parser.add_argument(
"--rpc_key",
type=str,
default="p3.2xlarge",
help="Please provide the key for the rpc servers.",
)
parser.add_argument(
"--builder_timeout_sec",
type=int,
default=10,
help="The time for the builder session to time out.",
)
parser.add_argument(
"--min_repeat_ms", type=int, default=100, help="The time for preheating the gpu."
)
parser.add_argument(
"--runner_timeout_sec",
type=int,
default=100,
help="The time for the runner session to time out.",
)
parser.add_argument(
"--cpu_flush", type=bool, default=False, help="Whether to enable cpu cache flush or not."
)
parser.add_argument(
"--batch_size",
type=int,
default=128,
help="The batch size of candidates sent to builder and runner each time.",
)
return parser.parse_args()
# pylint: disable=too-many-locals
def measure_candidates(database, builder, runner):
    """Send the candidates to builder and runner for distributed measurement,
    and save the results in a new json database.

    NOTE: also reads the module-level ``args`` namespace (target string,
    batch size, result directory).

    Parameters
    ----------
    database : JSONDatabase
        The database for candidates to be measured.
    builder : Builder
        The builder for building the candidates.
    runner : Runner
        The runner for measuring the candidates.

    Returns
    -------
    None
    """
    candidates, runner_results, build_fail_indices, run_fail_indices = [], [], [], []
    context = ms.TuneContext(target=Target(args.target))
    tuning_records = database.get_all_tuning_records()
    for record in tuning_records:
        candidates.append(record.as_measure_candidate())
    with ms.Profiler() as profiler:
        # Build and run the candidates in batches of args.batch_size.
        for idx in range(0, len(candidates), args.batch_size):
            batch_candidates = candidates[idx : idx + args.batch_size]
            context._set_measure_candidates(batch_candidates)  # pylint: disable=protected-access
            with ms.Profiler.timeit("build"):
                context._send_to_builder(builder)  # pylint: disable=protected-access
            with ms.Profiler.timeit("run"):
                context._send_to_runner(runner)  # pylint: disable=protected-access
                batch_runner_results = context._join()  # pylint: disable=protected-access
            runner_results.extend(batch_runner_results)
            # Successful build artifacts are deleted right away to bound disk
            # usage; failed builds are recorded by their global candidate index.
            for i, result in enumerate(context.builder_results):
                if result.error_msg is None:
                    ms.utils.remove_build_dir(result.artifact_path)
                else:
                    build_fail_indices.append(i + idx)
            context._clear_measure_state()  # pylint: disable=protected-access
    # Mirror <model>/<workload> naming of the source database into the
    # result directory.
    model_name, workload_name = database.path_workload.split("/")[-2:]
    record_name = database.path_tuning_record.split("/")[-1]
    new_database = ms.database.JSONDatabase(
        path_workload=os.path.join(args.result_cache_dir, model_name, workload_name),
        path_tuning_record=os.path.join(args.result_cache_dir, model_name, record_name),
    )
    # NOTE(review): assumes the database is non-empty and every record shares
    # one workload — confirm against how the candidate databases are produced.
    workload = tuning_records[0].workload
    new_database.commit_workload(workload.mod)
    for i, (record, result) in enumerate(zip(tuning_records, runner_results)):
        if result.error_msg is None:
            new_database.commit_tuning_record(
                ms.database.TuningRecord(
                    trace=record.trace,
                    workload=workload,
                    run_secs=[v.value for v in result.run_secs],
                    target=Target(args.target),
                )
            )
        else:
            run_fail_indices.append(i)
    # Persist the indices of candidates that failed at run time next to the
    # result database.
    fail_indices_name = workload_name.replace("_workload.json", "_failed_indices.txt")
    with open(
        os.path.join(args.result_cache_dir, model_name, fail_indices_name), "w", encoding="utf8"
    ) as file:
        file.write(" ".join([str(n) for n in run_fail_indices]))
    print(
        f"Builder time: {profiler.get()['build']}, Runner time: {profiler.get()['run']}\n\
            Failed number of builds: {len(build_fail_indices)},\
            Failed number of runs: {len(run_fail_indices)}"
    )
# Parsed CLI options; read as a module-level global by `measure_candidates`
# and `main` below.
args = _parse_args()  # pylint: disable=invalid-name
def main():
    """Walk --candidate_cache_dir and measure every candidate database.

    Expects one sub-directory per model, each containing paired
    ``*_workload.json`` / ``*_candidates.json`` files; results are written
    under --result_cache_dir mirroring the same layout.
    """
    builder = ms.builder.LocalBuilder(timeout_sec=args.builder_timeout_sec)
    runner = ms.runner.RPCRunner(
        rpc_config=ms.runner.RPCConfig(
            tracker_host=args.rpc_host,
            tracker_port=args.rpc_port,
            tracker_key=args.rpc_key,
            session_timeout_sec=args.runner_timeout_sec,
        ),
        evaluator_config=ms.runner.EvaluatorConfig(
            number=3,
            repeat=1,
            min_repeat_ms=args.min_repeat_ms,
            enable_cpu_cache_flush=args.cpu_flush,
        ),
        max_workers=os.cpu_count(),
    )
    if not os.path.isdir(args.candidate_cache_dir):
        raise Exception("Please provide a correct candidate cache dir.")
    # NOTE(review): a failure here is only printed, not raised; later writes
    # into the directory would then fail.
    try:
        os.makedirs(args.result_cache_dir, exist_ok=True)
    except OSError:
        print(f"Directory {args.result_cache_dir} cannot be created successfully.")
    model_dirs = glob.glob(os.path.join(args.candidate_cache_dir, "*"))
    for model_dir in model_dirs:
        model_name = model_dir.split("/")[-1]
        os.makedirs(os.path.join(args.result_cache_dir, model_name), exist_ok=True)
        all_tasks = glob.glob(os.path.join(model_dir, "*.json"))
        # Keep only the workload files; each one pairs with a candidate file.
        workload_paths = []
        for path in all_tasks:
            if path.endswith("_workload.json"):
                workload_paths.append(path)
        for workload_path in tqdm(workload_paths):
            candidate_path = workload_path.replace("_workload.json", "_candidates.json")
            database = ms.database.JSONDatabase(
                path_workload=workload_path,
                path_tuning_record=candidate_path,
            )
            measure_candidates(database, builder, runner)
# Script entry point: measure all candidate databases found on disk.
if __name__ == "__main__":
    main()
| 7,702 | 37.708543 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/tune_relay.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import json
import logging
from distutils.util import strtobool
from typing import Dict
import numpy as np # type: ignore
import tvm
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
from tvm.meta_schedule.testing.relay_workload import get_network
from tvm.meta_schedule.testing.tune_utils import create_timer, generate_input_data
from tvm.support import describe
def _parse_args():
    """Parse command-line arguments for the Relay tuning script.

    Besides raw parsing, this converts --target into a tvm.target.Target,
    decodes --input-shape from JSON, and attaches a ready-to-use
    ms.runner.RPCConfig as ``rpc_config`` on the returned namespace.
    """
    args = argparse.ArgumentParser()
    args.add_argument(
        "--workload",
        type=str,
        required=True,
    )
    args.add_argument(
        "--input-shape",
        type=str,
        required=True,
    )
    args.add_argument(
        "--target",
        type=str,
        required=True,
    )
    args.add_argument(
        "--num-trials",
        type=int,
        required=True,
    )
    args.add_argument(
        "--rpc-host",
        type=str,
        required=True,
    )
    args.add_argument(
        "--rpc-port",
        type=int,
        required=True,
    )
    args.add_argument(
        "--rpc-key",
        type=str,
        required=True,
    )
    args.add_argument(
        "--work-dir",
        type=str,
        required=True,
    )
    args.add_argument(
        "--layout",
        type=str,
        default=None,
    )
    args.add_argument(
        "--cache-dir",
        type=str,
        default=None,
    )
    args.add_argument(
        "--number",
        type=int,
        default=3,
    )
    args.add_argument(
        "--repeat",
        type=int,
        default=1,
    )
    args.add_argument(
        "--min-repeat-ms",
        type=int,
        default=100,
    )
    args.add_argument(
        "--adaptive-training",
        type=lambda x: bool(strtobool(x)),
        help="example: True / False",
        default=True,
    )
    args.add_argument(
        "--cpu-flush",
        type=lambda x: bool(strtobool(x)),
        help="example: True / False",
        required=True,
    )
    args.add_argument(
        "--backend",
        type=str,
        choices=["graph", "vm"],
        help="example: graph / vm",
        required=True,
    )
    parsed = args.parse_args()
    # Normalize raw strings into the structured objects used by `main`.
    parsed.target = tvm.target.Target(parsed.target)
    parsed.input_shape = json.loads(parsed.input_shape)
    parsed.rpc_config = ms.runner.RPCConfig(
        tracker_host=parsed.rpc_host,
        tracker_port=parsed.rpc_port,
        tracker_key=parsed.rpc_key,
        session_timeout_sec=600,
    )
    return parsed
# Millisecond-resolution timestamps; MetaSchedule logs at DEBUG level so the
# tuning progress is visible.
logging.basicConfig(
    format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
# Parsed CLI options; module-level so `main` can read them directly.
ARGS = _parse_args()
def main():
    """Tune the selected workload with MetaSchedule, compile it with the
    tuned database, and benchmark the compiled module over RPC."""
    describe()
    print(f"Workload: {ARGS.workload}")
    mod, params, (input_name, input_shape, input_dtype) = get_network(
        ARGS.workload,
        ARGS.input_shape,
        layout=ARGS.layout,
        cache_dir=ARGS.cache_dir,
    )
    input_info = [
        {
            "name": input_name,
            "shape": input_shape,
            "dtype": input_dtype,
        },
    ]
    # Random input tensors, one per entry of input_info.
    input_data: Dict[str, np.ndarray] = {
        item["name"]: generate_input_data(  # type: ignore
            item["shape"],  # type: ignore
            item["dtype"],  # type: ignore
        )
        for item in input_info
    }
    for item in input_info:
        print(f"  input_name : {item['name']}")
        print(f"  input_shape: {item['shape']}")
        print(f"  input_dtype: {item['dtype']}")
    with ms.Profiler() as profiler:
        database = ms.relay_integration.tune_relay(
            mod=mod,
            target=ARGS.target,
            work_dir=ARGS.work_dir,
            max_trials_global=ARGS.num_trials,
            num_trials_per_iter=64,
            params=params,
            runner=ms.runner.RPCRunner(  # type: ignore
                rpc_config=ARGS.rpc_config,
                evaluator_config=ms.runner.EvaluatorConfig(
                    number=ARGS.number,
                    repeat=ARGS.repeat,
                    min_repeat_ms=ARGS.min_repeat_ms,
                    enable_cpu_cache_flush=ARGS.cpu_flush,
                ),
                alloc_repeat=1,
            ),
            cost_model=ms.cost_model.XGBModel(  # type: ignore
                extractor=ms.feature_extractor.PerStoreFeature(),
                adaptive_training=ARGS.adaptive_training,
            ),
            strategy=ms.search_strategy.EvolutionarySearch(),
        )
        # Compilation happens inside the profiler so its cost shows up in the
        # printed table alongside tuning.
        lib = ms.relay_integration.compile_relay(
            database=database,
            mod=mod,
            target=ARGS.target,
            params=params,
            backend=ARGS.backend,
        )
    print("Tuning Time:")
    print(profiler.table())
    # Benchmark the compiled module on the remote device via RPC.
    run_module_via_rpc(
        rpc_config=ARGS.rpc_config,
        lib=lib,
        dev_type=ARGS.target.kind.name,
        args=input_data,
        continuation=create_timer(ARGS.backend),
        backend=ARGS.backend,
    )
# Script entry point: tune the selected Relay workload and benchmark it.
if __name__ == "__main__":
    main()
| 5,867 | 26.420561 | 91 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/torchbench/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, pointless-exception-statement
"""
Helper functions for running TorchBench through the benchmark functions
from TorchDynamo.
"""
import functools
import os
import sys
from dataclasses import dataclass
from enum import Enum
from typing import Set
import torch # type: ignore
class DisallowedOperator(Enum):
    """
    The operators to disallow in the fx graph produced by TorchDynamo.
    This is to workaround the limitation in TVM's PyTorch frontend.
    - inplace_copy: aten::copy_ as inplace assign A[...] = ..., or method call A.copy_(...)
    - einsum: torch.functional.einsum
    - multihead_attention: torch.nn.MultiheadAttention
    - as_stride: Tensor.as_stride
    """

    # NOTE: the class docstring doubles as the --disallowed-op help text in
    # run.py (help=DisallowedOperator.__doc__), so keep it user-facing.
    # The enum values are the strings users pass on the command line.
    INPLACE_COPY = "inplace_copy"
    EINSUM = "einsum"
    MULTIHEAD_ATTENTION = "multihead_attention"
    AS_STRIDE = "as_stride"
def find_torchdynamo() -> str:
    """Locate a local clone of the TorchDynamo repository.

    TorchDynamo's benchmark runner is not importable as a Python package,
    so this searches the current directory and up to two levels above it
    for a checkout (identified by its `benchmarks` sub-directory) and
    returns the first match.
    """
    for candidate in ("torchdynamo", "../torchdynamo", "../../torchdynamo"):
        if os.path.exists(f"{candidate}/benchmarks"):
            return candidate
    raise RuntimeError(
        """
        Cannot find directory for torchdynamo.
        You need to clone https://github.com/pytorch/torchdynamo to the parent directory of cwd.
        """
    )
# Locate the TorchDynamo checkout once at import time and make both the repo
# root and its benchmark suite importable.
DYNAMO_DIR = find_torchdynamo()
sys.path.insert(
    0, DYNAMO_DIR
)  # opacus_cifar10 depends on opacus, which installs a package called 'benchmarks'
sys.path.append(f"{DYNAMO_DIR}/benchmarks")
# pylint: disable=wrong-import-position, unused-import
import torchdynamo  # type: ignore
from benchmarks.common import same, timed  # type: ignore
from torchbench import TorchBenchmarkRunner  # type: ignore
# pylint: enable=wrong-import-position, unused-import
def _disallow_operators(disallowed_ops: Set[DisallowedOperator]):
    """
    Disallow certain operators in the fx graph produced by TorchDynamo.
    There are two ways to disallow an operator in TorchDynamo:
    1. Use the disallow_in_graph API, which only applies to free function calls.
    2. Patch the TensorVariable class, which applies to method calls on torch.Tensor.
    """
    # Method names on torch.Tensor that should be rejected during tracing.
    disallowed_tensor_methods: Set[str] = set()
    if DisallowedOperator.INPLACE_COPY in disallowed_ops:
        torchdynamo.disallow_in_graph(torch.Tensor.copy_)
        # `A[...] = ...` goes through __setitem__, which also writes in place.
        disallowed_tensor_methods.update({"copy_", "__setitem__"})
    if DisallowedOperator.EINSUM in disallowed_ops:
        torchdynamo.disallow_in_graph(torch.functional.einsum)
    if DisallowedOperator.MULTIHEAD_ATTENTION in disallowed_ops:
        torchdynamo.disallow_in_graph(torch.nn.MultiheadAttention)
    if DisallowedOperator.AS_STRIDE in disallowed_ops:
        disallowed_tensor_methods.add("as_stride")
    # Monkey-patch TensorVariable.call_method so the listed tensor methods
    # raise torchdynamo.exc.Unsupported instead of being traced into the graph.
    tensor_variable_cls = torchdynamo.variables.tensor.TensorVariable
    old_call_method = tensor_variable_cls.call_method
    @functools.wraps(old_call_method)
    def call_method(self, translator, name, args, kwargs):
        if name in disallowed_tensor_methods:
            raise torchdynamo.exc.Unsupported(f"Tensor.{name} not supported by TVM.")
        return old_call_method(self, translator, name, args, kwargs)
    tensor_variable_cls.call_method = call_method
def load_torchdynamo_benchmark_runner(
    is_cuda: bool,
    cosine_similarity: bool = False,
    float32: bool = False,
    disallowed_operators: Set[DisallowedOperator] = None,
) -> TorchBenchmarkRunner:
    """
    Load the benchmark runner from TorchDynamo.

    Parameters
    ----------
    is_cuda : bool
        Whether the benchmark targets CUDA; if so, the benchmark's
        synchronize hook is pointed at torch.cuda.synchronize.
    cosine_similarity : bool
        Whether output checks use cosine similarity instead of a tolerance.
    float32 : bool
        Whether to cast model and inputs to float32.
    disallowed_operators : Set[DisallowedOperator], optional
        Operators to block from TorchDynamo-produced fx graphs.
        None/empty means no extra restrictions.

    Returns
    -------
    runner : TorchBenchmarkRunner
        The configured TorchBench runner (inference, eval mode).
    """
    @dataclass
    class RunnerArgs:
        """
        This class simulates the parsed args required by the benchmark code from TorchDynamo.
        """
        ci: bool = False  # Whether runs in CI mode. pylint: disable=invalid-name
        training: bool = False  # Whether it benchmarks training workload.
        use_eval_mode: bool = True  # Whether the model should be in eval mode.
        dynamic_shapes: bool = False  # Whether runs the model in dynamic shape mode.
        float16: bool = False  # Whether to cast model and inputs to float16
        float32: bool = False  # Whether to cast model and inputs to float32
        accuracy: bool = False  # Whether to perform an accuracy test
        performance: bool = True  # Whether to perform a performance test
        cosine: bool = False  # Whether to use cosine similarity to check if output is correct.
    args = RunnerArgs(cosine=cosine_similarity, float32=float32)
    runner = TorchBenchmarkRunner()
    runner.args = args
    runner.model_iter_fn = runner.forward_pass
    if disallowed_operators:
        _disallow_operators(disallowed_operators)
    if is_cuda:
        # pylint: disable=import-outside-toplevel
        import benchmarks.common  # type: ignore
        # pylint: enable=import-outside-toplevel
        benchmarks.common.synchronize = torch.cuda.synchronize
    return runner
| 5,837 | 33.75 | 96 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/torchbench/run.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable
"""
This script is for benchmarking TVM performance on models from TorchBench.
It uses the TorchDynamo as the frontend to ingest models into TVM, and it also
leverages the benchmark util from TorchDynamo.
TorchDynamo (https://github.com/pytorch/torchdynamo) and TorchBench
(https://github.com/pytorch/benchmark) need to be in the parent directory of TVM.
We need a local clone of these repos because torchbench and the benchmark runner
in TorchDynamo aren't designed to be used as Python packages.
To setup the environment, run the following commands in the parent directory of TVM and with
the appropriate Python environment:
```bash
# torchdynamo requires nightly pytorch. If it fails to find the specified version, try
# installing the latest nightly pytorch.
pip3 install --pre \
--extra-index-url https://download.pytorch.org/whl/nightly/cu116 \
torch==1.13.0.dev20220926 \
torchvision==0.14.0.dev20220926 \
torchtext==0.14.0.dev20220926
git clone https://github.com/pytorch/torchdynamo
pushd torchdynamo
git checkout c537639f9712621dc04ca09908796dbbe86c354b
pip install -e .
popd
sudo apt install git-lfs # git lfs is used for TorchBench
git clone https://github.com/pytorch/benchmark
pushd benchmark
python install.py --continue_on_fail # fambench_xlmr might fail to install
popd
```
To run a benchmark, the script can be run under 'tune' mode by
```bash
python python/tvm/meta_schedule/testing/torchbench/run.py \
--mode tune \
--model resnet50 \
--target "nvidia/geforce-rtx-3070" \
--work-dir /path/to/work/dir/ \
--num-trials 20000 \
--rpc-host <rpc tracker host for tuning> \
--rpc-port <rpc tracker port for tuning> \
--rpc-key <rpc key> \
```
All available target tags (like nvidia/geforce-rtx-3070) can be found at
https://github.com/apache/tvm/blob/main/src/target/tag.cc
Then the script can be run under 'eval' mode to actually benchmark the performance,
using the tuning database under the work directory. This can be executed on a different
machine than the one executes tuning (the database json files need to be inside
of the work directory).
```bash
python python/tvm/meta_schedule/testing/torchbench/run.py \
--mode eval \
--model resnet50 \
--target "nvidia/geforce-rtx-3070" \
--work-dir /path/to/work/dir/ \
--num-trials 0
```
Alternatively, both tuning and evaluation can be done in a single run on the same machine,
by
```bash
python python/tvm/meta_schedule/testing/torchbench/run.py \
--mode all \
--model resnet50 \
--target "llvm -num-cores 6" \
--work-dir /path/to/work/dir/ \
--num-trials 0
```
"""
# pylint: disable=logging-format-interpolation
import argparse
import contextlib
import logging
import os
import pickle
import sys
import warnings
from collections import defaultdict
from enum import Enum
from typing import Callable, Dict, List, Tuple
import numpy as np # type: ignore
import torch # type: ignore
from scipy.stats import ttest_ind # type: ignore
import tvm
import tvm.relay
from tvm import meta_schedule as ms
from tvm._ffi import get_global_func
from tvm.contrib.graph_executor import GraphModule
from tvm.meta_schedule.testing.torchbench.utils import (
DisallowedOperator,
load_torchdynamo_benchmark_runner,
same,
timed,
)
from tvm.runtime.vm import VirtualMachine
from tvm.support import describe
# Needs to be imported after the .utils is executed
import torchdynamo # type: ignore # isort: skip, pylint: disable=wrong-import-order
class RunMode(Enum):
    """
    The running mode of this script. Available values are:
    - extract: Only import the model and extract tuning tasks from it.
    - tune: Only tune the tasks and create the tuning database.
    - eval: Only benchmark model using pre-existing tuning database.
    - all: Run both tuning and benchmark
    """

    # NOTE: the class docstring above is also the --mode help text
    # (help=RunMode.__doc__); the values are what users type on the CLI.
    ALL = "all"
    EXTRACT = "extract"
    TUNE = "tune"
    EVAL = "eval"

    @property
    def should_extract(self):
        """Whether this mode performs task extraction."""
        return self is RunMode.ALL or self is RunMode.EXTRACT

    @property
    def should_tune(self):
        """Whether this mode performs tuning."""
        return self is RunMode.ALL or self is RunMode.TUNE

    @property
    def should_eval(self):
        """Whether this mode performs the actual benchmark."""
        return self is RunMode.ALL or self is RunMode.EVAL
class ResultComparisonMetric(Enum):
    """
    This changes how it compares the results with the expected value during
    accuracy check.
    - cosine: Use the cosine similarity. It should be greater than 0.99.
    - allclose-1e-4: Use the max elementwise absolute difference. It should be less than 1e-4.
    """

    # NOTE: the class docstring doubles as the --result-metric help text
    # (help=ResultComparisonMetric.__doc__), so keep it user-facing.
    COSINE = "cosine"
    ALLCLOSE = "allclose-1e-4"
def parse_args():
    """
    Parse arguments.

    Also post-processes --disallowed-op into a set of DisallowedOperator
    values and truncates sys.argv so that argument parsers run later (inside
    TorchBench models) do not see this script's options.
    """
    args = argparse.ArgumentParser()
    args.add_argument(
        "--mode",
        type=RunMode,
        default=RunMode.ALL,
        help=RunMode.__doc__,
    )
    args.add_argument(
        "--batch-size",
        type=int,
        default=None,
        help="The batch size of model input. Use TorchBench's default value if not specified.",
    )
    args.add_argument(
        "--result-metric",
        type=ResultComparisonMetric,
        default=ResultComparisonMetric.ALLCLOSE,
        help=ResultComparisonMetric.__doc__,
    )
    args.add_argument(
        "--benchmark-repeat",
        type=int,
        default=10,
        help="The number of times to repeat the benchmark measurement.",
    )
    args.add_argument(
        "--benchmark-warmup-rounds",
        type=int,
        default=5,
        help="The number of rounds to warmup before starting to measure the performance.",
    )
    args.add_argument(
        "--disallowed-op",
        type=str,
        default="all",
        help=DisallowedOperator.__doc__,
    )
    # Model selection
    args.add_argument(
        "--model",
        type=str,
        required=True,
        help="""
        The name of model to run. It should a directory name under
        https://github.com/pytorch/benchmark/tree/main/torchbenchmark/models.
        """,
    )
    args.add_argument(
        "--float32",
        action="store_true",
        help="""
        Cast model and inputs to fp32
        """,
    )
    # Tuning-related config
    args.add_argument(
        "--target",
        type=tvm.target.Target,
        required=True,
        help="The target to tune and run benchmark for.",
    )
    args.add_argument(
        "--work-dir",
        type=str,
        required=True,
        help="""
        The working directory to save intermediate results and store databases for compilation.
        """,
    )
    args.add_argument(
        "--strategy",
        type=str,
        default="evolutionary",
        help="The search strategy used by MetaSchdule.",
    )
    args.add_argument(
        "--num-trials",
        type=int,
        required=True,
        help="The max number of trials to run MetaSchedule.",
    )
    args.add_argument(
        "--max-trials-per-task",
        type=int,
        default=None,
        help="""
        The max number of trials to run per task extracted in MetaSchedule.
        By default it's the same as --num-trials.
        """,
    )
    args.add_argument(
        "--backend",
        type=str,
        choices=["graph", "vm"],
        default="graph",
        help="The backend to use for relay compilation(graph / vm).",
    )
    # TODO(@yelite): Add a layout arg to transform the network after
    # ingesting into Relay and before feeding into MetaSchedule.
    # Evaluator-related config
    args.add_argument(
        "--number",
        type=int,
        default=3,
        help="The number of times to run the model for taking average in a single measurement.",
    )
    args.add_argument(
        "--repeat",
        type=int,
        default=1,
        help="The number of times to repeat the measurement.",
    )
    args.add_argument(
        "--min-repeat-ms",
        type=int,
        default=100,
        help="""
        Minimum repeat time in ms. The number of runs will be increased if the actual
        repeat time is lowered than this.
        """,
    )
    args.add_argument(
        "--adaptive-training",
        action="store_true",
        help="Whether to use adaptive training for cost model.",
    )
    args.add_argument(
        "--cpu-flush",
        action="store_true",
        help="Whether to perform CPU cache flush.",
    )
    # RPC-related args
    args.add_argument(
        "--rpc-host",
        type=str,
        help="Host of the RPC Tracker for tuning. Use LocalRunner if not provided",
    )
    args.add_argument(
        "--rpc-port",
        type=int,
        help="Port of the RPC Tracker for tuning",
    )
    args.add_argument(
        "--rpc-key",
        type=str,
        help="Key of the RPC Tracker for tuning",
    )
    parsed = args.parse_args()
    # "all" expands to every known disallowed operator; otherwise the value
    # is a comma-separated list of DisallowedOperator values.
    if parsed.disallowed_op == "all":
        disallowed_op = set(DisallowedOperator)
    else:
        disallowed_op = {DisallowedOperator(v) for v in parsed.disallowed_op.split(",")}
    parsed.disallowed_op = disallowed_op
    # Trim all args, otherwise it confuses the arg parser of timm_efficientdet
    sys.argv = sys.argv[:1]
    return parsed
# Millisecond-resolution timestamps; MetaSchedule logs at DEBUG level.
logging.basicConfig(
    format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
# Parsed CLI options; read as module-level configuration by the helpers below.
ARGS = parse_args()
IS_CUDA = ARGS.target.kind.name == "cuda"
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
logger.setLevel(logging.INFO)
# TorchDynamo benchmark runner, configured for the requested accuracy metric
# and dtype; constructing it also patches TorchDynamo to reject the operators
# listed in --disallowed-op.
runner = load_torchdynamo_benchmark_runner(  # pylint: disable=invalid-name
    IS_CUDA,
    cosine_similarity=ARGS.result_metric == ResultComparisonMetric.COSINE,
    float32=ARGS.float32,
    disallowed_operators=ARGS.disallowed_op,
)
def get_meta_schedule_runner() -> ms.runner.PyRunner:
    """
    Build the runner used by MetaSchedule.

    Returns an RPCRunner when --rpc-host is given; otherwise falls back to a
    LocalRunner (with a warning).
    """
    if ARGS.rpc_host is None:
        warnings.warn("Falling back to MetaSchedule LocalRunner because --rpc-host isn't provided.")
        return ms.runner.LocalRunner()
    assert ARGS.rpc_port is not None, "Missing rpc_port"
    assert ARGS.rpc_key is not None, "Missing rpc_key"
    return ms.runner.RPCRunner(
        rpc_config=ms.runner.RPCConfig(
            tracker_host=ARGS.rpc_host,
            tracker_port=ARGS.rpc_port,
            tracker_key=ARGS.rpc_key,
            session_timeout_sec=600,
        ),
        evaluator_config=ms.runner.EvaluatorConfig(
            number=ARGS.number,
            repeat=ARGS.repeat,
            min_repeat_ms=ARGS.min_repeat_ms,
            enable_cpu_cache_flush=ARGS.cpu_flush,
        ),
        alloc_repeat=1,
    )
def get_graph_executor_forward(
    graph_executor_factory: tvm.runtime.Module, device: tvm.runtime.Device
) -> Callable:
    """
    Get the forward function for graph executor, in order to integrate with TorchDynamo.

    If the C++ PyTorch integration (built with 'USE_PT_TVMDSOOP') is present,
    the factory is wrapped in the custom torch operator; otherwise a
    pure-Python forward that moves tensors through DLPack is returned.
    """
    # It has to lazily import this package, loading the C++ PyTorch integration
    # after the transformers package is imported when loading model. Otherwise
    # there will be segfault caused by the protobuf library.
    import tvm.contrib.torch  # pylint: disable=import-outside-toplevel, unused-import, redefined-outer-name
    save_runtime_mod = get_global_func("tvmtorch.save_runtime_mod", allow_missing=True)
    if save_runtime_mod is None:
        warnings.warn(
            "C++ PyTorch TVM integration is missing. Fallback to Python forward function."
            "Build TVM with 'USE_PT_TVMDSOOP' to enable the C++ custom operator"
        )
        mod = GraphModule(graph_executor_factory["default"](device))
        def forward(*args):
            if IS_CUDA:
                torch.cuda.synchronize()
            # detach().contiguous() before DLPack conversion — presumably to
            # satisfy DLPack layout requirements; confirm before changing.
            args = tuple(arg.detach().contiguous() for arg in args)
            for idx, arg in enumerate(args, 0):
                mod.set_input(
                    f"inp_{idx}",
                    tvm.nd.from_dlpack(arg),
                )
            mod.run()
            device.sync()
            result = [torch.from_dlpack(mod.get_output(i)) for i in range(mod.get_num_outputs())]
            return result
        return forward
    else:
        # C++ path: hand the factory to the custom torch operator wrapper.
        save_runtime_mod(graph_executor_factory.module)
        module = torch.classes.tvm_torch.GraphExecutorFactoryWrapper()
        def forward(*args):  # type: ignore # isort: skip, pylint: disable=function-redefined
            return module.forward(args)
        return forward
def get_vm_forward(virtual_machine: VirtualMachine, device: tvm.runtime.Device) -> Callable:
    """
    Build a forward function backed by the Relay VM, for TorchDynamo
    integration. Tensors cross the Torch/TVM boundary via DLPack.
    """
    def forward(*args):
        if IS_CUDA:
            torch.cuda.synchronize()
        vm_inputs = tuple(tvm.nd.from_dlpack(t.detach().contiguous()) for t in args)
        outputs = virtual_machine.invoke("main", *vm_inputs)
        device.sync()
        # A single-tensor result comes back as a bare NDArray; normalize it.
        if isinstance(outputs, tvm.nd.NDArray):
            outputs = [outputs]
        return [torch.from_dlpack(out) for out in outputs]
    return forward
def should_skip_subgraph(graph_module: torch.fx.GraphModule) -> bool:
    """
    Decide whether optimization of this fx subgraph should be skipped.

    A subgraph is skipped when it has no placeholder (input) nodes and every
    output node returns the empty tuple — i.e. the graph is empty or exists
    only for side-effecting calls, so there is nothing for TVM to compile.
    """
    nodes = list(graph_module.graph.nodes)
    has_inputs = any(node.op == "placeholder" for node in nodes)
    outputs_empty = all(node.args == ((),) for node in nodes if node.op == "output")
    return (not has_inputs) and outputs_empty
def create_tvm_task_collection_backend() -> Tuple[Callable, List[ms.ExtractedTask]]:
    """
    This torchdynamo backend only collects the extracted tasks from MetaSchedule.
    It doesn't tune the model.

    Returns
    -------
    backend : Callable
        A TorchDynamo backend that records tuning tasks for each subgraph and
        then returns the subgraph's original eager `forward`.
    collected_tasks : List[ms.ExtractedTask]
        The list that accumulates de-duplicated tasks across all subgraphs.
    """
    subgraph_idx = 0
    subgraphs_dir = os.path.join(ARGS.work_dir, "subgraphs")
    os.makedirs(subgraphs_dir, exist_ok=True)
    collected_tasks = []
    # Index tasks by structural hash so structurally-equal duplicates are
    # merged (their weights accumulated) instead of being collected twice.
    task_index: Dict[int, List[ms.ExtractedTask]] = defaultdict(list)
    def collect_task(task):
        task_hash = tvm.ir.structural_hash(task.dispatched[0])
        for duplicate_task in task_index[task_hash]:
            if tvm.ir.structural_equal(duplicate_task.dispatched[0], task.dispatched[0]):
                duplicate_task.weight += task.weight
                return
        task_index[task_hash].append(task)
        collected_tasks.append(task)
    def backend(graph_module, example_inputs):
        nonlocal subgraph_idx
        # Persist each subgraph and its example inputs for offline inspection.
        torch.save(graph_module, os.path.join(subgraphs_dir, f"graph_module_{subgraph_idx}"))
        torch.save(example_inputs, os.path.join(subgraphs_dir, f"example_inputs_{subgraph_idx}"))
        if should_skip_subgraph(graph_module):
            return graph_module.forward
        # fx graph -> TorchScript -> Relay, then MetaSchedule task extraction.
        jit_mod = torch.jit.trace(graph_module, example_inputs)
        shape_list = [(f"inp_{idx}", i.shape) for idx, i in enumerate(example_inputs)]
        ir_mod, params = tvm.relay.frontend.from_pytorch(jit_mod, shape_list)
        extracted_tasks = ms.relay_integration.extract_tasks(
            mod=ir_mod,
            target=ARGS.target,
            params=params,
        )
        old_tasks_count = len(collected_tasks)
        for task in extracted_tasks:
            collect_task(task)
        logger.info(
            "Extracted %d tasks from graph %d, with %d new tasks",
            len(extracted_tasks),
            subgraph_idx,
            len(collected_tasks) - old_tasks_count,
        )
        subgraph_idx += 1
        return graph_module.forward
    return backend, collected_tasks
def create_tvm_compilation_backend(database: ms.database.Database) -> Callable:
    """
    This torchdynamo backend compiles the model using history best record from the
    MetaSchedule database.

    Parameters
    ----------
    database : ms.database.Database
        The tuning database queried for the best schedules.

    Returns
    -------
    backend : Callable
        The torchdynamo backend to pass to `torchdynamo.optimize`.
    """

    def backend(graph_module, example_inputs):
        if should_skip_subgraph(graph_module):
            return graph_module.forward
        # Convert the FX graph to Relay via TorchScript tracing.
        jit_mod = torch.jit.trace(graph_module, example_inputs)
        shape_list = [(f"inp_{idx}", i.shape) for idx, i in enumerate(example_inputs)]
        ir_mod, params = tvm.relay.frontend.from_pytorch(jit_mod, shape_list)
        lib = ms.relay_integration.compile_relay(
            database=database,
            mod=ir_mod,
            target=ARGS.target,
            params=params,
            backend=ARGS.backend,
        )
        device = tvm.cuda(0) if IS_CUDA else tvm.cpu(0)
        # Wrap the compiled artifact in a callable with the same calling
        # convention as graph_module.forward.
        if ARGS.backend == "graph":
            return get_graph_executor_forward(lib, device)
        elif ARGS.backend == "vm":
            vm = VirtualMachine(lib, device) # pylint: disable=invalid-name
            return get_vm_forward(vm, device)
        else:
            raise RuntimeError(f"Unknown backend {ARGS.backend}")

    return backend
def format_time(seconds: float) -> str:
    """
    Format elapsed time based on its value.

    Durations of at least one second are rendered in seconds, shorter ones
    in milliseconds, both with three significant digits.

    Parameters
    ----------
    seconds : float
        The elapsed time in seconds.

    Returns
    -------
    formatted : str
        Human-readable duration, e.g. "1.23s" or "456ms".
    """
    # Use >= 1 so exactly one second prints as "1s" instead of "1e+03ms"
    # (the previous strict comparison mis-rendered the boundary value).
    if seconds >= 1:
        return f"{seconds:.3g}s"
    else:
        return f"{seconds * 1000:.3g}ms"
def is_output_correct(output: torch.Tensor, expected: torch.Tensor) -> bool:
    """
    Check whether the optimized output matches the eager-mode reference,
    using the comparison metric selected by ARGS.result_metric.
    """
    comparison_metric = ARGS.result_metric
    # Map each supported metric to the keyword arguments passed to `same`.
    metric_to_kwargs = {
        ResultComparisonMetric.COSINE: {"cosine_similarity": True},
        ResultComparisonMetric.ALLCLOSE: {"tol": 1e-4},
    }
    if comparison_metric not in metric_to_kwargs:
        raise RuntimeError(f"Unknown comparison metric {comparison_metric}")
    return same(expected, output, **metric_to_kwargs[comparison_metric])
def inspect_output_error(output, expected):
    """
    Inspect the error between the actual output and the expected output,
    logging histograms of the absolute and relative errors.

    Parameters
    ----------
    output
        The actual output produced by the optimized model.
    expected
        The reference output produced by eager-mode PyTorch.
    """
    if not isinstance(output, torch.Tensor):
        logger.info(
            f"Unsupported type for error inspection: {type(output).__name__}."
            f"Please manually check output.pt"
        )
        return
    # Compare in float32 on CPU so device/precision differences do not
    # affect the error computation.
    output = output.cpu().float()
    expected = expected.cpu().float()
    abs_error = (output - expected).abs()
    # NOTE(review): dividing by `expected` means zeros in the reference
    # produce inf/nan relative errors, which land in the outermost bins.
    rel_error = (abs_error / expected).abs()

    def format_error_table(error, bins) -> str:
        # Bucket the errors into the given bin edges and render one
        # "< upper_bound <TAB> count" row per bucket.
        bin_tensor = torch.as_tensor([float(b) for b in bins], dtype=error.dtype)
        error_hist = torch.histogram(error, bin_tensor).hist.int()
        return "\n".join(f"< {b}\t{e}" for e, b in zip(error_hist, bins[1:]))

    # Bin edges are kept as strings so the log prints them verbatim.
    abs_error_bins = [
        "-1e10",
        "0",
        "1e-8",
        "1e-6",
        "1e-5",
        "1e-4",
        "1e-3",
        "1e-2",
        "1e-1",
        "1",
        "1e10",
    ]
    rel_error_bins = [
        "-1e10",
        "0",
        "1e-4",
        "1e-3",
        "1e-2",
        "1e-1",
        "1",
        "1e1",
        "1e2",
        "1e3",
        "1e100",
    ]
    # Positions with relative error above 1 are the most suspicious;
    # report the worst absolute error among them.
    large_rel_error_idx = rel_error > 1
    abs_error_with_large_rel_error = abs_error[large_rel_error_idx]
    logger.error(f"Expected (PyTorch eager): {expected}")
    logger.error(f"Actual (Optimized): {output}")
    logger.error(f"Absolute Error\n{format_error_table(abs_error, abs_error_bins)}")
    logger.error(f"Relative Error\n{format_error_table(rel_error, rel_error_bins)}")
    logger.error(
        f"Max absolute error for position with large relative error (> 1):"
        f"{abs_error_with_large_rel_error.max()}"
    )
def performance_experiment(
    model_iter_fn: Callable,
    model: torch.nn.Module,
    example_inputs: Tuple[torch.Tensor],
) -> str:
    """
    Performs the actual benchmarking
    Simplified from https://github.com/pytorch/torchdynamo/blob/c537639f9712621dc04ca09908796dbbe86c354b/benchmarks/common.py#L494 pylint: disable=line-too-long

    Parameters
    ----------
    model_iter_fn : Callable
        Function that runs one iteration of the model on the given inputs.
    model : torch.nn.Module
        The model under test.
    example_inputs : Tuple[torch.Tensor]
        Inputs passed to each iteration.
    """
    # Column 0 holds eager timings, column 1 holds optimized timings.
    timings = np.zeros((ARGS.benchmark_repeat, 2), np.float64)
    if IS_CUDA:
        torch.cuda.empty_cache()
    is_correct = True
    # torchdynamo.run reuses the compilation produced earlier without retracing.
    frozen_model_iter_fn = torchdynamo.run(model_iter_fn)
    for _ in range(ARGS.benchmark_warmup_rounds):
        frozen_model_iter_fn(model, example_inputs)
        model_iter_fn(model, example_inputs)
    for rep in range(ARGS.benchmark_repeat):
        # interleave the runs to handle frequency scaling and load changes
        timings[rep, 0], expected_output = timed(
            model, model_iter_fn, example_inputs, return_result=True
        )
        timings[rep, 1], actual_output = timed(
            model, frozen_model_iter_fn, example_inputs, return_result=True
        )
        is_correct = is_correct and is_output_correct(expected_output, actual_output)
    # Compare medians; the t-test p-value indicates whether the timing
    # difference is statistically significant.
    pvalue = ttest_ind(timings[:, 0], timings[:, 1]).pvalue
    median = np.median(timings, axis=0)
    speedup = median[0] / median[1]
    logger.info(
        f"eager:{format_time(median[0])} "
        f"optimized:{format_time(median[1])} "
        f"speedup:{speedup:.3f}x p:{pvalue:.3f}"
    )
    # Persist both outputs so mismatches can be inspected offline.
    torch.save(actual_output, os.path.join(ARGS.work_dir, "output.pt"))
    torch.save(expected_output, os.path.join(ARGS.work_dir, "expected.pt"))
    if not is_correct:
        logger.error("Result is incorrect.")
        inspect_output_error(actual_output, expected_output)
    return ""
def get_torch_device_type(target: tvm.target.Target) -> str:
    """Map a TVM target kind ("llvm"/"cuda") to the torch device string."""
    kind_to_device = {
        "llvm": "cpu",
        "cuda": "cuda",
    }
    device = kind_to_device.get(target.kind.name)
    if device is None:
        raise RuntimeError(f"Unsupported target {target}")
    return device
def main():
    """
    Entry point of the benchmark

    Orchestrates the three phases controlled by ARGS.mode: extracting
    tuning tasks from the model, tuning them with MetaSchedule, and
    evaluating the compiled model against eager PyTorch.
    """
    describe()
    meta_schedule_work_dir = os.path.join(ARGS.work_dir, "meta_schedule")
    os.makedirs(meta_schedule_work_dir, exist_ok=True)
    database = ms.database.JSONDatabase(work_dir=meta_schedule_work_dir)
    # Eval-only runs require an existing database of tuning records.
    if not ARGS.mode.should_tune:
        if len(database) == 0:
            raise RuntimeError(
                "Script is running in eval mode while the tuning database is empty. "
                "Please tune the model first."
            )
    if IS_CUDA and ARGS.cpu_flush:
        warnings.warn(
            "Benchmark is running on CUDA, while --cpu-flush is turned on. "
            "This flag will have no effect on CUDA."
        )
        ARGS.cpu_flush = False
    try:
        logger.info(f"Loading model with batch size: {ARGS.batch_size}")
        # load_model may adjust the batch size; the actual value is logged below.
        _, name, model, example_inputs, batch_size = runner.load_model(
            get_torch_device_type(ARGS.target),
            ARGS.model,
            batch_size=ARGS.batch_size,
        )
        model, example_inputs = runner.maybe_cast(model, example_inputs)
        logger.info(f"Got model with batch size: {batch_size}")
    except NotImplementedError:
        logger.exception(f"{ARGS.model} failed to load")
        raise
    with contextlib.ExitStack() as stack:
        profiler = stack.enter_context(ms.Profiler())
        stack.enter_context(torch.no_grad())
        tasks_path = os.path.join(ARGS.work_dir, "extracted_tasks")
        if ARGS.mode.should_extract:
            # Run the model once under torchdynamo with the task-collection
            # backend, then cache the extracted tasks on disk.
            task_collect_backend, extracted_tasks = create_tvm_task_collection_backend()
            task_collect_ctx = torchdynamo.optimize(task_collect_backend)
            task_collect_ctx(runner.model_iter_fn)(model, example_inputs)
            with open(tasks_path, "wb") as f:
                pickle.dump(extracted_tasks, f)
        else:
            # Reuse tasks extracted by a previous run.
            with open(tasks_path, "rb") as f:
                extracted_tasks = pickle.load(f)
        if ARGS.mode.should_tune:
            tasks, task_weights = ms.relay_integration.extracted_tasks_to_tune_contexts(
                extracted_tasks=extracted_tasks,
                work_dir=ARGS.work_dir,
                strategy=ARGS.strategy,
            )
            database = ms.tune.tune_tasks(
                tasks=tasks,
                task_weights=task_weights,
                work_dir=ARGS.work_dir,
                max_trials_global=ARGS.num_trials,
                max_trials_per_task=ARGS.max_trials_per_task,
                runner=get_meta_schedule_runner(), # type: ignore
                database=database,
                cost_model=ms.cost_model.XGBModel( # type: ignore
                    extractor=ms.feature_extractor.PerStoreFeature(),
                    adaptive_training=ARGS.adaptive_training,
                ),
            )
        if ARGS.mode.should_eval:
            # Reset dynamo so the model is recompiled with the TVM backend.
            torchdynamo.reset()
            model_compile_ctx = torchdynamo.optimize(create_tvm_compilation_backend(database))
            model_compile_ctx(runner.model_iter_fn)(model, example_inputs)
            with torch.no_grad():
                performance_experiment(runner.model_iter_fn, model, example_inputs)
        print(profiler.table())
# Script entry point: run the benchmark when executed directly.
if __name__ == "__main__":
    main()
| 25,562 | 31.276515 | 160 | py |
tvm | tvm-main/python/tvm/meta_schedule/testing/torchbench/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
tvm | tvm-main/python/tvm/meta_schedule/measure_callback/add_to_database.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A callback that adds the measurement results into the database"""
from tvm._ffi import register_object
from .. import _ffi_api
from .measure_callback import MeasureCallback
@register_object("meta_schedule.AddToDatabase")
class AddToDatabase(MeasureCallback):
    """A measure callback that adds the measurement results into the database."""

    def __init__(self) -> None:
        """A callback that adds the measurement results into the database"""
        # The actual logic lives on the C++ side; this only builds the handle.
        self.__init_handle_by_constructor__(
            _ffi_api.MeasureCallbackAddToDatabase, # type: ignore # pylint: disable=no-member
        )
| 1,309 | 41.258065 | 94 | py |
tvm | tvm-main/python/tvm/meta_schedule/measure_callback/remove_build_artifact.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A callback that removes the build artifacts from the disk"""
from tvm._ffi import register_object
from .. import _ffi_api
from .measure_callback import MeasureCallback
@register_object("meta_schedule.RemoveBuildArtifact")
class RemoveBuildArtifact(MeasureCallback):
    """A measure callback that removes the build artifacts from the disk."""

    def __init__(self) -> None:
        """A callback that removes the build artifacts from the disk"""
        # The actual logic lives on the C++ side; this only builds the handle.
        self.__init_handle_by_constructor__(
            _ffi_api.MeasureCallbackRemoveBuildArtifact, # type: ignore # pylint: disable=no-member
        )
| 1,317 | 41.516129 | 100 | py |
tvm | tvm-main/python/tvm/meta_schedule/measure_callback/measure_callback.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta Schedule MeasureCallback."""
from typing import TYPE_CHECKING, Callable, List, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from .. import _ffi_api
from ..builder import BuilderResult
from ..runner import RunnerResult
from ..search_strategy import MeasureCandidate
from ..utils import _get_default_str
if TYPE_CHECKING:
from ..task_scheduler import TaskScheduler
@register_object("meta_schedule.MeasureCallback")
class MeasureCallback(Object):
    """Rules to apply after measure results is available."""

    # Accepted forms wherever a list of callbacks is expected: a single
    # callback, a list of callbacks, or the literal string "default".
    CallbackListType = Union[List["MeasureCallback"], "MeasureCallback", Literal["default"]]

    def apply(
        self,
        task_scheduler: "TaskScheduler",
        task_id: int,
        measure_candidates: List[MeasureCandidate],
        builder_results: List[BuilderResult],
        runner_results: List[RunnerResult],
    ) -> None:
        """Apply a measure callback to the given schedule.

        Parameters
        ----------
        task_scheduler: TaskScheduler
            The task scheduler.
        task_id: int
            The task id.
        measure_candidates: List[MeasureCandidate]
            The measure candidates.
        builder_results: List[BuilderResult]
            The builder results by building the measure candidates.
        runner_results: List[RunnerResult]
            The runner results by running the built measure candidates.
        """
        # Delegate to the C++ implementation registered via FFI.
        return _ffi_api.MeasureCallbackApply( # type: ignore # pylint: disable=no-member
            self,
            task_scheduler,
            task_id,
            measure_candidates,
            builder_results,
            runner_results,
        )

    @staticmethod
    def create(kind: Literal["default"]) -> List["MeasureCallback"]:
        """Create a list of measure callbacks from a preset name."""
        if kind == "default":
            return _ffi_api.MeasureCallbackDefault() # type: ignore # pylint: disable=no-member
        raise ValueError(f"Unknown kind of MeasureCallback list: {kind}")
@register_object("meta_schedule.PyMeasureCallback")
class _PyMeasureCallback(MeasureCallback):
    """
    A TVM object measure callback to support customization on the python side.
    This is NOT the user facing class for function overloading inheritance.
    See also: PyMeasureCallback
    """

    def __init__(self, f_apply: Callable, f_as_string: Callable = None):
        """Constructor.

        Parameters
        ----------
        f_apply : Callable
            The packed function implementing `apply`.
        f_as_string : Callable
            The packed function implementing `__str__`; may be None.
        """
        self.__init_handle_by_constructor__(
            _ffi_api.MeasureCallbackPyMeasureCallback, # type: ignore # pylint: disable=no-member
            f_apply,
            f_as_string,
        )
class PyMeasureCallback:
    """
    An abstract measure callback with customized methods on the python-side.
    This is the user facing class for function overloading inheritance.
    Note: @derived_object is required for proper usage of any inherited class.
    """

    # Metadata consumed by @derived_object to connect this class with the
    # TVM object _PyMeasureCallback and the methods subclasses may override.
    _tvm_metadata = {
        "cls": _PyMeasureCallback,
        "methods": ["apply", "__str__"],
    }

    def apply(
        self,
        task_scheduler: "TaskScheduler",
        task_id: int,
        measure_candidates: List[MeasureCandidate],
        builder_results: List[BuilderResult],
        runner_results: List[RunnerResult],
    ) -> None:
        """Apply a measure callback to the given schedule.

        Parameters
        ----------
        task_scheduler: TaskScheduler
            The task scheduler.
        task_id: int
            The task id.
        measure_candidates: List[MeasureCandidate]
            The measure candidates.
        builder_results: List[BuilderResult]
            The builder results by building the measure candidates.
        runner_results: List[RunnerResult]
            The runner results by running the built measure candidates.
        """
        raise NotImplementedError

    def __str__(self) -> str:
        # Default human-readable representation for derived callbacks.
        return _get_default_str(self)
| 4,756 | 32.034722 | 98 | py |
tvm | tvm-main/python/tvm/meta_schedule/measure_callback/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The tvm.meta_schedule.measure_callback package."""
from .add_to_database import AddToDatabase
from .measure_callback import MeasureCallback, PyMeasureCallback
from .remove_build_artifact import RemoveBuildArtifact
from .update_cost_model import UpdateCostModel
| 1,049 | 46.727273 | 64 | py |
tvm | tvm-main/python/tvm/meta_schedule/measure_callback/update_cost_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A measure callback that updates the cost model"""
from tvm._ffi import register_object
from .. import _ffi_api
from .measure_callback import MeasureCallback
@register_object("meta_schedule.UpdateCostModel")
class UpdateCostModel(MeasureCallback):
    """A measure callback that updates the cost model."""

    def __init__(self) -> None:
        """A measure callback that updates the cost model"""
        # The actual logic lives on the C++ side; this only builds the handle.
        self.__init_handle_by_constructor__(
            _ffi_api.MeasureCallbackUpdateCostModel, # type: ignore # pylint: disable=no-member
        )
| 1,283 | 40.419355 | 96 | py |
tvm | tvm-main/python/tvm/meta_schedule/feature_extractor/feature_extractor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta Schedule FeatureExtractor."""
from typing import Callable, List, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.runtime.ndarray import NDArray
from .. import _ffi_api
from ..search_strategy import MeasureCandidate
from ..tune_context import TuneContext
from ..utils import _get_default_str
@register_object("meta_schedule.FeatureExtractor")
class FeatureExtractor(Object):
    """Extractor for features from measure candidates for use in cost model."""

    # Accepted forms wherever an extractor is expected: an instance, or the
    # preset name "per-store-feature".
    FeatureExtractorType = Union[Literal["per-store-feature"], "FeatureExtractor"]

    def extract_from(
        self, context: TuneContext, candidates: List[MeasureCandidate]
    ) -> List[NDArray]:
        """Extract features from the given measure candidate.

        Parameters
        ----------
        context : TuneContext
            The tuning context for feature extraction.
        candidates : List[MeasureCandidate]
            The measure candidates to extract features from.

        Returns
        -------
        features : List[NDArray]
            The feature tvm ndarray extracted.
        """
        # Delegate to the C++ implementation registered via FFI.
        result = _ffi_api.FeatureExtractorExtractFrom(  # type: ignore # pylint: disable=no-member
            self, context, candidates
        )
        return result

    @staticmethod
    def create(
        kind: Literal["per-store-feature"],
        *args,
        **kwargs,
    ) -> "FeatureExtractor":
        """Create a FeatureExtractor from a preset name."""
        from . import PerStoreFeature  # pylint: disable=import-outside-toplevel

        if kind == "per-store-feature":
            return PerStoreFeature(*args, **kwargs)  # type: ignore
        # Fixed: the docstring and error message previously said "CostModel",
        # copy-pasted from the sibling CostModel class.
        raise ValueError(f"Unknown FeatureExtractor: {kind}")
@register_object("meta_schedule.PyFeatureExtractor")
class _PyFeatureExtractor(FeatureExtractor):
    """
    A TVM object feature extractor to support customization on the python side.
    This is NOT the user facing class for function overloading inheritance.
    See also: PyFeatureExtractor
    """

    def __init__(self, f_extract_from: Callable, f_as_string: Callable = None):
        """Constructor.

        Parameters
        ----------
        f_extract_from : Callable
            The packed function implementing `extract_from`.
        f_as_string : Callable
            The packed function implementing `__str__`; may be None.
        """
        self.__init_handle_by_constructor__(
            _ffi_api.FeatureExtractorPyFeatureExtractor, # type: ignore # pylint: disable=no-member
            f_extract_from,
            f_as_string,
        )
class PyFeatureExtractor:
    """
    An abstract feature extractor with customized methods on the python-side.
    This is the user facing class for function overloading inheritance.
    Note: @derived_object is required for proper usage of any inherited class.
    """

    # Metadata consumed by @derived_object to connect this class with the
    # TVM object _PyFeatureExtractor and the methods subclasses may override.
    _tvm_metadata = {
        "cls": _PyFeatureExtractor,
        "methods": ["extract_from", "__str__"],
    }

    def extract_from(
        self, context: TuneContext, candidates: List[MeasureCandidate]
    ) -> List[NDArray]:
        """Extract features from the given measure candidate.

        Parameters
        ----------
        context : TuneContext
            The tuning context for feature extraction.
        candidates : List[MeasureCandidate]
            The measure candidates to extract features from.

        Returns
        -------
        features : List[NDArray]
            The feature tvm ndarray extracted.
        """
        raise NotImplementedError

    def __str__(self) -> str:
        # Default human-readable representation for derived extractors.
        return _get_default_str(self)
| 4,206 | 31.361538 | 100 | py |
tvm | tvm-main/python/tvm/meta_schedule/feature_extractor/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.feature_extractor package.
Meta Schedule feature extractors that extracts features from
measure candidates for use in cost model.
"""
from .feature_extractor import FeatureExtractor, PyFeatureExtractor
from .per_store_feature import PerStoreFeature
from .random_feature_extractor import RandomFeatureExtractor
| 1,121 | 43.88 | 67 | py |
tvm | tvm-main/python/tvm/meta_schedule/feature_extractor/random_feature_extractor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Random Feature Extractor."""
from typing import List, Tuple, Union
import numpy as np # type: ignore
from tvm.runtime.ndarray import NDArray, array
from ..feature_extractor import PyFeatureExtractor
from ..search_strategy import MeasureCandidate
from ..tune_context import TuneContext
from ..utils import derived_object
@derived_object
class RandomFeatureExtractor(PyFeatureExtractor):
    """Random Feature Extractor

    Produces random feature matrices; the candidate contents are ignored.
    A private NumPy random state is captured and restored around each
    extraction, so results are reproducible for a given seed.

    Parameters
    ----------
    feature_size : int
        The size of each block's feature vector.
    max_block_num : int
        The maximum number of blocks in each schedule.
    random_state : Union[Tuple[str, np.ndarray, int, int, float], dict]
        The current state of the NumPy random number generator.
    """

    feature_size: int
    max_block_num: int
    random_state: Union[Tuple[str, np.ndarray, int, int, float], dict]

    def __init__(self, *, feature_size: int = 30, max_block_num: int = 5, seed=0):
        super().__init__()
        assert max_block_num >= 1, "Max block number must be greater or equal to one!"
        self.max_block_num = max_block_num
        self.feature_size = feature_size
        # Seed NumPy's global RNG once, then snapshot its state; extract_from
        # restores/saves this snapshot around every call.
        np.random.seed(seed)
        self.random_state = np.random.get_state()

    def extract_from(
        self, context: TuneContext, candidates: List[MeasureCandidate]
    ) -> List[NDArray]:
        """Return one random (blocks x feature_size) matrix per candidate,
        with the number of blocks drawn uniformly from [1, max_block_num]."""
        np.random.set_state(self.random_state)
        result = [
            np.random.rand(np.random.randint(1, self.max_block_num + 1), self.feature_size)
            for candidate in candidates
        ]
        self.random_state = np.random.get_state()
        return [array(x) for x in result]
| 2,411 | 36.107692 | 91 | py |
tvm | tvm-main/python/tvm/meta_schedule/feature_extractor/per_store_feature.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""We extract one feature vector per BufferStoreNode statement in a TIR Stmt,
so we call this feature as "per-store" feature.
"""
from tvm._ffi import register_object
from .. import _ffi_api
from .feature_extractor import FeatureExtractor
@register_object("meta_schedule.PerStoreFeature")
class PerStoreFeature(FeatureExtractor):
    """PerStoreFeature extracts one feature vector per BufferStoreNode

    Parameters
    ----------
    buffers_per_store : int
        The number of buffers in each BufferStore; Pad or truncate if necessary.
    arith_intensity_curve_num_samples : int
        The number of samples used in the arithmetic intensity curve.
    cache_line_bytes : int
        The number of bytes in a cache line.
    extract_workload : bool
        Whether to extract features in the workload in tuning context or not.
    """

    buffers_per_store: int
    """The number of buffers in each BufferStore; Pad or truncate if necessary."""
    arith_intensity_curve_num_samples: int # pylint: disable=invalid-name
    """The number of samples used in the arithmetic intensity curve."""
    cache_line_bytes: int
    """The number of bytes in a cache line."""
    extract_workload: bool
    """Whether to extract features in the workload in tuning context or not."""
    feature_vector_length: int
    """Length of the feature vector."""

    def __init__(
        self,
        buffers_per_store: int = 5,
        arith_intensity_curve_num_samples: int = 10,
        cache_line_bytes: int = 64,
        extract_workload: bool = False,
    ):
        # feature_vector_length is never assigned in Python — presumably it is
        # populated by the C++ constructor below; confirm against the FFI side.
        self.__init_handle_by_constructor__(
            _ffi_api.FeatureExtractorPerStoreFeature, # type: ignore # pylint: disable=no-member
            buffers_per_store,
            arith_intensity_curve_num_samples,
            cache_line_bytes,
            extract_workload,
        )
| 2,665 | 38.205882 | 97 | py |
tvm | tvm-main/python/tvm/meta_schedule/task_scheduler/task_scheduler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Auto-tuning Task Scheduler"""
from typing import Callable, List, Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from .. import _ffi_api
from ..builder import Builder, BuilderResult
from ..cost_model import CostModel
from ..database import Database
from ..logging import get_logger, get_logging_func
from ..measure_callback import MeasureCallback
from ..runner import Runner, RunnerResult
from ..search_strategy import MeasureCandidate
from ..tune_context import TuneContext
logger = get_logger(__name__) # pylint: disable=invalid-name
@register_object("meta_schedule.TaskRecord")
class TaskRecord(Object):
    """The running record of a task."""

    # The tuning context of the task.
    ctx: TuneContext
    # Relative weight of the task used by the scheduler.
    task_weight: float
    # NOTE(review): presumably the FLOP count of the task's workload —
    # populated on the C++ side; confirm there.
    flop: float
    # Whether the task has finished tuning.
    is_terminated: bool
    # Counters of failed builds/runs observed so far.
    build_error_count: int
    run_error_count: int
    # In-flight measurement state for the current batch.
    measure_candidates: List[MeasureCandidate]
    builder_results: List[BuilderResult]
    runner_results: List[RunnerResult]
@register_object("meta_schedule.TaskScheduler")
class TaskScheduler(Object):
    """The abstract task scheduler interface."""

    # Per-task running records; trailing underscore mirrors the C++ fields.
    tasks_: List[TaskRecord]
    measure_callbacks_: List[MeasureCallback]
    database_: Optional[Database]
    cost_model_: Optional[CostModel]
    remaining_tasks_: int

    # Accepted forms wherever a scheduler is expected: an instance, or one of
    # the preset names understood by `create`.
    TaskSchedulerType = Union["TaskScheduler", Literal["gradient", "round-robin"]]

    def next_task_id(self) -> int:
        """Fetch the next task id.

        Returns
        -------
        next_task_id : int
            The next task id.
        """
        return _ffi_api.TaskSchedulerNextTaskId(self) # type: ignore # pylint: disable=no-member

    def join_running_task(self, task_id: int) -> List[RunnerResult]:
        """Wait until the task is finished.

        Parameters
        ----------
        task_id : int
            The task id to be joined.

        Returns
        -------
        results : List[RunnerResult]
            The list of results.
        """
        return _ffi_api.TaskSchedulerJoinRunningTask(self, task_id) # type: ignore # pylint: disable=no-member

    def tune(
        self,
        tasks: List[TuneContext],
        task_weights: List[float],
        max_trials_global: int,
        max_trials_per_task: int,
        num_trials_per_iter: int,
        builder: Builder,
        runner: Runner,
        measure_callbacks: List[MeasureCallback],
        database: Optional[Database],
        cost_model: Optional[CostModel],
    ) -> None:
        """Auto-tuning.

        Parameters
        ----------
        tasks : List[TuneContext]
            The list of tuning contexts as tasks.
        task_weights : List[float]
            The list of task weights.
        max_trials_global : int
            The maximum number of trials globally.
        max_trials_per_task : int
            The maximum number of trials per task.
        num_trials_per_iter : int
            The number of trials per iteration.
        builder : Builder
            The builder.
        runner : Runner
            The runner.
        measure_callbacks : List[MeasureCallback]
            The list of measure callbacks.
        database : Optional[Database]
            The database.
        cost_model : Optional[CostModel]
            The cost model.
        """
        # Coerce to float so integral weights are accepted at the FFI boundary.
        task_weights = [float(w) for w in task_weights]
        _ffi_api.TaskSchedulerTune( # type: ignore # pylint: disable=no-member
            self,
            tasks,
            task_weights,
            max_trials_global,
            max_trials_per_task,
            num_trials_per_iter,
            builder,
            runner,
            measure_callbacks,
            database,
            cost_model,
        )

    def terminate_task(self, task_id: int) -> None:
        """Terminate the task

        Parameters
        ----------
        task_id : int
            The task id to be terminated.
        """
        _ffi_api.TaskSchedulerTerminateTask(self, task_id) # type: ignore # pylint: disable=no-member

    def touch_task(self, task_id: int) -> None:
        """Touch the task and update its status

        Parameters
        ----------
        task_id : int
            The task id to be checked.
        """
        _ffi_api.TaskSchedulerTouchTask(self, task_id) # type: ignore # pylint: disable=no-member

    def print_tuning_statistics(self) -> None:
        """Print out a human-readable format of the tuning statistics."""
        return _ffi_api.TaskSchedulerPrintTuningStatistics(self) # type: ignore # pylint: disable=no-member

    @staticmethod
    def create( # pylint: disable=keyword-arg-before-vararg
        kind: Literal["round-robin", "gradient"] = "gradient",
        *args,
        **kwargs,
    ) -> "TaskScheduler":
        """Create a task scheduler from a preset name."""
        from . import ( # pylint: disable=import-outside-toplevel
            GradientBased,
            RoundRobin,
        )

        if kind == "round-robin":
            return RoundRobin(*args, **kwargs) # type: ignore
        if kind == "gradient":
            return GradientBased(*args, **kwargs)
        raise ValueError(f"Unknown TaskScheduler name: {kind}")
# Module-level convenience alias so callers can write
# `tvm.meta_schedule.task_scheduler.create(...)`.
create = TaskScheduler.create  # pylint: disable=invalid-name
@register_object("meta_schedule.PyTaskScheduler")
class _PyTaskScheduler(TaskScheduler):
    """
    A TVM object task scheduler to support customization on the python side.
    This is NOT the user facing class for function overloading inheritance.
    See also: PyTaskScheduler
    """

    def __init__(
        self,
        f_next_task_id: Callable,
        f_join_running_task: Callable,
        f_tune: Callable,
    ):
        """Constructor.

        Parameters
        ----------
        f_next_task_id : Callable
            Callback returning the id of the next task to tune.
        f_join_running_task : Callable
            Callback that blocks until the given task's measurements finish.
        f_tune : Callable
            Callback implementing the top-level tuning loop.
        """
        self.__init_handle_by_constructor__(
            _ffi_api.TaskSchedulerPyTaskScheduler,  # type: ignore # pylint: disable=no-member
            get_logging_func(logger),
            f_next_task_id,
            f_join_running_task,
            f_tune,
        )
class PyTaskScheduler:
    """
    An abstract task scheduler with customized methods on the python-side.
    This is the user facing class for function overloading inheritance.
    Note: @derived_object is required for proper usage of any inherited class.
    """

    # Metadata consumed by @derived_object to generate the TVM-facing wrapper.
    _tvm_metadata = {
        "cls": _PyTaskScheduler,
        "fields": [],
        "methods": ["next_task_id", "join_running_task", "tune"],
    }

    def __init__(self):
        ...

    def tune(
        self,
        tasks: List[TuneContext],
        task_weights: List[float],
        max_trials_global: int,
        max_trials_per_task: int,
        num_trials_per_iter: int,
        builder: Builder,
        runner: Runner,
        measure_callbacks: List[MeasureCallback],
        database: Optional[Database],
        cost_model: Optional[CostModel],
    ) -> None:
        """Auto-tuning. Parameters mirror `TaskScheduler.tune`.

        Fix: `num_trials_per_iter` was previously missing from both this
        signature and the FFI call below, although `TaskScheduler.tune`
        passes it to the very same `_ffi_api.TaskSchedulerTune` entry point;
        it is now forwarded so both call sites agree in arity and order.
        """
        # Using self._outer to replace the self pointer
        _ffi_api.TaskSchedulerTune(  # type: ignore # pylint: disable=no-member
            self._outer(),  # type: ignore # pylint: disable=no-member
            tasks,
            task_weights,
            max_trials_global,
            max_trials_per_task,
            num_trials_per_iter,
            builder,
            runner,
            measure_callbacks,
            database,
            cost_model,
        )

    def next_task_id(self) -> int:
        """Fetch the next task id.

        Returns
        -------
        next_task_id : int
            The next task id.

        Raises
        ------
        NotImplementedError
            Always; subclasses must override this method.
        """
        raise NotImplementedError

    def join_running_task(self, task_id: int) -> List[RunnerResult]:
        """Wait until the task is finished.

        Parameters
        ----------
        task_id : int
            The task id to be joined.

        Returns
        -------
        results : List[RunnerResult]
            The measurement results of the joined task.
        """
        # Using self._outer to replace the self pointer
        return _ffi_api.TaskSchedulerJoinRunningTask(self._outer(), task_id)  # type: ignore # pylint: disable=no-member
| 8,661 | 29.716312 | 120 | py |
tvm | tvm-main/python/tvm/meta_schedule/task_scheduler/round_robin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Round Robin Task Scheduler"""
from tvm._ffi import register_object
from .. import _ffi_api
from ..logging import get_logger, get_logging_func
from .task_scheduler import TaskScheduler
logger = get_logger(__name__) # pylint: disable=invalid-name
@register_object("meta_schedule.RoundRobin")
class RoundRobin(TaskScheduler):
    """Round-robin task scheduler: cycles through the tuning tasks in order."""

    def __init__(self) -> None:
        """Constructor. Creates the underlying C++ `meta_schedule.RoundRobin`
        object and wires the Python logger through to the C++ side."""
        self.__init_handle_by_constructor__(
            _ffi_api.TaskSchedulerRoundRobin,  # type: ignore # pylint: disable=no-member
            get_logging_func(logger),
        )
| 1,396 | 36.756757 | 89 | py |
tvm | tvm-main/python/tvm/meta_schedule/task_scheduler/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.task_scheduler package.
Meta Schedule task scheduler that manage the task scheduling
for measure candidates generation and measurement, then save
records to the database.
"""
from .gradient_based import GradientBased
from .round_robin import RoundRobin
from .task_scheduler import PyTaskScheduler, TaskScheduler, create
| 1,131 | 42.538462 | 66 | py |
tvm | tvm-main/python/tvm/meta_schedule/task_scheduler/gradient_based.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Gradient Based Task Scheduler"""
from tvm._ffi import register_object
from .. import _ffi_api
from ..logging import get_logger, get_logging_func
from .task_scheduler import TaskScheduler
logger = get_logger(__name__) # pylint: disable=invalid-name
@register_object("meta_schedule.GradientBased")
class GradientBased(TaskScheduler):
    """Gradient Based Task Scheduler"""

    def __init__(
        self,
        *,
        alpha: float = 0.2,
        window_size: int = 3,
        seed: int = -1,
    ) -> None:
        """Constructor.

        Parameters
        ----------
        alpha : float = 0.2
            The parameter alpha in gradient computation.
        window_size : int = 3
            The parameter to control backward window size in gradient computation.
        seed : int = -1
            The random seed. NOTE(review): presumably -1 requests a
            randomly-chosen seed on the C++ side — confirm there.
        """
        self.__init_handle_by_constructor__(
            _ffi_api.TaskSchedulerGradientBased,  # type: ignore # pylint: disable=no-member
            get_logging_func(logger),
            alpha,
            window_size,
            seed,
        )
| 1,880 | 32.589286 | 92 | py |
tvm | tvm-main/python/tvm/meta_schedule/builder/local_builder.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Local builder that compile on the local host"""
import os
import tempfile
from typing import Callable, Dict, List, Optional, Union
from tvm._ffi import register_func
from tvm.ir import IRModule
from tvm.runtime import Module, NDArray, load_param_dict, save_param_dict
from tvm.target import Target
from ...contrib.popen_pool import MapResult, PopenPoolExecutor, StatusKind
from ..logging import get_logger
from ..utils import cpu_count, derived_object, get_global_func_with_default_on_worker
from .builder import BuilderInput, BuilderResult, PyBuilder
logger = get_logger(__name__) # pylint: disable=invalid-name
T_BUILD = Callable[ # pylint: disable=invalid-name
[IRModule, Target, Optional[Dict[str, NDArray]]], Module
]
T_EXPORT = Callable[[Module], str] # pylint: disable=invalid-name
def _serialize_params(params: Optional[Dict[str, NDArray]]) -> Optional[bytearray]:
    """Pack a parameter dict into TVM's byte format; `None` passes through."""
    return None if params is None else save_param_dict(params)
def _deserialize_params(params: Optional[bytearray]) -> Optional[Dict[str, NDArray]]:
    """Unpack TVM's byte format back into a parameter dict; `None` passes through."""
    return None if params is None else load_param_dict(params)
@derived_object
class LocalBuilder(PyBuilder):
    """A builder that builds the given input on local host.

    Parameters
    ----------
    max_workers: int
        The max number of Popen workers.
    timeout_sec : float
        The timeout in seconds for the build.
    initializer: Optional[Callable[[], None]]
        The initializer function for each popen worker.
    f_build : Union[None, str, T_BUILD]
        Name of the build function to be used.
        Defaults to `meta_schedule.builder.default_build`.
    f_export : Union[None, str, T_EXPORT]
        Name of the export function to be used.
        Defaults to `meta_schedule.builder.default_export`.

    Attributes
    ----------
    T_BUILD : typing._GenericAlias
        The signature of the function `f_build`, which is

        .. code-block:: python

            def default_build(
                mod: IRModule,
                target: Target,
                params: Optional[Dict[str, NDArray]]
            ) -> Module:
                ...

    T_EXPORT : typing._GenericAlias
        The signature of the function `f_export`, which is

        .. code-block:: python

            def default_export(mod: Module) -> str:
                ...

    Note
    ----
    The build function and export function should be registered in the worker process.
    The worker process is only aware of functions registered in TVM package,
    if there are extra functions to be registered,
    please send the registration logic via initializer.
    """

    max_workers: int
    timeout_sec: float
    initializer: Optional[Callable[[], None]]
    f_build: Union[None, str, T_BUILD]
    f_export: Union[None, str, T_EXPORT]

    def __init__(
        self,
        *,
        max_workers: Optional[int] = None,
        timeout_sec: float = 30.0,
        f_build: Union[None, str, T_BUILD] = None,
        f_export: Union[None, str, T_EXPORT] = None,
        initializer: Optional[Callable[[], None]] = None,
    ) -> None:
        """Constructor.

        Parameters
        ----------
        max_workers : Optional[int]
            The maximum number of worker processes to be used.
            Defaults to number of CPUs.
        timeout_sec : float
            The timeout in seconds for the build.
        f_build : T_BUILD
            Name of the build function to be used.
            Defaults to `meta_schedule.builder.default_build`.
        f_export : T_EXPORT
            Name of the export function to be used.
            Defaults to `meta_schedule.builder.default_export`.
        initializer : Optional[Callable[[], None]]
            The initializer to be used for the worker processes.
        """
        super().__init__()
        if max_workers is None:
            max_workers = cpu_count(logical=True)
        logger.info("LocalBuilder: max_workers = %d", max_workers)
        self.max_workers = max_workers
        self.timeout_sec = timeout_sec
        self.initializer = initializer
        self.f_build = f_build
        self.f_export = f_export
        self._sanity_check()

    def build(self, build_inputs: List[BuilderInput]) -> List[BuilderResult]:
        """Build the given inputs in worker processes, one result per input."""
        results: List[BuilderResult] = []
        map_result: MapResult
        # Here we restart the PopenPool everytime because of a known memory leak issue with the
        # PopenPool workers after a couple times of usage. We don't apply the same to runners to
        # avoid potential problem caused by async behaviour.
        pool = PopenPoolExecutor(
            max_workers=self.max_workers,
            timeout=self.timeout_sec,
            initializer=self.initializer,
        )
        # Dispatch the build inputs to the worker processes.
        for map_result in pool.map_with_error_catching(
            lambda x: _worker_func(*x),
            [
                (
                    self.f_build,
                    self.f_export,
                    build_input.mod,
                    build_input.target,
                    _serialize_params(build_input.params),
                )
                for build_input in build_inputs
            ],
        ):
            if map_result.status == StatusKind.COMPLETE:
                results.append(BuilderResult(map_result.value, None))
            elif map_result.status == StatusKind.TIMEOUT:
                results.append(
                    BuilderResult(
                        None,
                        f"LocalBuilder: Timeout, killed after {self.timeout_sec} seconds",
                    )
                )
            elif map_result.status == StatusKind.EXCEPTION:
                results.append(
                    BuilderResult(
                        None,
                        "LocalBuilder: An exception occurred\n" + str(map_result.value),
                    )
                )
            else:
                # Bug fix: this message was a plain literal missing the `f`
                # prefix, so `{map_result}` was never interpolated.
                raise ValueError(f"Unreachable: unexpected result: {map_result}")
        del pool
        return results

    def _sanity_check(self) -> None:
        """Verify `f_build`/`f_export` resolve on a fresh worker process."""

        def _check(f_build, f_export) -> None:
            get_global_func_with_default_on_worker(name=f_build, default=None)
            get_global_func_with_default_on_worker(name=f_export, default=None)

        # Same reason for the single use PopenPool as mentioned above
        pool = PopenPoolExecutor(
            max_workers=self.max_workers,
            timeout=self.timeout_sec,
            initializer=self.initializer,
        )
        value = pool.submit(_check, self.f_build, self.f_export)
        value.result()
        del pool
def _worker_func(
    _f_build: Union[None, str, T_BUILD],
    _f_export: Union[None, str, T_EXPORT],
    mod: IRModule,
    target: Target,
    params: Optional[bytearray],
) -> str:
    """Worker-side entry point: build `mod` for `target`, export the artifact,
    and return the artifact's path."""
    # Resolve the (possibly named) build/export callables on this worker,
    # falling back to the package defaults.
    f_build: T_BUILD = get_global_func_with_default_on_worker(_f_build, default_build)
    f_export: T_EXPORT = get_global_func_with_default_on_worker(_f_export, default_export)
    # Compile the IRModule, then export the resulting runtime module to disk.
    rt_mod: Module = f_build(mod, target, _deserialize_params(params))
    return f_export(rt_mod)
@register_func("meta_schedule.builder.default_build")
def default_build(mod: IRModule, target: Target, _params: Optional[Dict[str, NDArray]]) -> Module:
    """Default build function.

    Parameters
    ----------
    mod : IRModule
        The IRModule to be built.
    target : Target
        The target to be built.
    _params : Optional[Dict[str, NDArray]]
        The parameters to be used for the build. Must be None.

    Returns
    -------
    rt_mod : Module
        The built Module.
    """
    # pylint: disable=import-outside-toplevel
    from tvm.driver import build as tvm_build
    from tvm.tir.transform import RemoveWeightLayoutRewriteBlock

    # pylint: enable=import-outside-toplevel
    stripped = RemoveWeightLayoutRewriteBlock(skip_ndarray_rewrite=True)(mod)
    return tvm_build(stripped, target=target)
@register_func("meta_schedule.builder.default_export")
def default_export(mod: Module) -> str:
    """Default export function.

    Parameters
    ----------
    mod : Module
        The Module to be exported.

    Returns
    -------
    artifact_path : str
        The path to the exported Module.
    """
    from tvm.contrib.tar import tar  # pylint: disable=import-outside-toplevel

    # Export into a fresh temporary directory so concurrent builds never collide.
    out_dir = tempfile.mkdtemp()
    artifact_path = os.path.join(out_dir, "tvm_tmp_mod." + tar.output_format)
    mod.export_library(artifact_path, tar)
    return artifact_path
| 9,537 | 32.70318 | 98 | py |
tvm | tvm-main/python/tvm/meta_schedule/builder/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.builder package.
Meta Schedule builders that translate IRModule to runtime.Module,
and then export
"""
from .builder import Builder, BuilderInput, BuilderResult, PyBuilder, create
from .local_builder import LocalBuilder
| 1,031 | 42 | 76 | py |
tvm | tvm-main/python/tvm/meta_schedule/builder/builder.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta Schedule builders that translate IRModule to runtime.Module, and then export"""
from typing import Callable, Dict, List, Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.ir import IRModule
from tvm.runtime import NDArray, Object
from tvm.target import Target
from .. import _ffi_api
@register_object("meta_schedule.BuilderInput")
class BuilderInput(Object):
    """The builder's input.

    Parameters
    ----------
    mod : IRModule
        The IRModule to be built.
    target : Target
        The target to be built for.
    params: Optional[Dict[str, NDArray]]
        The parameters for Relay build module
    """

    # The IRModule to compile.
    mod: IRModule
    # The compilation target.
    target: Target
    # Optional Relay parameters bound at build time.
    params: Optional[Dict[str, NDArray]]

    def __init__(
        self,
        mod: IRModule,
        target: Target,
        params: Optional[Dict[str, NDArray]] = None,
    ) -> None:
        """Constructor.

        Parameters
        ----------
        mod : IRModule
            The IRModule to be built.
        target : Target
            The target to be built for.
        params: Optional[Dict[str, NDArray]]
            The parameters for Relay build module
        """
        self.__init_handle_by_constructor__(
            _ffi_api.BuilderInput,  # type: ignore # pylint: disable=no-member
            mod,
            target,
            params,
        )
@register_object("meta_schedule.BuilderResult")
class BuilderResult(Object):
    """The builder's result.

    Parameters
    ----------
    artifact_path : Optional[str]
        The path to the artifact.
    error_msg : Optional[str]
        The error message.
    """

    # Path to the built artifact; None when the build failed.
    artifact_path: Optional[str]
    # Human-readable failure description; None when the build succeeded.
    error_msg: Optional[str]

    def __init__(
        self,
        artifact_path: Optional[str],
        error_msg: Optional[str],
    ) -> None:
        """Constructor.

        Parameters
        ----------
        artifact_path : Optional[str]
            The path to the artifact.
        error_msg : Optional[str]
            The error message.
        """
        self.__init_handle_by_constructor__(
            _ffi_api.BuilderResult,  # type: ignore # pylint: disable=no-member
            artifact_path,
            error_msg,
        )
@register_object("meta_schedule.Builder")
class Builder(Object):
    """The abstract builder interface."""

    BuilderType = Union["Builder", Literal["local"]]

    def build(self, build_inputs: List[BuilderInput]) -> List[BuilderResult]:
        """Build the given inputs.

        Parameters
        ----------
        build_inputs : List[BuilderInput]
            The inputs to be built.

        Returns
        -------
        build_results : List[BuilderResult]
            The results of building the given inputs.
        """
        return _ffi_api.BuilderBuild(self, build_inputs)  # type: ignore # pylint: disable=no-member

    @staticmethod
    def create(  # pylint: disable=keyword-arg-before-vararg
        kind: Literal["local"] = "local",
        *args,
        **kwargs,
    ) -> "Builder":
        """Create a Builder.

        Parameters
        ----------
        kind : Literal["local"]
            The kind of the builder. For now, only "local" is supported.

        Returns
        -------
        builder : Builder
            The builder created.
        """
        from . import LocalBuilder  # pylint: disable=import-outside-toplevel

        # Guard clause: reject unknown kinds up front.
        if kind != "local":
            raise ValueError(f"Unknown Builder: {kind}")
        return LocalBuilder(*args, **kwargs)  # type: ignore
# Module-level convenience alias so callers can write
# `tvm.meta_schedule.builder.create(...)`.
create = Builder.create  # pylint: disable=invalid-name
@register_object("meta_schedule.PyBuilder")
class _PyBuilder(Builder):
    """
    A TVM object builder to support customization on the python side.
    This is NOT the user facing class for function overloading inheritance.
    See also: PyBuilder
    """

    def __init__(self, f_build: Optional[Callable] = None):
        """Constructor.

        Parameters
        ----------
        f_build : Optional[Callable]
            The packed function implementing `build`, supplied by
            `@derived_object` when wrapping a `PyBuilder` subclass.
        """
        self.__init_handle_by_constructor__(
            _ffi_api.BuilderPyBuilder,  # type: ignore # pylint: disable=no-member
            f_build,
        )
class PyBuilder:
    """
    An abstract builder with customized build method on the python-side.
    This is the user facing class for function overloading inheritance.
    Note: @derived_object is required for proper usage of any inherited class.
    """

    # Metadata consumed by @derived_object to generate the TVM-facing wrapper.
    _tvm_metadata = {"cls": _PyBuilder, "methods": ["build"]}

    def build(self, build_inputs: List[BuilderInput]) -> List[BuilderResult]:
        """Build the given inputs.

        Parameters
        ----------
        build_inputs : List[BuilderInput]
            The inputs to be built.

        Returns
        -------
        build_results : List[BuilderResult]
            The results of building the given inputs.

        Raises
        ------
        NotImplementedError
            Always; subclasses must override this method.
        """
        raise NotImplementedError
| 5,664 | 27.325 | 100 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Per-block schedule rules in MetaSchedule"""
from . import cpu, cuda, generic, x86
| 870 | 44.842105 | 62 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule/generic/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Per-block schedule rules in MetaSchedule for generic cases"""
| 850 | 46.277778 | 64 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule/cuda/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Per-block schedule rules in MetaSchedule for target key 'cuda'"""
from . import layout_transform
| 886 | 43.35 | 68 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule/cuda/layout_transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""layout_transform scheduling rule for cuda."""
import math
from collections import deque
from typing import List, Optional, Tuple, Union
import tvm
from tvm import meta_schedule
from tvm.tir.schedule import BlockRV, ExprRV, LoopRV
## Tiling layout transforms:
# Assume we have an input shape of [A, B, C, D] and want to layout transform
# ABCD --> DBAC so the output shape would be [D, B, A, C].
#
# Consider reading from the input buffer in a cache-friendly fashion on CPU. We would
# expect a loop structure like:
# lAr, lBr, lCr, lDr = T.grid(A, B, C, D)
#
# Meanwhile consider writing to the output buffer in a cache-friendly fashion on CPU:
# lDw, lBw, lAw, lCw = T.grid(D, B, A, C)
#
# Clearly in many scenarios it is impossible to guarantee contiguous writes and reads
# within a single loop due to non-adjacent dimensions. Instead we work on transposing some
# small sub-tensor of our input writing and then reading from shared memory. We must now
# construct our submatrix so that reading and writing can both be done with some contiguous
# access in global memory.
#
# Consider the case of a 2D transpose. For example [1024, 2048] -> [2048, 1024].
# We note that if we deal with a submatrix of shape [32, 32] which corresponds
# to the dimension of our input tensor, then rows of the submatrix are contiguous
# in the input tensor. Meanwhile, columns of our submatrix are contiguous in our
# output tensor. Therefore, with this tile shape we have the opportunity to read
# contiguously in our input tensor and write to shared memory, and write contiguously
# to our output tensor.
#
# The multiple dimensional case has a similar analogue. We want to allocate shared
# memory per block of [`tile_size`, `tile_size`]. We want the inner most dimension
# of our shared memory to correspond to contiguous reads from the input tensor and
# the outer dimension to correspond to contiguous writes into the output tensor.
#
# In terms of the loop structure reading from the input tensor, the inner most loops
# of our tile must correspond to the inner most dimensions of the input shape,
# while the outer dimensions correspond to the inner most dimensions of the output shape.
# To obtain an inner tile with this loop structure we factor out a contiguous `tile_size`
# chunk of our loop in the shape of interest.
#
# An example is probably best to show this idea:
# Let's say we want a layout transform of ABCD --> DCAB. With shape
# [1024_a, 2_b, 32_c, 8_d] --> [8_d, 32_c, 1024_a, 2_b]
#
# And tile size 32.
#
# Then we initially have a coalesced-read loop pattern of:
# T.grid(1024_a, 2_b, 32_c, 8_d)
#
# To obtain an inner tile of 32, we factor 4 from 32_c and 8 from 8_d:
# T.grid(1024_a, 2_b, 8_c1, 1_d1, 4_c2t, 8_d2t)
# T.grid(1024_a, 2_b, 8_cr, 1_dr, 32_dim1)
#
# To obtain an outer tile of 32, we factor from B then A to follow contiguous write
# pattern:
#
# T.grid(64_a1, 1_b1, 8_cr, 1_dr, 16_a2t, 2_b2t, 32_dim1)
# T.grid(64_ar, 1_br, 8_cr, 1_dr, 32_dim0, 32_dim1)
#
# Which allows us to read a tile with our wanted properties.
# For writing we use the existing analysis infrastructure to generate the structure for writing.
def tile_layout_transform(
sch: tvm.tir.Schedule,
block_read: BlockRV,
block_write: BlockRV,
src_layout: str,
dst_layout: str,
input_shape: List[int],
tile_size: ExprRV,
) -> Tuple[BlockRV, BlockRV]:
"""
High level tiling for layout transform block. Mutates sch in place.
Parameters
----------
sch:
The initial schedule. We expect `block_read` and `block_write` to correspond to
the blocks which reads and writes from global memory respectively. We also expect
block_read's initial loops to follow
block_read:
The block which reads from global memory and writes to shared memory buffer.
block_write:
The block which writes to global memory and reads from shared memory buffer.
src_layout :
The src_layout, each character should appear once and also appear in dst_layout.
There should be not numeric characters and refer to potentially implicit reshapes.
E.g. the transform NCHW --> NCHW4c really implies NCcHW --> NCHWc. In this case
src_layout should be NCcHW.
dst_layout:
The dst_layout. There should not be numeric characters, e.g. NCHW4c becomes NCHWc.
input_shape:
The input shape after applying potentially implicit reshapes. Should match the loop
extants corresponding to src_layout.
tile_size:
The tile size of read and writes. There will be tile_size threads per block, each of which
reads up to tile_size elements.
Returns
-------
ret:
A tuple of the block that writes to global memory, and the block that reads from
global memory.
"""
def pad_dimension_to_at_least_number(loop: LoopRV, requested_size: int):
"""E.g. if loop has extant of 8 but we want 10, returns size 10 loop with padding."""
left, right = sch.split(loop, [None, requested_size])
return sch.fuse(left, right)
def pad_dimension_to_factor_of_tile_size(
loop: LoopRV, initial_size: int, tile_size: int = tile_size
) -> Tuple[LoopRV, int]:
"""
Pads loop of given size until it is divisible into tile_size.
If the given size of the loop is greater than tile size. Do not pad.
examples:
- loop_size = 5 , tile_size = 32. loop_size --> 8
- loop_size = 5 , tile_size = 36. loop_size --> 6
- loop_size = 8 , tile_size = 32. loop_size --> 8 : since 8 already divides 32.
- loop_size = 33, tile_size = 32. loop_size --> 33 : since 33 > 32.
Returns padded loopRV and the new size.
"""
if tile_size % initial_size == 0:
return loop, int(initial_size)
if initial_size > tile_size or initial_size == tile_size:
return loop, int(initial_size)
# if initial_size > tile_size return without change, factor = 1
size = initial_size
while (tile_size % size) % tile_size > 0:
size += 1
return pad_dimension_to_at_least_number(loop, size), int(size)
    def spin_out_factor(
        loops: List[LoopRV], loop_extants: List[int], index: int, factor_needed: int
    ) -> Tuple[List[LoopRV], List[int], int]:
        """
        Factor out the requested loop's dimensions to reach the requested factor and
        places the requested factor as the innermost loop.
        Updates the schedule in-place.
        E.g. say we want to factors which eventually multiply to 32 (factor_needed).
        Say we have the index we chose is a loop with an extant of 8.
        E.g. loops / loop_extants = [3, 32, 6, 8], factor_needed = 32, index=3 (dim=8)
        - 8 divides into 32 so we just split up the loop into two loops with extants 1 and 8.
        - we then keep the 1-loop in place and move the new 8-loop to back of the list of loops
        - ending loops / loop_extants = [3, 32, 6, 1, 8], remaining_factor_needed = 32 / 8 = 4
        E.g. loops / loop_extants = [3, 32, 6, 8], factor_needed=32, index=0 (dim=3)
        - 3 does not divide 32, so we pad until the extant divides 32, e.g. 4
        - we then split up the loop into extants 1 and 4, moving the 4 to the back
        - ending loops / loop_extants = [1, 32, 6, 8, 4], remaining_factor_needed = 32 / 4 = 8
        E.g. loops / loop_extants = [3, 32, 6, 8], factor_needed=5, index=3 (dim=8)
        - 8 is larger than 5 so we immediately do the splitting routine.
        - the 8 extant loop becomes loops with extants 2 and 5
        - ending loops / loop_extants = [1, 32, 6, 2, 5], remaining_factor_needed = 5 / 5 = 1
        After updating loop ordering in place, returns the new list of loops, extants, and the
        remaining factor needed.
        """
        cur_loop = loops[index]
        cur_extant = loop_extants[index]
        # Pad loops to divide evenly for factors needed, and split
        new_loop, new_size = pad_dimension_to_factor_of_tile_size(
            cur_loop, cur_extant, tile_size=factor_needed
        )
        # Never factor out more than is still needed.
        split_factor = min(new_size, factor_needed)
        new_loop_split, factored_loop = sch.split(new_loop, [None, split_factor])
        factor_needed = factor_needed // split_factor
        # update caching: the outer remainder stays at `index`, the factored
        # chunk is appended so it becomes the innermost loop after reorder.
        loops[index] = new_loop_split
        loops.append(factored_loop)
        # ceil accounts for any implicit padding introduced by the split above.
        loop_extants[index] = math.ceil(int(new_size) / int(split_factor))
        loop_extants.append(split_factor)
        # Apply the bookkeeping order to the actual schedule.
        sch.reorder(*loops)
        return loops, loop_extants, factor_needed
def factor_dim_in_order(
    indices: List[int],
    loops: List[LoopRV],
    cur_loop_extants: List[int],
    work_needed_inner_loop: int = tile_size,
) -> Tuple[List[LoopRV], List[int]]:
    """Spin factors out of the loops at `indices`, in order, until the work is met.

    Each visited loop has a factor spun out to the back of the loop list, so
    the new loop factors accumulate at the back in reverse order of access.
    Stops as soon as the remaining factor to extract reaches 1. Returns the
    updated list of loops and their extents.
    """
    remaining = work_needed_inner_loop
    for index in indices:
        loops, cur_loop_extants, remaining = spin_out_factor(
            loops, cur_loop_extants, index, remaining
        )
        if remaining == 1:
            break
    return loops, cur_loop_extants
def get_high_level_loop_structure(
    block_read: BlockRV, input_shape: List[int], src_layout: str, dst_layout: str
):
    """Run the factorization described above on the closed-over schedule `sch`.

    Factors a `tile_size` chunk out of the dimensions for dim0 and then dim1
    (in dst_layout access order) and fuses each chunk into a single loop, so
    the schedule ends with loops [l1, l2, ... dim0_tiled, dim1_tiled].
    Mutates `sch` in place.
    """
    # index 0 ... rank - 1 will always correspond to original loops
    # perhaps after they have been factored.
    rank = len(input_shape)
    loops = sch.get_loops(block_read)
    cur_loop_extants = list(input_shape)

    # Factor dim0 tile size and fuse things together
    loops, cur_loop_extants = factor_dim_in_order(
        list(range(rank - 1, -1, -1)),
        loops,
        cur_loop_extants,
        work_needed_inner_loop=tile_size,
    )
    # The factors which multiply to tile_size are now in back of our
    # list of loops. However because we added them by traversing the inner
    # dimensions, they are actually reversed order to guarantee the best access
    # so reorder before fusing.
    loops = loops[:rank] + loops[rank:][::-1]
    # BUGFIX: previously `cur_loop_extants[rank::-1]`, which reverses the
    # *prefix* [0..rank] instead of the tail (`x[k::-1]` == reversed(x[:k+1])).
    # Harmless before only because the tail is discarded below, but corrected
    # here to mirror the `loops` expression above.
    cur_loop_extants = cur_loop_extants[:rank] + cur_loop_extants[rank:][::-1]
    sch.reorder(*loops)
    dim0_loop_tiled = sch.fuse(*loops[rank:])
    loops = loops[:rank]
    loops.append(dim0_loop_tiled)
    cur_loop_extants = cur_loop_extants[:rank]
    cur_loop_extants.append(tile_size)

    # Same thing with dim1
    # [:rank + 1], since we placed dim0_loop_tiled in the end which we want to keep
    loops, cur_loop_extants = factor_dim_in_order(
        list(
            (
                src_layout.index(dst_layout[loop_index_dst])
                for loop_index_dst in range(rank - 1, -1, -1)
            )
        ),
        loops,
        cur_loop_extants,
        work_needed_inner_loop=tile_size,
    )
    loops = loops[: rank + 1] + loops[rank + 1 :][::-1]
    # BUGFIX: same prefix-vs-tail slicing fix as above (`[rank + 1 :: -1]`).
    cur_loop_extants = cur_loop_extants[: rank + 1] + cur_loop_extants[rank + 1 :][::-1]
    sch.reorder(*loops)
    dim1_loop_tiled = sch.fuse(*loops[rank + 1 :])
    loops = loops[: rank + 1]
    loops.append(dim1_loop_tiled)
    cur_loop_extants = cur_loop_extants[: rank + 1]
    cur_loop_extants.append(tile_size)
# After this we have loops: [loop1, loop2, loop3 ... dim0_tiled, dim1_tiled]
get_high_level_loop_structure(block_read, input_shape, src_layout, dst_layout)

# If there are insufficient elements, then dim1_tiled or dim0_tiled might be too small.
# In all likelihood a smaller tile should be used, but pad so nothing crashes.
loops = sch.get_loops(block_read)
loops[-1] = pad_dimension_to_at_least_number(loops[-1], tile_size)
loops[-2] = pad_dimension_to_at_least_number(loops[-2], tile_size)

# We want the dim0 and dim1 parent loops to be the inner most. Right now dim1 is inner-most
# and we just need to move dim0 in (last dimension of dst).
# Recall right now structure is at least [l1 l2 ... ln, dim0_tiled, dim1_tiled]
# where n >= 2, so after the pop dim0's parent is placed third-from-last.
dim0_loop_index = src_layout.index(dst_layout[-1])
dim0_loop = loops.pop(dim0_loop_index)
loops = loops[:-3] + [dim0_loop, loops[-3]] + loops[-2:]
sch.reorder(*loops)

# After this loops are: [outer_loop (block binding), dim0_tiled, dim1_tiled]
outer_loop = sch.fuse(*loops[:-2])

# Now that we have the high level loop structure, we can use reverse_compute_at magic
# to get the proper loop structure for writing! This is also as coalesced as possible
# already.
sch.reverse_compute_at(block_write, outer_loop)

# Fuse all inner loops for the write into 2 loops, grab inner loops for both read
# and write block which have locality (we will bind these to threadIdx).
fused_write_loop = sch.fuse(*sch.get_loops(block_write)[1:])
_, inner_write_loop = sch.split(fused_write_loop, [None, tile_size])
inner_read_loop = sch.get_loops(block_read)[-2]

# One threadblock per fused outer loop; tile_size threads cooperate on the
# read and on the write, each coalesced along its own innermost axis.
sch.bind(loop=outer_loop, thread_axis="blockIdx.x")
sch.bind(loop=inner_write_loop, thread_axis="threadIdx.x")
sch.bind(loop=inner_read_loop, thread_axis="threadIdx.x")
return block_write, block_read
def create_cached_read(
    sch: tvm.tir.Schedule,
    block_write: BlockRV,
    orig_input_shape: List[int],
    orig_src_layout: str,
    orig_dst_layout: str,
) -> Tuple[BlockRV, List[int], str, str]:
    """
    Creates the cached read block (global -> shared) with the expected structure.

    Loop extents should follow the input shape closely. E.g. if the input is [2, 6, 8], we
    expect our loop structure to be T.grid(2, 6, 8). Possibly reshape to handle implicit
    reshapes, in which case we will match the implicit reshape shape.

    Layout transform allows semantics like NCHW --> NCHW4c, which involves splitting the
    original C axis into contiguous 4-element chunks. This axis is then moved to the end
    (NCHWc). This is guaranteed by the operator to be done without additional padding. To
    handle this we just split the associated axis (prev. type checking ensures C is
    divisible by 4) in src_layout found in block_read. E.g. NCHW -> NCHW4c now becomes
    NC4cHW -> NCHW4c.

    Note: NCHW4c --> NCHW is not allowed, so the only numeric digits will be in dst.
    The returned layout strings are sanitized and made compatible. E.g. NCHW --> NCHW4c
    becomes NCcHW --> NCHWc.

    TODO(AndrewZhaoLuo): Investigate using proper memory alignment to avoid bank conflict.

    Parameters
    ----------
    sch:
        The initial schedule. We expect `block_write`, whose initial loops follow
        the original input shape.
    block_write:
        The block which writes the final output; its first read buffer is cached.
    orig_input_shape:
        The input shape of the input buffer to the primfunc.
    orig_src_layout:
        The original src_layout string.
    orig_dst_layout:
        The original dst_layout string.

    Returns
    -------
    ret:
        A tuple of the cached read block, new input shape of the shared memory buffer,
        the new src_layout string, and the new dst_layout string.
    """
    # Figure out split dimensions, entries are (loop index in src_layout, split amount)
    split_dimensions: List[Tuple[int, int]] = []

    # This is dst_layout without numeric digits, e.g. NCHW4c -> NCHWc
    new_dst_layout = []

    # Use state machine to parse NCHW4c-style strings: consecutive digits
    # accumulate into split_size until their lower-case axis char is reached.
    split_size = 0
    for char in orig_dst_layout:
        if char.isnumeric():
            # Multi-digit factors, e.g. "16c", build up digit by digit.
            split_size = split_size * 10 + int(char)
        else:
            if char.islower():
                # hit axis like 'c', need to find parent axis 'C' in src_layout
                src_layout_index = orig_src_layout.index(char.upper())
                split_dimensions.append((src_layout_index, split_size))
                split_size = 0
            new_dst_layout.append(char)

    # If no splits were detected we are done; a plain cache_read suffices.
    if len(split_dimensions) == 0:
        block_read = sch.cache_read(block_write, 0, "shared")
        return block_read, orig_input_shape, orig_src_layout, orig_dst_layout

    # Calculate final input shapes, each of these are a single element for unsplit dims
    # and tuples for split dims associated with the two new axes (outer, inner).
    input_shape: List[Union[int, Tuple]] = list(orig_input_shape)
    new_src_layout: List[Union[str, Tuple]] = list(orig_src_layout)
    for src_layout_split_index, split_factor in split_dimensions:
        dimension_name = orig_src_layout[src_layout_split_index]
        # E.g. 'C' becomes ('C', 'c') and its extent becomes (C // factor, factor).
        new_src_layout[src_layout_split_index] = (dimension_name, dimension_name.lower())
        input_shape[src_layout_split_index] = (
            orig_input_shape[src_layout_split_index] // split_factor,
            split_factor,
        )

    # Unpack any tuples introduced via appending
    def unpack_list(target_list) -> List:
        output: List = []
        for ele in target_list:
            if isinstance(ele, tuple):
                output.extend(ele)
            else:
                output.append(ele)
        return output

    new_src_layout_str = "".join(unpack_list(new_src_layout))
    new_dst_layout_str = "".join(unpack_list(new_dst_layout))

    # Write block loop extents match dst layout; map each src dim to its dst position
    # so the cached read is indexed in src order.
    dst_to_src_map = [new_dst_layout_str.index(dim) for dim in new_src_layout_str]
    block_read = sch.reindex_cache_read(
        block_write,
        read_buffer_index=0,
        index_map=tvm.tir.IndexMap.from_func(
            lambda *loops: [loops[dst_to_src_map[i]] for i, _ in enumerate(loops)],
            ndim=len(new_src_layout_str),
        ),
        storage_scope="shared",
    )

    # Reorder the read block's loops to follow src layout order.
    loops_read = sch.get_loops(block_read)
    sch.reorder(
        *[loops_read[new_dst_layout_str.index(dst_dim_name)] for dst_dim_name in new_src_layout_str]
    )
    return block_read, unpack_list(input_shape), new_src_layout_str, new_dst_layout_str
def auto_inline_into(sch: tvm.tir.Schedule, start_block: BlockRV) -> None:
    """
    Inlines start_block's consumers and their transitive consumers into start_block.

    Walks the consumer graph breadth-first from `start_block` and applies the
    AutoInline schedule rule to each reachable block, mutating the schedule.

    Note: this returns nothing — the previous `-> BlockRV` annotation and
    "returns the new block" docstring were wrong; no value was ever returned.

    Parameters
    ----------
    sch:
        The initial schedule, mutated in place.
    start_block:
        The block to inline into; should be a block which reads and writes to
        global memory, doing a layout transform.
    """
    # Rules defined by the DefaultCUDA schedule_rule set.
    autoinline_rule = meta_schedule.schedule_rule.AutoInline(
        into_producer=True,
        into_consumer=False,
        inline_const_tensor=True,
        disallow_if_then_else=False,
        require_injective=False,
        require_ordered=False,
    )
    fringe = deque(sch.get_consumers(start_block))
    visited = set()
    while fringe:
        cur_block = fringe.popleft()
        if cur_block in visited:
            continue
        visited.add(cur_block)
        # Enqueue consumers before inlining so the whole chain is covered.
        fringe.extend(sch.get_consumers(cur_block))
        sch = autoinline_rule.apply(sch, cur_block)[0]
def get_max_tile_size() -> int:
    """Return the maximum tile size to consider.

    Uses the current target's warp size when it is available, on the
    assumption that only threads within a warp can have coalesced accesses.
    Falls back to 32 when no target information can be obtained.
    """
    cur_target = tvm.target.Target.current()
    if cur_target is None or not hasattr(cur_target, "thread_warp_size"):
        return 32
    return int(cur_target.thread_warp_size)
@tvm.register_func("meta_schedule.cuda.layout_transform")
def cuda_layout_transform_schedule_rule(
    sch: tvm.tir.Schedule, block: BlockRV, testing_tile_sizes: Optional[List[int]] = None
) -> List[tvm.tir.Schedule]:
    """
    Applies tiling scheme to layout transform task (potentially fused with other injective funcs).

    Returned schedules will be the default schedule, as well as tiled versions with tile_size in
    the range of 2,3...threads_per_warp.

    This is assuming only threads in a warp can have coalesced accesses. 32 is the default if
    no target information can be gotten.

    Parameters
    ----------
    sch:
        The initial schedule.
    block:
        The block corresponding to the layout transform.
        Should be a block which reads and writes to global memory, doing layout transform.
    testing_tile_sizes:
        A list of tile sizes to try, overriding normal settings. For testing. None means
        ignore. Else overrides normal settings of tile sizes to try.

    Returns
    -------
    ret:
        A list of new schedules to try.
    """
    # Info needed for tiling; these annotations are attached to the block's stmt.
    src_layout = sch.get_sref(block).stmt.annotations["src_layout"]
    dst_layout = sch.get_sref(block).stmt.annotations["dst_layout"]
    input_shape = [int(c) for c in sch.get_sref(block).stmt.annotations["input_shape"]]

    schedules = []

    # Always include the default schedules which will be handled via AutoBind schedule rule
    # Except during testing.
    # NOTE(review): this check is truthiness-based while the one below is `is None`,
    # so `testing_tile_sizes=[]` still includes the default schedule but then
    # produces no tiled ones — presumably intentional, but worth confirming.
    if not testing_tile_sizes:
        schedules.append(sch)

    # Work on a copy so the default (untiled) schedule above stays untouched.
    sch = sch.copy()

    # Inline consumers of the layout transform into the layout transform block.
    # Normally default for injective schedules but must manually be called in new schedule rule
    # for consumers of the layout transform. TODO(AndrewZhaoLuo): Figure out why this is the case.
    auto_inline_into(sch, block)

    # Set up basic structure of schedule of creating read into shared mem, before applying tiling
    # Outer loop structure of read block matches that of src_layout
    # E.g. if input_shape is [4, 6, 8]. Loops for read block will be
    # for i, j, k in T.grid(4, 6, 8):
    #     ...
    # Read block will read from global memory coalesced at the start
    # Assume write to output global memory is coalesced in block_write
    #
    # This also handles the case where there is an implicit reshape going on.
    # e.g. NCHW -> NCHW4c which is equivalent to reshaping NCHW
    # to NCcHW and then applying the new layout where the extent of c is 4.
    # Grab final input shape and src and dst layouts with possible implicit reshape.
    block_read, input_shape, src_layout, dst_layout = create_cached_read(
        sch, block, input_shape, src_layout, dst_layout
    )

    # Try tile size 2,3...threads_per_warp, as tile size of 1 has no coalescing.
    if testing_tile_sizes is None:
        tile_sizes = list(range(2, get_max_tile_size() + 1))
    else:
        tile_sizes = testing_tile_sizes

    # Each tile size yields an independent candidate schedule.
    for tile_size in tile_sizes:
        new_sch = sch.copy()
        tile_layout_transform(
            new_sch, block_read, block, src_layout, dst_layout, input_shape, tile_size
        )
        schedules.append(new_sch)

    return schedules
| 23,824 | 39.796233 | 100 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule/x86/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Per-block schedule rules in MetaSchedule for target key 'x86'"""
| 853 | 46.444444 | 67 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule/cpu/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Per-block schedule rules in MetaSchedule for target key 'cpu'"""
| 853 | 46.444444 | 67 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule_rule/parallel_vectorize_unroll.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Rule that mark parallelize, vectorize and unroll to the root block. The mark will be applied to
each block in a follow-up post processor"""
from typing import List, Optional
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
@register_object("meta_schedule.ParallelizeVectorizeUnroll")
class ParallelizeVectorizeUnroll(ScheduleRule):
    """Rule that annotates the root block with parallelize, vectorize, and unroll
    hints; a follow-up post processor applies the marks to each block.

    Parameters
    ----------
    max_jobs_per_core: int
        The maximum number of jobs to be launched per CPU core. It sets the upper limit of CPU
        parallelism, i.e. `num_cores * max_jobs_per_core`.
        Use -1 to disable parallelism.
    max_vectorize_extent: int
        The maximum extent to be vectorized. It sets the upper limit of the hardware target
        vectorization.
        Use -1 to disable vectorization.
    unroll_max_steps: Optional[List[int]]
        The options of the maximum number of unroll steps to be done.
        Use None to disable unroll.
    unroll_explicit: bool
        Whether to explicitly unroll the loop, or just add an "unroll" pragma.
    """

    def __init__(
        self,
        max_jobs_per_core: int = 16,
        max_vectorize_extent: int = 16,
        unroll_max_steps: Optional[List[int]] = None,
        unroll_explicit: bool = True,
    ) -> None:
        # The FFI constructor expects a concrete list; None means "no unroll".
        steps = [] if unroll_max_steps is None else unroll_max_steps
        self.__init_handle_by_constructor__(
            _ffi_api.ScheduleRuleParallelizeVectorizeUnroll,  # type: ignore # pylint: disable=no-member
            max_jobs_per_core,
            max_vectorize_extent,
            steps,
            unroll_explicit,
        )
| 2,592 | 38.892308 | 104 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule_rule/random_compute_location.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Rule that randomly select a compute-at location for a free block"""
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
@register_object("meta_schedule.RandomComputeLocation")
class RandomComputeLocation(ScheduleRule):
    """Schedule rule that picks a random compute-at location for a free block."""

    def __init__(self) -> None:
        # No configuration: the FFI constructor takes no arguments.
        self.__init_handle_by_constructor__(
            _ffi_api.ScheduleRuleRandomComputeLocation  # type: ignore # pylint: disable=no-member
        )
| 1,324 | 40.40625 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule_rule/add_rfactor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add-rfactor Rule that add-rfactor to some blocks if needed"""
from typing import Optional
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
@register_object("meta_schedule.AddRFactor")
class AddRFactor(ScheduleRule):
    """Rule that applies add-rfactor to some blocks if needed.

    Parameters
    ----------
    max_jobs_per_core: int
        The maximum number of jobs to be launched per CPU core. It sets the upper
        limit of CPU parallelism, i.e. `num_cores * max_jobs_per_core`.
        Use -1 to disable parallelism.
    max_innermost_factor: Optional[int] = None
        The maximum size of the innermost factor. None means no limit.
    """

    def __init__(
        self,
        max_jobs_per_core: int = 16,
        max_innermost_factor: Optional[int] = None,
    ) -> None:
        args = (max_jobs_per_core, max_innermost_factor)
        self.__init_handle_by_constructor__(
            _ffi_api.ScheduleRuleAddRFactor,  # type: ignore # pylint: disable=no-member
            *args,
        )
| 1,834 | 35.7 | 90 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule_rule/schedule_rule.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Meta Schedule schedule rules are used for modification of
blocks in a schedule. See also PostOrderApply.
"""
from typing import TYPE_CHECKING, Callable, List
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.tir.schedule import BlockRV, Schedule
from .. import _ffi_api
from ..utils import _get_default_str
if TYPE_CHECKING:
from ..tune_context import TuneContext
@register_object("meta_schedule.ScheduleRule")
class ScheduleRule(Object):
    """Rules to modify a block in a schedule."""

    def _initialize_with_tune_context(self, context: "TuneContext") -> None:
        """Initialize the schedule rule with a tune context.

        Parameters
        ----------
        context : TuneContext
            The tuning context for initializing the schedule rule.
        """
        _ffi_api.ScheduleRuleInitializeWithTuneContext(  # type: ignore # pylint: disable=no-member
            self, context
        )

    def apply(self, sch: Schedule, block: BlockRV) -> List[Schedule]:
        """Apply a schedule rule to the specific block in the given schedule.

        Parameters
        ----------
        sch : tvm.tir.Schedule
            The schedule to be modified.
        block : BlockRV
            The specific block to apply the schedule rule.

        Returns
        -------
        design_spaces : List[tvm.tir.Schedule]
            The list of schedules generated by applying the schedule rule.
        """
        return _ffi_api.ScheduleRuleApply(  # type: ignore # pylint: disable=no-member
            self, sch, block
        )

    def clone(self) -> "ScheduleRule":
        """Deep clone the schedule rule.

        Returns
        -------
        cloned_rule : ScheduleRule
            The cloned schedule rule.
        """
        return _ffi_api.ScheduleRuleClone(self)  # type: ignore # pylint: disable=no-member

    @staticmethod
    def create(kind: Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"]) -> List["ScheduleRule"]:
        """Create a list of schedule rules for the given kind.

        Parameters
        ----------
        kind : Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"]
            The kind of the schedule rules.

        Returns
        -------
        rules : List[ScheduleRule]
            The list of schedule rules.

        Raises
        ------
        ValueError
            If `kind` is not one of the supported kinds.
        """
        funcs = {
            # pylint: disable=no-member
            "llvm": _ffi_api.ScheduleRuleDefaultLLVM,  # type: ignore
            "cuda": _ffi_api.ScheduleRuleDefaultCUDA,  # type: ignore
            "cuda-tensorcore": _ffi_api.ScheduleRuleDefaultCUDATensorCore,  # type: ignore
            "hexagon": _ffi_api.ScheduleRuleDefaultHexagon,  # type: ignore
            # pylint: enable=no-member
        }
        # Direct dict lookup instead of scanning items() pair by pair.
        if kind in funcs:
            return funcs[kind]()
        raise ValueError(f"Unsupported kind {kind} for schedule rule creation.")
# Module-level alias so callers can use `schedule_rule.create(...)` directly.
create = ScheduleRule.create # pylint: disable=invalid-name
@register_object("meta_schedule.PyScheduleRule")
class _PyScheduleRule(ScheduleRule):
    """
    A TVM object schedule rule to support customization on the python side.
    This is NOT the user facing class for function overloading inheritance.

    See also: PyScheduleRule
    """

    def __init__(
        self,
        f_initialize_with_tune_context: Callable = None,
        f_apply: Callable = None,
        f_clone: Callable = None,
        f_as_string: Callable = None,
    ):
        """Construct the FFI-backed rule from the given packed callbacks."""
        callbacks = (
            f_initialize_with_tune_context,
            f_apply,
            f_clone,
            f_as_string,
        )
        self.__init_handle_by_constructor__(
            _ffi_api.ScheduleRulePyScheduleRule,  # type: ignore # pylint: disable=no-member
            *callbacks,
        )
class PyScheduleRule:
    """
    An abstract schedule rule with customized methods on the python-side.
    This is the user facing class for function overloading inheritance.

    Note: @derived_object is required for proper usage of any inherited class.
    """

    # Glue consumed by @derived_object: the backing TVM object class and the
    # overridable methods forwarded to it.
    _tvm_metadata = {
        "cls": _PyScheduleRule,
        "methods": ["_initialize_with_tune_context", "apply", "clone", "__str__"],
    }

    def _initialize_with_tune_context(self, context: "TuneContext") -> None:
        """Initialize the schedule rule with a tune context.

        Parameters
        ----------
        context : TuneContext
            The tuning context for initializing the schedule rule.
        """
        raise NotImplementedError

    def apply(self, sch: Schedule, block: BlockRV) -> List[Schedule]:
        """Apply a schedule rule to the specific block in the given schedule.

        Parameters
        ----------
        sch : Schedule
            The schedule to be modified.
        block : BlockRV
            The specific block to apply the schedule rule.

        Returns
        -------
        design_spaces : List[Schedule]
            The list of schedules generated by applying the schedule rule.
        """
        raise NotImplementedError

    def clone(self) -> ScheduleRule:
        """Deep clone the schedule rule.

        Returns
        -------
        cloned_rule : ScheduleRule
            The cloned schedule rule.
        """
        raise NotImplementedError

    def __str__(self) -> str:
        """Get the schedule rule as string with name.

        Return
        ------
        result : str
            The string representation of the schedule rule.
        """
        return _get_default_str(self)
| 6,338 | 30.537313 | 100 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule_rule/auto_bind.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Auto-bind Rule that binds blocks to threads if needed"""
from typing import List, Optional
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
@register_object("meta_schedule.AutoBind")
class AutoBind(ScheduleRule):
    """Auto bind loops around the block to BlockIdx and ThreadIdx.

    Parameters
    ----------
    max_threadblocks: int
        The maximum number of threadblocks on GPU.
    thread_extents: Optional[List[int]]
        Candidates of thread axis extent; defaults to [32, 64, 128, 256, 512, 1024].
    max_threads_per_block: int
        The maximum number of threads per block, if it is known when this schedule rule is created.
    """

    def __init__(
        self,
        max_threadblocks: int = 256,
        thread_extents: Optional[List[int]] = None,
        max_threads_per_block: int = -1,
    ) -> None:
        extents = [32, 64, 128, 256, 512, 1024] if thread_extents is None else thread_extents
        self.__init_handle_by_constructor__(
            _ffi_api.ScheduleRuleAutoBind,  # type: ignore # pylint: disable=no-member
            max_threadblocks,
            extents,
            max_threads_per_block,
        )
| 1,958 | 35.277778 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule_rule/auto_inline.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Auto-Inline. Rule that inlines spatial blocks if it satisfies some conditions"""
from typing import List, Optional
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
@register_object("meta_schedule.AutoInline")
class AutoInline(ScheduleRule):
    """Rule that inlines spatial blocks if they satisfy some conditions.

    Parameters
    ----------
    into_producer : bool
        If allows to inline a block into its producer
    into_consumer : bool
        If allows to inline a block into its consumer
    inline_const_tensor : bool
        Always inline constant tensors
    disallow_if_then_else : bool
        Always disallow if-then-else-like constructs
    require_injective : bool
        Always require the read-to-write mapping to be injective
    require_ordered : bool
        Always require the read-to-write mapping to be ordered
    disallow_op : Optional[List[str]]
        The operators that are disallowed in auto inline
    """

    def __init__(
        self,
        into_producer: bool,
        into_consumer: bool,
        inline_const_tensor: bool,
        disallow_if_then_else: bool,
        require_injective: bool,
        require_ordered: bool,
        disallow_op: Optional[List[str]] = None,
    ) -> None:
        flags = (
            into_producer,
            into_consumer,
            inline_const_tensor,
            disallow_if_then_else,
            require_injective,
            require_ordered,
            disallow_op,
        )
        self.__init_handle_by_constructor__(
            _ffi_api.ScheduleRuleAutoInline,  # type: ignore # pylint: disable=no-member
            *flags,
        )
@register_object("meta_schedule.InlineConstantScalars")
class InlineConstantScalars(ScheduleRule):
    """Inline blocks that produce a constant scalar.

    Such blocks get in the way of ReverseComputeInline during AutoInline, since
    they are also counted as producer blocks unless they are inlined first. So
    it is recommended to run InlineConstantScalars before AutoInline.
    """

    def __init__(self) -> None:
        # No configuration: the FFI constructor takes no arguments.
        self.__init_handle_by_constructor__(
            _ffi_api.ScheduleRuleInlineConstantScalars  # type: ignore # pylint: disable=no-member
        )
| 3,027 | 34.623529 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule_rule/multi_level_tiling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Multi-level tiling with reuse."""
from typing import Any, Dict, List, Mapping, NamedTuple, Optional, Callable
from tvm.tir.schedule import Schedule, BlockRV
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
class ReuseType(NamedTuple):
    """Data-reuse configuration used by the multi-level tiling rules."""

    # Reuse requirement, e.g. "may" / "must" / "no"
    req: str
    # Tiling levels at which the reuse buffer may be placed
    levels: List[int]
    # Storage scope of the reuse buffer, e.g. "shared" or "local"
    scope: str

    def as_dict(self) -> Dict[str, Any]:
        """Return the dict representation of the reuse type."""
        return dict(req=self.req, levels=self.levels, scope=self.scope)
@register_object("meta_schedule.MultiLevelTiling")
class MultiLevelTiling(ScheduleRule):
    """Multi-level tiling with reuse.

    Parameters
    ----------
    structure : str
        The tiling structure. Recommended:
        - 'SSRSRS' on CPU
        - 'SSSRRSRS' on GPU
    tile_binds : Optional[List[str]]
        For each level of tiles, which thread axis it is bound to. Recommended:
        - None on CPU
        - [blockIdx.x, vthread.x, threadIdx.x] on GPU
    max_innermost_factor : Optional[int]
        The maximum size of the innermost factor. None means no limit
    vector_load_lens : Optional[List[int]]
        The length of vector lane in vectorized cooperative fetching.
        None means disable vectorization
    reuse_read : Optional[ReuseType]
        Data reuse configuration for reading. None means no reuse.
    reuse_write : Optional[ReuseType]
        Data reuse configuration for writing. None means no reuse.
    filter_fn : Optional[Callable[[Schedule, BlockRV], bool]]
        A function that can be passed to overwrite the default condition for applying
        MultiLevelTiling to a block. This is useful if there is a need to apply
        MultiLevelTiling to an operation / block which is ignored by default. This
        function should return True for a block that should be tiled (based on the
        block name, for example).
    """

    def __init__(
        self,
        structure: str,
        tile_binds: Optional[List[str]] = None,
        max_innermost_factor: Optional[int] = None,
        vector_load_lens: Optional[List[int]] = None,
        reuse_read: Optional[ReuseType] = None,
        reuse_write: Optional[ReuseType] = None,
        filter_fn: Optional[Callable[[Schedule, BlockRV], bool]] = None,
    ) -> None:
        # The FFI layer expects plain dicts (or None) for the reuse configs.
        read_cfg = None if reuse_read is None else reuse_read.as_dict()
        write_cfg = None if reuse_write is None else reuse_write.as_dict()
        self.__init_handle_by_constructor__(
            _ffi_api.ScheduleRuleMultiLevelTiling,  # type: ignore # pylint: disable=no-member
            structure,
            tile_binds,
            max_innermost_factor,
            vector_load_lens,
            read_cfg,
            write_cfg,
            filter_fn,
        )
@register_object("meta_schedule.MultiLevelTilingWithIntrin")
class MultiLevelTilingWithIntrin(ScheduleRule):
    """Extension of MultiLevelTiling for auto-tensorizing with a single intrinsic.

    Parameters
    ----------
    intrin_name : str
        The name of a tensor intrinsic, must be registered via
        TensorIntrin.register(...) beforehand
    structure : str
        The tiling structure. Recommended:
        - 'SSRSRS' on CPU
        - 'SSSRRSRS' on GPU
    tile_binds : Optional[List[str]]
        For each level of tiles, which thread axis it is bound to. Recommended:
        - None on CPU
        - [blockIdx.x, vthread.x, threadIdx.x] on GPU
    max_innermost_factor : Optional[int]
        The maximum size of the innermost factor. None means no limit
    vector_load_lens : Optional[List[int]]
        The length of vector lane in vectorized cooperative fetching.
        None means disable vectorization
    reuse_read : Optional[ReuseType]
        Data reuse configuration for reading. None means no reuse.
    reuse_write : Optional[ReuseType]
        Data reuse configuration for writing. None means no reuse.
    """

    def __init__(
        self,
        intrin_name: str,
        structure: str,
        tile_binds: Optional[List[str]] = None,
        max_innermost_factor: Optional[int] = None,
        vector_load_lens: Optional[List[int]] = None,
        reuse_read: Optional[ReuseType] = None,
        reuse_write: Optional[ReuseType] = None,
    ) -> None:
        # Serialize optional reuse configs for the C++ constructor.
        read_cfg = None if reuse_read is None else reuse_read.as_dict()
        write_cfg = None if reuse_write is None else reuse_write.as_dict()
        self.__init_handle_by_constructor__(
            _ffi_api.ScheduleRuleMultiLevelTilingWithIntrin,  # type: ignore # pylint: disable=no-member
            intrin_name,
            structure,
            tile_binds,
            max_innermost_factor,
            vector_load_lens,
            read_cfg,
            write_cfg,
        )
@register_object("meta_schedule.MultiLevelTilingTensorCore")
class MultiLevelTilingTensorCore(ScheduleRule):
    """Extension of MultiLevelTiling for auto-tensorizing with multiple groups of candidate
    tensor core intrinsics.

    Parameters
    ----------
    intrin_groups : List[Mapping[str, str]]
        A list of groups of tensor core intrinsics. The map should contain keys "init",
        "load_a", "load_b", "compute", "store", which represent the tensor intrin for
        initialization, loading operand A, loading operand B, tensor core computation,
        and storing the result. The value of the map should be names of tensor
        intrinsics, must be registered via TensorIntrin.register(...) beforehand
    structure : str
        The tiling structure. Recommended:
        - 'SSSRRSRS' on GPU
    tile_binds : Optional[List[str]]
        For each level of tiles, which thread axis it is bound to. Recommended:
        - [blockIdx.y, vthread.x, threadIdx.y] on GPU
    max_innermost_factor : Optional[int]
        The maximum size of the innermost factor. None means no limit
    vector_load_lens : Optional[List[int]]
        The length of vector lane in vectorized cooperative fetching.
        None means disable vectorization
    reuse_read : Optional[ReuseType]
        Data reuse configuration for reading. None means no reuse.
    reuse_write : Optional[ReuseType]
        Data reuse configuration for writing. None means no reuse.
    use_software_pipeline : bool
        Whether to use the software pipeline.
    """

    def __init__(
        self,
        intrin_groups: List[Mapping[str, str]],
        structure: str,
        tile_binds: Optional[List[str]] = None,
        max_innermost_factor: Optional[int] = None,
        vector_load_lens: Optional[List[int]] = None,
        reuse_read: Optional[ReuseType] = None,
        reuse_write: Optional[ReuseType] = None,
        use_software_pipeline: bool = False,
    ) -> None:
        # Serialize optional reuse configs for the C++ constructor.
        read_cfg = None if reuse_read is None else reuse_read.as_dict()
        write_cfg = None if reuse_write is None else reuse_write.as_dict()
        self.__init_handle_by_constructor__(
            _ffi_api.ScheduleRuleMultiLevelTilingTensorCore,  # type: ignore # pylint: disable=no-member
            intrin_groups,
            structure,
            tile_binds,
            max_innermost_factor,
            vector_load_lens,
            read_cfg,
            write_cfg,
            use_software_pipeline,
        )
@register_object("meta_schedule.MultiLevelTilingWideVector")
class MultiLevelTilingWideVector(ScheduleRule):
    """Extension of MultiLevelTiling for backends with wide vectors. The loop over the
    innermost spatial axis of the output buffer is always vectorized with the maximum
    vector length.

    Parameters
    ----------
    structure : str
        The tiling structure. 'SSRSRS' is recommended.
    vector_length_in_bits: int
        The length of a vector register in bits.
    max_innermost_factor : Optional[int]
        The maximum size of the innermost factor. None means no limit
    reuse_read : Optional[ReuseType]
        Data reuse configuration for reading. None means no reuse.
    reuse_write : Optional[ReuseType]
        Data reuse configuration for writing. None means no reuse.
    """

    def __init__(
        self,
        structure: str,
        vector_length_in_bits: int,
        max_innermost_factor: Optional[int] = None,
        reuse_read: Optional[ReuseType] = None,
        reuse_write: Optional[ReuseType] = None,
    ) -> None:
        # Serialize optional reuse configs for the C++ constructor.
        read_cfg = None if reuse_read is None else reuse_read.as_dict()
        write_cfg = None if reuse_write is None else reuse_write.as_dict()
        self.__init_handle_by_constructor__(
            _ffi_api.ScheduleRuleMultiLevelTilingWideVector,  # type: ignore # pylint: disable=no-member
            structure,
            vector_length_in_bits,
            max_innermost_factor,
            read_cfg,
            write_cfg,
        )
| 9,398 | 38.995745 | 104 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule_rule/apply_custom_rule.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Create a rule that applies customized rules registered using block attribute `schedule_rule`.
The rule will be dispatched according to target keys."""
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
@register_object("meta_schedule.ApplyCustomRule")
class ApplyCustomRule(ScheduleRule):
    """A rule that applies customized rules registered using block attribute `schedule_rule`.
    The rule will be dispatched according to target keys."""

    def __init__(self) -> None:
        """Construct the rule by delegating to the C++ constructor via FFI."""
        ctor = _ffi_api.ScheduleRuleApplyCustomRule  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor)
| 1,467 | 42.176471 | 96 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule_rule/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.schedule_rule package.
Meta Schedule schedule rules are used for modification of
blocks in a schedule. See also PostOrderApply.
"""
from .add_rfactor import AddRFactor
from .apply_custom_rule import ApplyCustomRule
from .auto_bind import AutoBind
from .auto_inline import AutoInline, InlineConstantScalars
from .cross_thread_reduction import CrossThreadReduction
from .multi_level_tiling import (
MultiLevelTiling,
MultiLevelTilingTensorCore,
MultiLevelTilingWideVector,
MultiLevelTilingWithIntrin,
ReuseType,
)
from .parallel_vectorize_unroll import ParallelizeVectorizeUnroll
from .random_compute_location import RandomComputeLocation
from .schedule_rule import PyScheduleRule, ScheduleRule
| 1,524 | 40.216216 | 65 | py |
tvm | tvm-main/python/tvm/meta_schedule/schedule_rule/cross_thread_reduction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Rules which apply cross-thread reduction to some reduction blocks correspondingly when needed"""
from typing import List
from tvm._ffi import register_object
from .. import _ffi_api
from .schedule_rule import ScheduleRule
@register_object("meta_schedule.CrossThreadReduction")
class CrossThreadReduction(ScheduleRule):
    """A schedule rule which applies cross-thread reduction to some reduction blocks
    correspondingly when needed

    Parameters
    ----------
    thread_extents: List[int]
        Candidates of thread axis extent (values are required to be positive).
    """

    def __init__(self, thread_extents: List[int]) -> None:
        """Construct the rule, forwarding the candidate extents to the C++ side."""
        ctor = _ffi_api.ScheduleRuleCrossThreadReduction  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor, thread_extents)
| 1,618 | 37.547619 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/mutator/mutator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta Schedule Mutator."""
from typing import TYPE_CHECKING, Callable, Dict, Optional
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.tir.schedule import Trace
from .. import _ffi_api
from ..utils import _get_default_str
if TYPE_CHECKING:
from ..tune_context import TuneContext
class Mutator(Object):
    """Mutator is designed to mutate the trace to explore the design space."""

    def _initialize_with_tune_context(self, context: "TuneContext") -> None:
        """Initialize the mutator with a tune context.

        Parameters
        ----------
        context : TuneContext
            The tuning context for initializing the mutator.
        """
        _ffi_api.MutatorInitializeWithTuneContext(  # type: ignore # pylint: disable=no-member
            self, context
        )

    def apply(self, trace: Trace) -> Optional[Trace]:
        """Apply the mutator function to the given trace.

        Parameters
        ----------
        trace : Trace
            The given trace for mutation.

        Returns
        -------
        trace : Optional[Trace]
            None if mutator failed, otherwise return the mutated trace.
        """
        # The trailing -1 fills the extra argument of the FFI entry point
        # (presumably a random-state seed — see the C++ signature to confirm).
        return _ffi_api.MutatorApply(self, trace, -1)  # type: ignore # pylint: disable=no-member

    def clone(self) -> "Mutator":
        """Clone the mutator.

        Returns
        -------
        mutator : Mutator
            The cloned mutator.
        """
        return _ffi_api.MutatorClone(self)  # type: ignore # pylint: disable=no-member

    @staticmethod
    def create(
        kind: Literal[
            "llvm",
            "cuda",
            "cuda-tensorcore",
            "hexagon",
        ]
    ) -> Dict["Mutator", float]:
        """Create the default mutators for the given backend kind.

        Parameters
        ----------
        kind : Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"]
            The kind of mutators.

        Returns
        -------
        mutators : Dict[Mutator, float]
            The default mutators mapped to a float weight
            (presumably a selection probability — defined by the C++ defaults).

        Raises
        ------
        ValueError
            If `kind` is not one of the supported backends.
        """
        funcs = {
            # pylint: disable=no-member
            "llvm": _ffi_api.MutatorDefaultLLVM,  # type: ignore
            "cuda": _ffi_api.MutatorDefaultCUDA,  # type: ignore
            "cuda-tensorcore": _ffi_api.MutatorDefaultCUDATensorCore,  # type: ignore
            "hexagon": _ffi_api.MutatorDefaultHexagon,  # type: ignore
            # pylint: enable=no-member
        }
        # Direct dict lookup instead of the previous linear scan over funcs.items().
        if kind in funcs:
            return funcs[kind]()
        raise ValueError(f"Unsupported kind {kind} for mutator creation.")


create = Mutator.create  # pylint: disable=invalid-name
@register_object("meta_schedule.PyMutator")
class _PyMutator(Mutator):
    """
    A TVM object mutator to support customization on the python side.
    This is NOT the user facing class for function overloading inheritance.
    See also: PyMutator
    """

    def __init__(
        self,
        f_initialize_with_tune_context: Callable = None,
        f_apply: Callable = None,
        f_clone: Callable = None,
        f_as_string: Callable = None,
    ):
        """Constructor."""
        # Forward the Python callbacks to the C++ PyMutator in positional order.
        callbacks = (f_initialize_with_tune_context, f_apply, f_clone, f_as_string)
        self.__init_handle_by_constructor__(
            _ffi_api.MutatorPyMutator,  # type: ignore # pylint: disable=no-member
            *callbacks,
        )
class PyMutator:
    """
    An abstract mutator with customized methods on the python-side.
    This is the user facing class for function overloading inheritance.
    Note: @derived_object is required for proper usage of any inherited class.
    """

    # Metadata consumed by @derived_object: `cls` names the backing TVM object
    # class, and `methods` lists the member functions exposed to the C++ side.
    _tvm_metadata = {
        "cls": _PyMutator,
        "methods": ["_initialize_with_tune_context", "apply", "clone", "__str__"],
    }

    def _initialize_with_tune_context(self, context: "TuneContext") -> None:
        """Initialize the mutator with a tune context.

        Parameters
        ----------
        context : TuneContext
            The tuning context for initializing the mutator.
        """
        raise NotImplementedError

    # NOTE(review): the unused `_` parameter mirrors the extra argument the FFI
    # layer supplies (cf. the trailing -1 in Mutator.apply) — confirm its meaning
    # against the C++ signature before relying on it.
    def apply(self, trace: Trace, _) -> Optional[Trace]:
        """Apply the mutator function to the given trace.

        Parameters
        ----------
        trace : Trace
            The given trace for mutation.

        Returns
        -------
        trace : Optional[Trace]
            None if mutator failed, otherwise return the mutated trace.
        """
        raise NotImplementedError

    def clone(self) -> Mutator:
        """Clone the mutator.

        Returns
        -------
        mutator : Mutator
            The cloned mutator.
        """
        raise NotImplementedError

    def __str__(self) -> str:
        """Get the mutator as string with name.

        Return
        ------
        result : str
            Get the mutator as string with name.
        """
        return _get_default_str(self)
| 5,779 | 28.191919 | 97 | py |
tvm | tvm-main/python/tvm/meta_schedule/mutator/mutate_tile_size.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Mutator that mutates the decision of instruction Sample-Perfect-Tile"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .mutator import Mutator
@register_object("meta_schedule.MutateTileSize")
class MutateTileSize(Mutator):
    """Mutator that mutates the decision of instruction Sample-Perfect-Tile"""

    def __init__(self) -> None:
        """Construct the mutator by delegating to the C++ FFI constructor."""
        ctor = _ffi_api.MutatorMutateTileSize  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor)
| 1,297 | 39.5625 | 87 | py |
tvm | tvm-main/python/tvm/meta_schedule/mutator/mutate_unroll.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Mutator that mutates auto unroll step"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .mutator import Mutator
@register_object("meta_schedule.MutateUnroll")
class MutateUnroll(Mutator):
    """Mutator that mutates auto unroll step"""

    def __init__(self) -> None:
        """Construct the mutator by delegating to the C++ FFI constructor."""
        ctor = _ffi_api.MutatorMutateUnroll  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor)
| 1,229 | 37.4375 | 85 | py |
tvm | tvm-main/python/tvm/meta_schedule/mutator/mutate_parallel.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Mutator that mutates the parallel extent"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .mutator import Mutator
@register_object("meta_schedule.MutateParallel")
class MutateParallel(Mutator):
    """Mutator that mutates the parallel extent"""

    def __init__(self, max_jobs_per_core: int) -> None:
        """Construct the mutator.

        Parameters
        ----------
        max_jobs_per_core : int
            The maximum number of jobs per core, forwarded to the C++ constructor.
        """
        ctor = _ffi_api.MutatorMutateParallel  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor, max_jobs_per_core)
| 1,351 | 38.764706 | 87 | py |
tvm | tvm-main/python/tvm/meta_schedule/mutator/mutate_thread_binding.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Mutator that mutates the thread binding extent"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .mutator import Mutator
@register_object("meta_schedule.MutateThreadBinding")
class MutateThreadBinding(Mutator):
    """Mutator that mutates the thread binding extent"""

    def __init__(self) -> None:
        """Construct the mutator by delegating to the C++ FFI constructor."""
        self.__init_handle_by_constructor__(
            # BUGFIX: was `_ffi_api.MutateThreadBinding`. Every sibling mutator's
            # FFI constructor uses the `MutatorMutate*` prefix (MutatorMutateTileSize,
            # MutatorMutateUnroll, MutatorMutateParallel, MutatorMutateComputeLocation),
            # so the thread-binding constructor is `MutatorMutateThreadBinding`.
            _ffi_api.MutatorMutateThreadBinding,  # type: ignore # pylint: disable=no-member
        )
| 1,308 | 38.666667 | 85 | py |
tvm | tvm-main/python/tvm/meta_schedule/mutator/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.mutator package.
Meta Schedule mutator that mutates the trace to explore the
design space.
"""
from .mutator import Mutator, PyMutator
from .mutate_compute_location import MutateComputeLocation
from .mutate_tile_size import MutateTileSize
from .mutate_thread_binding import MutateThreadBinding
from .mutate_parallel import MutateParallel
from .mutate_unroll import MutateUnroll
| 1,189 | 41.5 | 62 | py |
tvm | tvm-main/python/tvm/meta_schedule/mutator/mutate_compute_location.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A mutator that mutates the compute-at location decision of SampleComputeLocation"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .mutator import Mutator
@register_object("meta_schedule.MutateComputeLocation")
class MutateComputeLocation(Mutator):
    """A mutator that mutates the compute-at location decision of SampleComputeLocation"""

    def __init__(self) -> None:
        """Construct the mutator by delegating to the C++ FFI constructor."""
        ctor = _ffi_api.MutatorMutateComputeLocation  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor)
| 1,342 | 40.96875 | 94 | py |
tvm | tvm-main/python/tvm/meta_schedule/runner/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Runner utility functions"""
import itertools
from typing import Any, Callable, Dict, List
from ...runtime import Device, Module, ndarray
from .config import EvaluatorConfig
T_ARG_INFO_JSON_OBJ = List[Any]  # pylint: disable=invalid-name
T_ARG_INFO_JSON_OBJ_LIST = List[T_ARG_INFO_JSON_OBJ]  # pylint: disable=invalid-name
T_ARGUMENT = Any  # pylint: disable=invalid-name
T_ARGUMENT_LIST = List[T_ARGUMENT]  # pylint: disable=invalid-name


def alloc_argument_common(
    f_random_fill: Callable,
    device: "Device",
    args_info: T_ARG_INFO_JSON_OBJ_LIST,
    alloc_repeat: int,
) -> List[T_ARGUMENT_LIST]:
    """Common function to allocate the arguments

    Parameters
    ----------
    f_random_fill: Callable
        The callable function for random fill
    device: Device
        The device to allocate the arguments
    args_info: T_ARG_INFO_JSON_OBJ_LIST
        The arguments info; each entry is a JSON-style list whose first element
        is the argument-type tag (only "TENSOR" is supported)
    alloc_repeat: int
        The number of times to repeat the allocation

    Returns
    -------
    repeated_args: List[T_ARGUMENT_LIST]
        The allocated args

    Raises
    ------
    NotImplementedError
        If an entry in `args_info` carries an unsupported argument-type tag.
    """

    def alloc_tensor(_, dtype, shape) -> "ndarray.NDArray":
        # Allocate an uninitialized tensor on `device` and fill it randomly.
        arg = ndarray.empty(shape=shape, dtype=dtype, device=device)
        f_random_fill(arg)
        return arg

    def alloc_fail(*arg_info) -> None:
        raise NotImplementedError(arg_info)

    dispatcher: Dict[Any, Callable] = {
        "TENSOR": alloc_tensor,
    }

    repeated_args: List[T_ARGUMENT_LIST] = []
    for _ in range(alloc_repeat):
        args: T_ARGUMENT_LIST = []
        arg_info: T_ARG_INFO_JSON_OBJ
        for arg_info in args_info:
            arg_type = arg_info[0]
            # BUGFIX: the previous code used `dispatcher.get(arg_type, None)`,
            # which returned None for unknown tags (the `None: alloc_fail` dict
            # entry never acted as a default) and then crashed with
            # `TypeError: 'NoneType' object is not callable`. Defaulting to
            # `alloc_fail` raises the intended, informative NotImplementedError.
            handler = dispatcher.get(arg_type, alloc_fail)
            args.append(handler(*arg_info))
        repeated_args.append(args)
    return repeated_args
def run_evaluator_common(
    rt_mod: Module,
    device: Device,
    evaluator_config: EvaluatorConfig,
    repeated_args: List[T_ARGUMENT_LIST],
) -> List[float]:
    """Common function to run the evaluator

    Parameters
    ----------
    rt_mod: Module
        The runtime module
    device: Device
        The device to run the evaluator
    evaluator_config: EvaluatorConfig
        The evaluator config
    repeated_args: List[T_ARGUMENT_LIST]
        The repeated arguments

    Returns
    -------
    costs: List[float]
        The evaluator results, flattened over all argument sets
    """
    # Optional CPU cache-flush preprocessing, controlled by the config.
    f_preproc = (
        "cache_flush_cpu_non_first_arg" if evaluator_config.enable_cpu_cache_flush else ""
    )
    evaluator = rt_mod.time_evaluator(
        func_name=rt_mod.entry_name,
        dev=device,
        number=evaluator_config.number,
        repeat=evaluator_config.repeat,
        min_repeat_ms=evaluator_config.min_repeat_ms,
        f_preproc=f_preproc,
    )
    costs: List[float] = []
    for args in repeated_args:
        # Drain pending work on the device before timing this argument set.
        device.sync()
        profile_result = evaluator(*args)
        costs.extend(float(cost) for cost in profile_result.results)
    return costs
| 3,820 | 30.578512 | 84 | py |
tvm | tvm-main/python/tvm/meta_schedule/runner/config.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Configurations for measurements in the runner"""
import os
from threading import Thread
from typing import NamedTuple, Optional, Union
from tvm import rpc
class EvaluatorConfig(NamedTuple):
    """Config Details of Evaluator

    Parameters
    ----------
    number: int
        The number of times to run this function for taking average.
        We call these runs as one `repeat` of measurement.
    repeat: int
        The number of times to repeat the measurement.
        In total, the function will be invoked (1 + number x repeat) times,
        where the first one is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms: int
        Minimum repeat time in ms. if the execution latency is too short,
        increase the number of runs to the given time (in ms) to reduce the measurement error.
    enable_cpu_cache_flush: bool
        Whether to flush the cache on CPU.

    Note
    ----
    The total number of actual executions is 1+number*repeat because we would warm up 1 time
    before actual run. The number of runs would be increased if run time is below min_repeat_ms.
    """

    number: int = 3
    repeat: int = 1
    min_repeat_ms: int = 100
    enable_cpu_cache_flush: bool = False

    @staticmethod
    def _normalized(config: Optional["EvaluatorConfig"]) -> "EvaluatorConfig":
        """Return a fresh EvaluatorConfig, substituting all defaults when None is given."""
        if config is None:
            return EvaluatorConfig()
        # Rebuild from the existing fields in declaration order.
        return EvaluatorConfig(*config)
class RPCConfig(NamedTuple):
    """RPC configuration

    Parameters
    ----------
    tracker_host: str
        Host of the RPC Tracker
    tracker_port: int
        Port of the RPC Tracker
    tracker_key: str
        Key of the Tracker
    session_timeout_sec: float
        Timeout of the RPC session
    session_priority: int
        Priority of the RPC session
    """

    tracker_host: Optional[str] = None
    tracker_port: Union[None, int, str] = None
    tracker_key: Optional[str] = None
    session_priority: int = 1
    # NOTE(review): documented as float above but annotated int here — confirm
    # which is intended before changing either.
    session_timeout_sec: int = 10

    def _sanity_check(self) -> None:
        # Fail fast on any tracker field still unset after environment-variable
        # fallback, pointing the user at the corresponding TVM_TRACKER_* variable.
        err_str = (
            "RPCConfig.{0} is not provided. Please provide it explicitly,"
            "or set environment variable {1}"
        )
        if self.tracker_host is None:
            raise ValueError(err_str.format("tracker_host", "TVM_TRACKER_HOST"))
        if self.tracker_port is None:
            raise ValueError(err_str.format("tracker_port", "TVM_TRACKER_PORT"))
        if self.tracker_key is None:
            raise ValueError(err_str.format("tracker_key", "TVM_TRACKER_KEY"))

    @staticmethod
    def _normalized(config: Optional["RPCConfig"]) -> "RPCConfig":
        # Fill unset tracker fields from TVM_TRACKER_HOST/PORT/KEY, coerce the
        # port to int, then validate the fully-resolved config.
        if config is None:
            config = RPCConfig()
        tracker_host = config.tracker_host or os.environ.get("TVM_TRACKER_HOST", None)
        tracker_port = config.tracker_port or os.environ.get("TVM_TRACKER_PORT", None)
        tracker_key = config.tracker_key or os.environ.get("TVM_TRACKER_KEY", None)
        if isinstance(tracker_port, str):
            tracker_port = int(tracker_port)
        config = RPCConfig(
            tracker_host=tracker_host,
            tracker_port=tracker_port,
            tracker_key=tracker_key,
            session_priority=config.session_priority,
            session_timeout_sec=config.session_timeout_sec,
        )
        config._sanity_check()  # pylint: disable=protected-access
        return config

    def connect_tracker(self) -> rpc.TrackerSession:
        """Connect to the tracker

        Returns
        -------
        tracker : TrackerSession
            The connected tracker session
        """
        tracker: Optional[rpc.TrackerSession] = None

        # Connect in a worker thread so the join() below enforces a timeout
        # even if rpc.connect_tracker blocks.
        def _connect():
            nonlocal tracker
            tracker = rpc.connect_tracker(self.tracker_host, self.tracker_port)

        t = Thread(target=_connect)
        t.start()
        t.join(self.session_timeout_sec)
        # The thread still running (or no tracker assigned) means the connect
        # attempt timed out or failed.
        if t.is_alive() or tracker is None:
            raise ValueError(
                "Unable to connect to the tracker using the following configuration:\n"
                f"    tracker host: {self.tracker_host}\n"
                f"    tracker port: {self.tracker_port}\n"
                f"    timeout (sec): {self.session_timeout_sec}\n"
                "Please check the tracker status via the following command:\n"
                "     python3 -m tvm.exec.query_rpc_tracker "
                f"--host {self.tracker_host} --port {self.tracker_port}"
            )
        return tracker

    def connect_server(self) -> rpc.RPCSession:
        """Connect to the server

        Returns
        -------
        session : RPCSession
            The connected rpc session
        """
        tracker = self.connect_tracker()
        session: rpc.RPCSession = tracker.request(
            key=self.tracker_key,
            priority=self.session_priority,
            session_timeout=self.session_timeout_sec,
        )
        return session

    def count_num_servers(self, allow_missing=True) -> int:
        """Count the number of servers available in the tracker

        Parameters
        ----------
        allow_missing : bool
            Whether to allow no server to be found.

        Returns
        -------
        num_servers : int
            The number of servers
        """
        tracker = self.connect_tracker()
        tracker_summary = tracker.summary()
        result: int = 0
        for item in tracker_summary["server_info"]:
            # Each server entry key appears to have the form "<prefix>:<key>";
            # compare only the key part against our tracker_key.
            _, item_key = item["key"].split(":")
            if item_key == self.tracker_key:
                result += 1
        if result == 0 and not allow_missing:
            raise ValueError(
                "Unable to find servers with the specific key using the following configuration:\n"
                f"    tracker host: {self.tracker_host}\n"
                f"    tracker port: {self.tracker_port}\n"
                f"    tracker key: {self.tracker_key}\n"
                f"    timeout (sec): {self.session_timeout_sec}\n"
                "Please check the tracker status via the following command:\n"
                "     python3 -m tvm.exec.query_rpc_tracker "
                f"--host {self.tracker_host} --port {self.tracker_port}\n"
                f'and look for key: "{self.tracker_key}"'
            )
        return result
| 7,373 | 35.686567 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/runner/runner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Runners"""
from typing import Callable, List, Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from .. import _ffi_api
from ..arg_info import ArgInfo
@register_object("meta_schedule.RunnerInput")
class RunnerInput(Object):
    """One unit of work for a runner: a built artifact plus everything
    needed to execute it.

    Parameters
    ----------
    artifact_path : str
        The path to the built artifact.
    device_type : str
        The device type.
    args_info : List[ArgInfo]
        The argument information.
    """

    artifact_path: str
    device_type: str
    args_info: List[ArgInfo]

    def __init__(self, artifact_path: str, device_type: str, args_info: List[ArgInfo]) -> None:
        """Construct a RunnerInput; see the class docstring for the fields."""
        # The object lives on the C++ side; forward all fields over the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.RunnerInput,  # type: ignore # pylint: disable=no-member
            artifact_path,
            device_type,
            args_info,
        )
@register_object("meta_schedule.RunnerResult")
class RunnerResult(Object):
    """Outcome of a single measurement: either timings or an error.

    Parameters
    ----------
    run_secs : Optional[List[float]]
        The run time in seconds.
    error_msg : Optional[str]
        The error message, if any.
    """

    run_secs: Optional[List[float]]
    error_msg: Optional[str]

    def __init__(self, run_secs: Optional[List[float]], error_msg: Optional[str]) -> None:
        """Construct a RunnerResult; see the class docstring for the fields."""
        # The object lives on the C++ side; forward both fields over the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.RunnerResult,  # type: ignore # pylint: disable=no-member
            run_secs,
            error_msg,
        )
@register_object("meta_schedule.RunnerFuture")
class RunnerFuture(Object):
    """
    A class to fetch asynchronous runner's output.
    This is NOT the user facing class for function overloading inheritance.
    Can be used for general return type of runner.

    See also: PyRunnerFuture
    """

    def __init__(self, f_done: Callable, f_result: Optional[Callable] = None) -> None:
        """Constructor

        Parameters
        ----------
        f_done : Callable
            Callback reporting whether the underlying run has finished.
        f_result : Optional[Callable]
            Callback fetching the result once the run has finished.
        """
        self.__init_handle_by_constructor__(
            _ffi_api.RunnerFuture,  # type: ignore # pylint: disable=no-member
            f_done,
            f_result,
        )

    def done(self) -> bool:
        """Check whether the runner has finished."""
        return _ffi_api.RunnerFutureDone(self)  # type: ignore # pylint: disable=no-member

    def result(self) -> RunnerResult:
        """Fetch the runner's output if it is ready."""
        return _ffi_api.RunnerFutureResult(self)  # type: ignore # pylint: disable=no-member
class PyRunnerFuture:
    """
    A class to fetch asynchronous runner's output with customizable function on the python side.
    This is the user facing class for function overloading inheritance.
    Can NOT be used for general return type of runner.

    Note: @derived_object is required for proper usage of any inherited class.

    Example:
        @derived_object
        class LocalRunnerFuture(PyRunnerFuture):
            ...
    """

    # Metadata consumed by @derived_object: the backing TVM object class and
    # the methods forwarded across the FFI boundary.
    _tvm_metadata = {
        "cls": RunnerFuture,
        "methods": ["done", "result"],
    }

    def done(self) -> bool:
        """Check whether the runner has finished."""
        raise NotImplementedError

    def result(self) -> RunnerResult:
        """Fetch the runner's output if it is ready."""
        raise NotImplementedError
@register_object("meta_schedule.Runner")
class Runner(Object):
    """The abstract runner interface"""

    RunnerType = Union["Runner", Literal["local", "rpc"]]

    def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
        """Run the built artifact and get runner futures.

        Parameters
        ----------
        runner_inputs : List[RunnerInput]
            The inputs to the runner.

        Returns
        -------
        runner_futures: List[RunnerFuture]
            The runner futures.
        """
        return _ffi_api.RunnerRun(self, runner_inputs)  # type: ignore # pylint: disable=no-member

    @staticmethod
    def create(  # pylint: disable=keyword-arg-before-vararg
        kind: Literal["local", "rpc"] = "local",
        *args,
        **kwargs,
    ) -> "Runner":
        """Create a Runner of the requested kind ("local" or "rpc")."""
        from . import LocalRunner, RPCRunner  # pylint: disable=import-outside-toplevel

        if kind == "rpc":
            return RPCRunner(*args, **kwargs)  # type: ignore
        if kind == "local":
            # LocalRunner is single-worker; drop the RPC-only option if given.
            kwargs.pop("max_workers", None)
            return LocalRunner(*args, **kwargs)  # type: ignore
        raise ValueError(f"Unknown Runner: {kind}")


create = Runner.create  # pylint: disable=invalid-name
@register_object("meta_schedule.PyRunner")
class _PyRunner(Runner):
    """
    A TVM object runner to support customization on the python side.
    This is NOT the user facing class for function overloading inheritance.

    See also: PyRunner
    """

    def __init__(self, f_run: Optional[Callable] = None) -> None:
        """Constructor

        Parameters
        ----------
        f_run : Optional[Callable]
            Python-side callback implementing :py:meth:`Runner.run`.
        """
        self.__init_handle_by_constructor__(
            _ffi_api.RunnerPyRunner,  # type: ignore # pylint: disable=no-member
            f_run,
        )
class PyRunner:
    """
    An abstract runner with customized run method on the python-side.
    This is the user facing class for function overloading inheritance.

    Note: @derived_object is required for proper usage of any inherited class.
    """

    # Metadata consumed by @derived_object: the backing TVM object class and
    # the methods forwarded across the FFI boundary.
    _tvm_metadata = {
        "cls": _PyRunner,
        "methods": ["run"],
    }

    def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
        """Run the built artifact and get runner futures.

        Parameters
        ----------
        runner_inputs : List[RunnerInput]
            The inputs to the runner.

        Returns
        -------
        runner_futures: List[RunnerFuture]
            The runner futures.
        """
        raise NotImplementedError
| 7,196 | 27.44664 | 98 | py |
tvm | tvm-main/python/tvm/meta_schedule/runner/rpc_runner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC Runner"""
import concurrent.futures
import os.path as osp
from contextlib import contextmanager
from typing import Callable, List, Optional, Union
from tvm.contrib.popen_pool import PopenPoolExecutor
from tvm.rpc import RPCSession
from tvm.runtime import Device, Module
from ..logging import get_logger
from ..profiler import Profiler
from ..utils import (
derived_object,
get_global_func_on_rpc_session,
get_global_func_with_default_on_worker,
)
from .config import EvaluatorConfig, RPCConfig
from .runner import PyRunner, PyRunnerFuture, RunnerFuture, RunnerInput, RunnerResult
from .utils import (
T_ARG_INFO_JSON_OBJ_LIST,
T_ARGUMENT_LIST,
alloc_argument_common,
run_evaluator_common,
)
logger = get_logger(__name__)  # pylint: disable=invalid-name

# Signatures of the customizable hooks accepted by RPCRunner. Each hook may be
# supplied either as a callable or as the name of a registered global function.
T_CREATE_SESSION = Callable[  # pylint: disable=invalid-name
    [RPCConfig],  # The RPC configuration
    RPCSession,  # The RPC Session
]
T_UPLOAD_MODULE = Callable[  # pylint: disable=invalid-name
    [
        RPCSession,  # The RPC Session
        str,  # local path to the artifact
        str,  # remote path to the artifact
    ],
    Module,  # the Module opened on the remote
]
T_ALLOC_ARGUMENT = Callable[  # pylint: disable=invalid-name
    [
        RPCSession,  # The RPC Session
        Device,  # The device on the remote
        T_ARG_INFO_JSON_OBJ_LIST,  # The metadata information of the arguments to be allocated
        int,  # The number of repeated allocations to be done
    ],
    List[T_ARGUMENT_LIST],  # A list of argument lists
]
T_RUN_EVALUATOR = Callable[  # pylint: disable=invalid-name
    [
        RPCSession,  # The RPC Session
        Module,  # The Module opened on the remote
        Device,  # The device on the remote
        EvaluatorConfig,  # The evaluator configuration
        List[T_ARGUMENT_LIST],  # A list of argument lists
    ],
    List[float],  # A list of running time
]
T_CLEANUP = Callable[  # pylint: disable=invalid-name
    [
        Optional[RPCSession],  # The RPC Session to be cleaned up
        Optional[str],  # remote path to the artifact
    ],
    None,
]
@derived_object
class RPCRunnerFuture(PyRunnerFuture):
    """Future wrapping a worker-pool job that measures over RPC.

    Parameters
    ----------
    future: concurrent.futures.Future
        The pool future that yields the measured run times.
    timeout_sec: float
        The timeout in seconds, reported in the error message on timeout.
    """

    future: concurrent.futures.Future
    timeout_sec: float

    def __init__(self, future: concurrent.futures.Future, timeout_sec: float) -> None:
        """Constructor; see the class docstring for the fields."""
        super().__init__()
        self.future = future
        self.timeout_sec = timeout_sec

    def done(self) -> bool:
        """Delegate to the wrapped pool future."""
        return self.future.done()

    def result(self) -> RunnerResult:
        """Map the wrapped future's outcome onto a RunnerResult."""
        try:
            measured: List[float] = self.future.result()
        except TimeoutError:
            msg = f"RPCRunner: Timeout, killed after {self.timeout_sec} seconds"
            return RunnerResult(None, error_msg=msg)
        except Exception as err:  # pylint: disable=broad-except
            return RunnerResult(None, error_msg="RPCRunner: An exception occurred\n" + str(err))
        return RunnerResult(measured, None)
@derived_object
class RPCRunner(PyRunner):
    """Runner that executes built artifacts on a remote device through RPC.

    Parameters
    ----------
    rpc_config: RPCConfig
        The rpc configuration.
    evaluator_config: EvaluatorConfig
        The evaluator configuration.
    cooldown_sec: float
        The cooldown in seconds. TODO(@junrushao1994,@zxybazh): This is not used yet.
    alloc_repeat: int
        The number of times to repeat the allocation.
    f_create_session: Union[T_CREATE_SESSION, str, None]
        The function name to create the session or the function itself.
    f_upload_module: Union[T_UPLOAD_MODULE, str, None]
        The function name to upload the module or the function itself.
    f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None]
        The function name to allocate the arguments or the function itself.
    f_run_evaluator: Union[T_RUN_EVALUATOR, str, None]
        The function name to run the evaluator or the function itself.
    f_cleanup: Union[T_CLEANUP, str, None]
        The function name to cleanup the session or the function itself.
    pool: PopenPoolExecutor
        The popen pool executor.

    Note
    ----
    The expected signatures of the customizable hooks are given by the
    module-level type aliases ``T_CREATE_SESSION``, ``T_UPLOAD_MODULE``,
    ``T_ALLOC_ARGUMENT``, ``T_RUN_EVALUATOR`` and ``T_CLEANUP``.
    """

    rpc_config: RPCConfig
    evaluator_config: EvaluatorConfig
    cooldown_sec: float
    alloc_repeat: int
    f_create_session: Union[T_CREATE_SESSION, str, None]
    f_upload_module: Union[T_UPLOAD_MODULE, str, None]
    f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None]
    f_run_evaluator: Union[T_RUN_EVALUATOR, str, None]
    f_cleanup: Union[T_CLEANUP, str, None]
    pool: PopenPoolExecutor

    def __init__(
        self,
        rpc_config: Optional[RPCConfig] = None,
        evaluator_config: Optional[EvaluatorConfig] = None,
        cooldown_sec: float = 0.0,
        alloc_repeat: int = 1,
        f_create_session: Union[T_CREATE_SESSION, str, None] = None,
        f_upload_module: Union[T_UPLOAD_MODULE, str, None] = None,
        f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None] = None,
        f_run_evaluator: Union[T_RUN_EVALUATOR, str, None] = None,
        f_cleanup: Union[T_CLEANUP, str, None] = None,
        max_workers: Optional[int] = None,
        initializer: Optional[Callable[[], None]] = None,
    ) -> None:
        """Constructor.

        See the class docstring for the shared parameters. ``max_workers``
        bounds the number of concurrent RPC connections (defaults to 1) and
        ``initializer`` runs once in each worker process.
        """
        super().__init__()
        # Fill unset fields from environment variables and validate eagerly.
        self.rpc_config = RPCConfig._normalized(rpc_config)
        self.evaluator_config = EvaluatorConfig._normalized(evaluator_config)
        self.cooldown_sec = cooldown_sec
        self.alloc_repeat = alloc_repeat
        self.f_create_session = f_create_session
        self.f_upload_module = f_upload_module
        self.f_alloc_argument = f_alloc_argument
        self.f_run_evaluator = f_run_evaluator
        self.f_cleanup = f_cleanup
        if max_workers is None:
            max_workers = 1
        logger.info("RPCRunner: max_workers = %d", max_workers)
        self.pool = PopenPoolExecutor(
            max_workers=max_workers,
            initializer=initializer,
        )
        self._sanity_check()

    def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
        """Dispatch one worker-pool job per input; each returned future is
        bounded by the RPC session timeout."""
        futures: List[RunnerFuture] = []
        for item in runner_inputs:
            submitted = self.pool.submit(
                _worker_func,
                self.f_create_session,
                self.f_upload_module,
                self.f_alloc_argument,
                self.f_run_evaluator,
                self.f_cleanup,
                self.rpc_config,
                self.evaluator_config,
                self.alloc_repeat,
                str(item.artifact_path),
                str(item.device_type),
                tuple(info.as_json() for info in item.args_info),
            )
            futures.append(
                RPCRunnerFuture(  # type: ignore
                    future=submitted,
                    timeout_sec=self.rpc_config.session_timeout_sec,
                )
            )
        return futures

    def _sanity_check(self) -> None:
        """Resolve every hook name inside a worker process so that a bad
        registration fails at construction time rather than mid-tuning."""

        def _check(
            f_create_session,
            f_upload_module,
            f_alloc_argument,
            f_run_evaluator,
            f_cleanup,
        ) -> None:
            get_global_func_with_default_on_worker(name=f_create_session, default=None)
            get_global_func_with_default_on_worker(name=f_upload_module, default=None)
            get_global_func_with_default_on_worker(name=f_alloc_argument, default=None)
            get_global_func_with_default_on_worker(name=f_run_evaluator, default=None)
            get_global_func_with_default_on_worker(name=f_cleanup, default=None)

        self.pool.submit(
            _check,
            self.f_create_session,
            self.f_upload_module,
            self.f_alloc_argument,
            self.f_run_evaluator,
            self.f_cleanup,
        ).result()
def _worker_func(
    _f_create_session: Union[T_CREATE_SESSION, str, None],
    _f_upload_module: Union[T_UPLOAD_MODULE, str, None],
    _f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None],
    _f_run_evaluator: Union[T_RUN_EVALUATOR, str, None],
    _f_cleanup: Union[T_CLEANUP, str, None],
    rpc_config: RPCConfig,
    evaluator_config: EvaluatorConfig,
    alloc_repeat: int,
    artifact_path: str,
    device_type: str,
    args_info: T_ARG_INFO_JSON_OBJ_LIST,
) -> List[float]:
    """Worker-process entry point for RPCRunner.

    Creates an RPC session, uploads the artifact, allocates arguments,
    times the kernel, and always cleans up — each step profiled.

    Parameters mirror RPCRunner's hooks and configuration; the ``_f_*``
    values are either callables or registered global function names.

    Returns
    -------
    costs : List[float]
        The measured run times in seconds.
    """
    # Step 0. Get the registered functions
    f_create_session: T_CREATE_SESSION = get_global_func_with_default_on_worker(
        _f_create_session, default_create_session
    )
    f_upload_module: T_UPLOAD_MODULE = get_global_func_with_default_on_worker(
        _f_upload_module, default_upload_module
    )
    f_alloc_argument: T_ALLOC_ARGUMENT = get_global_func_with_default_on_worker(
        _f_alloc_argument, default_alloc_argument
    )
    f_run_evaluator: T_RUN_EVALUATOR = get_global_func_with_default_on_worker(
        _f_run_evaluator, default_run_evaluator
    )
    f_cleanup: T_CLEANUP = get_global_func_with_default_on_worker(_f_cleanup, default_cleanup)
    # Managed resources
    session: Optional[RPCSession] = None
    remote_path: Optional[str] = None

    @contextmanager
    def resource_handler():
        try:
            yield
        finally:
            # Final step. Always clean up
            with Profiler.timeit("RPCRunner/cleanup"):
                f_cleanup(session, remote_path)

    with resource_handler():
        # Step 1. Create session
        with Profiler.timeit("RPCRunner/create_session"):
            session = f_create_session(rpc_config)
            device = session.device(dev_type=device_type, dev_id=0)
        # Step 2. Upload the module
        with Profiler.timeit("RPCRunner/upload_module"):
            _, remote_path = osp.split(artifact_path)
            local_path: str = artifact_path
            rt_mod: Module = f_upload_module(session, local_path, remote_path)
        # Step 3: Allocate input arguments
        with Profiler.timeit("RPCRunner/alloc_argument"):
            repeated_args: List[T_ARGUMENT_LIST] = f_alloc_argument(
                session,
                device,
                args_info,
                alloc_repeat,
            )
        # Step 4: Run time_evaluator
        # NOTE: the label previously read "LocalRunner/run_evaluator" — a
        # copy-paste slip that attributed RPC evaluation time to LocalRunner
        # in the profiler summary.
        with Profiler.timeit("RPCRunner/run_evaluator"):
            costs: List[float] = f_run_evaluator(
                session,
                rt_mod,
                device,
                evaluator_config,
                repeated_args,
            )
    return costs
def default_create_session(rpc_config: RPCConfig) -> RPCSession:
    """Default session factory: connect to a server via the given config.

    Parameters
    ----------
    rpc_config : RPCConfig
        The configuration of the RPC session.

    Returns
    -------
    session : RPCSession
        The created rpc session.
    """
    return rpc_config.connect_server()
def default_upload_module(
    session: RPCSession,
    local_path: str,
    remote_path: str,
) -> Module:
    """Default module uploader: push the artifact to the remote and open it.

    Parameters
    ----------
    session: RPCSession
        The session to upload the module through.
    local_path: str
        The local path of the module.
    remote_path: str
        The remote path to place the module at.

    Returns
    -------
    rt_mod : Module
        The runtime module loaded on the remote.
    """
    session.upload(local_path, remote_path)
    return session.load_module(remote_path)
def default_alloc_argument(
    session: RPCSession,
    device: Device,
    args_info: T_ARG_INFO_JSON_OBJ_LIST,
    alloc_repeat: int,
) -> List[T_ARGUMENT_LIST]:
    """Default argument allocator: random-fill buffers on the remote device.

    Parameters
    ----------
    session: RPCSession
        The session to allocate the arguments through.
    device: Device
        The remote device to allocate the arguments on.
    args_info: T_ARG_INFO_JSON_OBJ_LIST
        The arguments info.
    alloc_repeat: int
        The number of times to repeat the allocation.

    Returns
    -------
    repeated_args: List[Args]
        The allocated argument lists, one per repeat.
    """
    # The random-fill kernel must run on the remote, so look it up through
    # the RPC session rather than in the local registry.
    f_random_fill = get_global_func_on_rpc_session(
        session,
        "tvm.contrib.random.random_fill_for_measure",
        "Please make sure 'USE_RANDOM' is turned ON in the config.cmake on the RPC server.",
    )
    return alloc_argument_common(f_random_fill, device, args_info, alloc_repeat)
def default_run_evaluator(
    session: RPCSession,  # pylint: disable=unused-argument
    rt_mod: Module,
    device: Device,
    evaluator_config: EvaluatorConfig,
    repeated_args: List[T_ARGUMENT_LIST],
) -> List[float]:
    """Default evaluator: time the module with the shared evaluator helper.

    Parameters
    ----------
    session: RPCSession
        Unused; present so the hook signature matches T_RUN_EVALUATOR.
    rt_mod: Module
        The runtime module to time.
    device: Device
        The device to run on.
    evaluator_config: EvaluatorConfig
        The evaluator config.
    repeated_args: List[T_ARGUMENT_LIST]
        The repeated arguments.

    Returns
    -------
    costs: List[float]
        The measured run times in seconds.
    """
    return run_evaluator_common(rt_mod, device, evaluator_config, repeated_args)
def default_cleanup(
    session: Optional[RPCSession],
    remote_path: Optional[str],
) -> None:
    """Default function to clean up the session

    Removes the uploaded artifact and its compiled shared object from the
    remote. A no-op unless both the session and the remote path exist.

    Parameters
    ----------
    session: RPCSession
        The session to clean up
    remote_path: str
        The remote path to clean up
    """
    if session is not None and remote_path is not None:
        session.remove(remote_path)
        session.remove(remote_path + ".so")
        # NOTE(review): remove("") presumably clears the session's upload
        # directory itself — confirm against RPCSession.remove semantics.
        session.remove("")
| 17,682 | 32.052336 | 96 | py |
tvm | tvm-main/python/tvm/meta_schedule/runner/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.runner package.
Meta Schedule runners that runs an artifact either locally or through the RPC interface
"""
from .config import EvaluatorConfig, RPCConfig
from .local_runner import LocalRunner, LocalRunnerFuture
from .rpc_runner import RPCRunner
from .runner import (
PyRunner,
PyRunnerFuture,
Runner,
RunnerFuture,
RunnerInput,
RunnerResult,
create,
)
| 1,192 | 35.151515 | 87 | py |
tvm | tvm-main/python/tvm/meta_schedule/runner/local_runner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Local Runner"""
from contextlib import contextmanager
from typing import Callable, List, Optional, Union
import subprocess
import tvm
from ...contrib.popen_pool import PopenPoolExecutor
from ...runtime import Device, Module
from ..logging import get_logger
from ..profiler import Profiler
from ..utils import derived_object, get_global_func_with_default_on_worker
from .config import EvaluatorConfig
from .runner import PyRunner, PyRunnerFuture, RunnerFuture, RunnerInput, RunnerResult
from .utils import (
T_ARG_INFO_JSON_OBJ_LIST,
T_ARGUMENT_LIST,
alloc_argument_common,
run_evaluator_common,
)
logger = get_logger(__name__)  # pylint: disable=invalid-name

# Signatures of the customizable hooks accepted by LocalRunner. Each hook may
# be supplied either as a callable or as the name of a registered global
# function.
T_ALLOC_ARGUMENT = Callable[  # pylint: disable=invalid-name
    [
        Device,  # The device on the remote
        T_ARG_INFO_JSON_OBJ_LIST,  # The metadata information of the arguments to be allocated
        int,  # The number of repeated allocations to be done
    ],
    List[T_ARGUMENT_LIST],  # A list of argument lists
]
T_RUN_EVALUATOR = Callable[  # pylint: disable=invalid-name
    [
        Module,  # The Module opened on the remote
        Device,  # The device on the remote
        EvaluatorConfig,  # The evaluator configuration
        List[T_ARGUMENT_LIST],  # A list of argument lists
    ],
    List[float],  # A list of running time
]
T_CLEANUP = Callable[  # pylint: disable=invalid-name
    [],
    None,
]
@derived_object
class LocalRunnerFuture(PyRunnerFuture):
    """Local based runner future

    Parameters
    ----------
    res: Optional[List[float]]
        The optional result as a list of float.
    error_message: Optional[str]
        The optional error message.

    Note
    ----
    Only one of the parameters should be None upon the creation
    of LocalRunnerFuture object
    """

    res: Optional[List[float]]
    error_message: Optional[str]

    def __init__(
        self, res: Optional[List[float]] = None, error_message: Optional[str] = None
    ) -> None:
        """Constructor

        Parameters
        ----------
        res: Optional[List[float]]
            The result of this LocalRunnerFuture
        error_message: Optional[str]
            The stringfied error message of any exception during execution

        Raises
        ------
        AttributeError
            If neither or both of ``res`` and ``error_message`` are provided.
        """
        super().__init__()
        self.res = res
        self.error_message = error_message
        # Exactly one of the two must be set: XOR check on None-ness.
        if (res is None) == (error_message is None):
            # Fixed message: the two literals previously concatenated to
            # "creationof" with no separating space.
            raise AttributeError(
                "Only one of the two parameters should be None upon the creation "
                "of LocalRunnerFuture object."
            )

    def done(self) -> bool:
        # A local run is synchronous, so by construction it is always finished.
        return True

    def result(self) -> RunnerResult:
        return RunnerResult(self.res, self.error_message)
def _worker_func(
    _f_alloc_argument: Optional[str],
    _f_run_evaluator: Optional[str],
    _f_cleanup: Optional[str],
    evaluator_config: EvaluatorConfig,
    alloc_repeat: int,
    artifact_path: str,
    device_type: str,
    args_info: T_ARG_INFO_JSON_OBJ_LIST,
) -> List[float]:
    """Worker-process entry point for LocalRunner.

    Loads the built artifact, allocates input arguments, times the kernel,
    and always runs the cleanup hook — each step profiled. The ``_f_*``
    parameters are either registered global function names or None (in which
    case the module-level defaults are used).

    Returns
    -------
    costs : List[float]
        The measured run times in seconds.
    """
    # Resolve each hook, falling back to the module-level default.
    f_alloc_argument: T_ALLOC_ARGUMENT = get_global_func_with_default_on_worker(
        _f_alloc_argument, default_alloc_argument
    )
    f_run_evaluator: T_RUN_EVALUATOR = get_global_func_with_default_on_worker(
        _f_run_evaluator, default_run_evaluator
    )
    f_cleanup: T_CLEANUP = get_global_func_with_default_on_worker(_f_cleanup, default_cleanup)

    @contextmanager
    def resource_handler():
        # Guarantees cleanup even if any measurement step raises.
        try:
            yield
        finally:
            # Final step. Always clean up
            with Profiler.timeit("LocalRunner/cleanup"):
                f_cleanup()

    with resource_handler():
        # Step 1: create the local runtime module
        with Profiler.timeit("LocalRunner/load_module"):
            rt_mod = tvm.runtime.load_module(artifact_path)
        # Step 2: Allocate input arguments
        with Profiler.timeit("LocalRunner/alloc_argument"):
            device = tvm.runtime.device(dev_type=device_type, dev_id=0)
            repeated_args: List[T_ARGUMENT_LIST] = f_alloc_argument(
                device,
                args_info,
                alloc_repeat,
            )
        # Step 3: Run time_evaluator
        with Profiler.timeit("LocalRunner/run_evaluator"):
            costs: List[float] = f_run_evaluator(
                rt_mod,
                device,
                evaluator_config,
                repeated_args,
            )
    return costs
@derived_object
class LocalRunner(PyRunner):
    """Runner that measures built artifacts on the local machine, using a
    single-worker subprocess pool to isolate and time out measurements.

    Parameters
    ----------
    timeout_sec: float
        The timeout setting, in seconds, for each measurement.
    evaluator_config: EvaluatorConfig
        The evaluator configuration.
    cooldown_sec: float
        The cooldown in seconds.
    alloc_repeat: int
        The number of times to repeat the allocation.
    f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None]
        The function name to allocate the arguments or the function itself.
    f_run_evaluator: Union[T_RUN_EVALUATOR, str, None]
        The function name to run the evaluator or the function itself.
    f_cleanup: Union[T_CLEANUP, str, None]
        The function name to cleanup the session or the function itself.
    pool: PopenPoolExecutor
        The popen pool executor.

    Note
    ----
    The expected signatures of the customizable hooks are given by the
    module-level type aliases ``T_ALLOC_ARGUMENT``, ``T_RUN_EVALUATOR``
    and ``T_CLEANUP``.
    """

    timeout_sec: float
    evaluator_config: EvaluatorConfig
    cooldown_sec: float
    alloc_repeat: int
    f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None]
    f_run_evaluator: Union[T_RUN_EVALUATOR, str, None]
    f_cleanup: Union[T_CLEANUP, str, None]
    pool: PopenPoolExecutor

    def __init__(
        self,
        timeout_sec: float = 30,
        evaluator_config: Optional[EvaluatorConfig] = None,
        cooldown_sec: float = 0.0,
        alloc_repeat: int = 1,
        f_alloc_argument: Union[T_ALLOC_ARGUMENT, str, None] = None,
        f_run_evaluator: Union[T_RUN_EVALUATOR, str, None] = None,
        f_cleanup: Union[T_CLEANUP, str, None] = None,
        initializer: Optional[Callable[[], None]] = None,
    ) -> None:
        """Constructor.

        See the class docstring for the shared parameters; ``initializer``
        runs once in the worker process.
        """
        super().__init__()
        self.timeout_sec = timeout_sec
        self.evaluator_config = EvaluatorConfig._normalized(evaluator_config)
        self.cooldown_sec = cooldown_sec
        self.alloc_repeat = alloc_repeat
        self.f_alloc_argument = f_alloc_argument
        self.f_run_evaluator = f_run_evaluator
        self.f_cleanup = f_cleanup
        logger.info("LocalRunner: max_workers = 1")
        self.pool = PopenPoolExecutor(
            max_workers=1,  # one local worker
            timeout=timeout_sec,
            initializer=initializer,
            stderr=subprocess.DEVNULL,  # suppress the stderr output
        )
        self._sanity_check()

    def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
        """Measure each input on the local device. The pool has a single
        worker, so each job is submitted and waited on before the next; the
        returned futures are therefore already resolved."""
        results: List[RunnerFuture] = []
        for item in runner_inputs:
            submitted = self.pool.submit(
                _worker_func,
                self.f_alloc_argument,
                self.f_run_evaluator,
                self.f_cleanup,
                self.evaluator_config,
                self.alloc_repeat,
                str(item.artifact_path),
                str(item.device_type),
                tuple(info.as_json() for info in item.args_info),
            )
            run_secs: Optional[List[float]] = None
            error_message: Optional[str] = None
            try:
                run_secs = submitted.result()
            except TimeoutError:
                error_message = f"LocalRunner: Timeout, killed after {self.timeout_sec} seconds\n"
            except Exception as err:  # pylint: disable=broad-except
                error_message = "LocalRunner: An exception occurred\n" + str(err)
            results.append(
                LocalRunnerFuture(res=run_secs, error_message=error_message)  # type: ignore
            )
        return results

    def _sanity_check(self) -> None:
        """Resolve every hook name inside the worker process so that a bad
        registration fails at construction time rather than mid-tuning."""

        def _check(
            f_alloc_argument,
            f_run_evaluator,
            f_cleanup,
        ) -> None:
            get_global_func_with_default_on_worker(name=f_alloc_argument, default=None)
            get_global_func_with_default_on_worker(name=f_run_evaluator, default=None)
            get_global_func_with_default_on_worker(name=f_cleanup, default=None)

        self.pool.submit(
            _check,
            self.f_alloc_argument,
            self.f_run_evaluator,
            self.f_cleanup,
        ).result()
def default_alloc_argument(
    device: Device,
    args_info: T_ARG_INFO_JSON_OBJ_LIST,
    alloc_repeat: int,
) -> List[T_ARGUMENT_LIST]:
    """Default function to allocate the arguments

    Parameters
    ----------
    device: Device
        The device to allocate the arguments
    args_info: T_ARG_INFO_JSON_OBJ_LIST
        The arguments info
    alloc_repeat: int
        The number of times to repeat the allocation

    Returns
    -------
    repeated_args: List[T_ARGUMENT_LIST]
        The allocation args
    """
    # Resolved on the worker process; may be None if the random-fill packed
    # func is not registered there — alloc_argument_common handles that case.
    f_random_fill = get_global_func_with_default_on_worker(
        name="tvm.contrib.random.random_fill_for_measure", default=None
    )
    return alloc_argument_common(f_random_fill, device, args_info, alloc_repeat)
def default_run_evaluator(
    rt_mod: Module,
    device: Device,
    evaluator_config: EvaluatorConfig,
    repeated_args: List[T_ARGUMENT_LIST],
) -> List[float]:
    """Default function to run the evaluator

    Parameters
    ----------
    rt_mod: Module
        The runtime module
    device: Device
        The device to run the evaluator
    evaluator_config: EvaluatorConfig
        The evaluator config
    repeated_args: List[T_ARGUMENT_LIST]
        The repeated arguments

    Returns
    -------
    costs: List[float]
        The evaluator results
    """
    # Thin wrapper: the actual timing logic is shared with other runners.
    return run_evaluator_common(rt_mod, device, evaluator_config, repeated_args)
def default_cleanup() -> None:
    """Default function to clean up the session.

    The local runner has no per-run resources to release, so this is a no-op.
    """
    return None
| 12,681 | 31.685567 | 98 | py |
tvm | tvm-main/python/tvm/meta_schedule/cost_model/mlp_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# type: ignore[import]
"""
Segment Sum MLP cost model
"""
import glob
import math
import os
import random
import tempfile
from collections import OrderedDict
from itertools import chain as itertools_chain
from typing import Dict, List, NamedTuple, Optional, Tuple
import numpy as np # type: ignore
import torch # type: ignore
import tvm
from ...contrib.tar import tar, untar
from ...runtime import NDArray
from ...target import Target
from ..cost_model import PyCostModel
from ..database import JSONDatabase
from ..feature_extractor import FeatureExtractor, PerStoreFeature
from ..logging import get_logger
from ..runner import RunnerResult
from ..search_strategy import MeasureCandidate
from ..tune_context import TuneContext
from ..utils import derived_object, shash2hex
logger = get_logger("mlp_model") # pylint: disable=invalid-name
# pylint: disable=no-member,import-outside-toplevel
class SegmentSumMLPConfig(NamedTuple):
    """SegmentSum MLP model configuration

    Parameters
    ----------
    input_dim : int
        The input dim for the model.
    hidden_dim : int
        The hidden dim for the model.
    output_dim : int
        The output dim for the model.
    use_norm : bool
        Whether to normalize the segment sum or not.
    use_sigmoid : bool
        Whether to use sigmoid on the final output or not.
    """

    input_dim: int = 172
    hidden_dim: int = 256
    output_dim: int = 1
    use_norm: bool = False
    use_sigmoid: bool = False

    def to_dict(self):
        """Export the configuration as a plain ``dict`` keyed by field name."""
        return dict(self._asdict())
class TrainerConfig(NamedTuple):
    """Trainer configuration

    Parameters
    ----------
    batch_size : int
        The batch size.
    learning rate : float
        The learning rate.
    weight decay : float
        The weight decay.
    num_epoch_full : int
        The number of epochs used in full training.
    num_epoch_incremental : int
        The number of epochs used in incremental training.
    grad_clip_norm: float
        The norm of gradient clipping.
    train_verbose: int
        The verbose frequency for training in batches.
    test_interval: int
        The testing interval in epochs.
    test_split: float
        The fraction of data for testing.
    frozen: bool
        Determine whether to re-train the model or not.
    """

    batch_size: int = 128
    learning_rate: float = 7e-4
    weight_decay: float = 1e-6
    num_epoch_full: int = 50
    num_epoch_incremental: int = 5
    grad_clip_norm: float = 0.5
    train_verbose: int = 1000
    test_interval: int = 1
    test_split: float = 0.2
    frozen: bool = False

    def to_dict(self):
        """Export the configuration as a plain ``dict`` keyed by field name."""
        return dict(self._asdict())
# pylint: disable=too-few-public-methods
class FeatureGroup:
    """A group of feature vectors sharing one workload hash.

    Parameters
    ----------
    group_hash : str
        The hash of the group
    features : List[np.ndarray]
        The features
    costs : List[float]
        The costs
    min_cost : float
        The minimum cost
    """

    group_hash: str
    features: List[np.ndarray]
    costs: np.ndarray
    min_cost: float

    def __init__(
        self,
        group_hash: str,
        features: List[np.ndarray],
        costs: np.ndarray,
    ) -> None:
        self.group_hash = group_hash
        self.features = features
        self.costs = costs
        # Cache the best (smallest) measured cost for normalization.
        self.min_cost = np.min(costs)

    def append(  # pylint: disable=missing-function-docstring
        self,
        features: List[np.ndarray],
        costs: np.ndarray,
    ) -> None:
        # Grow the group in place and refresh the cached minimum.
        self.features.extend(features)
        self.costs = np.concatenate([self.costs, costs])
        self.min_cost = np.min(self.costs)
# pylint: disable=too-many-instance-attributes
class SegmentDataLoader:
    """Dataloader for Segment Sum MLP model.

    Stores all feature segments flattened into one tensor; each sample is
    addressed by its (offset, size) pair so batches of variable-length
    segments can be gathered without padding.

    Parameters
    ----------
    features : List[np.ndarray]
        The features
    results : np.ndarray
        The measured results, can be None.
    batch_size : int
        The batch size
    shuffle : bool
        Whether to shuffle the dataset or not
    """
    def __init__(
        self,
        features,
        results=None,
        batch_size=128,
        shuffle=True,
    ):
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.data_size = len(features)
        # flatten features and store the starting indices
        self.segment_sizes = torch.tensor([len(feature) for feature in features], dtype=torch.int32)
        self.feature_offsets = (
            torch.cumsum(self.segment_sizes, 0, dtype=torch.int32) - self.segment_sizes
        )
        features = torch.cat([torch.tensor(feature) for feature in features])
        # Column-wise max-normalization over the whole dataset; all-zero
        # columns get a divisor of 1 to avoid division by zero.
        norm, _ = features.max(dim=0)
        norm[norm == 0] = 1
        self.features = features / norm
        self.results = torch.tensor(results) if results is not None else None
        # Iteration state; initialized lazily in __iter__.
        self.iter_order = self.pointer = None
    def __len__(self):
        # Number of samples (segments), not number of batches.
        return self.data_size
    def __iter__(self):
        if self.shuffle:
            self.iter_order = torch.randperm(self.data_size)
        else:
            self.iter_order = torch.arange(self.data_size)
        self.pointer = 0
        return self
    def __next__(self):
        if self.pointer >= self.data_size:
            raise StopIteration
        # The last batch may be smaller than batch_size.
        batch_indices = self.iter_order[self.pointer : self.pointer + self.batch_size]
        self.pointer += self.batch_size
        return self._fetch_indices(batch_indices)
    def _fetch_indices(self, indices):
        # Gather the (variable-length) feature rows for each selected sample.
        segment_sizes, feature_offsets = self.segment_sizes[indices], self.feature_offsets[indices]
        feature_indices = torch.empty(segment_sizes.sum(), dtype=torch.int32)
        idx = 0
        for offset, seg_size in zip(feature_offsets, segment_sizes):
            feature_indices[idx : idx + seg_size] = torch.arange(offset, offset + seg_size)
            idx += seg_size
        features = self.features[feature_indices.long()]
        results = None
        if self.results is not None:
            results = self.results[indices.long()]
        return segment_sizes, features, results
def lambda_rank_loss(  # pylint: disable=too-many-locals
    preds: "torch.Tensor",
    labels: "torch.Tensor",
    k: Optional[int] = None,
    eps: float = 1e-10,
    sigma: float = 1.0,
) -> "torch.Tensor":
    """
    LambdaLoss: Metric-Driven Loss for Learning-to-Rank

    Parameters
    ----------
    preds : Tensor
        The predicted runtime for each candidate.
    labels : Tensor
        The measured runtime for each candidate.
    k : Optional[int]
        Loss for top k.
        Default is None, which means computing all scores.
    eps : float
        The minimum value to the denominator and argument of log if they reach 0.
    sigma : float
        The scaling factor to the input of the sigmoid function.

    Returns
    -------
    loss : Tensor
        The lambda rank loss.
    """
    device = preds.device
    # Work on [1, n] row vectors so pairwise differences broadcast to [1, n, n].
    y_pred, y_true = preds[None, :], labels[None, :]
    y_pred_sorted, indices_pred = y_pred.sort(descending=True, dim=-1)
    y_true_sorted, _ = y_true.sort(descending=True, dim=-1)
    true_sorted_by_preds = torch.gather(y_true, dim=1, index=indices_pred)
    true_diffs = true_sorted_by_preds[:, :, None] - true_sorted_by_preds[:, None, :]
    # Only finite, correctly-ordered pairs contribute to the loss.
    padded_pairs_mask = torch.isfinite(true_diffs) & (true_diffs > 0)
    # Restrict to the top-k x top-k sub-matrix; k=None keeps everything.
    ndcg_at_k_mask = torch.zeros(
        (y_pred.shape[1], y_pred.shape[1]), dtype=torch.bool, device=device
    )
    ndcg_at_k_mask[:k, :k] = 1
    true_sorted_by_preds.clamp_(min=0.0)
    y_true_sorted.clamp_(min=0.0)
    # Standard NDCG discounts and gains, clamped away from zero by eps.
    pos_idxs = torch.arange(1, y_pred.shape[1] + 1).to(device)
    D = torch.log2(1.0 + pos_idxs.float())[None, :]  # pylint: disable=invalid-name
    maxDCGs = torch.sum(  # pylint: disable=invalid-name
        ((torch.pow(2, y_true_sorted) - 1) / D)[:, :k], dim=-1
    ).clamp(min=eps)
    G = (torch.pow(2, true_sorted_by_preds) - 1) / maxDCGs[:, None]  # pylint: disable=invalid-name
    weights = torch.abs(
        torch.pow(D[:, :, None], -1.0) - torch.pow(D[:, None, :], -1.0)
    ) * torch.abs(G[:, :, None] - G[:, None, :])
    # Clamp score differences to keep sigmoid/log numerically stable.
    scores_diffs = (y_pred_sorted[:, :, None] - y_pred_sorted[:, None, :]).clamp(min=-1e8, max=1e8)
    scores_diffs[torch.isnan(scores_diffs)] = 0.0
    weighted_probs = (torch.sigmoid(sigma * scores_diffs).clamp(min=eps) ** weights).clamp(min=eps)
    losses = torch.log2(weighted_probs)
    masked_losses = losses[padded_pairs_mask & ndcg_at_k_mask]
    loss = -torch.sum(masked_losses)
    return loss
def topk_score(
    pred_results: "torch.Tensor",
    gt_results: "torch.Tensor",
    k: int,
) -> float:
    """
    Evaluate the top-k score

    Parameters
    ----------
    pred_results: Tensor
        The raw prediction
    gt_results: Tensor
        The measured results
    k : int
        The k in top k score

    Returns
    -------
    score : float
        The top-k score
    """
    # Pick the k candidates the model predicts to be fastest (smallest value),
    # then compare the best of their measured costs against the global best.
    num = min(k, len(pred_results))
    _, candidate_indices = torch.topk(pred_results, num, largest=False)
    best_measured = gt_results.min()
    best_among_predicted = gt_results[candidate_indices].min()
    return (best_measured / best_among_predicted).item()
class SegmentSumMLP(torch.nn.Module):
    """Segment Sum MLP model.

    Encodes each per-block feature row, sums the encodings per candidate
    (a "segment"), and regresses a single score per candidate.

    Parameters
    ----------
    input_dim : int
        The input dim for the model.
    hidden_dim : int
        The hidden dim for the model.
    output_dim : int
        The output dim for the model.
    use_norm : bool
        Whether to normalize the segment sum or not.
    use_sigmoid : bool
        Whether to use sigmoid on the final output or not.
    """
    input_dim: int
    hidden_dim: int
    output_dim: int
    use_norm: bool
    use_sigmoid: bool
    def __init__(  # pylint: disable=too-many-arguments
        self,
        input_dim: int = 172,
        hidden_dim: int = 256,
        output_dim: int = 1,
        use_norm: bool = False,
        use_sigmoid: bool = False,
    ):
        from torch import nn  # type: ignore
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
        )
        # Identity stands in when the optional pieces are disabled, so
        # forward() needs no conditionals.
        self.norm = nn.BatchNorm1d(hidden_dim) if use_norm else nn.Identity()
        self.layer0 = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
        )
        self.layer1 = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
        )
        self.decoder = nn.Linear(hidden_dim, output_dim)
        self.sigmoid = nn.Sigmoid() if use_sigmoid else nn.Identity()
    def forward(  # pylint: disable=missing-function-docstring
        self,
        segment_sizes: "torch.Tensor",
        features: "torch.Tensor",
    ) -> "torch.Tensor":
        n_seg = len(segment_sizes)
        encoded_features = self.encoder(features)
        # Map each feature row to the index of the segment (candidate) it
        # belongs to, e.g. sizes [2, 3] -> [0, 0, 1, 1, 1].
        segment_indices = torch.repeat_interleave(
            torch.arange(n_seg, device=features.device),
            segment_sizes.long(),
        )
        n_dim = encoded_features.shape[1]
        # Sum encoded rows per segment via scatter_add: one row per candidate.
        segment_sum = torch.scatter_add(
            input=torch.zeros((n_seg, n_dim), dtype=encoded_features.dtype, device=features.device),
            dim=0,
            index=segment_indices.view(-1, 1).expand(-1, n_dim),
            src=encoded_features,
        )
        out = self.norm(segment_sum)
        # Two residual MLP blocks, then project to a scalar per candidate.
        out = self.layer0(out) + out
        out = self.layer1(out) + out
        out = self.decoder(out).squeeze()
        out = self.sigmoid(out)
        return out
def extract_features(
    context: TuneContext,
    candidates: List[MeasureCandidate],
    results: Optional[List[RunnerResult]] = None,
    extractor: Optional[FeatureExtractor] = None,
):
    """Extract feature vectors and compute mean costs.

    Parameters
    ----------
    context: TuneContext
        The tuning context.
    candidates: List[MeasureCandidate]
        The measure candidates.
    results: Optional[List[RunnerResult]]
        The measured results, can be None if used in prediction.
    extractor: Optional[FeatureExtractor]
        The feature extractor.

    Returns
    -------
    new_features: List[np.ndarray]
        The extracted features.
    new_mean_costs: np.ndarray
        The mean costs.
    """
    extractor = extractor or PerStoreFeature(extract_workload=True)

    def _to_numpy(feature: NDArray) -> np.ndarray:
        return feature.numpy().astype("float32")

    def _median_cost(res: RunnerResult) -> float:
        # Failed runs carry no timings; penalize them with a huge cost.
        if not res.run_secs:
            return 1e10
        return float(np.median([float(s) for s in res.run_secs]))

    new_features = [_to_numpy(feature) for feature in extractor.extract_from(context, candidates)]
    if results is None:
        return new_features, None
    new_mean_costs = np.array([_median_cost(res) for res in results], dtype="float32")
    return new_features, new_mean_costs
class State:
    """State of the trainer

    Parameters
    ----------
    model: SegmentSumMLP
        The cost model.
    data: Dict[str, FeatureGroup]
        The data groups.
    data_size: int
        The size of all data.
    untrained_size: int
        The size of the untrained data.
    """
    model: SegmentSumMLP
    data: Dict[str, FeatureGroup]
    data_size: int
    untrained_size: int
    def __init__(
        self,
        model_config: Optional[SegmentSumMLPConfig] = None,
        extractor: Optional[FeatureExtractor] = None,
    ):
        model_config = model_config or SegmentSumMLPConfig()
        extractor = extractor or PerStoreFeature(extract_workload=True)
        self.model = SegmentSumMLP(**model_config.to_dict())
        self.data = OrderedDict()
        self.data_size = 0
        self.untrained_size = 0
        self.extractor = extractor
    def load(  # pylint: disable=too-many-locals
        self,
        path: str,
        target: str = "nvidia/nvidia-v100",
    ) -> None:
        """Load the cached model, cached features, or raw data.

        The tar may contain any of three payloads, checked in order:
        ``model.pth`` (model weights), ``cached_data.npy`` (pre-extracted
        feature groups), or a ``raw_data`` directory of JSON tuning records
        from which features are extracted on the fly.

        Parameters
        ----------
        path: str
            The path to the tar file containing cached model, cached features,
            or raw data.
        target: str
            The target for the tuning context.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            model_path = os.path.join(tmp_dir, "model.pth")
            cache_path = os.path.join(tmp_dir, "cached_data.npy")
            raw_path = os.path.join(tmp_dir, "raw_data")
            untar(path, tmp_dir)
            if os.path.exists(model_path):
                self.model.load_state_dict(torch.load(model_path))
            if os.path.exists(cache_path):
                # Cached features: rebuild the groups directly, all counted
                # as untrained so the next update triggers training.
                for group_hash, features, costs in np.load(cache_path, allow_pickle=True):
                    self.data[group_hash] = FeatureGroup(
                        group_hash=group_hash,
                        features=list(features),
                        costs=costs,
                    )
                    self.data_size += len(costs)
                    self.untrained_size += len(costs)
            elif os.path.exists(raw_path):
                from tqdm import tqdm  # type: ignore
                model_dirs = glob.glob(os.path.join(raw_path, "*"))
                workload_paths = []
                for model_dir in model_dirs:
                    json_files = glob.glob(os.path.join(model_dir, "*.json"))
                    for json_file in json_files:
                        if json_file.endswith("_workload.json"):
                            workload_paths.append(json_file)
                for workload_path in tqdm(workload_paths):
                    try:
                        database = JSONDatabase(
                            path_workload=workload_path,
                            path_tuning_record=workload_path.replace(
                                "_workload.json", "_candidates.json"
                            ),
                        )
                    except tvm._ffi.base.TVMError:  # pylint: disable=protected-access
                        # Skip workloads whose records fail to parse.
                        continue
                    candidates, results = [], []
                    tuning_records = database.get_all_tuning_records()
                    if len(tuning_records) == 0:
                        continue
                    for record in tuning_records:
                        candidates.append(record.as_measure_candidate())
                        results.append(RunnerResult(run_secs=record.run_secs, error_msg=None))
                    assert len(candidates) == len(results)
                    context = TuneContext(mod=tuning_records[0].workload.mod, target=Target(target))
                    features, mean_costs = extract_features(
                        context, candidates, results, self.extractor
                    )
                    self.add_to_group(features, mean_costs, shash2hex(context.mod))
    def save(self, path: str) -> None:
        """Cache the model and data.

        Parameters
        ----------
        path: str
            The path to the cached tar file.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            model_path = os.path.join(tmp_dir, "model.pth")
            cache_path = os.path.join(tmp_dir, "cached_data.npy")
            torch.save(self.model.state_dict(), model_path)
            # Serialize groups as (hash, features, costs) tuples in one
            # object ndarray so a single np.load restores everything.
            data = [
                (
                    g.group_hash,
                    g.features,
                    g.costs,
                )
                for g in self.data.values()
            ]
            np.save(
                file=cache_path,
                arr=np.array(data, dtype=object),
            )
            tar(path, [x for x in [model_path, cache_path] if x is not None])
            logger.info("Saved MLPModel to %s", path)
    def add_to_group(
        self,
        features: List[np.ndarray],
        costs: np.ndarray,
        group_hash: str,
    ):
        """Add features and costs to the data groups with key group_hash.

        Parameters
        ----------
        features: List[np.ndarray]
            The feature vectors.
        costs: np.ndarray
            The measured results.
        group_hash: str
            The structural hash of the candidates.
        """
        group = self.data.get(group_hash, None)
        if group is None:
            group = FeatureGroup(
                group_hash=group_hash,
                features=features,
                costs=costs,
            )
        else:
            group.append(features, costs)
        self.data[group_hash] = group
        self.data_size += len(features)
        self.untrained_size += len(features)
class SegmentSumMLPTrainer:
    """The trainer for Segment Sum MLP model.

    Parameters
    ----------
    state: State
        The state of the trainer.
    batch_size : int
        The batch size.
    learning rate : float
        The learning rate.
    weight decay : float
        The weight decay.
    num_epoch_full : int
        The number of epochs used in full training.
    num_epoch_incremental : int
        The number of epochs used in incremental training.
    grad_clip_norm: float
        The norm of gradient clipping.
    train_verbose: int
        The verbose frequency for training in batches.
    test_interval: int
        The testing interval in epochs.
    test_split: float
        The fraction of data for testing.
    frozen: bool
        Determine whether to re-train the model or not.
    optimizer: "torch.optim.adam.Adam"
        The optimizer.
    scheduler: "torch.optim.lr_scheduler.StepLR"
        The scheduler.
    """
    state: State
    batch_size: int = 128
    learning_rate: float = 7e-4
    weight_decay: float = 1e-6
    num_epoch_full: int = 50
    num_epoch_incremental: int = 5
    grad_clip_norm: float = 0.5
    train_verbose: int = 1000
    test_interval: int = 1
    test_split: float = 0.2
    frozen: bool = False
    optimizer: "torch.optim.adam.Adam"  # type: ignore
    scheduler: "torch.optim.lr_scheduler.StepLR"  # type: ignore
    def __init__(
        self,
        train_config: Optional[TrainerConfig] = None,
        state: Optional[State] = None,
    ):
        train_config = train_config or TrainerConfig()
        state = state or State()
        config = train_config.to_dict()
        # Copy every config field onto the trainer as a plain attribute.
        for attr in config:
            setattr(self, attr, config[attr])
        self.state = state
        self.device = "cuda" if torch.cuda.device_count() else "cpu"
        self.optimizer, self.scheduler = None, None
    def train_step(
        self,
        data: Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"],
        batch: int = 0,
        train_loss: Optional[float] = None,
    ) -> float:
        """Helper function for training on a single batch.

        Parameters
        ----------
        data: Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]
            A batch of data, should be a tuple of (segment_sizes, features, gt_results).
        batch: int = 0
            The current batch number.
        train_loss: Optional[float] = None
            The previous averaged training loss, None if it is the first batch.

        Returns
        -------
        train_loss: float
            The averaged training loss after the current batch.
        """
        segment_sizes, features, gt_results = (
            data[0].to(self.device),
            data[1].to(self.device),
            data[2].to(self.device),
        )
        self.optimizer.zero_grad()
        pred_results = self.state.model(segment_sizes, features)
        loss = lambda_rank_loss(pred_results, gt_results)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.state.model.parameters(), self.grad_clip_norm)
        self.optimizer.step()
        loss = loss.detach().cpu()
        # Exponential moving average (0.95/0.05) of the batch losses.
        train_loss = (
            train_loss * 0.95 + loss.item() * 0.05 if train_loss is not None else loss.item()
        )
        # Move tensors back to CPU to release device memory promptly.
        segment_sizes, features, gt_results, pred_results = (
            segment_sizes.detach().cpu(),
            features.detach().cpu(),
            gt_results.detach().cpu(),
            pred_results.detach().cpu(),
        )
        if batch % self.train_verbose == 0:
            logger.info("Batch: %d, train loss: %6f", batch, train_loss)
        return train_loss
    def predict_step(
        self,
        data: Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"],
    ):
        """Helper function for predicting (validating) on a single batch.

        Parameters
        ----------
        data: Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]
            A batch of data, should be a tuple of (segment_sizes, features, gt_results).
            gt_results can be None if it is used for predicting.

        Returns
        -------
        pred_results: np.ndarray
            The predicted results for the current batch.
        test_loss_batch: float
            If used for validation, return the test loss for the current batch.
        test_scores_batch: List[float]
            If used for validation, return the topk scores for the current batch.
        """
        test_loss_batch, test_scores_batch = None, []
        segment_sizes, features = (
            data[0].to(self.device),
            data[1].to(self.device),
        )
        gt_results = data[2]
        pred_results = self.state.model(segment_sizes, features)
        segment_sizes, features, pred_results = (
            segment_sizes.detach().cpu(),
            features.detach().cpu(),
            pred_results.detach().cpu(),
        )
        if gt_results is not None:
            # Validation mode: also report loss and top-1/5/10 scores.
            test_loss_batch = lambda_rank_loss(pred_results, gt_results).item()
            for k in [1, 5, 10]:
                test_scores_batch.append(topk_score(pred_results, gt_results, k))
        return pred_results.numpy(), test_loss_batch, test_scores_batch
    def train_full(self):  # pylint: disable=too-many-locals
        """Training on the full dataset."""
        # split into training and testing set
        keys = list(self.state.data.keys())
        test_keys = random.sample(keys, k=math.floor(len(keys) * self.test_split))
        train_data = OrderedDict()
        test_data = OrderedDict()
        for key in keys:
            if key in test_keys:
                test_data[key] = self.state.data[key]
            else:
                train_data[key] = self.state.data[key]
        train_features = list(
            itertools_chain.from_iterable([g.features for g in train_data.values()])
        )
        test_features = list(
            itertools_chain.from_iterable([g.features for g in test_data.values()])
        )
        # Labels are min-cost-normalized throughputs in (0, 1]; 1 is best.
        train_results = np.concatenate([g.min_cost / g.costs for g in train_data.values()])
        test_results = np.concatenate([g.min_cost / g.costs for g in test_data.values()])
        train_loader = SegmentDataLoader(
            train_features, train_results, batch_size=self.batch_size, shuffle=True
        )
        test_loader = SegmentDataLoader(
            test_features, test_results, batch_size=self.batch_size, shuffle=False
        )
        self.optimizer = torch.optim.Adam(
            self.state.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay
        )
        self.scheduler = torch.optim.lr_scheduler.StepLR(
            self.optimizer,
            step_size=self.num_epoch_full // 10,
            gamma=0.8,
            verbose=True,
        )
        self.state.model = self.state.model.to(self.device)
        min_test_loss = 1e10
        logger.info("Training size: %d; Testing size: %d", len(train_loader), len(test_loader))
        model_cache_path = tempfile.NamedTemporaryFile().name  # pylint: disable=consider-using-with
        for epoch in range(self.num_epoch_full):
            logger.info("Epoch: %d", epoch)
            # training
            self.state.model.train()
            train_loss = None
            for batch, data in enumerate(train_loader):
                train_loss = self.train_step(data, batch, train_loss)
            self.scheduler.step()
            # testing
            if epoch % self.test_interval == 0:
                self.state.model.eval()
                test_losses, test_scores = [], []
                for data in test_loader:
                    _, test_loss_batch, test_scores_batch = self.predict_step(data)
                    test_losses.append(test_loss_batch)
                    test_scores.append(test_scores_batch)
                # Drop the (possibly smaller) last batch from the average
                # when there is more than one batch.
                test_loss = (
                    np.array(test_losses[:-1]).mean() if len(test_losses) > 1 else test_losses[0]
                )
                logger.info(
                    "Average test loss: %6f, top1 score: %5f, top5 score: %5f, top10 score: %5f",
                    test_loss,
                    np.array(test_scores)[:, 0].mean(),
                    np.array(test_scores)[:, 1].mean(),
                    np.array(test_scores)[:, 2].mean(),
                )
                # Keep a checkpoint of the best model seen so far.
                if test_loss < min_test_loss:
                    min_test_loss = test_loss
                    torch.save(self.state.model.state_dict(), model_cache_path)
        # Restore the best checkpoint and mark all data as trained.
        self.state.model.to("cpu").load_state_dict(torch.load(model_cache_path))
        self.state.untrained_size = 0
    def train_incremental(
        self,
        features: List[np.ndarray],
        results: np.ndarray,
    ):
        """Training on incremental data.

        Parameters
        ----------
        features: List[np.ndarray]
            The extracted features.
        results: np.ndarray
            The measured results.
        """
        # Normalize within this batch only (local minimum, not global).
        results = np.min(results) / results
        loader = SegmentDataLoader(features, results, batch_size=self.batch_size, shuffle=True)
        self.optimizer = torch.optim.Adam(
            self.state.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay
        )
        self.state.model = self.state.model.to(self.device)
        logger.info("Incremental training size: %d", len(loader))
        for epoch in range(self.num_epoch_incremental):
            logger.info("Epoch: %d", epoch)
            self.state.model.train()
            loss = None
            for batch, data in enumerate(loader):
                loss = self.train_step(data, batch, loss)
        self.state.model.to("cpu")
        self.state.untrained_size = max(0, self.state.untrained_size - len(loader))
    def predict_incremental(
        self,
        features: List[np.ndarray],
        results: Optional[np.ndarray] = None,
    ) -> np.ndarray:
        """Predicting (validating) on incremental data.

        Parameters
        ----------
        features: List[np.ndarray]
            The extracted features.
        results: Optional[np.ndarray]
            The measured results, can be None if used for predicting.

        Returns
        -------
        pred_results: np.ndarray
            The predicted results.
        """
        if results is not None:
            results = np.min(results) / results
        loader = SegmentDataLoader(features, results, batch_size=self.batch_size, shuffle=False)
        self.state.model = self.state.model.to(self.device).eval()
        logger.info("Incremental testing size: %d", len(loader))
        pred_results, losses, scores = [], [], []
        for data in loader:
            pred_results_batch, losses_batch, scores_batch = self.predict_step(data)
            pred_results.append(pred_results_batch)
            losses.append(losses_batch)
            scores.append(scores_batch)
        pred_results = np.concatenate(pred_results)
        if results is not None:
            # Same last-batch handling as in train_full's validation pass.
            losses = np.array(losses[:-1]).mean() if len(losses) > 1 else losses[0]
            logger.info(
                "Average test loss: %6f, top1 score: %5f, top5 score: %5f, top10 score: %5f",
                losses,
                np.array(scores)[:, 0].mean(),
                np.array(scores)[:, 1].mean(),
                np.array(scores)[:, 2].mean(),
            )
        return pred_results
    def update(
        self,
        features: List[np.ndarray],
        costs: np.ndarray,
        group_hash: str,
    ):
        """Update the dataset and re-train the model if not frozen.

        Parameters
        ----------
        features: List[np.ndarray]
            The extracted features.
        costs: np.ndarray
            The measured results.
        group_hash: str
            The hash of the group.
        """
        self.state.add_to_group(features, costs, group_hash)
        if not self.frozen:
            self.predict_incremental(features, costs)
            # Full re-train once >20% of the dataset is untrained; otherwise
            # a few cheap incremental epochs suffice.
            if self.state.untrained_size / self.state.data_size > 0.2:
                self.train_full()
            else:
                self.train_incremental(features, costs)
@derived_object
class MLPModel(PyCostModel):
    """Segment Sum MLP Model

    A thin adapter that exposes the trainer through the PyCostModel
    interface expected by the meta-schedule tuning loop.

    Parameters
    ----------
    trainer: SegmentSumMLPTrainer
        The trainer for the model, handling the training interface.
    """
    trainer: SegmentSumMLPTrainer
    def __init__(
        self,
        *,
        trainer: Optional[SegmentSumMLPTrainer] = None,
    ):
        super().__init__()
        self.trainer = trainer or SegmentSumMLPTrainer()
    def load(self, path: str) -> None:
        """Load the cost model, cached data or raw data from given file location.

        Parameters
        ----------
        path : str
            The file path.
        """
        self.trainer.state.load(path)
    def save(self, path: str) -> None:
        """Save the cost model and data to given file location.

        Parameters
        ----------
        path : str
            The file path.
        """
        self.trainer.state.save(path)
    def update(
        self,
        context: TuneContext,
        candidates: List[MeasureCandidate],
        results: List[RunnerResult],
    ) -> None:
        """Update the dataset, re-train the cost model if not frozen.

        Parameters
        ----------
        context : TuneContext,
            The tuning context.
        candidates : List[MeasureCandidate]
            The measure candidates.
        results : List[RunnerResult]
            The running results of the measure candidates.
        """
        features, mean_costs = extract_features(
            context, candidates, results, self.trainer.state.extractor
        )
        self.trainer.update(features, mean_costs, shash2hex(context.mod))
    def predict(self, context: TuneContext, candidates: List[MeasureCandidate]) -> np.ndarray:
        """Predict given the measure candidates.

        Parameters
        ----------
        context : TuneContext,
            The tuning context.
        candidates : List[MeasureCandidate]
            The measure candidates.

        Return
        ------
        result : np.ndarray
            The predicted normalized score.
        """
        features, _ = extract_features(context, candidates, None, self.trainer.state.extractor)
        pred_results = self.trainer.predict_incremental(features)
        return pred_results
| 34,492 | 32.949803 | 100 | py |
tvm | tvm-main/python/tvm/meta_schedule/cost_model/xgb_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""XGBoost-based cost model"""
import os
import tempfile
from collections import OrderedDict
from itertools import chain as itertools_chain
from typing import TYPE_CHECKING, Any, Callable, Dict, List, NamedTuple, Optional, Tuple
import numpy as np # type: ignore
from ...contrib.tar import tar, untar
from ...runtime import NDArray
from ..cost_model import PyCostModel
from ..feature_extractor import FeatureExtractor
from ..logging import get_logger
from ..runner import RunnerResult
from ..search_strategy import MeasureCandidate
from ..utils import cpu_count, derived_object, shash2hex
from .metric import max_curve
if TYPE_CHECKING:
import xgboost as xgb # type: ignore
from xgboost.callback import TrainingCallback # type: ignore
from ..tune_context import TuneContext
logger = get_logger(__name__) # pylint: disable=invalid-name
def make_metric_sorter(focused_metric):
    """Build a sort key that puts the focused metric first.

    Returns a key function over ``(metric_name, value)`` pairs; the focused
    metric's name is prefixed with ``"!"`` so it sorts before all others.
    """

    def sort_key(item):
        name, _ = item
        return "!" + name if name == focused_metric else name

    return sort_key
class PackSum:
"""The pack-sum format
Parameters
----------
dmatrix : xgb.DMatrix
A float64 array of shape [n, m],
where `n` is the packed number of blocks,
and `m` is the length of feature vector on each block
ids : np.ndarray
An int64 array of shape [n] containing nonnegative integers,
indicating which the index of a sample that a block belongs to
"""
dmatrix: "xgb.DMatrix" # type: ignore # pylint: disable=invalid-name
ids: np.ndarray
    def __init__(
        self,
        xs: List[np.ndarray],  # pylint: disable=invalid-name
        ys: Optional[np.ndarray],  # pylint: disable=invalid-name
    ):
        """Create PackSum format given a batch of samples

        Parameters
        ----------
        xs : List[np.ndarray]
            A batch of input samples
        ys : Optional[List[float]]
            A batch of labels. None means no labels available.
        """
        import xgboost as xgb  # type: ignore # pylint: disable=import-outside-toplevel
        # Flatten all per-sample blocks into one matrix; `ids[j]` records
        # which sample block row j came from.
        repeats = [x.shape[0] for x in xs]
        xs = np.concatenate(xs, axis=0)
        self.ids = np.concatenate([[i] * repeat for i, repeat in enumerate(repeats)], axis=0)
        if ys is None:
            self.dmatrix = xgb.DMatrix(data=xs, label=None)
        else:
            # Replicate each sample's label onto every one of its blocks, and
            # also use the label as the instance weight — presumably to
            # emphasize faster (higher-score) samples; confirm with the
            # objective in obj_square_error.
            ys = np.concatenate([[y] * repeat for y, repeat in zip(ys, repeats)], axis=0)
            self.dmatrix = xgb.DMatrix(data=xs, label=ys)
            self.dmatrix.set_weight(ys)
    def predict_with_score(self, pred: np.ndarray) -> np.ndarray:
        """Predict the labels given the block level prediction scores.

        Parameters
        ----------
        pred : np.ndarray
            The block level predictions

        Returns
        -------
        result : np.ndarray
            The predictions for each candidate.
        """
        # bincount groups block scores by candidate id and sums them:
        # result[i] = sum of pred[j] for all blocks j with ids[j] == i.
        return np.bincount(self.ids, weights=pred)
def obj_square_error(self, ys_pred: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Implement square error loss on pack-sum format as
a custom objective function for xgboost.
Parameters
----------
ys_pred: np.ndarray
The predictions
Returns
-------
gradient: np.ndarray
The gradient according to the xgboost format
hessian: np.ndarray
The hessian according to the xgboost format
"""
# Making prediction
ys_pred = self.predict_with_score(ys_pred)
# Propagate prediction to each block
ys_pred = ys_pred[self.ids] # pylint: disable=invalid-sequence-index
# The gradient and hessian
ys = self.dmatrix.get_label() # type: ignore # pylint: disable=invalid-name
gradient = ys_pred - ys
hessian = np.ones_like(gradient)
return gradient * ys, hessian * ys
def rmse(self, ys_pred: np.ndarray) -> Tuple[str, float]:
"""Evaluate RMSE (rooted mean square error) in the pack-sum format
Parameters
----------
ys_pred: np.ndarray
The raw predictions
Returns
-------
name: str
The name of the metric
score: float
The score of the metric
"""
# Making prediction
ys_pred = self.predict_with_score(ys_pred)
# Propagate prediction to each block
ys_pred = ys_pred[self.ids] # pylint: disable=invalid-sequence-index
# The RMSE
ys = self.dmatrix.get_label() # type: ignore # pylint: disable=invalid-name
square_error = np.square(ys_pred - ys)
rmse = np.sqrt(square_error.mean())
return "p-rmse", rmse
def average_peak_score(
self,
ys_pred: np.ndarray,
n: int,
) -> Tuple[str, float]:
"""Evaluate average-peak-score@N in the pack-sum format
Parameters
----------
ys_pred: np.ndarray
The raw prediction
n : int
The N in average-peak-score@N
Returns
-------
name: str
The name of the metric
score: float
The score of the metric
"""
ys = self.dmatrix.get_label() # type: ignore # pylint: disable=invalid-name
ys = self.predict_with_score(ys) # type: ignore # pylint: disable=invalid-name
ys = ys / np.unique(self.ids, return_counts=True)[1] # type: ignore # pylint: disable=invalid-name
ys_pred = self.predict_with_score(ys_pred)
trials = np.argsort(ys_pred)[::-1][:n]
trial_scores = ys[trials]
curve = max_curve(trial_scores) / np.max(ys)
score = np.mean(curve)
return f"a-peak@{n}", score
class XGBConfig(NamedTuple):
    """Hyper-parameters for the XGBoost booster.

    Fields
    ------
    max_depth : int
        Maximum tree depth.
    gamma : float
        Minimum loss reduction required to make a further partition.
    min_child_weight : float
        Minimum sum of instance weight needed in a child.
    eta : float
        Learning rate.
    seed : int
        Random seed.
    nthread : Optional[int]
        Number of threads to use; None means use the physical core count.
    """
    max_depth: int = 10
    gamma: float = 0.001
    min_child_weight: float = 0
    eta: float = 0.2
    seed: int = 43
    nthread: Optional[int] = None
    def to_dict(self):
        """Export the configuration as a plain keyword dictionary for xgb.train."""
        return dict(self._asdict())
class FeatureGroup:
    """A group of measured candidates sharing one workload hash.

    Attributes
    ----------
    group_hash : str
        The hash of the group
    features : List[np.ndarray]
        Per-candidate feature matrices
    costs : np.ndarray
        Measured costs, one per candidate
    min_cost : float
        The smallest cost observed in the group so far
    """
    group_hash: str
    features: List[np.ndarray]
    costs: np.ndarray
    min_cost: float
    def __init__(
        self,
        group_hash: str,
        features: List[np.ndarray],
        costs: np.ndarray,
    ) -> None:
        self.group_hash = group_hash
        self.features = features
        self.costs = costs
        self.min_cost = np.min(costs)
    def append(
        self,
        features: List[np.ndarray],
        costs: np.ndarray,
    ) -> None:
        """Add newly measured candidates and refresh the cached minimum cost."""
        self.features += features
        self.costs = np.concatenate((self.costs, costs))
        self.min_cost = self.costs.min()
@derived_object
class XGBModel(PyCostModel):
    """XGBoost model

    Samples are grouped per workload (IRModule hash); the model is re-trained
    from scratch on all accumulated data whenever `update` decides training is
    worthwhile.

    Parameters
    ----------
    extractor : FeatureExtractor
        The feature extractor for the model.
    config : XGBConfig
        The XGBoost model config.
    num_warmup_samples : int
        The number of samples that are used for warmup, i.e., the first few samples are predicted
        with random results.
    early_stopping_rounds : int
        The number of rounds for early stopping.
    verbose_eval : int
        The verbose level when doing evaluation.
    average_peak_n : int
        The number to calculate average peak score.
    adaptive_training : bool
        Whether use adaptive training to reduce tuning time.
    """
    # feature extractor used to turn measure candidates into float32 matrices
    extractor: FeatureExtractor
    # xgboost model config
    config: XGBConfig
    # below this many samples, predictions are random (warmup)
    num_warmup_samples: int
    # evaluation
    early_stopping_rounds: int
    verbose_eval: int
    average_peak_n: int
    # mutable state: samples grouped by workload hash, their total count,
    # and the currently trained booster (None until first training)
    data: Dict[str, FeatureGroup]
    data_size: int
    booster: Optional["xgb.Booster"]
    # adaptive training: skip re-training until enough new data accumulates
    adaptive_training: bool
    last_train_size: int
    def __init__(
        self,
        *,
        # feature extractor
        extractor: FeatureExtractor.FeatureExtractorType = "per-store-feature",
        # xgboost model config
        config: XGBConfig = XGBConfig(),
        # random result before enough samples
        num_warmup_samples: int = 100,
        # evaluation
        early_stopping_rounds: int = 50,
        verbose_eval: int = 25,
        average_peak_n: int = 32,
        adaptive_training: bool = True,
        num_tuning_cores: Optional[int] = None,
    ):
        """Construct an XGBModel; all arguments are keyword-only.

        A string `extractor` is resolved via `FeatureExtractor.create`.
        NOTE: the shared default `XGBConfig()` instance is safe only because
        XGBConfig is an immutable NamedTuple.
        """
        super().__init__()
        if not isinstance(extractor, FeatureExtractor):
            extractor = FeatureExtractor.create(extractor)
        # feature extractor
        self.extractor = extractor
        # model-related
        if config.nthread is None:
            # use physical core number
            if num_tuning_cores is None:
                config = config._replace(nthread=cpu_count(logical=False))
            else:
                config = config._replace(nthread=num_tuning_cores)
        self.config = config
        # behavior of randomness
        self.num_warmup_samples = num_warmup_samples
        # evaluation
        self.early_stopping_rounds = early_stopping_rounds
        self.verbose_eval = verbose_eval
        self.average_peak_n = average_peak_n
        # states
        self.data = OrderedDict()
        self.data_size = 0
        self.booster = None
        # adaptive training
        self.adaptive_training = adaptive_training
        self.last_train_size = 0
def load(self, path: str) -> None:
"""Load the cost model from given file location.
Parameters
----------
path : str
The file path.
Note
----
Since XGBoost model trains from scratch, each time this method loads the model together with
previously cached feature vectors and results, so that the subsequent training process could
use all the existing data being stored on disk.
"""
import xgboost as xgb # pylint: disable=import-outside-toplevel
with tempfile.TemporaryDirectory() as tmp_dir:
model_path = os.path.join(tmp_dir, "model.bin")
data_path = os.path.join(tmp_dir, "data.npy")
# Step 1. Untar
untar(path, tmp_dir)
# Step 2. Load data
data = OrderedDict()
data_size = 0
for group_hash, features, costs in np.load(data_path, allow_pickle=True):
data[group_hash] = FeatureGroup(
group_hash=group_hash,
features=list(features),
costs=costs,
)
data_size += len(costs)
# Step 3. Load the model
if os.path.exists(model_path):
booster = xgb.Booster()
booster.load_model(model_path)
else:
self.booster = None
self.data = data
self.data_size = data_size
self.booster = booster
    def save(self, path: str) -> None:
        """Save the cost model to given file location.
        Parameters
        ----------
        path : str
            The file path.
        Note
        ----
        Since XGBoost model trains from scratch, each time this method saves the model together with
        previously cached feature vectors and results, so that the subsequent training process could
        use all the existing data being stored on disk.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            model_path = os.path.join(tmp_dir, "model.bin")
            data_path = os.path.join(tmp_dir, "data.npy")
            # Step 1. Save the model
            booster = self.booster
            if booster is not None:
                booster.save_model(model_path)
            else:
                # No booster trained yet: the tarball will contain data only
                model_path = None
            # Step 2. Save data as an object array of
            # (group_hash, features, costs) triples, mirroring `load`
            data = [
                (
                    g.group_hash,
                    g.features,
                    g.costs,
                )
                for g in self.data.values()
            ]
            np.save(
                file=data_path,
                arr=np.array(data, dtype=object),
            )
            # Step 3. Tar it
            tar(path, [x for x in [model_path, data_path] if x is not None])
        logger.info("Saved XGBModel to %s", path)
    def update(
        self,
        context: "TuneContext",
        candidates: List[MeasureCandidate],
        results: List[RunnerResult],
    ) -> None:
        """Update the cost model given running results.
        Parameters
        ----------
        context : TuneContext
            The tuning context.
        candidates : List[MeasureCandidate]
            The measure candidates.
        results : List[RunnerResult]
            The running results of the measure candidates.
        """
        assert len(candidates) == len(results)
        if len(candidates) == 0:
            return
        # Step 1. Get the feature group
        new_group_hash = shash2hex(context.mod)
        group = self.data.get(new_group_hash, None)
        # Step 2. Extract features
        def _feature(x: NDArray) -> np.ndarray:
            return x.numpy().astype("float32")
        def _mean_cost(x: RunnerResult) -> float:
            # Despite the name, this is the *median* of the measured times.
            # 1e10 is a huge penalty cost for results without timings
            # (presumably failed runs — they then score near zero).
            if not x.run_secs:
                return 1e10
            return float(np.median([float(s) for s in x.run_secs]))
        new_features = [_feature(x) for x in self.extractor.extract_from(context, candidates)]
        new_mean_costs = [_mean_cost(x) for x in results]
        # Filter instances with no features. NOTE: the costs must be filtered
        # before `new_features` is rebound, because index `i` refers to the
        # unfiltered feature list.
        new_mean_costs = [c for i, c in enumerate(new_mean_costs) if len(new_features[i]) != 0]
        new_mean_costs_np = np.array(new_mean_costs).astype("float32")
        new_features = [f for f in new_features if len(f) != 0]
        if not new_features:
            return
        # Steps 3. Run validation (only possible once we have both prior data
        # for this workload and a trained booster)
        if group is not None and self.booster is not None:
            logger.debug(
                "XGB validation: %s",
                "\t".join(
                    f"{key}: {score:.6f}"
                    for key, score in self._validate(
                        xs=new_features,
                        # Labels are throughput-like scores normalized by the
                        # group's best cost: higher is better, best sample ~1.
                        ys=group.min_cost / new_mean_costs_np,
                    )
                ),
            )
        # Step 4. Add the features into the data points
        if group is None:
            group = FeatureGroup(
                group_hash=new_group_hash,
                features=new_features,
                costs=new_mean_costs_np,
            )
        else:
            group.append(new_features, new_mean_costs_np)
        self.data[new_group_hash] = group
        self.data_size += len(new_features)
        if (
            self.adaptive_training
            and self.data_size - self.last_train_size < self.last_train_size / 5
        ):
            # Set a training threshold related to `last_train_size` to reduce the training
            # overhead when there're too many results
            return
        self.last_train_size = self.data_size
        # Step 5. Re-train the model from scratch on all accumulated groups
        self._train(
            xs=list(itertools_chain.from_iterable([g.features for g in self.data.values()])),
            ys=np.concatenate(
                [g.min_cost / g.costs for g in self.data.values()],
                axis=0,
            ),
        )
def predict(
self,
context: "TuneContext",
candidates: List[MeasureCandidate],
) -> np.ndarray:
"""Predict the normalized score using the cost model.
Parameters
----------
context : TuneContext
The tuning context.
candidates : List[MeasureCandidate]
The measure candidates.
Return
------
result : np.ndarray
The predicted normalized score.
"""
if self.data_size >= self.num_warmup_samples and self.booster is not None:
ret = self._predict(
xs=[
x.numpy().astype("float32")
for x in self.extractor.extract_from(
context,
candidates,
)
]
)
else:
ret = np.random.uniform(
low=0,
high=1,
size=(len(candidates),),
)
return ret.astype("float64")
    def _train(  # type: ignore # pylint: disable=invalid-name
        self,
        xs: List[np.ndarray],
        ys: np.ndarray,
    ) -> None:
        """Re-train the booster from scratch on the packed samples `xs`/`ys`."""
        import xgboost as xgb  # type: ignore # pylint: disable=import-outside-toplevel
        # Stored on `self` only so the closures below can reach it; it is
        # deleted again as soon as training finishes.
        self.d_train = PackSum(xs=xs, ys=ys)
        def obj(ys_pred: np.ndarray, d_train: "xgb.DMatrix"):  # type: ignore # pylint: disable = unused-argument
            return self.d_train.obj_square_error(ys_pred)
        def rmse(ys_pred: np.ndarray, d_train: "xgb.DMatrix"):  # type: ignore # pylint: disable = unused-argument
            return self.d_train.rmse(ys_pred)
        def avg_peak_score(ys_pred: np.ndarray, d_train: "xgb.DMatrix"):  # type: ignore # pylint: disable = unused-argument
            return self.d_train.average_peak_score(ys_pred, self.average_peak_n)
        # num_boost_round is effectively unbounded; the custom callback stops
        # training early based on `early_stopping_rounds`.
        self.booster = xgb.train(
            self.config.to_dict(),
            self.d_train.dmatrix,
            num_boost_round=10000,
            obj=obj,
            callbacks=[
                _get_custom_call_back(
                    early_stopping_rounds=self.early_stopping_rounds,
                    verbose_eval=self.verbose_eval,
                    fevals=[rmse, avg_peak_score],
                    evals=[(self.d_train.dmatrix, "tr")],
                    cvfolds=None,
                )
            ],
        )
        del self.d_train
def _predict( # type: ignore # pylint: disable=invalid-name
self,
xs: List[np.ndarray],
) -> np.ndarray:
d_test = PackSum(xs=xs, ys=None)
pred = self.booster.predict(d_test.dmatrix)
ret = d_test.predict_with_score(pred)
return ret
    def _validate(  # type: ignore # pylint: disable=invalid-name
        self,
        xs: List[np.ndarray],
        ys: np.ndarray,
    ) -> List[Tuple[str, float]]:
        """Evaluate the score of inputs.
        Parameters
        ----------
        xs : List[np.ndarray]
            A batch of input samples
        ys : List[float]
            A batch of labels
        Returns
        -------
        eval_result : List[Tuple[str, float]]
            (metric name, score) pairs, sorted so that "p-rmse" comes first.
        """
        assert self.booster is not None
        d_valid = PackSum(xs=xs, ys=ys)
        def average_peak_score(ys_pred: np.ndarray):
            return d_valid.average_peak_score(ys_pred, n=self.average_peak_n)
        ys_pred = self.booster.predict(d_valid.dmatrix)
        eval_result: List[Tuple[str, float]] = [
            feval(ys_pred)
            for feval in (
                average_peak_score,
                d_valid.rmse,
            )
        ]
        eval_result.sort(key=make_metric_sorter("p-rmse"))
        return eval_result
def _get_custom_call_back(
    early_stopping_rounds: int,
    verbose_eval: int,
    fevals: List[Callable],
    evals: List[Tuple["xgb.DMatrix", str]],
    focused_metric: str = "tr-p-rmse",
    cvfolds: Optional[List["xgb.training.CVPack"]] = None,
) -> "TrainingCallback":
    """Get a customized callback function for XGBoost. Work around xgboost import.

    The returned callback evaluates every function in `fevals` after each
    boosting iteration, logs the results, and stops training when
    `focused_metric` has not improved for `early_stopping_rounds` iterations.
    """
    def optional_xgboost_callback(cls):
        """Decorator for importing TrainingCallback from xgboost"""
        # pylint:disable = import-outside-toplevel
        try:
            from xgboost.callback import TrainingCallback  # type: ignore
        # pylint:enable = import-outside-toplevel
        except ImportError:
            # Old xgboost without TrainingCallback: use an empty stub base so
            # the class definition below still works.
            class TrainingCallback:  # type: ignore
                pass
        class OptXGBoostCustomCallback(cls, TrainingCallback):  # type: ignore
            pass
        return OptXGBoostCustomCallback
    @optional_xgboost_callback
    class XGBoostCustomCallback:
        """Custom callback class for xgboost to support multiple custom evaluation functions"""
        def __init__(
            self,
            early_stopping_rounds: int,
            verbose_eval: int,
            fevals: List[Callable],
            evals: List[Tuple["xgb.DMatrix", str]],
            focused_metric: str = "tr-p-rmse",
            cvfolds: Optional[List["xgb.training.CVPack"]] = None,
        ):
            self.early_stopping_rounds = early_stopping_rounds
            self.verbose_eval = verbose_eval
            self.fevals = fevals
            self.evals = evals
            # Early-stopping bookkeeping; lazily filled by `init` on first use
            self.state: Dict[str, Any] = {}
            self.focused_metric = focused_metric
            self.sort_key = make_metric_sorter(focused_metric=focused_metric)
            self.cvfolds = cvfolds
            if cvfolds is not None:
                self.aggregated_cv = None
        def __call__(self, env: "xgb.core.CallbackEnv"):
            # Compatibility with xgboost < 1.3
            return self.after_iteration(env.model, env.iteration, env.evaluation_result_list)
        def init(self, model: "xgb.Booster"):
            """Internal function for initialization"""
            booster: "xgb.Booster" = model
            self.state["best_iteration"] = 0
            self.state["best_score"] = float("inf")
            if booster is None:
                assert self.cvfolds is not None
                return
            if booster.attr("best_score") is not None:
                # Resume early-stopping bookkeeping persisted on the booster
                self.state["best_score"] = float(booster.attr("best_score"))
                self.state["best_iteration"] = int(booster.attr("best_iteration"))
                self.state["best_msg"] = booster.attr("best_msg")
            else:
                booster.set_attr(best_iteration=str(self.state["best_iteration"]))
                booster.set_attr(best_score=str(self.state["best_score"]))
        def after_iteration(
            self, model: "xgb.Booster", epoch: int, evals_log: Dict
        ):  # pylint: disable = unused-argument
            """Internal function for after_iteration"""
            # pylint:disable = import-outside-toplevel
            try:
                from xgboost.callback import _fmt_metric  # type: ignore
            except ImportError:
                # Compatibility with xgboost >= 1.6
                def _fmt_metric(value, show_stdv=True):
                    if len(value) == 2:
                        return f"{value[0]}:{value[1]:.5f}"
                    if len(value) == 3:
                        if show_stdv:
                            return f"{value[0]}:{value[1]:.5f}+{value[2]:.5f}"
                        return f"{value[0]}:{value[1]:.5f}"
                    raise ValueError("wrong metric value", value)
            import xgboost as xgb
            # NOTE(review): `xgboost.rabit` was removed in xgboost 2.x —
            # confirm the supported xgboost version range for this code path.
            from xgboost import rabit  # type: ignore
            try:
                from xgboost.training import aggcv  # type: ignore
            except ImportError:
                from xgboost.callback import _aggcv as aggcv  # type: ignore
            # pylint:enable = import-outside-toplevel
            if not self.state:
                self.init(model)
            booster: xgb.Booster = model
            iteration: int = epoch
            cvfolds: List[xgb.training.CVPack] = self.cvfolds
            ##### Evaluation #####
            # `eval_result` is a list of (key, score)
            eval_result: List[Tuple[str, float]] = []
            if cvfolds is None:
                # Parse booster.eval_set output of the form
                # "[iter]\tname:value\tname:value..." into (name, value) pairs
                eval_result = list(
                    itertools_chain.from_iterable(
                        [
                            (key, float(value))
                            for key, value in map(
                                lambda x: x.split(":"),
                                booster.eval_set(
                                    evals=self.evals,
                                    iteration=iteration,
                                    feval=feval,
                                ).split()[1:],
                            )
                        ]
                        for feval in self.fevals
                    )
                )
            else:
                eval_result = list(
                    itertools_chain.from_iterable(
                        [
                            (key, score)
                            for key, score, _std in aggcv(
                                fold.eval(
                                    iteration=iteration,
                                    feval=feval,
                                )
                                for fold in cvfolds
                            )
                        ]
                        for feval in self.fevals
                    )
                )
            eval_result = list(eval_result)
            eval_result.sort(key=self.sort_key)
            ##### Print eval result #####
            if self.verbose_eval and iteration % self.verbose_eval == 0:
                info = []
                for key, score in eval_result:
                    if "null" not in key:
                        info.append(f"{key}: {score:.6f}")
                logger.debug("XGB iter %3d: %s", iteration, "\t".join(info))
            ##### Choose score and do early stopping #####
            score = None
            for key, _score in eval_result:
                if key == self.focused_metric:
                    score = _score
                    break
            assert score is not None
            best_score = self.state["best_score"]
            best_iteration = self.state["best_iteration"]
            if score < best_score:
                tab = "\t"  # to work with f-string
                msg = f"[{epoch}] {tab.join([_fmt_metric(x) for x in eval_result])}"
                self.state["best_msg"] = msg
                self.state["best_score"] = score
                self.state["best_iteration"] = epoch
                # save the property to attributes, so they will occur in checkpoint.
                if model is not None:
                    model.set_attr(
                        best_score=str(self.state["best_score"]),
                        best_iteration=str(self.state["best_iteration"]),
                        best_msg=self.state["best_msg"],
                    )
            elif epoch - best_iteration >= self.early_stopping_rounds:
                best_msg = self.state["best_msg"]
                if self.verbose_eval and rabit.get_rank() == 0:
                    logger.debug("XGB stopped. Best iteration: %s ", best_msg)
                # instead of raising EarlyStopException, returning True to end the training
                return True
            # False to indicate training should not stop.
            return False
    return XGBoostCustomCallback(
        early_stopping_rounds=early_stopping_rounds,
        verbose_eval=verbose_eval,
        fevals=fevals,
        evals=evals,
        focused_metric=focused_metric,
        cvfolds=cvfolds,
    )
| 28,769 | 33.047337 | 124 | py |
tvm | tvm-main/python/tvm/meta_schedule/cost_model/random_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Random cost model
"""
from typing import List, Optional, Tuple, Union
from ..cost_model import PyCostModel
from ..runner import RunnerResult
from ..search_strategy import MeasureCandidate
from ..tune_context import TuneContext
from ..utils import derived_object # type: ignore
@derived_object
class RandomModel(PyCostModel):
    """Random cost model

    Predictions are uniform random numbers in [0, max_range); the numpy
    random-generator state is saved/loaded so sequences are reproducible.

    Parameters
    ----------
    seed : Optional[int]
        Seed for the numpy random number generator. Ignored when `path` is given.
    path : Optional[str]
        If given, the random state is loaded from this file instead of seeding.
    max_range : Optional[int]
        The maximum range of random results, [0, max_range].

    Reference
    ---------
    https://numpy.org/doc/stable/reference/random/generated/numpy.random.get_state.html
    """
    # numpy is imported at class scope so the annotation below can refer to
    # `np` without a module-level numpy dependency.
    import numpy as np  # type: ignore # pylint: disable=import-outside-toplevel
    random_state: Union[Tuple[str, np.ndarray, int, int, float], dict]
    path: Optional[str]
    def __init__(
        self,
        *,
        seed: Optional[int] = None,
        path: Optional[str] = None,
        max_range: Optional[int] = 100,
    ):
        import numpy as np  # type: ignore # pylint: disable=import-outside-toplevel
        super().__init__()
        if path is not None:
            self.load(path)
        else:
            np.random.seed(seed)
            self.random_state = np.random.get_state()
        self.max_range = max_range
    def load(self, path: str) -> None:
        """Load the cost model from given file location.
        Parameters
        ----------
        path : str
            The file path.
        """
        import numpy as np  # type: ignore # pylint: disable=import-outside-toplevel
        self.random_state = tuple(np.load(path, allow_pickle=True))  # type: ignore
    def save(self, path: str) -> None:
        """Save the cost model to given file location.
        Parameters
        ----------
        path : str
            The file path.
        """
        import numpy as np  # type: ignore # pylint: disable=import-outside-toplevel
        np.save(path, np.array(self.random_state, dtype=object), allow_pickle=True)
    def update(
        self,
        context: TuneContext,
        candidates: List[MeasureCandidate],
        results: List[RunnerResult],
    ) -> None:
        """Update the cost model given running results. A no-op for the
        random model, which learns nothing from measurements.
        Parameters
        ----------
        context : TuneContext,
            The tuning context.
        candidates : List[MeasureCandidate]
            The measure candidates.
        results : List[RunnerResult]
            The running results of the measure candidates.
        """
    def predict(
        self, context: TuneContext, candidates: List[MeasureCandidate]
    ) -> np.ndarray:  # type: ignore # pylint: disable=used-before-assignment
        """Predict random running results for the given candidates.
        Parameters
        ----------
        context : TuneContext,
            The tuning context.
        candidates : List[MeasureCandidate]
            The measure candidates.
        Return
        ------
        result : np.ndarray
            The predicted running results.
        """
        import numpy as np  # type: ignore # pylint: disable=import-outside-toplevel
        # Restore the saved generator state, draw, then persist the advanced
        # state so consecutive calls continue the same random sequence.
        np.random.set_state(self.random_state)
        # TODO(@zxybazh): Use numpy's RandState object:
        # https://numpy.org/doc/1.16/reference/generated/numpy.random.RandomState.html#numpy.random.RandomState
        result = np.random.rand(len(candidates)) * self.max_range  # type: ignore
        self.random_state = np.random.get_state()
        return result
| 4,437 | 31.632353 | 111 | py |
tvm | tvm-main/python/tvm/meta_schedule/cost_model/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.cost_model package.
"""
from .cost_model import CostModel, PyCostModel
from .random_model import RandomModel
from .xgb_model import XGBModel
| 952 | 40.434783 | 62 | py |
tvm | tvm-main/python/tvm/meta_schedule/cost_model/cost_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta Schedule CostModel."""
import ctypes
from typing import Callable, List, Union
# isort: off
from typing_extensions import Literal
# isort: on
import numpy as np # type: ignore
from tvm._ffi import register_object
from tvm.runtime import Object
from .. import _ffi_api
from ..runner import RunnerResult
from ..search_strategy import MeasureCandidate
from ..tune_context import TuneContext
from ..utils import _get_default_str
@register_object("meta_schedule.CostModel")
class CostModel(Object):
    """Cost model."""
    # Either a CostModel instance or a string shorthand accepted by `create`.
    CostModelType = Union["CostModel", Literal["xgb", "mlp", "random"]]
    def load(self, path: str) -> None:
        """Load the cost model from given file location.
        Parameters
        ----------
        path : str
            The file path.
        """
        _ffi_api.CostModelLoad(self, path)  # type: ignore # pylint: disable=no-member
    def save(self, path: str) -> None:
        """Save the cost model to given file location.
        Parameters
        ----------
        path : str
            The file path.
        """
        _ffi_api.CostModelSave(self, path)  # type: ignore # pylint: disable=no-member
    def update(
        self,
        context: TuneContext,
        candidates: List[MeasureCandidate],
        results: List[RunnerResult],
    ) -> None:
        """Update the cost model given running results.
        Parameters
        ----------
        context : TuneContext,
            The tuning context.
        candidates : List[MeasureCandidate]
            The measure candidates.
        results : List[RunnerResult]
            The running results of the measure candidates.
        """
        _ffi_api.CostModelUpdate(self, context, candidates, results)  # type: ignore # pylint: disable=no-member
    def predict(self, context: TuneContext, candidates: List[MeasureCandidate]) -> np.ndarray:
        """Predict normalized score with the cost model.
        Parameters
        ----------
        context : TuneContext,
            The tuning context.
        candidates : List[MeasureCandidate]
            The measure candidates.
        Return
        ------
        result : np.ndarray
            The predicted normalized score.
        """
        n = len(candidates)
        # Pre-allocate the result buffer; the C++ side fills it in place
        # through the raw pointer passed below.
        results = np.zeros(shape=(n,), dtype="float64")
        _ffi_api.CostModelPredict(  # type: ignore # pylint: disable=no-member
            self,
            context,
            candidates,
            results.ctypes.data_as(ctypes.c_void_p),
        )
        return results
    @staticmethod
    def create(
        kind: Literal["xgb", "mlp", "random", "none"],
        *args,
        **kwargs,
    ) -> "CostModel":
        """Create a CostModel.
        Parameters
        ----------
        kind : Literal["xgb", "mlp", "random", "none"]
            The kind of the cost model. Can be "xgb", "mlp", "random" or "none".
        args, kwargs
            Forwarded to the constructor of the selected cost model.
        Returns
        -------
        cost_model : CostModel
            The created cost model, or None when `kind` is "none".
        """
        from . import RandomModel, XGBModel  # pylint: disable=import-outside-toplevel
        if kind == "xgb":
            return XGBModel(*args, **kwargs)  # type: ignore
        if "num_tuning_cores" in kwargs:
            # num_tuning_cores is only relevant for XGBModel.
            kwargs.pop("num_tuning_cores")
        if kind == "random":
            return RandomModel(*args, **kwargs)  # type: ignore
        if kind == "mlp":
            from .mlp_model import (  # type: ignore # pylint: disable=import-outside-toplevel
                MLPModel,
            )
            return MLPModel(*args, **kwargs)  # type: ignore
        if kind == "none":
            # NOTE(review): returns None despite the declared return type —
            # callers are expected to handle the no-cost-model case.
            return None  # no cost model required
        raise ValueError(f"Unknown CostModel: {kind}")
# Module-level convenience alias for CostModel.create
create = CostModel.create  # pylint: disable=invalid-name
@register_object("meta_schedule.PyCostModel")
class _PyCostModel(CostModel):
    """
    A TVM object cost model to support customization on the python side.
    This is NOT the user facing class for function overloading inheritance.
    See also: PyCostModel
    """
    def __init__(
        self,
        f_load: Callable = None,
        f_save: Callable = None,
        f_update: Callable = None,
        predict_func: Callable = None,
        f_as_string: Callable = None,
    ):
        """Constructor."""
        def f_predict(context: TuneContext, candidates: List[MeasureCandidate], return_ptr) -> None:
            # The C++ side passes a raw pointer to a pre-allocated float64
            # buffer of length len(candidates); wrap it as a numpy array and
            # fill it in place with the user's predictions.
            n = len(candidates)
            return_ptr = ctypes.cast(return_ptr, ctypes.POINTER(ctypes.c_double))
            array_wrapper = np.ctypeslib.as_array(return_ptr, shape=(n,))
            res = predict_func(context, candidates)
            array_wrapper[:] = res
            assert (
                array_wrapper.dtype == "float64"
            ), "ValueError: Invalid data type returned from CostModel Predict!"
        self.__init_handle_by_constructor__(
            _ffi_api.CostModelPyCostModel,  # type: ignore # pylint: disable=no-member
            f_load,
            f_save,
            f_update,
            f_predict,
            f_as_string,
        )
class PyCostModel:
    """
    An abstract cost model with customized methods on the python-side.
    This is the user facing class for function overloading inheritance.
    Note: @derived_object is required for proper usage of any inherited class.
    """
    # Consumed by @derived_object to wire the overridden methods below into
    # the TVM-side _PyCostModel object.
    _tvm_metadata = {
        "cls": _PyCostModel,
        "methods": ["load", "save", "update", "predict", "__str__"],
    }
    def load(self, path: str) -> None:
        """Load the cost model from given file location.
        Parameters
        ----------
        path : str
            The file path.
        """
        raise NotImplementedError
    def save(self, path: str) -> None:
        """Save the cost model to given file location.
        Parameters
        ----------
        path : str
            The file path.
        """
        raise NotImplementedError
    def update(
        self,
        context: TuneContext,
        candidates: List[MeasureCandidate],
        results: List[RunnerResult],
    ) -> None:
        """Update the cost model given running results.
        Parameters
        ----------
        context : TuneContext,
            The tuning context.
        candidates : List[MeasureCandidate]
            The measure candidates.
        results : List[RunnerResult]
            The running results of the measure candidates.
        """
        raise NotImplementedError
    def predict(self, context: TuneContext, candidates: List[MeasureCandidate]) -> np.ndarray:
        """Predict given the measure candidates.
        Parameters
        ----------
        context : TuneContext,
            The tuning context.
        candidates : List[MeasureCandidate]
            The measure candidates.
        Return
        ------
        result : np.ndarray
            The predicted normalized score.
        """
        raise NotImplementedError
    def __str__(self) -> str:
        """Get the cost model as string with name.
        Return
        ------
        result : str
            Get the cost model as string with name.
        """
        return _get_default_str(self)
| 8,007 | 28.992509 | 112 | py |
tvm | tvm-main/python/tvm/meta_schedule/cost_model/metric.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Cost model metrics for meta schedule"""
import numpy as np # type: ignore
def max_curve(trial_scores: np.ndarray) -> np.ndarray:
    """Compute the running-maximum curve f(n) = max(trial_scores[:n+1]).

    Parameters
    ----------
    trial_scores : List[float]
        the score of the i-th trial

    Returns
    -------
    curve : np.ndarray
        A vector whose i-th entry is the best score seen among trials 0..i
    """
    curve = np.empty(len(trial_scores))
    best_so_far = -1e9
    for idx, score in enumerate(trial_scores):
        if score > best_so_far:
            best_so_far = score
        curve[idx] = best_so_far
    return curve
| 1,321 | 32.05 | 62 | py |
tvm | tvm-main/python/tvm/meta_schedule/space_generator/post_order_apply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Post Order Apply Space Generator."""
from tvm._ffi import register_object
from .. import _ffi_api
from .space_generator import (
MutatorProbType,
PostprocType,
ScheduleRuleType,
SpaceGenerator,
_normalize_rules,
)
@register_object("meta_schedule.PostOrderApply")
class PostOrderApply(SpaceGenerator):
    """
    PostOrderApply is the design space generator that generates design spaces by applying schedule
    rules to blocks in post-DFS order.

    Parameters
    ----------
    f_block_filter : Optional[function]
        An optional callback function that is used to filter which blocks have schedules generated
        for them. The function should take in a block and return True if a schedule should
        be generated or False if that block should be skipped. If no function is provided
        all blocks will have schedules generated.
    sch_rules : ScheduleRuleType
        The schedule rules to apply. The default "from-target" is normalized by
        `_normalize_rules` (presumably resolved from the tuning target — see that helper).
    postprocs : PostprocType
        The postprocessors to apply; "from-target" is normalized the same way.
    mutator_probs : MutatorProbType
        The probability distribution over mutators; "from-target" is normalized the same way.
    """
    def __init__(
        self,
        f_block_filter=None,
        sch_rules: ScheduleRuleType = "from-target",
        postprocs: PostprocType = "from-target",
        mutator_probs: MutatorProbType = "from-target",
    ):
        """Constructor"""
        # Turn string shorthands into concrete rule/postproc/mutator objects
        sch_rules, postprocs, mutator_probs = _normalize_rules(sch_rules, postprocs, mutator_probs)
        self.__init_handle_by_constructor__(
            _ffi_api.SpaceGeneratorPostOrderApply,  # type: ignore # pylint: disable=no-member
            f_block_filter,
            sch_rules,
            postprocs,
            mutator_probs,
        )
| 2,271 | 36.245902 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/space_generator/space_generator_union.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Union of meta Schedule design space generators."""
from typing import List
from tvm._ffi import register_object
from .. import _ffi_api
from .space_generator import (
MutatorProbType,
PostprocType,
ScheduleRuleType,
SpaceGenerator,
_normalize_rules,
)
@register_object("meta_schedule.SpaceGeneratorUnion")
class SpaceGeneratorUnion(SpaceGenerator):
"""Union of design space generators."""
def __init__(
self,
space_generators: List[SpaceGenerator],
sch_rules: ScheduleRuleType = "from-target",
postprocs: PostprocType = "from-target",
mutator_probs: MutatorProbType = "from-target",
):
"""Constructor.
Parameters
----------
space_generators : List[SpaceGenerator]
The list of design space generators to be unioned.
"""
sch_rules, postprocs, mutator_probs = _normalize_rules(sch_rules, postprocs, mutator_probs)
self.__init_handle_by_constructor__(
_ffi_api.SpaceGeneratorSpaceGeneratorUnion, # type: ignore # pylint: disable=no-member
space_generators,
sch_rules,
postprocs,
mutator_probs,
)
| 1,995 | 33.413793 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/space_generator/space_generator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Meta Schedule design space generators that generates design
space for generation of measure candidates.
"""
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.ir import IRModule
from tvm.runtime import Object
from tvm.tir.schedule import Schedule
from .. import _ffi_api
if TYPE_CHECKING:
from ..mutator import Mutator
from ..postproc import Postproc
from ..schedule_rule import ScheduleRule
from ..tune_context import TuneContext
@register_object("meta_schedule.SpaceGenerator")
class SpaceGenerator(Object):
"""The abstract design space generator interface."""
ScheduleFnType = Union[
Callable[[Schedule], None], # No output
Callable[[Schedule], Schedule], # Single output
Callable[[Schedule], List[Schedule]], # Multiple outputs
]
SpaceGeneratorType = Union[
"SpaceGenerator",
ScheduleFnType,
Literal["post-order-apply", "union"],
]
sch_rules: Optional[List["ScheduleRule"]]
postprocs: Optional[List["Postproc"]]
mutator_probs: Optional[Dict["Mutator", float]]
def _initialize_with_tune_context(self, context: "TuneContext") -> None:
"""Initialize the design space generator with tuning context.
Parameters
----------
context : TuneContext
The tuning context for initializing the design space generator.
"""
_ffi_api.SpaceGeneratorInitializeWithTuneContext( # type: ignore # pylint: disable=no-member
self, context
)
def generate_design_space(self, mod: IRModule) -> List[Schedule]:
"""Generate design spaces given a module.
Parameters
----------
mod : IRModule
The module used for design space generation.
Returns
-------
design_spaces : List[tvm.tir.Schedule]
The generated design spaces, i.e., schedules.
"""
return _ffi_api.SpaceGeneratorGenerateDesignSpace(self, mod) # type: ignore # pylint: disable=no-member
def clone(self) -> "SpaceGenerator":
"""Clone the design space generator.
Returns
-------
cloned_sg : SpaceGenerator
The cloned design space generator.
"""
return _ffi_api.SpaceGeneratorClone(self) # type: ignore # pylint: disable=no-member
@staticmethod
def create( # pylint: disable=keyword-arg-before-vararg
kind: Union[
Literal["post-order-apply", "union"],
ScheduleFnType,
] = "post-order-apply",
*args,
**kwargs,
) -> "SpaceGenerator":
"""Create a design space generator."""
from . import ( # pylint: disable=import-outside-toplevel
PostOrderApply,
ScheduleFn,
SpaceGeneratorUnion,
)
if callable(kind):
def create_schedule_fn(
func,
sch_rules=[],
postprocs=[],
mutator_probs={},
): # pylint: disable=dangerous-default-value
return ScheduleFn(func, sch_rules, postprocs, mutator_probs)
return create_schedule_fn(kind, *args, **kwargs) # type: ignore
if kind == "post-order-apply":
return PostOrderApply(*args, **kwargs)
if kind == "union":
return SpaceGeneratorUnion(*args, **kwargs)
if isinstance(kind, str):
return PostOrderApply(sch_rules=kind, postprocs=kind, mutator_probs=kind)
raise ValueError(f"Unknown SpaceGenerator: {kind}")
# Re-export the nested alias at module level for convenience.
ScheduleFnType = SpaceGenerator.ScheduleFnType
# Rule-like arguments accept either concrete objects or a preset tag;
# "from-target" means "derive the value from the compilation target".
ScheduleRuleType = Union[
    List["ScheduleRule"],
    Literal["llvm", "cuda", "cuda-tensorcore", "hexagon", "from-target"],
]
PostprocType = Union[
    List["Postproc"],
    Literal["llvm", "cuda", "cuda-tensorcore", "hexagon", "from-target"],
]
MutatorProbType = Union[
    Dict["Mutator", float],
    Literal["llvm", "cuda", "cuda-tensorcore", "hexagon", "from-target"],
]
# Module-level shorthand for SpaceGenerator.create.
create = SpaceGenerator.create  # pylint: disable=invalid-name
def _normalize_rules(
    sch_rules: ScheduleRuleType,
    postprocs: PostprocType,
    mutator_probs: MutatorProbType,
) -> Tuple[
    Optional[List["ScheduleRule"]],
    Optional[List["Postproc"]],
    Optional[Dict["Mutator", float]],
]:
    """Resolve string shorthands for rules, postprocessors and mutator probabilities.

    The literal "from-target" maps to None (meaning "deduce from the target
    later"); any other string is handed to the matching ``create`` factory.
    Non-string inputs pass through untouched.
    """
    # pylint: disable=import-outside-toplevel
    from ..mutator import Mutator
    from ..postproc import Postproc
    from ..schedule_rule import ScheduleRule

    # pylint: enable=import-outside-toplevel
    assert sch_rules is not None
    assert postprocs is not None
    assert mutator_probs is not None

    def _resolve(spec, factory):
        # Strings are shorthands; everything else is already normalized.
        if not isinstance(spec, str):
            return spec
        return None if spec == "from-target" else factory(spec)

    return (
        _resolve(sch_rules, ScheduleRule.create),
        _resolve(postprocs, Postproc.create),
        _resolve(mutator_probs, Mutator.create),
    )  # type: ignore
@register_object("meta_schedule.PySpaceGenerator")
class _PySpaceGenerator(SpaceGenerator):
"""
A TVM object space generator to support customization on the python side.
This is NOT the user facing class for function overloading inheritance.
See also: PySpaceGenerator
"""
def __init__(
self,
sch_rules: ScheduleRuleType = "from-target",
postprocs: PostprocType = "from-target",
mutator_probs: MutatorProbType = "from-target",
f_initialize_with_tune_context: Optional[Callable] = None,
f_generate_design_space: Optional[Callable] = None,
f_clone: Optional[Callable] = None,
):
"""Constructor."""
sch_rules, postprocs, mutator_probs = _normalize_rules(sch_rules, postprocs, mutator_probs)
self.__init_handle_by_constructor__(
_ffi_api.SpaceGeneratorPySpaceGenerator, # type: ignore # pylint: disable=no-member
sch_rules,
postprocs,
mutator_probs,
f_initialize_with_tune_context,
f_generate_design_space,
f_clone,
)
class PySpaceGenerator:
    """
    User-facing base class for space generators implemented in Python.
    Subclass it and override the methods below; the subclass must be wrapped
    with @derived_object to become a proper TVM object.
    """

    # Metadata consumed by @derived_object to build the backing TVM object.
    _tvm_metadata = {
        "cls": _PySpaceGenerator,
        "fields": ["sch_rules", "postprocs", "mutator_probs"],
        "methods": ["_initialize_with_tune_context", "generate_design_space", "clone"],
    }

    def _initialize_with_tune_context(self, context: "TuneContext") -> None:
        """Initialize the generator with a tuning context.

        Parameters
        ----------
        context : TuneContext
            The tuning context used for initialization.
        """
        raise NotImplementedError

    def generate_design_space(self, mod: IRModule) -> List[Schedule]:
        """Produce design spaces (schedules) for the given module.

        Parameters
        ----------
        mod : IRModule
            The module used for design space generation.

        Returns
        -------
        design_spaces : List[tvm.tir.Schedule]
            The generated design spaces, i.e., schedules.
        """
        raise NotImplementedError

    def clone(self) -> SpaceGenerator:
        """Return a copy of this design space generator.

        Returns
        -------
        cloned_sg : SpaceGenerator
            The cloned design space generator.
        """
        raise NotImplementedError
| 8,726 | 31.685393 | 112 | py |
tvm | tvm-main/python/tvm/meta_schedule/space_generator/schedule_fn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Union of meta Schedule design space generators."""
from tvm._ffi import register_object
from .. import _ffi_api
from .space_generator import (
MutatorProbType,
PostprocType,
ScheduleRuleType,
SpaceGenerator,
_normalize_rules,
)
@register_object("meta_schedule.ScheduleFn")
class ScheduleFn(SpaceGenerator):
"""Create a design space generator with customized schedule function.
The schedule function can have the following signatures:
- 1) [Schedule] -> None
- 2) [Schedule] -> Schedule
- 3) [Schedule] -> List[Schedule]
"""
def __init__(
self,
sch_fn: SpaceGenerator.ScheduleFnType,
sch_rules: ScheduleRuleType = "from-target",
postprocs: PostprocType = "from-target",
mutator_probs: MutatorProbType = "from-target",
):
"""Constructor.
Parameters
----------
sch_fn : SpaceGenerator.ScheduleFnType
The schedule function, which can have the following signatures:
- 1) [Schedule] -> None
- 2) [Schedule] -> Schedule
- 3) [Schedule] -> List[Schedule]
"""
sch_rules, postprocs, mutator_probs = _normalize_rules(sch_rules, postprocs, mutator_probs)
self.__init_handle_by_constructor__(
_ffi_api.SpaceGeneratorScheduleFn, # type: ignore # pylint: disable=no-member
sch_fn,
sch_rules,
postprocs,
mutator_probs,
)
| 2,263 | 34.375 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/space_generator/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.space_generator package.
Meta Schedule design space generators that generates design
space for generation of measure candidates.
"""
from .post_order_apply import PostOrderApply
from .schedule_fn import ScheduleFn
from .space_generator import PySpaceGenerator, ScheduleFnType, SpaceGenerator, create
from .space_generator_union import SpaceGeneratorUnion
from ...target import x86
| 1,193 | 41.642857 | 85 | py |
tvm | tvm-main/python/tvm/target/target.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Target data structure."""
import json
import re
import warnings
import tvm._ffi
from tvm._ffi import register_func as _register_func
from tvm.runtime import Object, convert
from tvm.runtime.container import String
from tvm.ir.container import Map, Array
from . import _ffi_api
@tvm._ffi.register_object
class TargetKind(Object):
    """Kind of a compilation target"""
    @property
    def options(self):
        """Returns the dict of available option names and types"""
        # Convert the FFI result into a plain Python dict for callers.
        return dict(_ffi_api.ListTargetKindOptions(self))
    @staticmethod
    def options_from_name(kind_name: str):
        """Returns the dict of available option names and types from a name of TargetKind"""
        # Same as ``options`` but looked up by kind name, without an instance.
        return dict(_ffi_api.ListTargetKindOptionsFromName(kind_name))
class TargetFeatures:
    """Attribute-style accessor for a target's features.
    Any attribute access on an instance is forwarded to
    ``_ffi_api.TargetGetFeature(target, name)``.
    """
    def __init__(self, target):
        # The target whose features are queried lazily through __getattr__.
        self.target = target
    def __getattr__(self, name: str):
        # Invoked only for attributes not found normally; resolve via the FFI.
        return _ffi_api.TargetGetFeature(self.target, name)
@tvm._ffi.register_object
class Target(Object):
    """Target device information, use through TVM API.
    Note
    ----
    You can create target using the constructor or the following functions
    - :py:func:`tvm.target.arm_cpu` create arm_cpu target
    - :py:func:`tvm.target.cuda` create CUDA target
    - :py:func:`tvm.target.rocm` create ROCM target
    - :py:func:`tvm.target.mali` create Mali target
    - :py:func:`tvm.target.intel_graphics` create Intel Graphics target
    """
    def __init__(self, target, host=None):
        """Construct a TVM target object from
        1) Raw target string
        2) Target config dict
        3) Target tag
        Parameters
        ----------
        target : Union[str, Dict[str, Any]]
            Can be one of a literal target string, a json string describing
            a configuration, or a dictionary of configuration options.
            When using a dictionary or json string to configure target, the
            possible values are:
            kind : str (required)
                Which codegen path to use, for example 'llvm' or 'cuda'.
            keys : List of str (optional)
                A set of strategies that can be dispatched to. When using
                "kind=opencl" for example, one could set keys to ["mali", "opencl", "gpu"].
            device : str (optional)
                A single key that corresponds to the actual device being run on.
                This will be effectively appended to the keys.
            libs : List of str (optional)
                The set of external libraries to use. For example ['cblas', 'mkl'].
            system-lib : bool (optional)
                If True, build a module that contains self registered functions.
                Useful for environments where dynamic loading like dlopen is banned.
            mcpu : str (optional)
                The specific cpu being run on. Serves only as an annotation.
            model : str (optional)
                An annotation indicating what model a workload came from.
            runtime : str (optional)
                An annotation indicating which runtime to use with a workload.
            mtriple : str (optional)
                The llvm triplet describing the target, for example "arm64-linux-android".
            mattr : List of str (optional)
                The llvm features to compile with, for example ["+avx512f", "+mmx"].
            mfloat-abi : str (optional)
                An llvm setting that is one of 'hard' or 'soft' indicating whether to use
                hardware or software floating-point operations.
            mabi : str (optional)
                An llvm setting. Generate code for the specified ABI, for example "lp64d".
            host : Union[str, Dict[str, Any]] (optional)
                Description for target host. Can be recursive. Similar to target.
        host : Optional[Union[str, Dict[str, Any]]]
            Similar to target but for target host. Can be one of a literal target host string,
            a json string describing a configuration, or a dictionary of configuration options.
            When using a dictionary or json string to configure target, the possible values are
            same as target.
        """
        if isinstance(target, str) and "-libs=mkldnn" in target:
            # Transparently rewrite the legacy library alias; only the alias is
            # replaced, the rest of the target string is untouched.
            target = target.replace("mkldnn", "dnnl")
            warnings.warn(
                "Legacy support of mkldnn is going to be deprecated. "
                "Please use -libs=dnnl instead.",
            )
        if isinstance(target, (dict, str)):
            target = convert(target)
        if isinstance(host, (dict, str)):
            host = convert(host)
        if target is None or not isinstance(target, (Map, String, Target)):
            raise ValueError("target has to be a string or dictionary.")
        if host is not None:
            if not isinstance(host, (Map, String, Target)):
                raise ValueError("target host has to be a string or dictionary.")
            # Recursively canonicalize both halves before handing them to the FFI.
            self.__init_handle_by_constructor__(_ffi_api.Target, Target(target), Target(host))
        else:
            self.__init_handle_by_constructor__(_ffi_api.Target, target)
    def __enter__(self):
        """Enter this target's scope; it becomes Target.current() inside the block."""
        _ffi_api.TargetEnterScope(self)
        return self
    def __exit__(self, ptype, value, trace):
        """Leave the scope entered by __enter__."""
        _ffi_api.TargetExitScope(self)
    def export(self):
        """Export this target (delegates to the TargetExport FFI call)."""
        return _ffi_api.TargetExport(self)
    def with_host(self, host=None):
        """Return a target combining this target with *host* (via the WithHost FFI call).
        NOTE(review): ``host=None`` would construct ``Target(None)``, which raises
        ValueError; callers in this module always pass a non-None host.
        """
        return _ffi_api.WithHost(self, Target(host))
    @staticmethod
    def current(allow_none=True):
        """Returns the current target.
        Parameters
        ----------
        allow_none : bool
            Whether allow the current target to be none
        Raises
        ------
        ValueError if current target is not set.
        """
        return _ffi_api.TargetCurrent(allow_none)
    @property
    def arch(self):
        """Returns the cuda arch from the target if it exists."""
        return str(self.attrs.get("arch", ""))
    @property
    def max_num_threads(self):
        """Returns the max_num_threads from the target if it exists."""
        # Raises KeyError when the attribute is missing.
        return int(self.attrs["max_num_threads"])
    @property
    def max_block_size_x(self):
        """Returns the max block size in x-dimension from the target if it exists."""
        return int(self.attrs["max_block_size_x"])
    @property
    def max_block_size_y(self):
        """Returns the max block size in y-dimension from the target if it exists."""
        return int(self.attrs["max_block_size_y"])
    @property
    def thread_warp_size(self):
        """Returns the thread_warp_size from the target if it exists."""
        return int(self.attrs["thread_warp_size"])
    @property
    def max_shared_memory_per_block(self):
        """Returns the max_shared_memory_per_block attribute as an int (KeyError if absent)."""
        return int(self.attrs["max_shared_memory_per_block"])
    @property
    def max_function_args(self):
        """Returns the max_function_args attribute, or -1 when unset."""
        return int(self.attrs.get("max_function_args", -1))
    @property
    def vtcm_capacity(self):
        """Returns the vtcm-capacity attribute, or 0 when unset."""
        return int(self.attrs.get("vtcm-capacity", 0))
    @property
    def device_name(self):
        """Returns the device attribute, or "" when unset."""
        return str(self.attrs.get("device", ""))
    @property
    def model(self):
        """Returns model from the target if it exists."""
        return str(self.attrs.get("model", "unknown"))
    @property
    def mcpu(self):
        """Returns the mcpu from the target if it exists."""
        return str(self.attrs.get("mcpu", ""))
    @property
    def mattr(self):
        """Returns the mattr from the target if it exists."""
        return list(self.attrs.get("mattr", []))
    @property
    def supports_integer_dot_product(self):
        """Whether the target declares or implies integer dot-product support."""
        # The empty-list default is just a falsy placeholder for "attribute absent".
        if self.attrs.get("supports_integer_dot_product", []):
            return bool(self.attrs["supports_integer_dot_product"])
        if self.kind.name == "cuda":
            # Parse the SM version out of e.g. "sm_75".
            sm_version = int(self.arch.split("_")[1])
            if sm_version >= 61:
                return True
        return False
    @property
    def libs(self):
        """Returns the list of external libraries ("libs" attribute), or [] when unset."""
        return list(self.attrs.get("libs", []))
    @property
    def supports_cooperative_matrix(self):
        """Whether the target declares cooperative-matrix support (defaults to False)."""
        if self.attrs.get("supports_cooperative_matrix", []):
            return bool(self.attrs["supports_cooperative_matrix"])
        else:
            return False
    @property
    def features(self):
        """Returns a TargetFeatures accessor for this target's feature flags."""
        return TargetFeatures(self)
    @property
    def l2_cache_size_bytes(self):
        """Returns the l2_cache_size_bytes attribute, or 0 when unset."""
        return int(self.attrs.get("l2_cache_size_bytes", 0))
    def get_kind_attr(self, attr_name):
        """Get additional attribute about the target kind.
        Parameters
        ----------
        attr_name : str
            The attribute name.
        Returns
        -------
        value : object
            The attribute value
        """
        return _ffi_api.TargetKindGetAttr(self.kind, attr_name)
    def get_target_device_type(self):
        """Returns the device_type for this target."""
        return _ffi_api.TargetGetDeviceType(self)
    @staticmethod
    def list_kinds():
        """Returns the list of available target names."""
        return list(_ffi_api.ListTargetKinds())
    @staticmethod
    def canon_target(target):
        """Given a single target-like object, returns the TVM Target object representing it.
        Can convert from:
        - None (to None).
        - An existing TVM Target object.
        - A string, eg "cuda" or "cuda -arch=sm_80"
        - A Python dictionary, eg {"kind": "cuda", "arch": "sm_80" }
        """
        if target is None:
            return None
        if isinstance(target, Target):
            return target
        return Target(target)
    @staticmethod
    def canon_target_and_host(target, target_host=None):
        """Returns a TVM Target capturing target and target_host. Also returns the host in
        canonical form. The given target can be in any form recognized by
        Target.canon_target. If given, target_host can be in any form recognized by
        Target.canon_target. If target_host is given it will be set as the 'host' in the
        result Target object (and a warning given).
        Note that this method does not support heterogeneous compilation targets.
        """
        target = Target.canon_target(target)
        if target is None:
            assert target_host is None, "Target host is not empty when target is empty."
            return target, target_host
        if target.host is None and target_host is not None:
            warnings.warn(
                "target_host parameter is going to be deprecated. "
                "Please pass in tvm.target.Target(target, host=target_host) instead."
            )
            target_host = Target.canon_target(target_host)
            target = target.with_host(target_host)
        if target is not None:
            # In case the target already had a host, extract it here.
            target_host = target.host
        return target, target_host
    @staticmethod
    def canon_multi_target(multi_targets):
        """Given a single target-like object, or a collection-like object of target-like objects,
        returns a TVM Array of TVM Target objects representing then. Can convert from:
        - None (to None).
        - A single target-like object in a form recognized by canon_target.
        - A Python list or TVM Array of target-like objects in a form recognized by
        canon_target.
        - A Python dict or TVM Map from TVM IntImm objects representing device types to
        a target-like object in a form recognized by canon_target. (This is a legacy
        method to represent heterogeneous targets. The keys are ignored.)
        """
        if multi_targets is None:
            return None
        if isinstance(multi_targets, (dict, Map)) and "kind" not in multi_targets:
            # Convert legacy heterogeneous map representation to ordinary list of targets.
            return Target.canon_multi_target(list(multi_targets.values()))
        if isinstance(multi_targets, (list, Array)):
            # Multiple Target results.
            return convert([Target.canon_target(tgt) for tgt in multi_targets])
        # Single Target result.
        return convert([Target.canon_target(multi_targets)])
    @staticmethod
    def canon_multi_target_and_host(target, target_host=None):
        """Returns a TVM Array<Target> capturing target and target_host. The given target can be in
        any form recognized by Target.canon_multi_target. If given, target_host can be in
        any form recognized by Target.canon_target. If target_host is given it will be set
        as the 'host' in each result Target object (and a warning given).
        """
        # Convert target to Array<Target>, but not yet accounting for any host.
        raw_targets = Target.canon_multi_target(target)
        assert raw_targets is not None and len(raw_targets) > 0
        # Convert host to Target, if given.
        if raw_targets[0].host is None and target_host is not None:
            warnings.warn(
                "target_host parameter is going to be deprecated. "
                "Please pass in tvm.target.Target(target, host=target_host) instead."
            )
            # Make sure the (canonical) host is captured in all the (canonical) targets.
            target_host = Target.canon_target(target_host)
            raw_targets = convert([tgt.with_host(target_host) for tgt in raw_targets])
        return raw_targets
    @staticmethod
    def canon_target_map_and_host(target_map, target_host=None):
        """Returns target_map as a map from TVM Target's in canonical form to IRModules. The keys
        of the input target_map can be in any form recognized by Target.canon_target.
        Similarly, if given, target_host can be in any form recognized by
        Target.canon_target. The final target_map keys will capture the target_host in
        canonical form. Also returns the target_host in canonical form."""
        new_target_map = {}
        canonical_target_host = None
        for tgt, mod in target_map.items():
            tgt = Target.canon_target(tgt)
            assert tgt is not None
            # The first host seen (from a target, else the explicit argument) wins.
            if canonical_target_host is None:
                if tgt.host is not None:
                    canonical_target_host = tgt.host
                elif target_host is not None:
                    # No deprecation warning in this case since host may have been manufactured
                    # behind the scenes in build_module.py build.
                    canonical_target_host = Target.canon_target(target_host)
            if tgt.host is None and canonical_target_host is not None:
                tgt = tgt.with_host(canonical_target_host)
            new_target_map[tgt] = mod
        return new_target_map, canonical_target_host
    @staticmethod
    def target_or_current(target):
        """Returns target, or the current target in the environment if target is None"""
        if target is None:
            target = Target.current()
        if target is None:
            raise ValueError("Target is not set in env or passed as argument.")
        return target
# TODO(@tvm-team): Deprecate the helper functions below. Encourage the usage of config dict instead.
def _merge_opts(opts, new_opts):
"""Helper function to merge options"""
if isinstance(new_opts, str):
new_opts = new_opts.split()
if new_opts:
opt_set = set(opts)
new_opts = [opt for opt in new_opts if opt not in opt_set]
return opts + new_opts
return opts
def cuda(model="unknown", arch=None, options=None):
"""Returns a cuda target.
Parameters
----------
model: str
The model of cuda device (e.g. 1080ti)
arch: str
The cuda architecture (e.g. sm_61)
options : str or list of str
Additional options
"""
opts = _merge_opts(["-model=%s" % model], options)
if arch:
opts = _merge_opts(["-arch=%s" % arch], opts)
if not any(["-arch" in opt for opt in opts]):
warnings.warn("Try specifying cuda arch by adding 'arch=sm_xx' to your target.")
return Target(" ".join(["cuda"] + opts))
def rocm(model="unknown", options=None):
"""Returns a ROCM target.
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = _merge_opts(["-model=%s" % model], options)
return Target(" ".join(["rocm"] + opts))
def mali(model="unknown", options=None):
"""Returns a ARM Mali GPU target.
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = ["-device=mali", "-model=%s" % model]
opts = _merge_opts(opts, options)
return Target(" ".join(["opencl"] + opts))
def intel_graphics(model="unknown", options=None):
"""Returns an Intel Graphics target.
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = ["-device=intel_graphics", "-model=%s" % model, "-thread_warp_size=16"]
opts = _merge_opts(opts, options)
return Target(" ".join(["opencl"] + opts))
# Map from microTVM board/SoC model name to the extra compiler flags it needs;
# an empty list means no model-specific flags. The keys are the values accepted
# by tvm.target.micro(model=...).
MICRO_SUPPORTED_MODELS = {
    "host": [],
    "atsamd51": ["-mcpu=cortex-m4"],
    "cxd5602gg": ["-mcpu=cortex-m4"],
    "esp32": [],
    "imxrt10xx": ["-mcpu=cortex-m7"],
    "mps2_an521": ["-mcpu=cortex-m33"],
    "mps3_an547": ["-mcpu=cortex-m55"],
    "nrf52840": ["-mcpu=cortex-m4+nodsp"],
    "nrf5340dk": ["-mcpu=cortex-m33"],
    "rp2040": ["-mcpu=cortex-m0"],
    "sam3x8e": ["-mcpu=cortex-m3"],
    "stm32f746xx": ["-mcpu=cortex-m7", "-march=armv7e-m"],
    "stm32h7xx": ["-mcpu=cortex-m7"],
    "stm32l4r5zi": ["-mcpu=cortex-m4"],
    "stm32u5xx": ["-mcpu=cortex-m33"],
    "zynq_mp_r5": ["-mcpu=cortex-r5"],
}
def micro(model="unknown", options=None):
"""Returns a microTVM target.
Parameters
----------
model : str
Canonically identifies the target device. This is typically a device board level name.
The allowed values are MICRO_SUPPORTED_MODELS.keys().
options : str or list of str
Additional options
"""
if model not in MICRO_SUPPORTED_MODELS:
raise ValueError(f"Model {model} not supported by tvm.target.micro.")
opts = _merge_opts(
MICRO_SUPPORTED_MODELS[model] + [f"-model={model}"],
options,
)
# NOTE: in the future, the default micro target will be LLVM except when
# external dependencies are present.
return Target(" ".join(["c"] + opts))
def arm_cpu(model="unknown", options=None):
"""Returns a ARM CPU target.
This function will also download pre-tuned op parameters when there is none.
Parameters
----------
model: str
SoC name or phone name of the arm board.
options : str or list of str
Additional options
"""
trans_table = {
"pixel2": ["-model=snapdragon835", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"mate10": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"mate10pro": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"p20": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"p20pro": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"],
"rasp3b": ["-model=bcm2837", "-mtriple=armv7l-linux-gnueabihf", "-mattr=+neon"],
"rasp4b": [
"-model=bcm2711",
"-mtriple=armv8l-linux-gnueabihf",
"-mattr=+neon",
"-mcpu=cortex-a72",
],
"rasp4b64": [
"-model=bcm2711",
"-mtriple=aarch64-linux-gnu",
"-mattr=+neon",
"-mcpu=cortex-a72",
],
"rk3399": ["-model=rk3399", "-mtriple=aarch64-linux-gnu", "-mattr=+neon"],
"pynq": ["-model=pynq", "-mtriple=armv7a-linux-eabi", "-mattr=+neon"],
"ultra96": ["-model=ultra96", "-mtriple=aarch64-linux-gnu", "-mattr=+neon"],
"beagleai": [
"-model=beagleai",
"-mtriple=armv7a-linux-gnueabihf",
"-mattr=+neon,+vfp4,+thumb2",
"-mcpu=cortex-a15",
],
"stm32mp1": [
"-model=stm32mp1",
"-mtriple=armv7a-linux-gnueabihf",
"-mattr=+neon,+vfp4,+thumb2",
"-mcpu=cortex-a7",
],
"thunderx": [
"-model=thunderx",
"-mtriple=aarch64-linux-gnu",
"-mattr=+neon,+crc,+lse",
"-mcpu=thunderxt88",
],
}
pre_defined_opt = trans_table.get(model, ["-model=%s" % model])
opts = ["-keys=arm_cpu,cpu", "-device=arm_cpu"] + pre_defined_opt
opts = _merge_opts(opts, options)
return Target(" ".join(["llvm"] + opts))
def rasp(options=None):
    """Return a Raspberry 3b target.

    Parameters
    ----------
    options : str or list of str
        Additional options
    """
    # Deprecated shim: forward to the arm_cpu("rasp3b") preset.
    warnings.warn(
        'tvm.target.rasp() is going to be deprecated. Please use tvm.target.arm_cpu("rasp3b")'
    )
    return arm_cpu("rasp3b", options)
def vta(model="unknown", options=None):
opts = ["-device=vta", "-keys=vta,cpu", "-model=%s" % model]
opts = _merge_opts(opts, options)
return Target(" ".join(["ext_dev"] + opts))
def bifrost(model="unknown", options=None):
"""Return an ARM Mali GPU target (Bifrost architecture).
Parameters
----------
options : str or list of str
Additional options
"""
opts = ["-device=bifrost", "-model=%s" % model]
opts = _merge_opts(opts, options)
return Target(" ".join(["opencl"] + opts))
def riscv_cpu(model="sifive-u54", options=None):
    """Returns a RISC-V CPU target.
    Default: sifive-u54 rv64gc

    Parameters
    ----------
    model: str
        CPU name.
    options : str or list of str
        Additional options
    """
    # Preset option bundles for known SiFive cores.
    known_models = {
        "sifive-e31": [
            "-model=sifive-e31",
            "-mtriple=riscv32-unknown-linux-gnu",
            "-mcpu=sifive-e31",
            "-mabi=ilp32",
            # cc: riscv64-unknown-linux-gnu-g++ -march=rv32imac -mabi=ilp32 -mcpu=sifive-e31
        ],
        "sifive-e76": [
            "-model=sifive-e76",
            "-mtriple=riscv32-unknown-linux-gnu",
            "-mcpu=sifive-e76",
            "-mabi=ilp32",
            # cc: riscv64-unknown-linux-gnu-g++ -march=rv32imafc -mabi=ilp32 -mcpu=sifive-e76
        ],
        "sifive-u54": [
            "-model=sifive-u54",
            "-mtriple=riscv64-unknown-linux-gnu",
            "-mcpu=sifive-u54",
            "-mabi=lp64d",
            # cc: riscv64-unknown-linux-gnu-g++ -march=rv64gc -mabi=lp64d -mcpu=sifive-u54
        ],
        "sifive-u74": [
            "-model=sifive-u74",
            "-mtriple=riscv64-unknown-linux-gnu",
            "-mcpu=sifive-u74",
            "-mabi=lp64d",
            # cc: riscv64-unknown-linux-gnu-g++ -march=rv64gc -mabi=lp64d -mcpu=sifive-u74
        ],
    }
    # Unknown models fall back to only recording the model name.
    model_opts = known_models.get(model, ["-model=%s" % model])
    merged = _merge_opts(["-keys=arm_cpu,cpu", "-device=arm_cpu"] + model_opts, options)
    return Target(" ".join(["llvm"] + merged))
def hexagon(cpu_ver="v68", **kwargs):
    """Returns a Hexagon target.
    Parameters
    ----------
    cpu_ver : str (default: "v68")
        CPU version used for code generation. Not all allowed cpu str
        will be valid, LLVM will throw an error.
    Recognized keyword parameters
    -----------------------------
    hvx : int (default: 128)
        Size of HVX vector in bytes. Value of 0 disables HVX codegen.
    llvm_options : str or list of str (default: None)
        User defined compiler arguments.
    use_qfloat : bool (default: True for cpu_ver >= v68, False otherwise)
        Whether to use QFloat HVX instructions.
    use_ieee_fp : bool (default: False)
        Whether to use IEEE HVX instructions
    num_cores : int (default: 4)
        The number of HVX threads. This attribute is required by meta scheduler.
    vtcm_capacity: int (default: 0)
        Hexagon VTCM capacity limitation. If the value is 0, the capacity is treated as unbounded.
    Note: Floating point support in HVX requires LLVM 14+.
    """
    # Some of the target parameters correspond to target kind attributes
    # listed in src/target/target_kind.cc. For those parameters, their
    # names follow the attribute names with the exception of '_' being used
    # in place of '-'.
    # Example compiler arguments
    # llvm -mtriple=hexagon -mcpu=hexagonv68 -mattr=+hvxv68,+hvx-length128b
    # Parse the leading numeric architecture version out of a string like "v68"
    # or "v67t" (the trailing suffix is ignored).
    def get_arch_version(cpu_ver):
        m = re.match(r"v([0-9]+).*", cpu_ver)
        assert m
        return int(m.group(1))
    # Check for valid codegen cpu
    valid_hex = ["v65", "v66", "v67", "v67t", "v68", "v69", "v71", "v73"]
    # Normalize cpu_ver to the substring starting at "v", lowercased.
    # NOTE(review): the bare except deliberately converts any failure here
    # (non-string input, missing "v", or the AssertionError from an unknown
    # version) into a uniform ValueError.
    try:
        cpu_ver = cpu_ver[cpu_ver.index("v") :].lower()
        assert cpu_ver in valid_hex
    except:
        msg = "{} is not a valid Hexagon version\nvalid versions include {}"
        raise ValueError(msg.format(cpu_ver, valid_hex)) from None
    # Target configuration:
    arch_version = get_arch_version(cpu_ver)
    # Defaults below; any recognized keyword argument overrides them.
    config = {
        "hvx": 128,
        "llvm_options": None,
        "use_qfloat": arch_version >= 68,
        "use_ieee_fp": False,
        "vtcm_capacity": 0,
    }
    config.update(kwargs)
    # Warn about obsolete parameter names.
    if config.get("sim_args") or config.get("sim_options"):
        msg = (
            "Setting simulator options in target is deprecated, set environment variable "
            "HEXAGON_SIM_ARGS instead"
        )
        warnings.warn(msg, stacklevel=2)
    if config.get("llvm_args"):
        msg = "The keyword parameter 'llvm_args' is deprecated, use 'llvm_options' instead"
        warnings.warn(msg, stacklevel=2)
        config.update({"llvm_options": config["llvm_args"]})
    # LLVM target string
    def create_llvm_target(cpu_ver, config):
        """Create LLVM target string."""
        target = " -mtriple=hexagon"
        mcpu = " -mcpu=hexagon" + cpu_ver
        # Process the options that affect target features and return the
        # target feature string.
        def create_target_features(config):
            # Map from our config keys to the LLVM feature names they control.
            features = {
                "use_qfloat": "hvx-qfloat",
                "use_ieee_fp": "hvx-ieee-fp",
            }
            tfs = []
            if config["hvx"] > 0:
                valid_hvx = [0, 64, 128]
                if not config["hvx"] in valid_hvx:
                    raise ValueError("Invalid hvx value, should be one of " + str(valid_hvx))
                tfs += ["+hvx" + cpu_ver, "+hvx-length" + str(config["hvx"]) + "b"]
            else:
                tfs += ["-hvx"]
            # All the additional features happen to only apply to v68+.
            # Don't bother applying them (even with '-') to lower versions.
            if arch_version >= 68:
                # "-+"[flag] selects '-' for False and '+' for True.
                tfs += ["-+"[config[f]] + features[f] for f in features]
            return "-mattr=" + ",".join(tfs) if tfs else ""
        return target + mcpu + " " + create_target_features(config)
    # LLVM options string
    def create_llvm_options(cpu_ver, config): # pylint: disable=unused-argument
        """Create LLVM options string."""
        llvm_options = config["llvm_options"]
        # To enable auto-vectorization for v68 target added the below llvm-option by default
        if arch_version == 68:
            if not llvm_options:
                llvm_options = ""
            llvm_options += " -force-hvx-float"
        # TVM's option parser doesn't allow '=' in values, but '=' can
        # appear in LLVM flags. Replace it with '@', since it's unlikely
        # that '@' will be used in another context.
        if llvm_options is None or len(llvm_options.strip()) == 0:
            return ""
        args = [s.replace("=", "@") for s in llvm_options.split()]
        return "--llvm-options=" + ",".join(args)
    target_str = create_llvm_target(cpu_ver, config)
    llvm_str = create_llvm_options(cpu_ver, config)
    args_list = target_str.split() + llvm_str.split()
    # Membership is tested on kwargs (only honor an explicit caller value), but
    # the value is read from config; they agree because of config.update(kwargs).
    num_cores = config["num_cores"] if "num_cores" in kwargs else 4
    args_list.append("--num-cores=%d" % num_cores)
    args_list.append("--vtcm-capacity=%d" % config["vtcm_capacity"])
    return Target(" ".join(["hexagon"] + args_list))
# Mapping from an STM32 series name (e.g. "stm32F4xx") to the target option
# list used by tvm.target.stm32(). All entries are Arm Cortex-M CPUs built
# with the C backend.
STM32_SUPPORTED_SERIES = {
    # High-Performance
    "stm32H7xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m7", "-march=armv7e-m"],
    "stm32F7xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m7"],
    "stm32F4xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m4"],
    "stm32F2xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m3"],
    # Mainstream
    "stm32G0xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m0+"],
    "stm32F0xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m0"],
    "stm32F1xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m3"],
    "stm32G4xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m4"],
    "stm32F3xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m4"],
    # Low-power
    "stm32U5xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m33"],
    "stm32L5xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m33"],
    "stm32L4xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m4"],
    "stm32L1xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m3"],
    "stm32L0xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m0+"],
}
def stm32(series="unknown", options=None):
    """Returns a STM32 target.

    Parameters
    ----------
    series: str
        Series name of a STM32 board series, eg. stm32H7xx or stm32F4xx
    options : str or list of str
        Additional options

    Raises
    ------
    ValueError
        If the series name is not in STM32_SUPPORTED_SERIES.
    """
    if series not in STM32_SUPPORTED_SERIES:
        raise ValueError(f"Series {series} is not supported by tvm.target.stm32.")
    merged = _merge_opts(STM32_SUPPORTED_SERIES[series], options)
    return Target(" ".join(["c"] + merged))
def adreno(model="unknown", options=None):
    """Returns a Qualcomm GPU target.

    Parameters
    ----------
    model: str
        The model of this device
    options : str or list of str
        Additional options
    """
    device_opts = ["-device=adreno", "-model=%s" % model]
    merged = _merge_opts(device_opts, options)
    return Target(" ".join(["opencl"] + merged))
def create(target):
    """Deprecated. Use the constructor of :py:mod:`tvm.target.Target` directly."""
    msg = "tvm.target.create() is being deprecated. Please use tvm.target.Target() instead"
    warnings.warn(msg)
    return Target(target)
@_register_func("target._load_config_dict")
def _load_config_dict(config_dict_str):
    """Parse a JSON target-config string; return the dict, or None when invalid.

    Invalid means: not valid JSON, not a JSON object, or any non-string key.
    """
    try:
        parsed = json.loads(config_dict_str)
    except json.decoder.JSONDecodeError:
        return None
    if not isinstance(parsed, dict):
        return None
    if not all(isinstance(key, str) for key in parsed):
        return None
    return parsed
| 31,796 | 36.059441 | 100 | py |
tvm | tvm-main/python/tvm/target/intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Target dependent intrinsic registration."""
from tvm.ir import register_intrin_lowering
from tvm.tir import call_pure_extern
def _rule_float_suffix(op):
"""Intrinsic rule: Add float suffix if it is float32.
This is an example intrinsic generation rule.
Parameters
----------
op : PrimExpr
The call expression of original intrinsic.
Returns
-------
ret : PrimExpr
The translated intrinsic rule.
Return same op if no translation is possible.
See Also
--------
register_intrin_lowering : The registration function for intrinsic lowering rule.
"""
name = op.op.name
assert name.startswith("tir.")
prefix = name[4:]
if op.dtype == "float32":
return call_pure_extern(op.dtype, "%sf" % prefix, *op.args)
if op.dtype == "float64":
return call_pure_extern(op.dtype, prefix, *op.args)
return op
def _rule_float_direct(op):
"""Intrinsic rule: Directly call pure extern function for floats.
This is an example intrinsic generation rule.
Parameters
----------
op : PrimExpr
The call expression of original intrinsic.
Returns
-------
ret : PrimExpr
The translated intrinsic rule.
Return same op if no translation is possible.
See Also
--------
register_intrin_lowering : The registration function for intrinsic lowering rule.
"""
if str(op.dtype).startswith("float"):
return call_pure_extern(op.dtype, op.op.name[4:], *op.args)
return None
# opencl pattern for exp: OpenCL lowers tir.exp as a direct extern call
# (see _rule_float_direct).
register_intrin_lowering("tir.exp", target="opencl", f=_rule_float_direct, level=99)
# default pattern for exp: everywhere else use the C-math convention of an
# "f"-suffixed call for float32 (see _rule_float_suffix).
register_intrin_lowering("tir.exp", target="default", f=_rule_float_suffix, level=99)
| 2,551 | 30.121951 | 85 | py |
tvm | tvm-main/python/tvm/target/x86.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common x86 related utilities"""
from .._ffi import register_func
from .target import Target
@register_func("tvm.target.x86.target_has_sse41")
def target_has_sse41(target):
    """Return True when the x86 -mcpu string *target* supports SSE4.1."""
    # Any CPU with a newer vector extension implies SSE4.1 support.
    if target_has_sse42(target) or target_has_avx(target) or target_has_avx2(target):
        return True
    if target_has_avx512(target) or target_has_vnni(target):
        return True
    return target in {"btver2", "penryn"}
@register_func("tvm.target.x86.target_has_sse42")
def target_has_sse42(target):
    """Return True when the x86 -mcpu string *target* supports SSE4.2."""
    sse42_cpus = {
        "silvermont",
        "slm",
        "goldmont",
        "goldmont-plus",
        "tremont",
        "nehalem",
        "corei7",
        "westmere",
        "bdver1",
        "bdver2",
        "bdver3",
        "x86-64-v2",
    }
    if target in sse42_cpus:
        return True
    # Any AVX-class CPU implies SSE4.2 support.
    return (
        target_has_avx(target)
        or target_has_avx2(target)
        or target_has_avx512(target)
        or target_has_vnni(target)
    )
@register_func("tvm.target.x86.target_has_avx")
def target_has_avx(target):
    """Return True when the x86 -mcpu string *target* supports AVX."""
    if target in {"sandybridge", "corei7-avx", "ivybridge", "core-avx-i"}:
        return True
    # Newer vector extensions imply AVX support.
    return target_has_avx2(target) or target_has_avx512(target) or target_has_vnni(target)
@register_func("tvm.target.x86.target_has_avx2")
def target_has_avx2(target):
    """Return True when the x86 -mcpu string *target* supports AVX2."""
    avx2_cpus = {
        "haswell",
        "core-avx2",
        "broadwell",
        "skylake",
        "bdver4",
        "znver1",
        "znver2",
        "znver3",
        "x86-64-v3",
    }
    if target in avx2_cpus:
        return True
    # AVX-512 / VNNI CPUs also support AVX2.
    return target_has_avx512(target) or target_has_vnni(target)
@register_func("tvm.target.x86.target_has_avx512")
def target_has_avx512(target):
    """Return True when the x86 -mcpu string *target* supports AVX-512."""
    avx512_cpus = {
        "skylake-avx512",
        "skx",
        "knl",
        "knm",
        "x86-64-v4",
        "cannonlake",
        # explicit enumeration of VNNI capable due to collision with alderlake
        "cascadelake",
        "icelake-client",
        "icelake-server",
        "rocketlake",
        "tigerlake",
        "cooperlake",
        "sapphirerapids",
    }
    return target in avx512_cpus
@register_func("tvm.target.x86.target_has_vnni")
def target_has_vnni(target):
    """Return True when the x86 -mcpu string *target* supports VNNI instructions."""
    vnni_cpus = {
        "cascadelake",
        "icelake-client",
        "icelake-server",
        "rocketlake",
        "tigerlake",
        "cooperlake",
        "sapphirerapids",
        "alderlake",
    }
    return target in vnni_cpus
@register_func("tvm.target.x86.target_has_amx")
def target_has_amx(target):
    """Return True when the x86 -mcpu string *target* supports Intel AMX."""
    amx_cpus = {"sapphirerapids"}
    return target in amx_cpus
@register_func("tvm.topi.x86.utils.get_simd_32bit_lanes")
def get_simd_32bit_lanes():
    """Number of float32 lanes in the widest SIMD vector of the current target's mcpu."""
    mcpu = Target.current().mcpu
    if target_has_avx512(mcpu):
        return 16
    if target_has_avx2(mcpu):
        return 8
    # SSE baseline: 128-bit vectors, i.e. 4 x float32.
    return 4
| 3,680 | 24.741259 | 78 | py |
tvm | tvm-main/python/tvm/target/codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Code generation related functions."""
from . import _ffi_api
from .target import Target
def build_module(mod, target):
    """Build IRModule into Module.

    Parameters
    ----------
    mod : tvm.IRModule
        The ir module.
    target : str
        The target module type.

    Returns
    -------
    module : runtime.Module
        The corresponding module.
    """
    # Accept either a Target object or a target string.
    if isinstance(target, str):
        target = Target(target)
    return _ffi_api.Build(mod, target)
def llvm_lookup_intrinsic_id(name):
    """Look up an LLVM intrinsic id by its name.

    Parameters
    ----------
    name : str
        The name of the intrinsic.

    Returns
    -------
    intrin_id : int
        The intrinsic id.
    """
    intrin_id = _ffi_api.llvm_lookup_intrinsic_id(name)
    return intrin_id
def llvm_get_intrinsic_name(intrin_id: int) -> str:
    """Return the name of the LLVM intrinsic with the given id.

    Parameters
    ----------
    intrin_id : int
        The id of the intrinsic.

    Returns
    -------
    name : str
        The name of the intrinsic.
    """
    name = _ffi_api.llvm_get_intrinsic_name(intrin_id)
    return name
def llvm_version_major(allow_none=False):
    """Get the major LLVM version.

    Parameters
    ----------
    allow_none : bool
        When True, return None instead of raising if LLVM is unavailable.

    Returns
    -------
    major : int
        The major LLVM version.

    Raises
    ------
    RuntimeError
        If TVM was built without LLVM and allow_none is False.
    """
    try:
        return _ffi_api.llvm_version_major()
    except AttributeError:
        # The FFI symbol is missing when TVM was built without LLVM.
        if not allow_none:
            raise RuntimeError(
                "LLVM version is not available, please check if you built TVM with LLVM"
            )
    return None
| 2,367 | 24.462366 | 100 | py |
tvm | tvm-main/python/tvm/target/virtual_device.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Python bindings for creating VirtualDevices."""
import tvm
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object
class VirtualDevice(Object):
    """A compile time representation for where data is to be stored at runtime,
    and how to compile code to compute it."""

    def __init__(self, device=None, target=None, memory_scope="") -> None:
        if device is None:
            # Default to the 'unconstrained' device: device type -1, device id -1.
            device = tvm.device(-1, -1)
        self.__init_handle_by_constructor__(
            _ffi_api.VirtualDevice_ForDeviceTargetAndMemoryScope,
            device,
            target,
            memory_scope,
        )

    @property
    def device_type(self) -> int:
        """Integer device type of this virtual device."""
        return self.device_type_int
| 1,542 | 36.634146 | 94 | py |
tvm | tvm-main/python/tvm/target/datatype.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Bring Your Own Datatypes custom datatype framework
TODO(@gussmith23 @hypercubestart) link to BYODT docs when they exist"""
import tvm
from tvm.runtime import convert, DataType
from tvm.tir.expr import (
Call as _Call,
Cast as _Cast,
FloatImm as _FloatImm,
BinaryOpExpr as _BinaryOpExpr,
)
from tvm.tir.op import call_pure_extern
from tvm._ffi import register_func as _register_func
from tvm.tir import call_intrin
def register(type_name, type_code):
    """Register a custom datatype under *type_name* with the given *type_code*.

    Type codes are allocated manually by the user, who must ensure no two
    custom types share a code. Codes must be >= kCustomBegin; see
    include/tvm/runtime/data_type.h.

    Example:

    .. code-block:: python

        # Register a dtype named 'posites2' under type code 130.
        tvm.target.datatype.register('posites2', 130)

    Parameters
    ----------
    type_name : str
        The name of the custom datatype.
    type_code : int
        The type's code.
    """
    tvm.runtime._ffi_api._datatype_register(type_name, type_code)
def get_type_name(type_code):
    """Look up the name of a registered custom datatype by its type code.

    Only works for datatypes registered via tvm.target.datatype.register();
    TVM-native types are not covered.

    Example:

    .. code-block:: python

        tvm.target.datatype.register('posites2', 130)
        assert tvm.target.datatype.get_type_name(130) == 'posites2'

    Parameters
    ----------
    type_code : int
        The type code of the custom datatype.

    Returns
    -------
    type_name : String
        The name of the custom datatype.
    """
    return tvm.runtime._ffi_api._datatype_get_type_name(type_code)
def get_type_code(type_name):
    """Look up the type code of a registered custom datatype by its name.

    Only works for datatypes registered via tvm.target.datatype.register();
    TVM-native types are not covered.

    Example:

    .. code-block:: python

        tvm.target.datatype.register('posites2', 130)
        assert tvm.target.datatype.get_type_code('posites2') == 130

    Parameters
    ----------
    type_name : str
        The type name

    Returns
    -------
    type_code : int
        The type code of the custom datatype.
    """
    return tvm.runtime._ffi_api._datatype_get_type_code(type_name)
def get_type_registered(type_code):
    """Return True when a custom datatype is registered under *type_code*.

    Example:

    .. code-block:: python

        tvm.target.datatype.register('posites2', 130)
        assert tvm.target.datatype.get_type_registered(130)

    Parameters
    ----------
    type_code: int
        The type code

    Returns
    -------
    type_registered : bool
        True if a custom datatype is registered under this type code,
        and False otherwise.
    """
    return tvm.runtime._ffi_api._datatype_get_type_registered(type_code)
def register_op(
    lower_func, op_name, target, src_type_name, dest_type_name=None, intrinsic_name=None
):
    """Register a lowering function for one operator over a custom datatype.

    At build time, Relay must lower operators over custom datatypes into
    operators it can compile; for each such operator Relay looks up a
    user-registered lowering function, which this function registers.
    Use create_lower_func to build the lowering function — it covers Casts,
    intrinsics (e.g. sqrt, sigmoid) and binary expressions (Add, Sub, Mul,
    Div). See the LowerCustomDatatypes pass for how registered functions
    are consumed.

    A lowering function takes a Relay node and must return a semantically
    equivalent node that contains no custom datatypes.

    Parameters
    ----------
    lower_func : function
        The lowering function to call. See create_lower_func.
    op_name : str
        The name of the operation which the function computes, given by its
        class name (e.g. Add, LE, Cast, Call).
    target : str
        The name of codegen target.
    src_type_name : str
        The name of the custom datatype, e.g. posites2 (but not
        custom[posites2]32). If op_name is not "Cast", the target type is
        guaranteed to be the same as src_type_name.
    dest_type_name : str
        Required when op_name is "Cast": the destination datatype of the
        cast's argument. Unused otherwise.
    intrinsic_name : str
        If op_name is "Call" and intrinsic_name is not None, the op is
        assumed to be a Call to the named intrinsic.
    """
    prefix = f"tvm.datatype.lower.{target}.{op_name}"
    if op_name == "Cast":
        assert dest_type_name is not None
        lower_func_name = f"{prefix}.{dest_type_name}.{src_type_name}"
    elif op_name == "Call" and intrinsic_name is not None:
        lower_func_name = f"{prefix}.intrin.{intrinsic_name}.{src_type_name}"
    else:
        lower_func_name = f"{prefix}.{src_type_name}"
    tvm._ffi.register_func(lower_func_name, lower_func)
def register_min_func(func, type_name):
    """Register *func* as the minimum-value builder for a custom datatype.

    Ops such as max pooling and argmax need the minimum finite value
    representable by the datatype they operate on; this registers a function
    producing that value. Use create_min_lower_func to build it; it covers
    most use-cases.

    Note: when the custom datatype is known to be representable by a float,
    a hand-written function returning a FloatImm lets optimizations such as
    rewrites work as expected.

    Parameters
    ----------
    func : function
        Takes an integer num_bits and returns a TIR expression node for a
        scalar of type custom[type_name]num_bits holding the minimum
        representable value.
    type_name : str
        The name of the custom datatype, e.g. posites2 (but not
        custom[posites2]32).
    """
    _register_func("tvm.datatype.min." + type_name, func)
def create_min_lower_func(extern_func_map, type_name):
    """Returns a lowering function for getting the minimum value of a custom datatype.

    Parameters
    ----------
    extern_func_map : map
        A map from bit lengths to the name of the extern "C" function to lower to.
    type_name : string
        The name of the custom datatype, e.g. posites2 (but not custom[posites2]32).
    """

    def lower(num_bits):
        # Build the full dtype string, e.g. custom[posites2]32.
        dtype = f"custom[{type_name}]{num_bits}"
        if num_bits not in extern_func_map:
            # Bug fix: the message was a plain literal ("... for {dtype}"), so
            # the dtype was never interpolated; use an f-string.
            raise RuntimeError(f"missing minimum function for {dtype}")
        return call_pure_extern(dtype, extern_func_map[num_bits])

    return lower
def create_lower_func(extern_func_map):
    """Returns a function which lowers an operation to an extern function call.

    Parameters
    ----------
    extern_func_map : map
        If lowering a Cast, a map from (src_bit_length, dest_bit_length)
        tuples to the name of the extern "C" function to lower to. Otherwise,
        for unary and binary ops, a map from bit_length to that name.
    """

    def lower(op):
        """Lower a Cast, FloatImm, Call, or binary op to the registered extern call.

        If the op's dtype is a registered custom type, the resulting call has
        a uint dtype of the same bit width (and lane count); otherwise the
        dtype is left unchanged.
        """
        t = DataType(op.dtype)
        result_dtype = op.dtype
        if get_type_registered(t.type_code):
            result_dtype = "uint" + str(t.bits)
            if t.lanes > 1:
                result_dtype += "x" + str(t.lanes)
        # Casts key on (source bits, dest bits); everything else on dest bits.
        if isinstance(op, _Cast):
            key = (DataType(op.value.dtype).bits, t.bits)
        else:
            key = t.bits
        if key not in extern_func_map:
            raise RuntimeError(f"missing key {key} in extern_func_map for {op.astext()}")
        extern_name = extern_func_map[key]
        if isinstance(op, (_Cast, _FloatImm)):
            return call_pure_extern(result_dtype, extern_name, op.value)
        if isinstance(op, _Call):
            return call_pure_extern(result_dtype, extern_name, *op.args)
        if isinstance(op, _BinaryOpExpr):
            return call_pure_extern(result_dtype, extern_name, op.a, op.b)
        raise RuntimeError(f"lowering unsupported op: {op.astext()}")

    return lower
def lower_ite(ite_op):
    """Lower a custom-dtype if_then_else by calling the tir.if_then_else intrinsic.

    Unlike a function lowered by create_lower_func, this calls the TVM
    intrinsic tir.if_then_else directly, forwarding the op's arguments.
    The returned call has a uint dtype of the same width (and lane count)
    as the custom type.

    Parameters
    ----------
    ite_op : Op
        The if_then_else op over a registered custom datatype.
    """
    t = tvm.DataType(ite_op.dtype)
    assert get_type_registered(t.type_code)
    uint_dtype = "uint" + str(t.bits)
    if t.lanes > 1:
        uint_dtype += "x" + str(t.lanes)
    return call_intrin(
        uint_dtype,
        "tir.if_then_else",
        convert(ite_op.args[0]),
        convert(ite_op.args[1]),
        convert(ite_op.args[2]),
    )
def lower_call_pure_extern(op):
    """Lower a custom-dtype call via the tir.call_pure_extern intrinsic.

    Unlike a function lowered by create_lower_func, this calls the TVM
    intrinsic tir.call_pure_extern directly, forwarding the op's arguments.
    The returned call has a uint dtype of the same width (and lane count)
    as the custom type.

    Parameters
    ----------
    op : Op
        The call_pure_extern op over a registered custom datatype.
    """
    t = tvm.DataType(op.dtype)
    assert get_type_registered(t.type_code)
    uint_dtype = "uint" + str(t.bits)
    if t.lanes > 1:
        uint_dtype += "x" + str(t.lanes)
    return call_intrin(uint_dtype, "tir.call_pure_extern", *op.args)
| 12,348 | 32.375676 | 97 | py |
tvm | tvm-main/python/tvm/target/tag.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Target tags"""
from typing import Any, Dict, Optional
from . import _ffi_api
from .target import Target
def list_tags() -> Optional[Dict[str, Target]]:
    """Returns a dict of tags, which maps each tag name to its corresponding target.

    Returns
    -------
    tag_dict : Optional[Dict[str, Target]]
        The dict of tags mapping each tag name to its corresponding target.
        None if TVM is built in runtime-only mode.
    """
    # The FFI symbol is absent in runtime-only builds.
    list_fn = getattr(_ffi_api, "TargetTagListTags", None)
    if list_fn is None:
        return None
    return list_fn()
def register_tag(name: str, config: Dict[str, Any], override: bool = False) -> Optional[Target]:
    """Add a user-defined tag into the target tag registry.

    Parameters
    ----------
    name: str
        Name of the target, e.g. "nvidia/gtx1080ti"
    config : Dict[str, Any]
        The config dict used to create the target
    override: bool
        A boolean flag indicating if overriding existing tags are allowed.
        If False and the tag has been registered already, an exception will be thrown.

    Returns
    -------
    target : Optional[Target]
        The target corresponding to the tag
        None if TVM is built in runtime-only mode.

    Examples
    --------
    .. code-block:: python

        register_tag("nvidia/gtx1080ti", config={
            "kind": "cuda",
            "arch": "sm_61",
        })
    """
    # The FFI symbol is absent in runtime-only builds.
    add_fn = getattr(_ffi_api, "TargetTagAddTag", None)
    if add_fn is None:
        return None
    return add_fn(name, config, override)
# To check the correctness of all registered tags, the call is made in library loading time.
list_tags()
# We purposely maintain all tags in the C++ side to support pure C++ use cases,
# and the Python API is only used for fast prototyping.
# Example of a Python-side tag: maps "nvidia/gtx1080ti" to a CUDA target with arch sm_61.
register_tag(
    "nvidia/gtx1080ti",
    config={
        "kind": "cuda",
        "arch": "sm_61",
    },
)
| 2,661 | 31.463415 | 96 | py |
tvm | tvm-main/python/tvm/target/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.target"""
import tvm._ffi
# Populate this module with the C++-registered global functions under the "target" namespace.
tvm._ffi._init_api("target", __name__)
| 872 | 38.681818 | 62 | py |
tvm | tvm-main/python/tvm/target/generic_func.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Generic function."""
import tvm._ffi
try:
from decorator import decorate
except ImportError:
# Allow decorator to be missing in runtime
if not tvm._ffi.base._RUNTIME_ONLY:
raise
from tvm.runtime import Object
from .target import Target
from . import _ffi_api
@tvm._ffi.register_object
class GenericFunc(Object):
    """A reference to a C++ GenericFunc node.

    A GenericFunc bundles a default implementation together with
    per-target specializations; invoking the object dispatches to the
    implementation registered for the target that is currently active.

    Note
    ----
    Instances should never be constructed from Python; they are only
    ever obtained as return values from calls into C++.
    """

    def __call__(self, *args):
        """Invoke the implementation selected for the current target."""
        return _ffi_api.GenericFuncCallFunc(self, *args)

    def set_default(self, func, allow_override=False):
        """Install the fallback used when no specialization matches
        the current target.

        Parameters
        ----------
        func : function
            The fallback (default) implementation.

        allow_override : bool
            If True, an already-installed default may be replaced.
        """
        _ffi_api.GenericFuncSetDefault(self, func, allow_override)

    def register(self, func, key_list, allow_override=False):
        """Attach a specialization of this GenericFunc.

        Parameters
        ----------
        func : function
            The implementation to attach.

        key_list : str or list of str
            Target key(s) under which ``func`` is registered.

        allow_override : bool, optional
            If True, existing registrations for the same key may be
            replaced.
        """
        # Normalize a single key into a list before crossing the FFI boundary.
        keys = [key_list] if isinstance(key_list, str) else key_list
        _ffi_api.GenericFuncRegisterFunc(self, func, keys, allow_override)

    def get_packed_func(self):
        """Return the PackedFunc chosen for the current target.

        Returns
        -------
        func : PackedFunc
            The specialization matching the current target, or the
            default implementation when nothing matches.
        """
        return _ffi_api.GenericFuncGetPackedFunc(self)
def get_native_generic_func(name):
    """Look up a generic function in the global registry.

    When nothing is registered under the given name yet, a fresh
    generic function is created and registered.

    Parameters
    ----------
    name : string
        The name of the generic function to fetch.

    Returns
    -------
    func : GenericFunc
        The generic function registered under ``name``.
    """
    node = _ffi_api.GenericFuncGetGlobal(name)
    return node
def override_native_generic_func(func_name):
    """Override a generic function defined in C++

    Generic function allows registration of further functions
    that can be dispatched on current target context.
    If no registered dispatch is matched, the fdefault will be called.

    Parameters
    ----------
    func_name : string
        The name of the generic func to be overridden

    Returns
    -------
    fgeneric : function
        A wrapped generic function.

    Example
    -------
    .. code-block:: python

      import tvm
      # wrap function as target generic
      @tvm.target.override_native_generic_func("my_func")
      def my_func(a):
          return a + 1
      # register specialization of my_func under target cuda
      @my_func.register("cuda")
      def my_func_cuda(a):
          return a + 2
      # displays 3, because my_func is called
      print(my_func(2))
      # displays 4, because my_func_cuda is called
      with tvm.target.cuda():
          print(my_func(2))
    """
    # Fetch (or create) the C++-side GenericFunc node with this name.
    generic_func_node = get_native_generic_func(func_name)

    def fdecorate(fdefault):
        """Wrap a target generic function, overriding the previous
        default that was set for the generic function.

        Parameters
        ----------
        fdefault : function
            The default function.

        Returns
        -------
        fgeneric : function
            A wrapped generic function.
        """
        generic_func_node.set_default(fdefault, allow_override=True)

        def register(key, func=None, override=True):
            """Register function to be the dispatch function.

            Parameters
            ----------
            key : str or list of str
                The key to be registered.

            func : function
                The function to be registered.

            override : bool, optional
                Whether override existing registration.

            Returns
            -------
            The register function if ``func`` is not given, otherwise
            the registered function itself.
            """

            def _do_reg(myf):
                # Registration is delegated to the C++ GenericFunc node.
                generic_func_node.register(myf, key, override)
                return myf

            if func:
                return _do_reg(func)
            # Used as a decorator when `func` is not supplied.
            return _do_reg

        def dispatch_func(func, *args, **kwargs):
            # pylint: disable=unused-argument
            """The wrapped dispatch function"""
            if kwargs:
                # The C++ GenericFunc calling convention is positional-only.
                raise RuntimeError(
                    "Keyword arguments cannot be used when invoking generic_func %s" % func_name
                )
            return generic_func_node(*args)

        # `decorate` preserves fdefault's signature/docstring on the wrapper.
        fresult = decorate(fdefault, dispatch_func)
        fresult.fdefault = fdefault
        fresult.register = register
        fresult.generic_func_node = generic_func_node
        return fresult

    return fdecorate
def generic_func(fdefault):
    """Wrap a target generic function.

    A generic function allows further implementations to be registered
    per target key; calls dispatch on the target that is currently in
    scope, falling back to ``fdefault`` when nothing matches.

    Parameters
    ----------
    fdefault : function
        The default function.

    Returns
    -------
    fgeneric : function
        A wrapped generic function.

    Example
    -------
    .. code-block:: python

      import tvm
      # wrap function as target generic
      @tvm.target.generic_func
      def my_func(a):
          return a + 1
      # register specialization of my_func under target cuda
      @my_func.register("cuda")
      def my_func_cuda(a):
          return a + 2
      # displays 3, because my_func is called
      print(my_func(2))
      # displays 4, because my_func_cuda is called
      with tvm.target.cuda():
          print(my_func(2))
    """
    # Maps target key -> registered specialization.
    table = {}
    name = fdefault.__name__

    def register(key, func=None, override=False):
        """Register function to be the dispatch function.

        Parameters
        ----------
        key : str or list of str
            The key to be registered.

        func : function
            The function to be registered.

        override : bool
            Whether override existing registration.

        Returns
        -------
        The register function if ``func`` is not given, otherwise
        the registered function itself.
        """

        def _do_reg(myf):
            keys = [key] if isinstance(key, str) else key
            for each_key in keys:
                if each_key in table and not override:
                    raise ValueError("Key is already registered for %s" % name)
                table[each_key] = myf
            return myf

        return _do_reg(func) if func else _do_reg

    def dispatch_func(func, *args, **kwargs):
        """The wrapped dispatch function"""
        target = Target.current()
        if target is not None:
            for each_key in target.keys:
                if each_key in table:
                    return table[each_key](*args, **kwargs)
        # No active target, or no specialization matched: use the default.
        return func(*args, **kwargs)

    def get_packed_func():
        """The wrapped to get dispatched function"""
        target = Target.current()
        if target is not None:
            for each_key in target.keys:
                if each_key in table:
                    return table[each_key]
        return fdefault

    # `decorate` preserves fdefault's signature/docstring on the wrapper.
    fdecorate = decorate(fdefault, dispatch_func)
    fdecorate.register = register
    fdecorate.fdefault = fdefault
    fdecorate.dispatch_dict = table
    fdecorate.get_packed_func = get_packed_func
    return fdecorate
| 8,938 | 28.308197 | 96 | py |
tvm | tvm-main/python/tvm/target/compilation_config.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Python bindings for creating CompilationConfigs."""
import tvm
from . import _ffi_api
def make_compilation_config(ctxt, target, target_host=None):
    """Build a CompilationConfig for the given target / target_host pair.

    Uses the same representation conventions as the standard build
    interfaces. Intended only for unit testing.
    """
    canonical_targets = tvm.target.Target.canon_multi_target_and_host(target, target_host)
    return _ffi_api.MakeCompilationConfig(ctxt, canonical_targets)
| 1,281 | 44.785714 | 91 | py |
tvm | tvm-main/python/tvm/target/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Target description and codgen module.
TVM's target string is in format ``<target_kind> [-option=value]...``.
Note
----
The list of options include:
- **-device=<device name>**
The device name.
- **-mtriple=<target triple>**
Specify the target triple, which is useful for cross
compilation.
- **-mcpu=<cpuname>**
Specify a specific chip in the current architecture to
generate code for. By default this is infered from the
target triple and autodetected to the current architecture.
- **-mattr=a1,+a2,-a3,...**
Override or control specific attributes of the target,
such as whether SIMD operations are enabled or not. The
default set of attributes is set by the current CPU.
- **-mabi=<abi>**
Generate code for the specified ABI, for example "lp64d".
- **-system-lib**
Build TVM system library module. System lib is a global module that contains
self registered functions in program startup. User can get the module using
:any:`tvm.runtime.system_lib`.
It is useful in environments where dynamic loading api like dlopen is banned.
The system lib will be available as long as the result code is linked by the program.
We can use :py:func:`tvm.target.Target` to create a tvm.target.Target from the target string.
We can also use other specific function in this module to create specific targets.
"""
from .target import Target, create, TargetKind
from .target import (
cuda,
rocm,
mali,
intel_graphics,
arm_cpu,
rasp,
vta,
bifrost,
riscv_cpu,
hexagon,
stm32,
)
from .virtual_device import VirtualDevice
from .compilation_config import make_compilation_config
from .tag import list_tags
from .generic_func import GenericFunc
from .generic_func import generic_func, get_native_generic_func, override_native_generic_func
from . import datatype
from . import codegen
| 2,652 | 31.353659 | 93 | py |
tvm | tvm-main/python/tvm/auto_scheduler/measure_record.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, pointless-string-statement
""" Serialization and other I/O support for measurement records (tuning logs). """
import argparse
import logging
import os
import itertools
import numpy as np
import tvm._ffi
from tvm.runtime import Object
from .measure import MeasureErrorNo, MeasureCallback
from .utils import calc_workload_dis_factor, decode_workload_key
from . import _ffi_api
logger = logging.getLogger("auto_scheduler")
@tvm._ffi.register_object("auto_scheduler.RecordToFile")
class RecordToFile(MeasureCallback):
    """
    A measurement callback that writes measurement records into a file.

    Parameters
    ----------
    filename : str
        File name for this callback to write log to.
    """

    def __init__(self, filename):
        # Make sure the parent directory exists before the C++ side opens
        # the file. exist_ok=True avoids the check-then-create race of the
        # previous os.path.exists() + makedirs() pattern.
        dirname = os.path.dirname(os.path.abspath(filename))
        os.makedirs(dirname, exist_ok=True)
        self.__init_handle_by_constructor__(_ffi_api.RecordToFile, filename)
@tvm._ffi.register_object("auto_scheduler.RecordReader")
class RecordReader(Object):
    """
    Reader of the json log file.

    Parameters
    ----------
    filename : str
        File name for this reader to load log from.
    """

    def __init__(self, filename):
        # A missing file simply yields no records; warn instead of raising.
        if not os.path.exists(filename):
            logger.warning("%s does not exist!", filename)
        # a set to prevent print duplicated message
        self.messages = set()
        self.__init_handle_by_constructor__(_ffi_api.RecordReader, filename)

    def check_workload_key(self, inputs):
        """Check and throw warnings for records with old format workload key.

        Parameters
        ----------
        inputs: List[MeasureInput]
            The measure inputs to be checked.

        Notes
        -----
        This checker could be deprecated in the future.
        """
        for inp in inputs:
            _, args = decode_workload_key(inp.task.workload_key)
            if args is None:
                # Key carries no argument list; nothing to validate.
                continue
            if not args:
                # An empty argument list marks the pre-PR#7317 key format.
                msg = (
                    "MeasureInput with old format workload key %s should be updated "
                    "using the script from https://github.com/apache/tvm/pull/7317."
                    % inp.task.workload_key
                )
                if msg not in self.messages:
                    # Warn only once per distinct message.
                    self.messages.add(msg)
                    logger.warning(msg)

    def read_lines(self, max_lines=None, skip_lines=0):
        """Read multiple lines from the log file.

        Parameters
        ----------
        max_lines : Optional[int]
            The maximum number of lines. None to read all lines.
        skip_lines : int = 0
            Skip the first n lines.

        Returns
        -------
        inputs : List[auto_scheduler.measure.MeasureInput]
            The MeasureInputs loaded from the log file.
        results : List[auto_scheduler.measure.MeasureResult]
            The MeasureResults loaded from the log file.

        Notes
        -----
        Some unimportant and expensive fields in the returned MeasureInput are not deserialized
        for faster read speed (e.g. input.task.compute_dag, input.state.stages).
        If you want to use them, you can call the :code:`recover_measure_input` below
        to rebuild these fields.
        """
        # -1 tells the C++ reader to consume every remaining line.
        inputs, results = _ffi_api.RecordReaderReadLines(
            self, max_lines if max_lines else -1, skip_lines
        )
        self.check_workload_key(inputs)
        return inputs, results

    def __iter__(self):
        # Stream records one at a time; an empty return marks end-of-file.
        while True:
            ret = _ffi_api.RecordReaderReadNext(self)
            if not ret:
                break
            self.check_workload_key([ret[0]])
            yield ret[0], ret[1]  # (input, result)
def load_record_from_string(record):
    """
    Deserialize one measurement record from its string form.

    Parameters
    ----------
    record: str
        A record string, including the serialized MeasureInput and MeasureResult.

    Returns
    -------
    ret: Tuple[MeasureInput, MeasureResult]
        A tuple of MeasureInput, MeasureResult.
    """
    pair = _ffi_api.ReadMeasureRecord(record)
    return pair
def dump_record_to_string(inp, res):
    """
    Serialize one measurement record into its string form.

    Parameters
    ----------
    inp: MeasureInput
        The measure input.
    res: MeasureResult
        The measure result.

    Returns
    -------
    ret: str
        The dumped string.
    """
    serialized = _ffi_api.WriteMeasureRecords(inp, res)
    return serialized
def load_records(filename):
    """
    Load measurement records from a file.

    Parameters
    ----------
    filename : str
        File name to load log from.

    Returns
    -------
    logs : List[auto_scheduler.measure.MeasureInput, auto_scheduler.measure.MeasureResult]

    Notes
    -----
    Some unimportant and expensive fields in the returned MeasureInput are not deserialized
    for faster read speed (e.g., input.task.compute_dag, input.state.stages).
    If you want to use them, you can call the :code:`recover_measure_input` below
    to rebuild these fields.
    """
    inputs, results = RecordReader(filename).read_lines()
    return zip(inputs, results)
def save_records(filename, inputs, results):
    """
    Append measure records to file.

    Parameters
    ----------
    filename : str
        File name to write log to.
    inputs: List[MeasureInputs]
        The MeasureInputs to be written.
    results: List[MeasureResults]
        The MeasureResults to be written.
    """
    # exist_ok=True avoids the check-then-create race of the previous
    # os.path.exists() + makedirs() pattern (e.g. several workers saving
    # into the same new directory at once).
    dirname = os.path.dirname(os.path.abspath(filename))
    os.makedirs(dirname, exist_ok=True)
    _ffi_api.SaveRecords(filename, inputs, results)
def load_best_record(filename, workload_key=None, target=None, include_compatible=False):
    """Return the best measurement pair form a log file. This may return none results if
    there is no legal measure pair with the specified workload_key/target found from the log file.

    Parameters
    ----------
    filename : str
        File name to load log from.
    workload_key : Optional[str]
        The workload key of the compute declaration.
        With `None`, this returns the best measure pair of all workloads.
    target : Optional[tvm.target.Target]
        The target device.
        With `None`, this returns the best measure pair of all target devices.
    include_compatible: bool
        When set to True, all compatible records in the log file will be considered.

    Returns
    -------
    input : auto_scheduler.measure.MeasureInput
        The best State's MeasureInput from this log fine.
    result : auto_scheduler.measure.MeasureResult
        The best State's MeasureResult from this log fine.
    """
    log_reader = RecordReader(filename)
    # float("inf") is the idiomatic "no best yet" sentinel (the old 1e30
    # magic constant could in principle be beaten by a garbage record).
    best_cost = float("inf")
    best_inp = None
    best_res = None

    for inp, res in log_reader:
        if res.error_no != MeasureErrorNo.NO_ERROR:
            continue
        if target and inp.task.target.kind.name != target.kind.name:
            continue

        cost = np.mean([v.value for v in res.costs])
        if workload_key is not None:
            dis_f = calc_workload_dis_factor(
                decode_workload_key(workload_key), decode_workload_key(inp.task.workload_key)
            )
            if dis_f == float("inf"):
                # Incompatible workload; skip entirely.
                continue
            if not include_compatible and dis_f != 1:
                continue

            # Since different workloads have different FLOPS, we multiply the factor to
            # eliminate this difference, which is basically the concept of throughput.
            cost *= dis_f

        if cost < best_cost:
            best_cost = cost
            best_inp = inp
            best_res = res

    return best_inp, best_res
def distill_record_file(in_file, out_file):
    """
    Pick the best entries from a record file and store them to another file.
    This function distills the useful log entries from a large log file.
    If out_file already exists, the best entries from both
    in_file and out_file will be saved.

    Parameters
    ----------
    in_file: str
        The filename of input
    out_file: str or file
        The filename of output
    """
    # pylint: disable=import-outside-toplevel
    from .dispatcher import ApplyHistoryBest

    context = load_records(in_file)
    dirname = os.path.dirname(os.path.abspath(out_file))
    # exist_ok=True avoids the check-then-create race of exists()+makedirs().
    os.makedirs(dirname, exist_ok=True)
    if os.path.isfile(out_file):
        # Merge the existing output so its best entries are also considered.
        out_context = load_records(out_file)
        context = itertools.chain(context, out_context)

    def measure_input_str_key(inp):
        # The serialized MeasureInput acts as a deduplication key.
        return _ffi_api.SerializeMeasureInput(inp)

    # Dict[target key,
    #   Dict[workload hash,
    #     Dict[workload args, (cost, (MeasureInput, MeasureResult))]]]
    # Full type: Dict[str, Dict[str, Dict[Tuple, Tuple[float, Tuple[Measureinput, MeasureResult]]]]]
    best_records = {}

    for inp, res in context:
        if res.error_no != 0:
            continue

        # Keep the best record for each target and workload.
        costs = [x.value for x in res.costs if isinstance(x, tvm.tir.expr.FloatImm)]
        cost = np.mean(costs)
        for k in inp.task.target.keys:
            entry, _, workload_args = ApplyHistoryBest.get_workload_entry(
                best_records, k, inp.task.workload_key
            )
            if workload_args not in entry or cost < entry[workload_args][0]:
                entry[workload_args] = (cost, (inp, res))

    # Remove duplications by multiple target keys.
    out_records = {}
    for target_entry in best_records.values():
        for workload_entry in target_entry.values():
            for _, (inp, res) in workload_entry.values():
                out_records[measure_input_str_key(inp)] = (inp, res)

    inputs = []
    results = []
    for inp, res in out_records.values():
        inputs.append(inp)
        results.append(res)

    # Truncate/create the output file, then append the distilled records.
    # The previous bare `open(out_file, "w")` leaked the file handle
    # (relying on GC to close it); the context manager closes it promptly.
    with open(out_file, "w"):
        pass
    save_records(out_file, inputs, results)
    logger.info("Extract %d best records from %s to %s", len(inputs), in_file, out_file)
def main():
    """Entry point for the command-line interface."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--mode", choices=["distill"], default="distill")
    cli.add_argument("-i", "--input", type=str, help="input file")
    cli.add_argument("-o", "--output", type=str, default=None, help="output file")
    parsed = cli.parse_args()

    logging.basicConfig()
    logger.setLevel(logging.INFO)

    if parsed.mode == "distill":
        # Default the output path to "<input>.best.json" when not given.
        if not parsed.output:
            parsed.output = parsed.input + ".best.json"
        distill_record_file(parsed.input, parsed.output)
"""
Usage:
* Distill the best entries from a large log file
e.g. python -m tvm.auto_scheduler.measure_record --mode distill -i input.json
"""
if __name__ == "__main__":
main()
| 11,650 | 30.746594 | 100 | py |
tvm | tvm-main/python/tvm/auto_scheduler/workload_registry.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""
Workload registration and serialization.
We use a json string to represent a workload (a computation graph).
The format of the string is `[func_name, [args...]]`.
The dag should be the return value of this `func_name(*args)`.
Rationale: The workload is actually a compute dag defined by tvm dsl. But serializing compute dags
and matching them efficiently is not easy. Therefore, we use the above string to encode a compute
dag.
These strings are efficient for serialization/matching and won't be too long.
When we need the dag, we decode the string and call the function, which will return the dag.
"""
import json
import logging
import pickle
import tvm._ffi
from tvm.runtime._ffi_node_api import LoadJSON, SaveJSON
from .utils import deserialize_args, get_func_name, serialize_args
logger = logging.getLogger("auto_scheduler")
# Global workload function and hash key registry
# It stores two types of workload:
# 1. User registered tasks. This type of workload is registered
# by the decorator "register_workload"
# 2. Extracted tasks from a relay program. This type of workload is
# registered by function "register_workload_tensors".
#
# For 1, the dictionary maps a function name to its function pointer
# For 2, the dictionary maps a hash key to a list of input/output tensors
WORKLOAD_FUNC_REGISTRY = {}
def register_workload(func_name, f=None, override=False):
    """Register a function that generates a certain workload.

    The input function should take hashable and jsonable arguments
    (int, float, tuple of int, tvm.tensor.Tensor, ...) and return a list of tvm.tensor.Tensor.

    Parameters
    ----------
    func_name : Union[Function, str]
        The generation function that returns the compute declaration Tensors or its function name.
    f : Optional[Function]
        The generation function to be registered.
    override : boolean = False
        Whether to override existing entry.

    Examples
    --------
    .. code-block:: python

      @auto_scheduler.register_workload
      def matmul(N, M, K):
          A = te.placeholder((N, K), name='A')
          B = te.placeholder((K, M), name='B')
          k = te.reduce_axis((0, K), name='k')
          C = te.compute((N, M), lambda i, j: tvm.sum(A[i][k] * B[k][j], axis=[k]), name='C')
          return [A, B, C]
    """
    global WORKLOAD_FUNC_REGISTRY

    # Bare-decorator form: the "name" argument is actually the function.
    if callable(func_name):
        f = func_name
        func_name = get_func_name(f)
    if not isinstance(func_name, str):
        raise ValueError("expect string function name")

    def _store(impl):
        """Record ``impl`` under ``func_name`` in the global registry."""
        if func_name in WORKLOAD_FUNC_REGISTRY and not override:
            raise RuntimeError(f"{func_name} has been registered already")
        WORKLOAD_FUNC_REGISTRY[func_name] = impl
        return impl

    # Direct call vs. decorator-factory usage.
    return _store(f) if f else _store
def register_workload_tensors(workload_key, tensors, override=True):
    """Register a workload by provding input/output tensors. Since this function is used
    when extracting/deserializing tasks, it expects duplicated registrations by default.

    Parameters
    ----------
    workload_key: str
        The wokrload key of the compute DAG in JSON string.
    tensors: List[Tensor]
        The input/output tensors of a compute DAG
    override : boolean = True
        Whether to override existing entry.

    Returns
    -------
    workload_key: str
        The wokrload key of the compute DAG in JSON string.
    """
    # Store the tensor list itself (not a function) under the full key.
    registrar = register_workload(workload_key, override=override)
    registrar(tensors)
    return workload_key
def make_workload_key(func, args):
    """Make a workload key by function and arguments.

    Parameters
    ----------
    func : Union[Function, str]
        The function that returns the compute declaration Tensors.
        Can be a function or the function name.
    args : Args
        The args of the function.

    Returns
    -------
    workload_key : str
        The workload key of the function.
    """
    global WORKLOAD_FUNC_REGISTRY

    if callable(func):
        func_name = get_func_name(func)
    elif isinstance(func, str):
        func_name = func
    else:
        raise ValueError(
            "Invalid function: "
            + str(func)
            + " . `make_workload_key` expects a callable function or its function name"
        )

    # Idiomatic membership test (was `if not func_name in ...`).
    if func_name not in WORKLOAD_FUNC_REGISTRY:
        raise ValueError(
            f"{func} is not registered. "
            f"Please register it with @auto_scheduler.register_workload"
        )

    args = serialize_args(args)
    return json.dumps((func_name,) + args)
@tvm._ffi.register_func("auto_scheduler.workload_key_to_tensors")
def workload_key_to_tensors(workload_key):
    """Get the input/output tensors from the workload key.

    This method is usually used to create a ComputeDAG by workload key.

    Parameters
    ----------
    workload_key : str
        The input workload key in JSON string. The format is either (func_name, arguments...)
        for compute functions, or (hash, shapes...) for ComputeDAG.

    Returns
    -------
    tensors : List[Tensor]
        The registered compute declaration Tensors.
    """
    global WORKLOAD_FUNC_REGISTRY

    # ComputeDAGs are registered under the entire workload key (hash and
    # arguments are fixed), so an exact-key hit returns the stored tensors.
    if workload_key in WORKLOAD_FUNC_REGISTRY:
        return WORKLOAD_FUNC_REGISTRY[workload_key]

    # Compute functions are registered by name only; decode the key,
    # look the function up, and call it with the decoded arguments.
    func_name, *raw_args = json.loads(workload_key)
    entry = WORKLOAD_FUNC_REGISTRY[func_name]
    assert callable(entry)
    tensors = entry(*deserialize_args(raw_args))
    return list(tensors) if isinstance(tensors, tuple) else tensors
def serialize_workload_registry_entry(workload_key):
    """
    Serialize a workload registry entry.

    This is used when the start method of multiprocessing is spawn.
    We need to serialize the entry and register it in the new processes.

    Parameters
    ----------
    workload_key : str
        The workload key

    Returns
    -------
    data: Tuple
        The serialized pickable data
    """
    global WORKLOAD_FUNC_REGISTRY

    # Full-key entries (ComputeDAG tensors) are stored verbatim; otherwise
    # the registry key is just the function name component of the key.
    if workload_key in WORKLOAD_FUNC_REGISTRY:
        name = workload_key
    else:
        name = json.loads(workload_key)[0]

    value = WORKLOAD_FUNC_REGISTRY[name]
    if callable(value):
        return name, value
    # Non-callable entries are converted to a JSON string for transfer.
    # pylint: disable=assignment-from-no-return
    return name, SaveJSON(value)
def deserialize_workload_registry_entry(data):
    """
    Deserialize a workload registry entry.

    This should be used along with :code:`serialize_workload_registry_entry`

    Parameters
    ----------
    data: Tuple
        The return value of :code:`serialize_workload_registry_entry`
    """
    global WORKLOAD_FUNC_REGISTRY

    name, value = data
    if name in WORKLOAD_FUNC_REGISTRY:
        # Already present; keep the existing entry.
        return
    if not callable(value):
        # Non-callable entries were shipped as JSON; decode them back.
        # pylint: disable=assignment-from-no-return
        value = LoadJSON(value)
    WORKLOAD_FUNC_REGISTRY[name] = value
def save_workload_func_registry(filename):
    """Dump workload function registry to a pickle binary file.

    Parameters
    ----------
    filename : str
        The filename to dump workload function registry to.
    """
    global WORKLOAD_FUNC_REGISTRY

    # Use a context manager so the file handle is flushed and closed
    # deterministically; `pickle.dump(..., open(...))` leaked the handle.
    with open(filename, "wb") as fout:
        pickle.dump(WORKLOAD_FUNC_REGISTRY, fout)
def load_workload_func_registry(filename):
    """Load workload function registry from a pickle binary file.

    Parameters
    ----------
    filename : str
        The filename to load workload function registry from.
    """
    global WORKLOAD_FUNC_REGISTRY

    # Use a context manager so the file handle is closed deterministically;
    # `pickle.load(open(...))` leaked the handle until garbage collection.
    with open(filename, "rb") as fin:
        WORKLOAD_FUNC_REGISTRY = pickle.load(fin)
| 8,827 | 30.41637 | 98 | py |
tvm | tvm-main/python/tvm/auto_scheduler/relay_integration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name
"""
Integrate auto_scheduler into relay. It implements the following items:
1. Extract search tasks from a relay program
2. Provide auto-scheduling for all TOPI compute functions
"""
import json
import logging
import threading
import traceback
import tvm
from tvm import autotvm, transform
from tvm._ffi.base import TVMError
from tvm.ir.transform import PassContext
from tvm.runtime import convert_to_object
from tvm.target import Target
from tvm.te.tensor import ComputeOp, PlaceholderOp, Tensor
from tvm.tir import Reduce
from tvm.tir import expr as _expr
from . import _ffi_api
from .compute_dag import ComputeDAG, LayoutRewriteOption
from .dispatcher import DispatchContext
from .search_task import SearchTask
from .utils import get_const_tuple
from .workload_registry import register_workload_tensors
logger = logging.getLogger("auto_scheduler")
def call_all_topi_funcs(mod, params, target, error_list, opt_level=3):
    """Call all TOPI compute to extract auto_scheduler tasks in a Relay program.

    Lowers ``mod`` with the relay VM compiler under a PassContext that
    enables the auto-scheduler backend; compilation errors are appended to
    ``error_list`` (as formatted tracebacks) instead of being raised.
    """
    # pylint: disable=import-outside-toplevel
    from tvm import relay

    # Turn off AutoTVM config not found warnings
    old_autotvm_silent = autotvm.GLOBAL_SCOPE.silent
    autotvm.GLOBAL_SCOPE.silent = True

    with transform.PassContext(
        opt_level=opt_level,
        config={
            "relay.backend.use_auto_scheduler": True,
        },
        disabled_pass={"AutoSchedulerLayoutRewrite"},
    ):
        compiler = relay.vm.VMCompiler()
        if params:
            compiler.set_params(params)
        # Accept either a bare relay.Function or a full IRModule.
        mod = tvm.IRModule.from_expr(mod) if isinstance(mod, relay.Function) else mod
        try:
            compiler.lower(mod, target)
        except TVMError:
            # Record the traceback for the caller; extraction continues.
            error_list.append(f"{traceback.format_exc()}")
        finally:
            # Always restore the previous AutoTVM verbosity setting.
            autotvm.GLOBAL_SCOPE.silent = old_autotvm_silent
def extract_tasks(
    mod,
    params,
    target,
    target_host=None,
    hardware_params=None,
    include_simple_tasks=False,
    dump_workload_to_dag_log=None,
    opt_level=3,
    other_targets=None,
):
    """Extract tuning tasks from a relay program.

    Parameters
    ----------
    mod: tvm.IRModule or relay.function.Function
        The module or function to tune
    params: dict of str to numpy array
        The associated parameters of the program
    target: Union[tvm.target.Target, str]
        The compilation target
    target_host: Optional[Union[tvm.target.Target, str]]
        The host compilation target
    hardware_params : Optional[HardwareParams]
        Hardware parameters used for the search tasks
    include_simple_tasks: bool
        Whether to extract simple tasks that do not include complicated ops.
    dump_workload_to_dag_log: Optional[str]
        A file to dump an association between the workload keys and the actual DAG
    opt_level : Optional[int]
        The optimization level of the task extractions.
    other_targets: Optional[List[tvm.target.Target]]
        Other targets for call_all_topi_funcs, e.g., cutlass target.

    Returns
    -------
    tasks: List[SearchTask]
        The tasks in this network
    weights: List[int]
        The weight (i.e. the number of appearance) of extracted tasks
    """
    # pylint: disable=import-outside-toplevel

    target, target_host = Target.canon_target_and_host(target, target_host)

    # Run the compiler to collect all TOPI calls during compilation.
    env = TracingEnvironment(
        TracingMode.EXTRACT_TASK if include_simple_tasks else TracingMode.EXTRACT_COMPLEX_TASK_ONLY
    )

    # Mute the dispatch context (verbose=0) for the duration of the trace;
    # restored after the build thread finishes.
    dispatch_ctx = DispatchContext.current
    old_verbose = dispatch_ctx.verbose
    dispatch_ctx.verbose = 0

    targets = [target]
    if other_targets is not None:
        targets += other_targets

    errors = []
    with env:
        # Wrap build call in a new thread to avoid the conflict
        # between python's multiprocessing and tvm's thread pool
        build_thread = threading.Thread(
            target=call_all_topi_funcs, args=(mod, params, targets, errors, opt_level)
        )
        build_thread.start()
        build_thread.join()

    if errors:
        error_strings = ["Task extraction had the following errors:"] + errors
        raise TVMError("\n".join(error_strings))

    dispatch_ctx.verbose = old_verbose

    # create search tasks
    tasks = []
    weights = []
    for wkl_key, (weight, func_names) in env.wkl_key_to_weight.items():
        tasks.append(
            SearchTask(
                workload_key=wkl_key,
                target=target,
                hardware_params=hardware_params,
                # When auto scheduler is used in end to end network, try to apply layout rewrite
                # to improve the overall performance
                layout_rewrite_option=LayoutRewriteOption.get_target_default(target, True),
                task_inputs=(
                    env.wkl_key_to_input_names[wkl_key]
                    if wkl_key in env.wkl_key_to_input_names
                    else None
                ),
                task_inputs_save_to_file=True,
                desc=",".join(func_names),
            )
        )
        weights.append(int(weight))

    if dump_workload_to_dag_log is not None:
        with open(dump_workload_to_dag_log, "w") as f:
            json.dump({task.workload_key: str(task.compute_dag) for task in tasks}, f)

    return tasks, weights
class TracingMode:
    """Enumeration of the tracing modes used by ``TracingEnvironment``."""

    # Record every topi call so each becomes a search task.
    EXTRACT_TASK = 0
    # Like EXTRACT_TASK, but drop tasks without complex (reduce) ops.
    EXTRACT_COMPLEX_TASK_ONLY = 1
    # Record topi calls only to prepare the layout rewrite.
    PREPARE_LAYOUT_REWRITE = 2
class TracingEnvironment:
    """Global environment that records all topi function calls during tracing."""

    # The currently-active environment, set by __enter__ / cleared by __exit__.
    current = None

    def __init__(self, tracing_mode):
        self.tracing_mode = tracing_mode
        self.relay_disable_build_cache = "false"
        # func name -> workload key
        self.func_name_to_wkl_key = {}
        # workload key -> (weight, set of func names sharing the key)
        self.wkl_key_to_weight = {}
        # workload key -> list of special task input names
        self.wkl_key_to_input_names = {}

    def __enter__(self):
        TracingEnvironment.current = self
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        TracingEnvironment.current = None

    def add_workload_key(self, func_name, workload_key):
        """Register the workload key of a search task.

        Parameters
        ----------
        func_name: str
            The function name of the task.
        workload_key: str
            The workload key of a task.
        """
        self.func_name_to_wkl_key[func_name] = workload_key
        # Bump the weight and remember every function name mapped to this key.
        weight, names = self.wkl_key_to_weight.get(workload_key, (0, set()))
        names.add(func_name)
        self.wkl_key_to_weight[workload_key] = (weight + 1, names)

    def add_workload_input_names(self, workload_key, input_names):
        """Record the special task inputs of a workload.

        Parameters
        ----------
        workload_key : str
            The workload key of a task.
        input_names : List[str]
            A list of input names.
        """
        self.wkl_key_to_input_names[workload_key] = input_names
@tvm._ffi.register_func("auto_scheduler.enter_layout_rewrite")
def enter_layout_rewrite():
    """Enter the layout-rewrite tracing environment (installed as `current`)."""
    TracingEnvironment(TracingMode.PREPARE_LAYOUT_REWRITE).__enter__()
@tvm._ffi.register_func("auto_scheduler.exit_layout_rewrite")
def exit_layout_rewrite():
    """Exit the layout-rewrite tracing environment (clears `current`)."""
    TracingEnvironment.current.__exit__(None, None, None)
def traverse_to_get_io_tensors(outs):
    """Traverse from a list of output tensors to get input/output tensors and
    other useful information.

    Parameters
    ----------
    outs: List[Tensor]
        The output tensors

    Returns
    -------
    io_tensors: List[Tensor]
        The input and output tensors with static shape
    has_layout_free: bool
        Whether the compute DAG has layout_free placeholders
    has_complex_op: bool
        Whether the topi compute function includes at least one complex (reduce) op
    """
    layout_free_ops = []
    inputs = []
    has_complex_op = False
    visited = set()

    def traverse(t):
        nonlocal has_complex_op

        # We cannot directly add tensors to the set, because the comparison of
        # two tensors with ndim=0 is ambiguous; use the handle address instead.
        assert t.handle is not None
        if t.handle.value in visited:
            return
        if isinstance(t.op, PlaceholderOp):
            inputs.append(t)
        elif isinstance(t.op, ComputeOp):
            # Generator expression avoids building a throwaway list (was any([...])).
            has_complex_op = has_complex_op or any(isinstance(e, Reduce) for e in t.op.body)
            if "layout_free_placeholders" in t.op.attrs:
                layout_free_ops.append(t.op)
            for x in t.op.input_tensors:
                traverse(x)
        visited.add(t.handle.value)

    for t in outs:
        traverse(t)

    io_tensors = inputs + list(outs)
    for tensor in io_tensors:
        # Reject the compute if any of its I/O tensors has dynamic shape.
        if any(not isinstance(v, int) for v in get_const_tuple(tensor.shape)):
            return ([], False, False)

    return (io_tensors, len(layout_free_ops) > 0, has_complex_op)
@tvm._ffi.register_func("auto_scheduler.relay_integration.auto_schedule_topi_compute")
def auto_schedule_topi(func_name, outs):
    """Use auto-scheduler to schedule any topi compute function.

    Note: This is used internally for relay integration. Do
    not use this as a general user-facing API.

    Parameters
    ----------
    func_name: str
        The name of the function being scheduled.

    outs: List[Tensor]
        The output tensors of topi compute functions

    Returns
    -------
    sch: Optional[te.Schedule]
        A tuned schedule or none (if not tuned) in the final build mode;
        None in the tracing mode so that the fallback topi schedule will be used.
    """
    # pylint: disable=import-outside-toplevel
    from tvm.auto_scheduler.measure import (  # lazily import to avoid recursive dependency
        prepare_input_map,
    )

    io_tensors, has_layout_free, has_complex_op = traverse_to_get_io_tensors(outs)
    if not io_tensors:  # The compute includes dynamic shapes which are not supported yet.
        return None

    try:
        dag = ComputeDAG(io_tensors)
    except tvm.error.TVMError as err:
        logger.info("Failed to create a ComputeDAG for auto_scheduler: %s", str(err))
        return None

    workload_key = dag.workload_key()
    key = register_workload_tensors(workload_key, io_tensors)
    target = tvm.target.Target.current()

    dispatch_ctx = DispatchContext.current
    state = dispatch_ctx.query(target, key, has_complex_op, dag, func_name)
    schedule = None

    env = TracingEnvironment.current
    if env is None:
        # in the final build mode
        if state is None:
            return None
        schedule, _ = dag.apply_steps_from_state(state)
        return schedule

    if env.tracing_mode in [TracingMode.EXTRACT_TASK, TracingMode.EXTRACT_COMPLEX_TASK_ONLY]:
        # in the task extraction mode
        if has_complex_op or env.tracing_mode == TracingMode.EXTRACT_TASK:
            env.add_workload_key(func_name, key)
            input_map = prepare_input_map(io_tensors, workload_key)
            if input_map:
                env.add_workload_input_names(key, list(input_map.values()))
    elif env.tracing_mode == TracingMode.PREPARE_LAYOUT_REWRITE:
        # in prepare_layout_rewrite mode
        if (
            LayoutRewriteOption.get_target_default(target, True) != LayoutRewriteOption.NO_REWRITE
            and has_layout_free
        ):
            if state is None:
                return None

            # rewrite the layout and update the context for the new dag
            new_dag = dag.rewrite_layout_from_state(state)
            new_key = new_dag.workload_key()
            if new_key != key:
                dispatch_ctx.update(target, new_key, state)
    else:
        # Fix: tracing_mode is an int; concatenating it to a str raised
        # TypeError instead of the intended ValueError. Convert explicitly.
        raise ValueError("Invalid tracing mode: " + str(env.tracing_mode))

    return schedule
@tvm._ffi.register_func("auto_scheduler.relay_integration.te_compiler_update_weights")
def te_compiler_update_weights(function_weights):
    """Callback used by the TE compiler to correct task weights after extraction.

    The TE compiler caches functions and compiles each one only once, so every
    extracted task initially carries weight 1. At the end of compilation the TE
    compiler invokes this callback with its cache use counts, which replace the
    weights recorded during tracing.

    Parameters
    ----------
    function_weights: Dict[str, int]
        Mapping from function names to their weights.
    """
    env = TracingEnvironment.current
    if env is None:
        return

    # Rebuild the weight map from the use counts reported by the TE compiler.
    env.wkl_key_to_weight = {}
    for func_name, weight in function_weights.items():
        # Functions that were not traced (e.g., ones without reduction) are
        # not of interest for task extraction — skip them.
        wkl_key = env.func_name_to_wkl_key.get(func_name)
        if wkl_key is None:
            continue

        # A function appearing multiple times in a model is renamed to keep
        # names unique, so weights are accumulated per workload key (derived
        # from the function's TE compute) rather than per function name.
        old_weight, names = env.wkl_key_to_weight.get(wkl_key, (0, set()))
        names.add(func_name)
        env.wkl_key_to_weight[wkl_key] = (old_weight + weight, names)
def tensor_no_check_call(self, *indices):
    """An indexing function without any check.

    This is the same as `tvm.te.Tensor::__call__` except that the safety
    check is removed.
    """

    def to_expr(index):
        # IterVars are lowered to their underlying loop variable.
        if isinstance(index, _expr.PrimExpr):
            return index
        if isinstance(index, _expr.IterVar):
            return index.var
        raise ValueError("The indices must be expression")

    args = [to_expr(index) for index in convert_to_object(indices)]
    return _expr.ProducerLoad(self, args)
def remove_index_check(tensor):
    """Remove the safety check in the indexing function for a tensor.

    This is done by monkey patching its indexing function.
    After removing the check, we are allowed to create a
    temporary wrong IR and fix it later in other places.

    Parameters
    ----------
    tensor: Tensor
        The tensor to remove index check.
    """
    # Bind the unchecked indexing function to this instance as its __call__.
    setattr(tensor, "__call__", tensor_no_check_call.__get__(tensor, Tensor))
def rewrite_compute_body(compute_tensor, new_layout):
    """Rewrite the body of a ComputeOp according to a new layout of a placeholder"""
    op = compute_tensor.op

    # Locate the single layout-free placeholder the rewrite applies to.
    layout_free_placeholders = op.attrs["layout_free_placeholders"]
    assert len(layout_free_placeholders) == 1, "Only support one layout free placeholder"
    placeholder_op = layout_free_placeholders[0].op

    # Rewrite every index expression in the body for the new layout.
    new_body = [
        _ffi_api.RewriteIndexForNewLayout(placeholder_op, new_layout, expr) for expr in op.body
    ]
    op_node = tvm.te._ffi_api.ComputeOp(op.name, op.tag, op.attrs, op.axis, new_body)

    num = op_node.num_outputs
    outputs = tuple(op_node.output(i) for i in range(num))
    return outputs[0] if num == 1 else outputs
def rewrite_tensor_shape(tensor, shape):
    """Rewrite the tensor shape in place via the FFI."""
    _ffi_api.RewriteTensorShape(tensor, shape)
def is_auto_scheduler_enabled():
    """Return whether the auto-scheduler is enabled.

    Returns
    -------
    enabled: bool
        Whether the auto-scheduler is enabled
    """
    # Read the flag from the current pass context; defaults to False when unset.
    return PassContext.current().config.get(
        "relay.backend.use_auto_scheduler",
        False,
    )
| 17,027 | 33.469636 | 99 | py |
tvm | tvm-main/python/tvm/auto_scheduler/search_policy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The search policies of TVM auto-scheduler.
The auto-scheduler constructs a search space according to the compute declaration.
It then randomly samples programs from the search space and uses evolutionary search with a
learned cost model to fine tune the sampled programs.
The final optimized programs are sent to actual hardware for measurement.
The above process is repeated until the auto-scheduler runs out of time budget.
Reference:
L. Zheng, C. Jia, M. Sun, Z. Wu, C. Yu, et al. "Ansor : Generating High-Performance Tensor
Programs for Deep Learning." (OSDI 2020).
"""
import random
import tvm._ffi
from tvm.runtime import Object
from .cost_model import RandomModel
from . import _ffi_api
@tvm._ffi.register_object("auto_scheduler.SearchCallback")
class SearchCallback(Object):
    """Base class of callback functions invoked before or after the search process."""
@tvm._ffi.register_object("auto_scheduler.PreloadMeasuredStates")
class PreloadMeasuredStates(SearchCallback):
    """A SearchCallback to load measured states from the log file for a search policy.

    This can resume the state of the search policy:
        - Making sure an already measured state in former searches will never be measured again.
        - The history states can be used to speed up the search process(e.g. SketchPolicy uses
          history states as starting point to perform Evolutionary Search).

    Parameters
    ----------
    filename : str
        The name of the record file.
    """

    def __init__(self, filename):
        self.__init_handle_by_constructor__(_ffi_api.PreloadMeasuredStates, filename)
@tvm._ffi.register_object("auto_scheduler.PreloadCustomSketchRule")
class PreloadCustomSketchRule(SearchCallback):
    """
    A SearchCallback for SketchSearchPolicy that allows users to add
    custom sketch rule.

    Notes
    -----
    This is an advanced feature. Make sure you're clear how it works and this should only be used
    in SketchSearchPolicy.

    Parameters
    ----------
    meet_condition_func: Callable
        A function with `(policy, state, stage_id) -> int`. Should return one of the result
        enumeration.
    apply_func: Callable
        A function with `(policy, state, stage_id) -> [[State, int], ...]`.
    rule_name: str = "CustomSketchRule"
        The name of this custom sketch rule.
    """

    # Result enumeration of the condition function.
    PASS = 0  # Skip this rule and continue to try the next rules.
    APPLY = 1  # Apply this rule and continue to try the next rules.
    APPLY_AND_SKIP_REST = 2  # Apply this rule and skip the rest rules.

    def __init__(self, meet_condition_func, apply_func, rule_name="CustomSketchRule"):
        self.__init_handle_by_constructor__(
            _ffi_api.PreloadCustomSketchRule, meet_condition_func, apply_func, rule_name
        )
@tvm._ffi.register_object("auto_scheduler.SearchPolicy")
class SearchPolicy(Object):
    """The base class of search policies."""

    def continue_search_one_round(self, num_measure, measurer):
        """
        Continue the search by doing an additional search round.

        Parameters
        ----------
        num_measure: int
            The number of programs to measure in this round
        measurer: ProgramMeasurer
            The program measurer to measure programs

        Returns
        -------
        inputs: List[MeasureInput]
            The inputs of measurements in this search round
        results: List[MeasureResult]
            The results of measurements in this search round
        """
        return _ffi_api.SearchPolicyContinueSearchOneRound(self, num_measure, measurer)

    def set_verbose(self, verbose):
        """
        Set the verbosity level of the search policy.

        Parameters
        ----------
        verbose: int
            The verbosity level
        """
        return _ffi_api.SearchPolicySetVerbose(self, verbose)
@tvm._ffi.register_object("auto_scheduler.EmptyPolicy")
class EmptyPolicy(SearchPolicy):
    """A simple example of the search policy which always returns
    the initial naive schedule (state).

    Parameters
    ----------
    task : SearchTask
        The SearchTask for the computation declaration.
    init_search_callbacks : Optional[List[SearchCallback]]
        Callback functions called before the search process.
    """

    def __init__(self, task, init_search_callbacks=None):
        self.__init_handle_by_constructor__(_ffi_api.EmptyPolicy, task, init_search_callbacks)
@tvm._ffi.register_object("auto_scheduler.SketchPolicy")
class SketchPolicy(SearchPolicy):
    """The search policy that searches in a hierarchical search space defined by sketches.
    The policy randomly samples programs from the space defined by sketches and use evolutionary
    search to fine-tune them.

    Parameters
    ----------
    task : SearchTask
        The SearchTask for the computation declaration.
    program_cost_model : CostModel = RandomModel()
        The cost model to estimate the complete schedules.
    params : Optional[Dict[str, Any]]
        Parameters of the search policy.
        See `src/auto_scheduler/search_policy/sketch_search_policy.h` for the definitions.
        See `DEFAULT_PARAMS` below to find the default values.
    seed : Optional[int]
        Random seed.
    verbose : int = 1
        Verbosity level. 0 for silent, 1 to output information during schedule search.
    init_search_callbacks : Optional[List[SearchCallback]]
        Callback functions called before the search process, usually used to do extra
        initializations.
        Possible callbacks:

        - auto_scheduler.PreloadMeasuredStates
        - auto_scheduler.PreloadCustomSketchRule
    """

    DEFAULT_PARAMS = {
        "eps_greedy": 0.05,
        "retry_search_one_round_on_empty": 1,
        "sample_init_min_population": 50,
        "sample_init_use_measured_ratio": 0.2,
        "evolutionary_search_population": 2048,
        "evolutionary_search_num_iters": 4,
        "evolutionary_search_mutation_prob": 0.85,
        "cpu_multi_level_tiling_structure": "SSRSRS",
        "gpu_multi_level_tiling_structure": "SSSRRSRS",
        # Notice: the default thread bind policy of GPU assumes the tiling structure to have at
        # least 3 spatial tiling levels in outermost
        "max_innermost_split_factor": 64,
        "max_vectorize_size": 16,
        "disable_change_compute_location": 0,
    }

    # NOTE(review): the RandomModel() default below is evaluated once at class
    # definition time and shared across all instances that rely on it —
    # confirm this sharing is intended.
    def __init__(
        self,
        task,
        program_cost_model=RandomModel(),
        params=None,
        seed=None,
        verbose=1,
        init_search_callbacks=None,
    ):
        # Fill in missing parameters with the defaults. When the caller passes
        # a dict, it is mutated in place (callers may observe the added keys).
        if params is None:
            params = SketchPolicy.DEFAULT_PARAMS
        else:
            for key, value in SketchPolicy.DEFAULT_PARAMS.items():
                if key not in params:
                    params[key] = value

        self.__init_handle_by_constructor__(
            _ffi_api.SketchPolicy,
            task,
            program_cost_model,
            params,
            seed or random.randint(1, 1 << 30),
            verbose,
            init_search_callbacks,
        )

    def generate_sketches(self, print_for_debug=False):
        """Generate the sketches.
        This python interface is mainly used for debugging and testing.
        The actual search is all done in c++.

        Parameters
        ----------
        print_for_debug : bool = False
            Whether print out the sketches for debug.

        Returns
        -------
        sketches : List[State]
            The generated sketches of this search task.
        """
        sketches = _ffi_api.SketchPolicyGenerateSketches(self)
        if print_for_debug:
            for i, s in enumerate(sketches):
                print("=" * 20 + f" {i} " + "=" * 20)
                print(s)
        return sketches

    def sample_initial_population(self):
        """Sample initial population.
        This python interface is mainly used for debugging and testing.
        The actual search is all done in c++.

        Returns
        -------
        states: List[State]
            The sampled states
        """
        states = _ffi_api.SketchPolicySampleInitialPopulation(self)
        return states

    def evolutionary_search(self, init_populations, out_size):
        """Perform evolutionary search.
        This python interface is mainly used for debugging and testing.
        The actual search is all done in c++.

        Parameters
        ----------
        init_populations: List[State]
            The initial population states
        out_size : int
            The size of generated states

        Returns
        -------
        states: List[State]
            The generated states
        """
        states = _ffi_api.SketchPolicyEvolutionarySearch(self, init_populations, out_size)
        return states
| 9,631 | 33.898551 | 97 | py |
tvm | tvm-main/python/tvm/auto_scheduler/compute_dag.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
""" The auto-scheduler's computational graph and related program analyses. """
import hashlib
import json
import tvm._ffi
from tvm.runtime import Object
from tvm.runtime._ffi_node_api import LoadJSON, SaveJSON
from . import _ffi_api
from .loop_state import State, StateObject
from .utils import get_const_tuple
from .workload_registry import workload_key_to_tensors
class LayoutRewriteOption:
    """
    Options for applying layout rewrite.

    The NO_REWRITE and INSERT_TRANSFORM_STAGE are expected to be used when tuning a standalone op,
    and the REWRITE_FOR_PRE_TRANSFORMED is expected to be used when tuning ops inside a network.
    """

    # Leave all layouts untouched.
    NO_REWRITE = 0
    # Insert layout transformation stages for input placeholders in the compute DAG
    INSERT_TRANSFORM_STAGE = 1
    # Do not insert layout transformation stages and assume the input placeholders
    # are pre-transformed.
    # Note: The lowered function with this option does not accept the origial input shapes,
    # so this option must be used along with `AutoSchedulerLayoutRewrite` pass in Relay.
    REWRITE_FOR_PRE_TRANSFORMED = 2

    @staticmethod
    def get_target_default(target, in_relay_integration=False):
        """Get the default layout rewrite option for the specified target.
        Currently we only enable layout rewrite for cpu / mali backend for now

        Parameters
        ----------
        target: tvm.target.Target
            The compilation target.
        in_relay_integration: bool
            If this check is ask for relay integration.

        Returns
        -------
        layout_rewrite_option: LayoutRewriteOption
            The default layout rewrite option for the specified target.
        """
        is_cpu = target.kind.name == "llvm"
        is_mali = "device" in target.attrs and target.attrs["device"] == "mali"
        if not (is_cpu or is_mali):
            # Layout rewrite is only enabled for the CPU and Mali backends.
            return LayoutRewriteOption.NO_REWRITE
        if in_relay_integration:
            return LayoutRewriteOption.REWRITE_FOR_PRE_TRANSFORMED
        return LayoutRewriteOption.INSERT_TRANSFORM_STAGE
@tvm._ffi.register_object("auto_scheduler.ComputeDAG")
class ComputeDAG(Object):
    """
    The auto-scheduler's computational graph and related program analyses.

    We convert a compute declaration described by `tvm.compute` (could be a single operator or a
    subgraph) to a ComputeDAG. It keeps the input/output tensors, all operations in the DAG, and
    some static analysis results for the DAG (e.g. the total float operation count,
    consumer/producer relations of operations, whether an operation stage should
    be tiled/compute inlined).
    These analyses can help the search policy to make decisions during the search.
    ComputeDAG is also responsible for the interaction between auto-scheduler's `LoopState` and
    TVM schedule (e.g. applying the `LoopState` transform steps to a TVM schedule, providing
    `LoopState` with extra information got from TVM schedule).

    Parameters
    ----------
    compute : Union[List[Tensor], str, tvm.te.Schedule]
        Input/output tensors or workload key for a compute declaration.
    """

    def __init__(self, compute_or_sche):
        # Accept a workload key (str), a list of output tensors, or a schedule;
        # normalize to the (compute, sche) pair the FFI constructor expects.
        if isinstance(compute_or_sche, str):
            compute = workload_key_to_tensors(compute_or_sche)
            sche = None
        elif isinstance(compute_or_sche, (list, tvm.ir.container.Array)):
            for item in compute_or_sche:
                if not isinstance(item, tvm.te.Tensor):
                    raise ValueError(
                        "The input of ComputeDAG should be a list of Tensor, but got %s"
                        % type(item)
                    )
            compute = compute_or_sche
            sche = None
        elif isinstance(compute_or_sche, tvm.te.Schedule):
            compute = None
            sche = compute_or_sche
        else:
            raise ValueError(
                "Invalid compute type: %s. ComputeDAG expects string, list of Tensor, or Schedule"
                % type(compute_or_sche)
            )
        self.__init_handle_by_constructor__(_ffi_api.ComputeDAG, compute, sche)

    def get_init_state(self):
        """Get the init state of this ComputeDAG.

        Returns
        -------
        state : State
            The initial State without any transform steps.
        """
        return State(self.init_state, self)

    def apply_steps_from_state(self, state, layout_rewrite=LayoutRewriteOption.NO_REWRITE):
        """
        Apply the history transform steps from a State to get a TVM schedule.

        Parameters
        ----------
        state : Union[State, StateObject]
            The state from which we get transform steps.

        layout_rewrite: LayoutRewriteOption = NoRewrite
            Rewrite the layout of placeholders specified by "layout_free_placeholders" attr
            to make it most friendly for the generated schedule to read from.

        Returns
        -------
        A `te.schedule` and the a list of `te.Tensor` to be used in `tvm.lower` or `tvm.build`.
        """
        state_obj = state if isinstance(state, StateObject) else state.state_object
        return _ffi_api.ComputeDAGApplyStepsFromState(self, state_obj, layout_rewrite)

    def print_python_code_from_state(self, state):
        """
        Print transform steps in the history of a State as TVM's python schedule code.

        This is used to print transformation steps for debugging.
        Use `apply_steps_from_state` if you want to get a schedule for code generation.

        Parameters
        ----------
        state : Union[State, StateObject]
            The state from which we get transform steps.

        Returns
        -------
        str : Str
            The Python schedule code.
        """
        state_obj = state if isinstance(state, StateObject) else state.state_object
        return _ffi_api.ComputeDAGPrintPythonCodeFromState(self, state_obj)

    def infer_bound_from_state(self, state):
        """
        Infer and fill the bound of all iterators of a state.

        The states may lose complete bound information after some transform steps
        (e.g., compute_at).
        We can call this function to infer and fill all the bound information.
        This function calls TVM InferBound pass internally to get the bound.
        The returned state of this function is guaranteed to have complete iterator extent
        information.

        Parameters
        ----------
        state : Union[State, StateObject]
            The state from which we get transform steps.

        Returns
        -------
        updated_state : State
            The State with complete bound information.
        """
        state_obj = state if isinstance(state, StateObject) else state.state_object
        updated_state = State(_ffi_api.ComputeDAGInferBoundFromState(self, state_obj), self)
        # Copy the stage_id_map from the original state to make sure the old indices are still
        # valid
        if isinstance(state, State):
            for k, v in state.stage_id_map.items():
                updated_state.stage_id_map[k] = v
        return updated_state

    def rewrite_layout_from_state(self, state):
        """
        Rewrite the layout of the DAG according to the history transform steps of a state.

        Parameters
        ----------
        state : Union[State, StateObject]
            The state from which we get transform steps.

        Returns
        -------
        updated_dag : ComputeDAG
            The compute dag with rewritten layout.
        """
        state_obj = state if isinstance(state, StateObject) else state.state_object
        return _ffi_api.ComputeDAGRewriteLayoutFromState(self, state_obj)

    def workload_key(self):
        """Return the workload key of this compute DAG.
        The workload key is a JSON string from a tuple of (hash of DAG, tensor shapes...)

        Returns
        -------
        key: str
            The workload key of this compute DAG
        """
        str_dag = _ffi_api.ComputeDAGPrintDAG(self, True)
        # A globally registered hash function may override the default md5;
        # fall back to md5 of the printed DAG when none is registered.
        hash_func = tvm._ffi.get_global_func(
            "auto_scheduler.compute_dag.hash_func", allow_missing=True
        )

        if hash_func is None:
            str_dag = str_dag.encode("utf-8")
            hash_key = hashlib.md5(str_dag).hexdigest()
        else:
            hash_key = hash_func(str_dag)

        io_shapes = []
        for tensor in self.tensors:
            io_shapes.append(get_const_tuple(tensor.shape))
        return json.dumps([hash_key] + io_shapes)

    def __str__(self):
        # pretty print
        MAX_LINE_WIDTH = 256

        raw_lines = super().__str__().split("\n")
        lines = []
        for line in raw_lines:
            if len(line) > MAX_LINE_WIDTH:
                # Truncate overlong lines, keeping the head and the tail.
                line = (
                    line[: MAX_LINE_WIDTH // 2] + " ..(OMITTED).. " + line[-MAX_LINE_WIDTH // 2 :]
                )
            lines.append(line)
        return "\n".join(lines)

    def __getstate__(self):
        return {"tensors": SaveJSON(self.tensors)}

    def __setstate__(self, state):
        # Since we always use tensors to recover the ComputeDAG, we do not support
        # (de)serialization of the ComputeDAG constructed by a schedule.
        self.__init_handle_by_constructor__(_ffi_api.ComputeDAG, LoadJSON(state["tensors"]), None)
def get_shape_from_rewritten_layout(rewritten_layout, axis_names):
    """Get the original shape from a rewritten layout string.

    Parameters
    ----------
    rewritten_layout: str
        The layout after rewrite
    axis_names: List[str]
        Specify the order of axes by names

    Returns
    -------
    shape: List[PrimExpr]
        The original shape
    """
    return _ffi_api.GetShapeFromRewrittenLayout(rewritten_layout, axis_names)
| 10,770 | 36.529617 | 99 | py |
tvm | tvm-main/python/tvm/auto_scheduler/task_scheduler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
""" The task scheduler that allocates the time resources when tuning multiple tasks together
The details of the "gradient" strategy below can be found in the section 6 of this paper:
L. Zheng, C. Jia, M. Sun, Z. Wu, C. Yu, et al. "Ansor : Generating High-Performance Tensor
Programs for Deep Learning." (OSDI 2020).
"""
import os
import time
import math
import logging
import numpy as np
from .search_policy import SearchPolicy, SketchPolicy, PreloadMeasuredStates
from .cost_model import RandomModel, XGBModel
from .utils import array_mean
from .measure import ProgramMeasurer
from .measure_record import RecordReader
from . import _ffi_api
logger = logging.getLogger("auto_scheduler")
def make_search_policies(
    search_policy,
    search_policy_params,
    tasks,
    num_measures_per_round,
    verbose,
    load_model_file=None,
    load_log_file=None,
    adaptive_training=False,
):
    """Build one search policy per search task.

    Parameters
    ----------
    search_policy: Union[str, List[SearchPolicy]]
        Either a policy-name string ("default", "sketch.xgb", "sketch.random")
        or a pre-built list of SearchPolicy objects, one per task.
    search_policy_params: Dict[str, Any]]
        The parameters of the search policy.
    tasks: List[SearchTask]
        The list of all tasks.
    num_measures_per_round: int
        The number of schedules measured per search round; must match
        `TuningOptions.num_measures_per_round`.
    verbose: int
        The verbosity level. 0 for silent.
    load_model_file: Optional[str]
        Load a pre-trained cost model from this file; if None, the cost model
        is trained from scratch.
    load_log_file: Optional[str]
        Load measurement records from this file to restore the state of the
        search policies and cost models.
    adaptive_training: bool = False
        Option used by XGBModel to reduce the model training frequency when
        there are too many logs.

    Returns
    -------
    policies: List[SearchPolicy]
        One search policy per task.
    """
    if search_policy == "default":
        search_policy = "sketch.xgb"
    if not isinstance(search_policy, str):
        # A list of pre-constructed policies was supplied: validate and reuse it.
        assert isinstance(search_policy, (tuple, list))
        for item in search_policy:
            assert isinstance(item, SearchPolicy)
        return search_policy
    policy_type, model_type = search_policy.split(".")
    if model_type == "xgb":
        cost_model = XGBModel(
            num_warmup_sample=len(tasks) * num_measures_per_round,
            model_file=load_model_file,
            adaptive_training=adaptive_training,
        )
        # Prefer an existing pre-trained model; otherwise bootstrap from the log.
        if load_model_file and os.path.isfile(load_model_file):
            logger.info("TaskScheduler: Load pretrained model...")
            cost_model.load(load_model_file)
        elif load_log_file:
            logger.info("TaskScheduler: Reload measured states and train the model...")
            cost_model.update_from_file(load_log_file)
    elif model_type == "random":
        cost_model = RandomModel()
    else:
        raise ValueError("Invalid search policy: " + search_policy)
    if policy_type != "sketch":
        raise ValueError("Invalid search policy: " + search_policy)
    # Use the log file (when given) to restore each policy's measured states.
    init_search_callbacks = [PreloadMeasuredStates(load_log_file)] if load_log_file else None
    return [
        SketchPolicy(
            task,
            cost_model,
            params=search_policy_params,
            verbose=verbose,
            init_search_callbacks=init_search_callbacks,
        )
        for task in tasks
    ]
def derive_similarity_tag(dag, log_base=1.618):
    """Derive the similarity-check tag of one computational DAG.

    Tasks whose DAGs share the same tag are treated as similar.
    The tag format is <op1-tag>_<op2-tag> ... <log(flop)>; an empty tag means
    the task is never considered similar to any other task.

    Parameters
    ----------
    dag: ComputeDAG
        The input computational DAG
    log_base: float = 1.618
        The base of log to normalize FLOPS

    Returns
    -------
    tag: str
        The tag of this computational DAG.
    """
    pieces = []
    for op in dag.ops:
        op_tag = op.attrs.get("auto_scheduler_task_scheduler_tag", None)
        if op_tag:
            pieces.append(op_tag + "_")
    tag = "".join(pieces)
    if tag:
        # Append a coarse FLOP bucket so only similarly-sized DAGs match.
        tag += "%d" % int(math.log(dag.flop_ct + 1, log_base))
    return tag
class TaskScheduler:
    """
    Allocate the time resources when tuning multiple tasks together.
    This implements two strategies: "round-robin" and "gradient".
    Parameters
    ----------
    tasks: List[SearchTask]
        All tasks to tune
    task_weights: Optional[List[float]]
        The weights of tasks.
        If provided, the task scheduler will set the objective function to
        sum(weight[t] * latency[t]), where weight[t] is the weight of a task
        and the latency[t] is the latency of the task.
        If not provided, the task scheduler will assign equal weights to all
        tasks (i.e., the objective function is sum(latency[t])).
    objective_func: Optional[Callable[List[float] -> float]]
        The objective function to be minimized.
        The objective function accepts the current latencies of all tasks and returns the
        objective.
        If not provided, the objective is the weighted sum of the latencies of all tasks.
    strategy: str = "gradient"
        The scheduling strategy.
        "round-robin": Tune tasks in round robin order.
        "gradient" : Tune tasks with gradient descent.
    load_model_file: Optional[str]
        Load pre-trained model from this file. If this is None, the cost model will
        be trained from scratch.
    load_log_file: Optional[str]
        Load measurement records from this file. If it is not None, the status of the
        task scheduler, search policies and cost models will be restored according to this file.
    verbose: int = 1
        The level of verbosity. 0 means silent.
    alpha: float = 0.2
        The parameter used for 'gradient' strategy
    beta: float = 2
        The parameter used for 'gradient' strategy
    backward_window_size: int = 3
        The parameter used for 'gradient' strategy
    callbacks: Optional[List[TaskSchedulerCallback]]
        The task scheduler callbacks that will be called before and after tuning a task.
        If None, PrintTableInfo and LogEstimatedLatency callback will be used.
    """
    def __init__(
        self,
        tasks,
        task_weights=None,
        objective_func=None,
        strategy="gradient",
        load_model_file: str = None,
        load_log_file: str = None,
        alpha: float = 0.2,
        beta: float = 2,
        gamma: float = 0.5,
        backward_window_size: int = 3,
        callbacks=None,
    ):
        self.tasks = tasks
        if objective_func:  # use custom objective function
            self.objective_func = objective_func
        else:  # use weighted sum
            if task_weights:
                self.objective_func = lambda costs: sum(c * w for c, w in zip(costs, task_weights))
            else:
                self.objective_func = sum
        self.strategy = strategy
        self.load_log_file = load_log_file
        self.load_model_file = load_model_file
        self.alpha = alpha
        self.beta = beta
        # NOTE(review): gamma is stored but never read by the logic visible in this
        # class — confirm it is still needed before relying on it.
        self.gamma = gamma
        self.backward_window_size = backward_window_size
        self.callbacks = (
            callbacks
            if callbacks is not None
            else [PrintTableInfo(), LogEstimatedLatency("total_latency.tsv")]
        )
        assert len(self.tasks) != 0, "No tasks"
        assert self.strategy in ["round-robin", "gradient"]
        # task_cts[i] saves how many times task i is tuned
        self.task_cts = [0 for _ in range(len(self.tasks))]
        # task_best_cts[i] saves the round task i found the best latency
        self.task_best_cts = [0 for _ in range(len(self.tasks))]
        # task_costs_history[i] saves the latency history of task i
        self.task_costs_history = [[] for _ in range(len(self.tasks))]
        # best_costs[i] saves the best latency of task i
        # (1e10 is the "not measured yet" sentinel; code below treats >= 1e9 as invalid)
        self.best_costs = 1e10 * np.ones(len(self.tasks))
        self.cur_score = self._compute_score(self.best_costs)
        self.tune_option = self.measurer = self.search_policies = None
        self.ct = self.best_ct = self.best_score = self.tic = None
        self.num_measures_per_round = None
        self.dead_tasks = set()
        # Build similarity groups
        self.task_tags = []  # task_id -> tag
        self.tag_to_group_id = {}  # tag -> group_id
        self.group_task_ids = []  # group_id -> all task ids in this group
        self.flop_cts = []  # task_id -> the number of floating ops
        for i, task in enumerate(self.tasks):
            tag = derive_similarity_tag(task.compute_dag)
            self.task_tags.append(tag)
            self.flop_cts.append(task.compute_dag.flop_ct)
            if not tag:
                continue
            if tag not in self.tag_to_group_id:
                self.tag_to_group_id[tag] = len(self.tag_to_group_id)
                self.group_task_ids.append([])
            self.group_task_ids[self.tag_to_group_id[tag]].append(i)
    def tune(
        self,
        tune_option,
        search_policy="default",
        search_policy_params=None,
        adaptive_training=False,
        per_task_early_stopping=None,
    ):
        """Tune a batch of tasks together.
        Parameters
        ----------
        tune_option: TuningOptions
            The tuning options applied to all tasks.
        search_policy: : Union[str, List[SearchPolicy]] = "default"
            The list of search policies.
            If it is str,
            "default" for the default policy (SketchPolicy + XGBModel),
            "sketch.xgb" for SketchPolicy + XGBModel,
            "sketch.random" for SketchPolicy + RandomModel.
        search_policy_params : Optional[Dict[str, Any]]
            The parameters of the search policy
        adaptive_training : bool = False
            Option used by XGBModel to reduce the model training frequency when there are
            too many logs.
        per_task_early_stopping : Optional[int]
            Stop tuning a task early if getting no improvement after n measurements.
        """
        # init members
        self.tune_option = tune_option
        # Normalize "disabled" early-stopping values to an effectively-infinite bound.
        self.early_stopping_all = (
            1e20 if tune_option.early_stopping < 0 else tune_option.early_stopping
        )
        self.early_stopping_task = (
            1e20 if per_task_early_stopping is None else per_task_early_stopping
        )
        self.measurer = ProgramMeasurer(
            tune_option.builder,
            tune_option.runner,
            tune_option.measure_callbacks,
            tune_option.verbose,
        )
        self.ct = self.best_ct = 0
        self.tic = time.time()
        # reset num_measures_per_round to make sure every task is tuned at least once
        self.num_measures_per_round = min(
            tune_option.num_measures_per_round, tune_option.num_measure_trials // len(self.tasks)
        )
        if self.num_measures_per_round <= 0:
            raise ValueError(
                "num_measure_trials is too small. Please set it to a higher value."
                f"It should be at least {len(self.tasks)} for this model."
            )
        # restore the status of the task scheduler from a log file
        if self.load_log_file:
            self._restore_status(self.load_log_file, self.num_measures_per_round)
        # make one search policy for one task
        self.search_policies = make_search_policies(
            search_policy,
            search_policy_params,
            self.tasks,
            self.num_measures_per_round,
            tune_option.verbose,
            self.load_model_file,
            self.load_log_file,
            adaptive_training,
        )
        # do a round robin first to warm up
        for idx in range(len(self.tasks)):
            # skip warming up this task if it has been tuned before (restored from the log file)
            if not self.task_cts[idx]:
                self._tune_task(idx)
        self.best_ct = self.ct
        self.best_score = self.cur_score
        # use the specific strategy to choose workload to tune
        task_idx = -1
        while self.ct < tune_option.num_measure_trials and len(self.dead_tasks) < len(self.tasks):
            if self.strategy == "round-robin":
                task_idx = (task_idx + 1) % len(self.tasks)
                while task_idx in self.dead_tasks:
                    task_idx = (task_idx + 1) % len(self.tasks)
            elif self.strategy == "gradient":
                gradients = []
                for i in range(len(self.tasks)):
                    if i in self.dead_tasks:
                        gradients.append(0)
                        continue
                    # compute gradient from chain rule : (delta f / delta g_i)
                    # (finite-difference approximation of the objective's sensitivity
                    # to task i's latency)
                    delta = 1e-4
                    new_costs = list(self.best_costs)
                    new_costs[i] -= delta
                    chain_grad = (
                        self._compute_score(self.best_costs) - self._compute_score(new_costs)
                    ) / delta
                    # compute (g_i(t_i) - g(t_i - \Delta t)) / (\Delta t)
                    if (
                        self.task_cts[i] - 1 < len(self.task_costs_history[i])
                        and self.task_cts[i] - 1 - self.backward_window_size >= 0
                    ):
                        backward_grad = (
                            self.task_costs_history[i][self.task_cts[i] - 1]
                            - self.task_costs_history[i][
                                self.task_cts[i] - 1 - self.backward_window_size
                            ]
                        ) / self.backward_window_size
                    else:
                        backward_grad = 0
                    # compute (g_i(t_i + \Delta t) - g(t_i)) / (\Delta t)
                    # g_next_1: projection assuming the next round improves the latency
                    # by the historical average amount (best_costs[i] / task_cts[i]).
                    g_next_1 = self.best_costs[i] - (self.best_costs[i] / self.task_cts[i])
                    # g_next_2: similarity-based bound derived from the best GFLOPS
                    # reached by tasks in the same similarity group (when it has peers).
                    g_next_2 = self.beta * 1e30
                    group_id = self.tag_to_group_id.get(self.task_tags[i], None)
                    if group_id is not None and len(self.group_task_ids[group_id]) > 1:
                        best_flops = max(
                            [
                                self.flop_cts[j] / self.best_costs[j]
                                for j in self.group_task_ids[group_id]
                            ]
                        )
                        g_next_2 = self.beta * self.flop_cts[i] / best_flops
                    g_next = min(g_next_1, g_next_2)
                    forward_grad = g_next - self.best_costs[i]
                    # combine all grads
                    grad = chain_grad * (
                        self.alpha * backward_grad + (1 - self.alpha) * forward_grad
                    )
                    assert grad <= 0
                    gradients.append(grad)
                if max(gradients) == min(gradients):
                    # All gradients identical (e.g. all zero): break the tie randomly.
                    task_idx = np.random.choice(len(gradients))
                else:
                    task_idx = np.argmin(gradients)
            else:
                raise ValueError("Invalid strategy: " + self.strategy)
            self._tune_task(task_idx)
            self._adjust_similarity_group(task_idx)
            if self.cur_score < self.best_score:
                self.best_score = self.cur_score
                self.best_ct = self.ct
            elif self.ct - self.best_ct >= self.early_stopping_all and all(
                cost < 1e9 for cost in self.best_costs
            ):
                if self.tune_option.verbose >= 1:
                    print(
                        "Stop early since no performance improvement in the last "
                        + str(self.early_stopping_all)
                        + " measurement trials."
                    )
                break
    def _tune_task(self, task_idx):
        """Tune the select task for one round"""
        # Run pre-tune callbacks
        for callback in self.callbacks:
            callback.pre_tune(self, task_idx)
        measure_inputs, measure_results = self.search_policies[task_idx].continue_search_one_round(
            self.num_measures_per_round, self.measurer
        )
        self.task_cts[task_idx] += 1
        for res in measure_results:
            cost = array_mean(res.costs)
            if cost < self.best_costs[task_idx]:
                self.task_best_cts[task_idx] = self.task_cts[task_idx]
                self.best_costs[task_idx] = cost
        # Stop tuning this task in the rest of the process if its search space has been
        # fully explored or it has no improvement for a long while.
        no_change_trials = (
            self.task_cts[task_idx] - self.task_best_cts[task_idx]
        ) * self.num_measures_per_round
        if len(measure_inputs) == 0 or no_change_trials > self.early_stopping_task:
            self.dead_tasks.add(task_idx)
        self.task_costs_history[task_idx].append(self.best_costs[task_idx])
        self.ct += len(measure_inputs)
        self.cur_score = self._compute_score(self.best_costs)
        # Run post-tune callbacks
        for callback in self.callbacks:
            callback.post_tune(self, task_idx)
    def _compute_score(self, costs):
        """compute the objective function"""
        # Make sure to return float.
        score = self.objective_func(costs)
        return score.value if hasattr(score, "value") else score
    def _adjust_similarity_group(self, task_idx):
        """adjust the similarity group for the selected task"""
        group_id = self.tag_to_group_id.get(self.task_tags[task_idx], None)
        if group_id is None or len(self.group_task_ids[group_id]) <= 1:
            return
        group_ids = self.group_task_ids[group_id]
        best_group_flops = max([self.flop_cts[j] / self.best_costs[j] for j in group_ids])
        cur_flops = self.flop_cts[task_idx] / self.best_costs[task_idx]
        # if we tune a task for many times but it still cannot achieve
        # a similar speed to the fastest one in its group, this means this task
        # is actually not similar to other tasks in its group.
        # So we will remove it from its original group.
        if cur_flops < best_group_flops / self.beta and self.task_cts[task_idx] > 5 + max(
            self.task_cts[j] for j in group_ids if j != task_idx
        ):
            self.task_tags[task_idx] = None
            group_ids.remove(task_idx)
    def _restore_status(self, log_file, num_measures_per_round):
        """restore task_cts and best_costs from a log file"""
        str_target = str(self.tasks[0].target)
        workload_key_to_task_id = {t.workload_key: i for i, t in enumerate(self.tasks)}
        # total_ct stays -1 when the log contains no records at all, so the final
        # log message reports total_ct + 1 records.
        total_ct = -1
        for total_ct, (inp, res) in enumerate(RecordReader(log_file)):
            if str(inp.task.target) != str_target:
                continue
            task_idx = workload_key_to_task_id.get(inp.task.workload_key, None)
            if task_idx is None:
                continue
            self.task_cts[task_idx] += 1
            if res.error_no == 0:
                cost = array_mean(res.costs)
                if cost < self.best_costs[task_idx]:
                    self.best_costs[task_idx] = cost
                    self.task_best_cts[task_idx] = self.task_cts[task_idx]
        for idx in range(len(self.tasks)):
            if self.task_cts[idx] - self.task_best_cts[idx] > self.early_stopping_task:
                self.dead_tasks.add(idx)
            # The computation of task_cts is just an estimation.
            # The estimation may not be accurate if the log file is changed externally or
            # `num_measures_per_round` is different from the last tuning.
            self.task_cts[idx] = int(self.task_cts[idx] / num_measures_per_round + 0.5)
            self.task_best_cts[idx] = int(self.task_best_cts[idx] / num_measures_per_round + 0.5)
            self.task_costs_history[idx].append(self.best_costs[idx])
        self.cur_score = self._compute_score(self.best_costs)
        logger.info("TaskScheduler: Loaded %d measurement records from %s", total_ct + 1, log_file)
class TaskSchedulerCallback:
    """Abstract base for hooks invoked around each tuning round of a task."""
    def pre_tune(self, task_scheduler, task_id):
        """Hook called right before a task is tuned.

        Parameters
        ----------
        task_scheduler: TaskScheduler
            The task scheduler driving the tuning.
        task_id: int
            The ID of the task about to be tuned.
        """
        # Intentionally a no-op; subclasses override as needed.
    def post_tune(self, task_scheduler, task_id):
        """Hook called right after a task is tuned.

        Parameters
        ----------
        task_scheduler: TaskScheduler
            The task scheduler driving the tuning.
        task_id: int
            The ID of the task that was just tuned.
        """
        # Intentionally a no-op; subclasses override as needed.
class PrintTableInfo(TaskSchedulerCallback):
    """The callback that prints a table of current progress."""
    def pre_tune(self, task_scheduler, task_id):
        """Print a per-task progress table plus an overall summary line."""
        if task_scheduler.tune_option.verbose < 1:
            return
        _ffi_api.PrintTitle("Task Scheduler")
        print(
            "| ID "
            "| Task Description "
            "| Latency (ms) | Speed (GFLOPS) | Trials |"
        )
        print(
            "----------------------------------------------------------------"
            "-------------------------------------------------"
        )
        # content
        for i in range(len(task_scheduler.tasks)):
            id_str = f"{i}"
            # "-" marks tasks with no valid measurement yet (cost sentinel >= 1e9).
            latency_str = (
                "%.3f" % (1e3 * task_scheduler.best_costs[i])
                if task_scheduler.best_costs[i] < 1e9
                else "-"
            )
            task_desc = task_scheduler.tasks[i].desc
            speed_str = (
                "%.2f"
                % (task_scheduler.tasks[i].compute_dag.flop_ct / task_scheduler.best_costs[i] / 1e9)
                if task_scheduler.best_costs[i] < 1e9
                else "-"
            )
            trials_str = "%d" % (task_scheduler.task_cts[i] * task_scheduler.num_measures_per_round)
            print(
                "| %4s | %61s | %12s | % 14s | %6s |"
                % (id_str, task_desc, latency_str, speed_str, trials_str)
            )
        print(
            "----------------------------------------------------------------"
            "-------------------------------------------------"
        )
        # overall info
        if all(cost < 1e9 for cost in task_scheduler.best_costs):
            total_latency_str = "%.3f" % (task_scheduler.cur_score * 1e3)
        else:
            total_latency_str = "-"
        print(
            "Estimated total latency: %s ms\tTrials: %d\tUsed time : %.0f s\tNext ID: %d\t"
            % (total_latency_str, task_scheduler.ct, time.time() - task_scheduler.tic, task_id)
        )
class LogEstimatedLatency(TaskSchedulerCallback):
    """Append the estimated end-to-end latency to a file after each tuning round.

    Parameters
    ----------
    log_file: str
        The log file path.
    """
    def __init__(self, log_file):
        # Start every tuning session with a clean log file.
        if os.path.exists(log_file):
            os.remove(log_file)
        self.log_file = log_file
    def post_tune(self, task_scheduler, task_id):
        # The overall estimate is meaningful only once every task has a real
        # measurement (the >= 1e9 sentinel means "not measured yet").
        have_all_costs = all(cost < 1e9 for cost in task_scheduler.best_costs)
        if have_all_costs:
            total_latency_str = "%.3f" % (task_scheduler.cur_score * 1e3)
        else:
            total_latency_str = "N/A"
        with open(self.log_file, "a") as filep:
            filep.write(
                "ElapsedTime(s)\t%.0f\tEstimatedLatency(ms)\t%s\tTrials\t%d\n"
                % (time.time() - task_scheduler.tic, total_latency_str, task_scheduler.ct)
            )
            filep.flush()
| 25,378 | 37.865237 | 100 | py |
tvm | tvm-main/python/tvm/auto_scheduler/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
""" Common utilities for auto_scheduler. """
from typing import Hashable
import json
import signal
import threading
import traceback
import os
import numpy as np
try:
import psutil
except ImportError:
psutil = None
import tvm
from tvm import rpc
from tvm.tir import expr
from tvm.tir.transform import Simplify
from tvm.ir.transform import Sequential
from ..te import Tensor, placeholder
def decode_workload_key(workload_key):
    """Decode a workload key string into its name and flattened arguments.

    The key is expected to be a JSON list "[func_name/hash, args ...]"; when it
    is not, the whole key is returned as the name with no arguments.

    Parameters
    ----------
    workload_key: str
        The workload key in string. Format: "[func_name/hash, args ...]".

    Returns
    -------
    name: str
        The workload function name or the DAG hash.
    args: Optional[Tuple[Any, ...]]
        The flattened arguments in a tuple, or None if the key is not decodeable.
    """

    def _flatten(seq):
        # Recursively flatten nested lists into one flat list.
        flat = []
        for item in seq:
            if isinstance(item, list):
                flat.extend(_flatten(item))
            else:
                flat.append(item)
        return flat

    try:
        decoded = json.loads(workload_key)
    except json.decoder.JSONDecodeError:
        decoded = None
    if isinstance(decoded, list) and len(decoded) >= 1:
        return decoded[0], tuple(_flatten(decoded[1:]))
    return workload_key, None
def calc_workload_dis_factor(target_workload_pair, workload_pair):
    """Distance factor from a candidate workload to the target workload.

    Incompatible workloads (different function/DAG or argument count) get "inf".
    Otherwise the factor is the product of target_arg / candidate_arg over the
    non-zero integer arguments (each must divide its target), with "inf" for any
    mismatched non-integer or zero argument. A factor of 1 means identical.

    Parameters
    ----------
    target_workload_pair: Tuple[str, Optional[Tuple[Any, ...]]]
        The target workload pair: (hash, argument tuple).
    workload_pair: Tuple[str, Optional[Tuple[Any, ...]]]
        The candidate workload pair: (hash, argument tuple).

    Returns
    -------
    dis_f: float
        The distance factor.
    """
    target_key, target_args = target_workload_pair
    key, args = workload_pair
    target_args = [] if target_args is None else target_args
    args = [] if args is None else args
    # Different func/DAG or arity: not comparable at all.
    if key != target_key or len(target_args) != len(args):
        return float("inf")
    factor = 1
    for t_arg, c_arg in zip(target_args, args):
        if not isinstance(t_arg, int):
            if t_arg != c_arg:
                return float("inf")
            continue
        if t_arg == 0 or c_arg == 0:
            if t_arg != c_arg:
                return float("inf")
        elif t_arg % c_arg != 0:
            return float("inf")
        else:
            factor *= t_arg / c_arg
    return factor
def get_func_name(func):
    """Return the readable name of a function.

    Parameters
    ----------
    func: Function
        The input function.

    Returns
    -------
    name: str
        The function name.
    """
    # Prefer an explicit `func_name` attribute when the object carries one.
    if hasattr(func, "func_name"):
        return func.func_name
    return func.__qualname__
def get_const_int(exp):
    """Verify that the expression is a constant integer and return its value.

    Parameters
    ----------
    exp : Union[tvm.tir.expr, int]
        The input expression.

    Returns
    -------
    out_value : int
        The constant integer value.
    """
    if isinstance(exp, int):
        return exp
    if not isinstance(exp, expr.IntImm):
        # Try to fold the expression down to a constant first.
        exp = Sequential([Simplify()])(exp)
    if not isinstance(exp, expr.IntImm):
        raise ValueError("Expect value to be constant int")
    return exp.value
def get_const_tuple(in_tuple):
    """Convert a tuple of expressions to a tuple of ints, keeping dynamic vars.

    Parameters
    ----------
    in_tuple : Tuple[tvm.tir.expr]
        The input.

    Returns
    -------
    out_tuple : Tuple[Union[int, tvm.tir.Var, tvm.tir.Any]]
        The output tuple of int. Dynamic shape variables (Var or Any) are preserved.
    """

    def _convert(elem):
        # Dynamic dimensions pass through untouched; everything else must fold
        # to a constant integer.
        if isinstance(elem, (tvm.tir.Var, tvm.tir.expr.Any)):
            return elem
        return get_const_int(elem)

    return tuple(_convert(elem) for elem in in_tuple)
def list_to_tuple(x):
    """Recursively convert a (possibly nested) list into nested tuples."""
    assert isinstance(x, list)
    converted = []
    for item in x:
        converted.append(list_to_tuple(item) if isinstance(item, list) else item)
    return tuple(converted)
def serialize_args(args):
    """
    Serialize arguments of a function to a hashable and jsonable tuple.
    Currently this is mainly used for tvm.tensor.Tensor
    """
    if args is None:
        return ()
    serialized = []
    for item in args:
        if isinstance(item, Tensor):
            # Tensors are encoded as a tagged (shape, dtype) triple.
            item = ("TENSOR", get_const_tuple(item.shape), item.dtype)
        elif isinstance(item, list):
            item = list_to_tuple(item)
        assert isinstance(item, Hashable), str(item) + " is not hashable"
        serialized.append(item)
    return tuple(serialized)
def deserialize_args(args):
    """The inverse function of :code:`serialize_args`"""

    def _revive(item):
        # Tagged "TENSOR" entries become placeholder tensors; all else passes through.
        if isinstance(item, (tuple, list)) and item[0] == "TENSOR":
            return placeholder(shape=item[1], dtype=item[2])
        return item

    return [_revive(item) for item in args]
def kill_child_processes(parent_pid, sig=signal.SIGTERM):
    """Recursively send *sig* to every descendant process of *parent_pid*.

    Silently returns if the parent (or a child) has already exited.
    """
    if not psutil:
        raise ImportError("psutil not found, try `pip install psutil` to fix this")
    try:
        children = psutil.Process(parent_pid).children(recursive=True)
    except psutil.NoSuchProcess:
        return
    try:
        for child in children:
            child.send_signal(sig)
    except psutil.NoSuchProcess:
        # A child exited between enumeration and signalling; nothing left to do.
        return
# The maximum length of traceback information
MAX_TRACEBACK_INFO_LEN = 512
def make_traceback_info():
    """Return the current exception traceback, truncated to a bounded length.

    When the traceback exceeds MAX_TRACEBACK_INFO_LEN characters, the middle is
    elided and replaced with "..." so head and tail are both preserved.
    """
    info = str(traceback.format_exc())
    if len(info) > MAX_TRACEBACK_INFO_LEN:
        half = MAX_TRACEBACK_INFO_LEN // 2
        info = info[:half] + "\n...\n" + info[-half:]
    return info
class PropagatingThread(threading.Thread):
    """A Thread subclass that re-raises the worker's exception in join().

    join() returns the target's return value on success, and raises the
    exception the target raised otherwise.
    """
    def run(self):
        # Capture the outcome so join() can relay it to the calling thread.
        self.exc = None
        try:
            self.ret = self._target(*self._args, **self._kwargs)
        except Exception as err:  # pylint: disable=broad-except
            self.exc = err
    def join(self, timeout=None):
        super().join(timeout)
        if self.exc:
            raise self.exc
        return self.ret
def call_func_with_thread(func, args, kwargs):
    """Run *func* in a fresh thread and return its result (exceptions propagate)."""
    result_box = []

    def _runner():
        result_box.append(func(*args, **kwargs))

    worker = PropagatingThread(target=_runner)
    worker.start()
    worker.join()
    return result_box[0]
def call_func_with_timeout(
    worker, timeout, func, args=(), kwargs=None
):  # pylint: disable=unused-argument
    """Dispatch *func* to *worker* with a timeout and wait for the reply.

    Any failure while receiving is converted into an Exception object carrying
    the traceback text, so callers can inspect it without a raise.
    """
    worker.send(func, args, kwargs, timeout)
    try:
        return worker.recv()
    except Exception:  # pylint: disable=broad-except
        return Exception(make_traceback_info())
def request_remote(device_key, host=None, port=None, priority=1, timeout=60):
    """Open an RPC session to a device registered in the tracker.

    Parameters
    ----------
    device_key : str
        The device key of registered device in tracker.
    host : Optional[str]
        Host address of the RPC tracker; falls back to $TVM_TRACKER_HOST.
    port : Optional[int]
        Port of the RPC tracker; falls back to $TVM_TRACKER_PORT.
    priority : int = 1
        The priority of this request, larger is more prior.
    timeout : int = 60
        The timeout of this session in second.

    Returns
    -------
    remote : RPCSession
        The connected remote RPCSession.
    """
    tracker_host = host or os.environ["TVM_TRACKER_HOST"]
    tracker_port = port or int(os.environ["TVM_TRACKER_PORT"])
    tracker = rpc.connect_tracker(tracker_host, tracker_port)
    return tracker.request(device_key, priority=priority, session_timeout=timeout)
def check_remote(device_key, host=None, port=None, priority=100, timeout=10):
    """
    Check the availability of a remote device.

    Parameters
    ----------
    device_key: str
        device key of registered device in tracker.
    host: Optional[str]
        Host address of the RPC tracker; falls back to $TVM_TRACKER_HOST.
    port: Optional[int]
        Port of the RPC tracker; falls back to $TVM_TRACKER_PORT.
    priority: int = 100
        The priority of this request, larger is more prior.
    timeout: int = 10
        The timeout of this check in seconds.

    Returns
    -------
    available: bool
        True if an available device can be found within the timeout.
    """

    def _probe():
        request_remote(device_key, host, port, priority)

    probe = threading.Thread(target=_probe)
    probe.start()
    probe.join(timeout)
    # If the probe thread is still alive, the request did not finish in time.
    return not probe.is_alive()
def array_mean(arr):
    """Compute the mean of the elements in a TVM Array<PrimExpr>.

    Parameters
    ----------
    arr: Array
        A TVM Array<PrimExpr>; each element exposes its number via `.value`.

    Returns
    -------
    mean: float
        The mean of the elements in the array.
    """
    total = sum(item.value for item in arr)
    return total / len(arr)
def to_str_round(x, decimal=6):
    """Convert an object to str, rounding any float numbers.

    Parameters
    ----------
    x: Union[str, list, int, float, np.ndarray]
        The input object
    decimal: int
        The precision of decimal fraction

    Returns
    -------
    ret: str
        The string format of these objects
    """
    if isinstance(x, str):
        return x
    if isinstance(x, (list, tuple, np.ndarray)):
        inner = ", ".join(to_str_round(item, decimal=decimal) for item in x)
        return "[" + inner + "]"
    if isinstance(x, dict):
        # Note: nested values use the default precision, matching sibling calls.
        return str({key: to_str_round(val) for key, val in x.items()})
    # The int check must precede the float check (bools are ints too).
    if isinstance(x, int):
        return str(x)
    if isinstance(x, (np.float32, np.float64, float)):
        return ("%." + str(decimal) + "f") % x
    raise ValueError(f"Invalid value: {str(x)}\ttype: {type(x)}")
| 11,629 | 27.296837 | 98 | py |
tvm | tvm-main/python/tvm/auto_scheduler/dispatcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The global context that dispatches best schedules to workloads.
In auto-scheduler, a state (loop_state.py::StateObject) saves the
schedule configuration by its transform_steps, so a state is used
as a schedule configuration here.
"""
# pylint: disable=invalid-name
import logging
import pathlib
from collections.abc import Iterable
import numpy as np
from tvm.contrib.utils import tempdir
from tvm.tir.expr import FloatImm
from .cost_model import RandomModel, XGBModel
from .measure import LocalRPCMeasureContext
from .measure_record import RecordToFile, load_records
from .search_policy import PreloadMeasuredStates, SketchPolicy
from .search_task import SearchTask, TuningOptions
from .utils import calc_workload_dis_factor, decode_workload_key
logger = logging.getLogger("auto_scheduler")
class DispatchContext(object):
    """
    Base class of dispatch context.

    Contexts form a stack via the `with` statement: entering a context makes it
    `DispatchContext.current` and remembers the previous one for fallback.
    """
    current = None
    def __init__(self):
        self._old_ctx = DispatchContext.current
    def query(self, target, workload_key, has_complex_op, dag, func_name):
        """
        Query the context to get the specific config for a workload.
        If no result is found in this context, the query falls through to the
        previously-active (outer) context.

        Parameters
        ----------
        target: Target
            The current target
        workload_key : str
            The workload key
        has_complex_op: bool
            Whether this workload has at least one complex op.
        dag: ComputeDAG
            The ComputeDAG of the workload.
        func_name: str
            The function name of this workload.

        Returns
        -------
        state : StateObject
            The state that stores schedule configuration for the workload
        """
        result = self._query_inside(target, workload_key, func_name)
        if result is not None:
            return result
        # Not found here: delegate to the enclosing context.
        return self._old_ctx.query(target, workload_key, has_complex_op, dag, func_name)
    def update(self, target, workload_key, state):
        """
        Update the config for a workload.

        Parameters
        ----------
        target: Target
            The current target
        workload_key : str
            The current workload_key.
        state : StateObject
            The state that stores schedule configuration for the workload
        """
        raise NotImplementedError()
    def _query_inside(self, target, workload_key, func_name):
        """
        Query only this context (no fallback) for a workload's config.

        Parameters
        ----------
        target: Target
            The current target
        workload_key : str
            The current workload_key.
        func_name: str
            The function name of this workload.

        Returns
        -------
        state : StateObject
            The schedule configuration for the workload
        """
        raise NotImplementedError()
    def __enter__(self):
        # Push: remember the active context, then make this one current.
        self._old_ctx = DispatchContext.current
        DispatchContext.current = self
        return self
    def __exit__(self, ptype, value, trace):
        # Pop: restore the context that was active on entry.
        DispatchContext.current = self._old_ctx
class ApplyHistoryBest(DispatchContext):
    """
    Apply the history best config

    Parameters
    ----------
    records : str, list of str, or iterator of (auto_scheduler.measure.MeasureInput,\
            auto_scheduler.measure.MeasureResult)
        Collection of tuning records.
        If is str, then it should be the filename of a records log file.
        Each row of this file is an encoded record pair. If it is an iterator,
        it can either be a set of str filenames which will be applied jointly,
        or a set of (input, result) tuples.
    n_lines: Optional[int]
        if it is not None, only load the first `n_lines` lines of log.
    include_compatible: bool
        When set to True, compatible records will also be considered.
    """

    def __init__(self, records, n_lines=None, include_compatible=False):
        super(ApplyHistoryBest, self).__init__()
        self.include_compatible = include_compatible

        # Both best maps share the same nesting:
        # Dict[str (target key),
        #   Dict[str (workload hash),
        #     Dict[tuple (workload args), tuple (State, cost)]]]
        self.best_by_targetkey = {}
        self.best_by_model = {}
        # Entries written via `update()`; always consulted before loaded records.
        self._best_user_defined = {}

        self.load(records, n_lines)

    @staticmethod
    def get_workload_entry(best_records, target_key, workload_key):
        """Get the entry of the target key and workload key hash in the given best record map.

        Missing intermediate dicts are created on demand, so callers can write
        into the returned entry directly.

        Parameters
        ----------
        best_records: Dict[str, Dict[str, Dict[str, Any]]]
            The best record map.
        target_key: str
            The first key to the best_records.
        workload_key: str
            The workload key that can be decoded to workload hash and args.

        Returns
        -------
        entry: Dict[str, Any]
            The entry in best_records with target key and workload hash.
        workload_hash: str
            The workload hash decoded from workload_key.
        workload_args: Tuple[Any, ...]
            The hashable tuple of workload args decoded from workload_key.
        """
        workload_hash, workload_args = decode_workload_key(workload_key)
        if target_key not in best_records:
            best_records[target_key] = {}
        if workload_hash not in best_records[target_key]:
            best_records[target_key][workload_hash] = {}
        return best_records[target_key][workload_hash], workload_hash, workload_args

    def load(self, records, n_lines=None):
        """Load records to this dispatch context

        Parameters
        ----------
        records : str or iterator of (auto_scheduler.measure.MeasureInput,\
                auto_scheduler.measure.MeasureResult)
            Collection of tuning records.
            If is str, then it should be the filename of a records log file.
            Each row of this file is an encoded record pair. Otherwise, it is an iterator.
        n_lines: Optional[int]
            if it is not None, only load the first `n_lines` lines of log
        """
        joint_records = []
        # A single filename or a single (input, result) pair is wrapped in a
        # list; note str is Iterable, hence the explicit second check.
        if not isinstance(records, Iterable) or isinstance(records, str):
            records = [records]

        for rec in records:
            if isinstance(rec, pathlib.Path):
                rec = str(rec)

            if isinstance(rec, str):
                # A filename: parse its encoded record pairs.
                rec = load_records(rec)
                joint_records += rec
            else:
                if rec is not None:
                    joint_records.append(rec)

        if not joint_records:
            return

        best_by_targetkey = self.best_by_targetkey
        best_by_model = self.best_by_model

        counter = 0
        for inp, res in joint_records:
            if n_lines is not None and counter >= n_lines:
                break
            counter += 1
            # Skip failed measurements.
            if res.error_no != 0:
                continue

            costs = [x.value for x in res.costs if isinstance(x, FloatImm)]
            cost = np.mean(costs)

            # use target keys in tvm target system as key to build best map
            for k in inp.task.target.keys:
                entry, _, workload_args = self.get_workload_entry(
                    best_by_targetkey, k, inp.task.workload_key
                )
                if workload_args not in entry:
                    entry[workload_args] = (inp.state, cost)
                else:
                    # Keep only the cheapest record for each workload.
                    _, other_cost = entry[workload_args]
                    if other_cost > cost:
                        entry[workload_args] = (inp.state, cost)

            # use model as key to build best map
            entry, _, workload_args = self.get_workload_entry(
                best_by_model, inp.task.target.model, inp.task.workload_key
            )
            if workload_args not in entry:
                # "unknown" models are not indexed by model name at all.
                if inp.task.target.model != "unknown":
                    entry[workload_args] = (inp.state, cost)
            else:
                _, other_cost = entry[workload_args]
                if other_cost > cost:
                    entry[workload_args] = (inp.state, cost)

        logger.debug("Finish loading %d records", counter)

    def _query_inside(self, target, workload_key, func_name):
        if target is None:
            raise RuntimeError(
                "Need a target context to find the history best. "
                "Hint: If your target is llvm, use `with tvm.target.create('llvm'):`"
                " above the dispatcher call. So does other target. "
            )

        def match_record(best_records, target_key, workload_key):
            """The helper function to match the record in the given map
            and return the matched state, or None if no match.
            """
            ret = None

            entry, workload_hash, workload_args = self.get_workload_entry(
                best_records, target_key, workload_key
            )
            if workload_args in entry:
                ret = entry[workload_args][0]
            elif self.include_compatible:
                # No exact match: pick the compatible record with the smallest
                # distance-adjusted cost.
                best_cost = float("inf")
                for args, val in entry.items():
                    dis_f = calc_workload_dis_factor(
                        (workload_hash, workload_args), (workload_hash, args)
                    )
                    if dis_f == float("inf"):
                        continue

                    state, cost = val
                    cost *= dis_f
                    if ret is None or cost < best_cost:
                        best_cost = cost
                        ret = state
            return ret

        # first try matching by model
        ret = match_record(self._best_user_defined, target.model, workload_key)
        if ret is not None:
            return ret
        ret = match_record(self.best_by_model, target.model, workload_key)
        if ret is not None:
            return ret

        # then try matching by target key
        for k in target.keys:
            ret = match_record(self._best_user_defined, k, workload_key)
            if ret is not None:
                return ret
            ret = match_record(self.best_by_targetkey, k, workload_key)
            if ret is not None:
                return ret

        return None

    def update(self, target, workload_key, state):
        # User-defined entries get a dummy cost of 1 and are stored under both
        # the model name and every target key, mirroring the lookup order.
        entry, _, workload_args = self.get_workload_entry(
            self._best_user_defined, target.model, workload_key
        )
        entry[workload_args] = (state, 1)

        for k in target.keys:
            entry, _, _ = self.get_workload_entry(self._best_user_defined, k, workload_key)
            entry[workload_args] = (state, 1)
class ApplyHistoryBestOrSample(ApplyHistoryBest):
    """
    Apply the history best config, or sample a valid schedule if no config is found.

    Parameters
    ----------
    records : str or iterator of (auto_scheduler.measure.MeasureInput,\
            auto_scheduler.measure.MeasureResult)
        Collection of tuning records.
        If is str, then it should be the filename of a records log file.
        Each row of this file is an encoded record pair. Otherwise, it is an iterator.
    sample_simple_workloads: bool
        When False, sampling will not apply to simple workloads (w/o reduction).
    cost_model_file: str
        The filename of the pre-trained XGBoost cost model. If not present, then random
        model will be used.
    num_measure: int
        Meausre the top-N rank of sampled schedules on the device. The default -1 means
        no measurement and simply return the top-1 schedule ranked by the cost model.
    """

    def __init__(
        self, records, sample_simple_workloads=False, cost_model_file=None, num_measure=-1
    ):
        self.sample_simple_workloads = sample_simple_workloads
        self.num_measure = num_measure
        # Temporary directory holding per-workload sampling logs.
        self.log_dir = tempdir()
        if cost_model_file is None:
            self.cost_model = RandomModel()
        else:
            self.cost_model = XGBModel()
            self.cost_model.load(cost_model_file)

        # Always consider compatible records: sampled schedules may only be
        # near-matches for the queried workload.
        super(ApplyHistoryBestOrSample, self).__init__(
            records, n_lines=None, include_compatible=True
        )

    def query(self, target, workload_key, has_complex_op, dag, func_name):
        if has_complex_op or self.sample_simple_workloads:
            # May fall through to sampling (see _query_inside below).
            ret = self._query_inside(target, workload_key, func_name)
        else:
            # Simple workload with sampling disabled: record lookup only.
            ret = super(ApplyHistoryBestOrSample, self)._query_inside(
                target, workload_key, func_name
            )

        if ret is None:
            ret = self._old_ctx.query(target, workload_key, has_complex_op, dag, func_name)
        return ret

    def _query_inside(self, target, workload_key, func_name):
        ret = super(ApplyHistoryBestOrSample, self)._query_inside(target, workload_key, func_name)
        if ret is not None:
            return ret

        # Sampling valid schedules when no existing records can be used.
        task = SearchTask(workload_key=workload_key, target=target)
        measure_ctx = LocalRPCMeasureContext(min_repeat_ms=300)

        log_file = self.log_dir.relpath(f"{decode_workload_key(workload_key)[0]}.log")

        # Repeat until at least one valid schedule is recorded and matched.
        while ret is None:
            tune_option = TuningOptions(
                num_measure_trials=self.num_measure,
                runner=measure_ctx.runner,
                measure_callbacks=[RecordToFile(log_file)],
                verbose=0,
            )
            search_policy = SketchPolicy(
                task,
                self.cost_model,
                params={
                    "eps_greedy": 0.01,
                    "sample_init_min_population": 64,
                    "evolutionary_search_num_iters": 0,
                },
                init_search_callbacks=[PreloadMeasuredStates(log_file)],
                verbose=0,
            )
            task.tune(tune_option, search_policy)

            # Load the sampled records and query again.
            self.load(log_file)
            ret = super(ApplyHistoryBestOrSample, self)._query_inside(
                target, workload_key, func_name
            )

        del measure_ctx
        return ret
class FallbackContext(DispatchContext):
    """
    A fallback dispatch context.

    This is used as the root context. It never owns tuned schedules: every
    query answers ``None``, and the miss is cached so each workload warns
    at most once.
    """

    def __init__(self):
        super(FallbackContext, self).__init__()
        self.memory = {}

        # Verbose level:
        # 0: Completely silent.
        # 1: Warning the missing configs for querying complex tasks.
        # 2: Warning the missing configs for querying all tasks.
        self.verbose = 1

        # Warnings already emitted, to avoid printing duplicates.
        self.messages = set()

    def query(self, target, workload_key, has_complex_op, dag, func_name):
        cache_key = (str(target), workload_key)
        if cache_key in self.memory:
            return self.memory[cache_key]

        should_warn = self.verbose == 2 or (has_complex_op and self.verbose == 1)
        if should_warn:
            msg = (
                f"-----------------------------------\n"
                f"{func_name}\n"
                f"Cannot find tuned schedules for target={target}, workload_key={workload_key}. "
                f"A fallback TOPI schedule is used, "
                f"which may bring great performance regression or even compilation failure. "
                f"Compute DAG info:\n{dag}"
            )
            if msg not in self.messages:
                self.messages.add(msg)
                logger.warning(msg)

        # Cache the miss so the warning is not repeated for this workload.
        self.memory[cache_key] = None
        return None

    def _query_inside(self, target, workload_key, func_name):
        # Parameters intentionally unused: the fallback context is only ever
        # queried through `query`.
        del target, workload_key, func_name
        raise RuntimeError("This function should never be called")

    def update(self, target, workload_key, state):
        self.memory[(str(target), workload_key)] = state
# Install the fallback context as the root (default) dispatch context.
DispatchContext.current = FallbackContext()
| 16,965 | 35.252137 | 98 | py |
tvm | tvm-main/python/tvm/auto_scheduler/loop_state.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import
"""
The definition of the "state" in the search.
Each LoopState corresponds to a schedule for its ComputeDAG.
A LoopState consists of: 1. a current loop structure; 2. a list of transformation steps used to
construct the loop structure.
The loop structure keeps a preview of how the schedule will finally look like after lowering the
current state (e.g. number of iterators, the extent of each iterator, the compute_at locations
...).
During the schedule search process, the loop structure can provide search policy with necessary
information on how to manipulate the current state.
The transform history is a sequence of `TransformStep` which will finally be mapped to TVM
schedule primitives. The steps are also used for the serialization of a state.
The LoopState can be seen as a lightweight loop structure IR specifically for schedule search.
We don't use the existing TVM IR but to extend a new structure on it is because:
1. We want fast incremental change to the loop structures. The search policy needs to get the
immediate loop structures update rather than after TVM lowering;
2. We want serializable transform history for replay, backtracking, and mutation;
3. We may create some macro schedule primitives that represent the combination of several
TVM schedule primitives.
When the search is finished, we will lower the state to TVM IR with TVM's schedule primitives.
Since we share a lot of common objects during search, the transformation is implemented in
copy on write style. All objects are immutable, which is similar to TVM IR.
"""
import tvm._ffi
from tvm.te.tensor import Operation, Tensor
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object("auto_scheduler.Iterator")
class Iterator(Object):
    """A loop iterator structure.

    Thin Python handle for the C++ ``auto_scheduler::Iterator`` object; all
    fields live on the C++ side.
    """
@tvm._ffi.register_object("auto_scheduler.Stage")
class Stage(Object):
    """A stage in the compute declaration. Similar to tvm.te.schedule.Stage."""

    # Static trans table for compute_at location
    # This is used to transform the compute_at location to C++ enum
    COMPUTE_AT_TRANS_TABLE = {"root": 0, "inlined": 1, "iter": 2}
@tvm._ffi.register_object("auto_scheduler.State")
class StateObject(Object):
    """The internal State object"""

    def __eq__(self, other):
        # Structural equality is delegated to the C++ implementation.
        return _ffi_api.StateEqual(self, other)
class State:
    """
    A state in the search process. It consists of the current loop structure
    and a list of transformation steps used to construct it.
    Each State corresponds to a specific schedule for its ComputeDAG.

    Parameters
    ----------
    state_object : StateObject
        The StateObject corresponding to C++ internal State object.
    dag : ComputeDAG
        The original ComputeDAG of this State.

    Notes
    -----
    This is a wrapper class of StateObject to deal with copy-on-write property
    """

    # Static trans table for thread bind and annotation
    # This is used to transform the annotation name to C++ enum
    ANNOTATION_TRANS_TABLE = {
        "none": 0,
        "unroll": 1,
        "vectorize": 2,
        "parallel": 3,
        "vthread": 4,
        "blockIdx.x": 5,
        "threadIdx.x": 6,
        "blockIdx.y": 7,
        "threadIdx.y": 8,
        "blockIdx.z": 9,
        "threadIdx.z": 10,
        "tensorize": 11,
    }

    def __init__(self, state_object, dag):
        self.state_object = state_object
        self.compute_dag = dag

        self.stage_id_map = {}  # A dict maps operation to stage id
        self._update_stage_id_map()

    @property
    def stages(self):
        """
        Returns
        -------
        stages : List[Stage]
        """
        return self.state_object.stages

    @property
    def transform_steps(self):
        """
        Returns
        -------
        transform_steps : List[transform_steps]
        """
        return self.state_object.transform_steps

    @property
    def stage_ops(self):
        """
        Returns
        -------
        ops: List[Operation]
        """
        return [stage.op for stage in self.stages]

    def bind(self, stage, iterator, thread_name):
        """Schedule primitive corresponding to `te.Stage.bind`.
        See also the `te.Stage` for more details.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to be binded, which can be specified by the integer index, Operation,
            or output tensor of the stage.
        iterator : Iterator
            The iterator to be binded.
        thread_name : str
            The thread type to be binded. Candidates:
            - vthread
            - blockIdx.x
            - threadIdx.x
            - blockIdx.y
            - threadIdx.y
            - blockIdx.z
            - threadIdx.z

        Returns
        -------
        res_it : Iterator
            The binded Iterator.
        """
        if thread_name not in State.ANNOTATION_TRANS_TABLE:
            # f-string: the previous two-argument form produced a tuple-style
            # message like ("Invalid thread_name: ", name).
            raise ValueError(f"Invalid thread_name: {thread_name}")

        self.state_object, res = _ffi_api.StateBind(
            self.state_object,
            self._resolve_stage_id(stage),
            iterator,
            State.ANNOTATION_TRANS_TABLE[thread_name],
        )
        return res

    def parallel(self, stage, iterator):
        """Schedule primitive corresponding to `te.Stage.parallel`.
        See also the `te.Stage` for more details.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to be paralleled, which can be specified by the integer index, Operation,
            or output tensor of the stage.
        iterator : Iterator
            The iterator to be paralleled.

        Returns
        -------
        res_it : Iterator
            The paralleled Iterator.
        """
        self.state_object, res = _ffi_api.StateParallel(
            self.state_object, self._resolve_stage_id(stage), iterator
        )
        return res

    def unroll(self, stage, iterator, max_unroll=None):
        """Schedule primitive corresponding to `te.Stage.unroll`.
        See also the `te.Stage` for more details.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to be unrolled, which can be specified by the integer index, Operation,
            or output tensor of the stage.
        iterator : Iterator
            The iterator to be unrolled.
        max_unroll : Optional[int]
            The max unroll limit. Iterator with extent larger than this limit will be skipped.

        Returns
        -------
        res_it : Iterator
            The unrolled Iterator.
        """
        self.state_object, res = _ffi_api.StateUnroll(
            self.state_object,
            self._resolve_stage_id(stage),
            iterator,
            max_unroll if max_unroll else -1,
        )
        return res

    def vectorize(self, stage, iterator):
        """Schedule primitive corresponding to `te.Stage.vectorize`.
        See also the `te.Stage` for more details.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to be vectorized, which can be specified by the integer index, Operation,
            or output tensor of the stage.
        iterator : Iterator
            The iterator to be vectorized.

        Returns
        -------
        res_it : Iterator
            The vectorized Iterator.
        """
        self.state_object, res = _ffi_api.StateVectorize(
            self.state_object, self._resolve_stage_id(stage), iterator
        )
        return res

    def fuse(self, stage, iters):
        """Schedule primitive corresponding to `te.Stage.fuse`.
        See also the `te.Stage` for more details.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to be fused, which can be specified by the integer index, Operation,
            or output tensor of the stage.
        iters : List[Iterator]
            The iterators to be fused.

        Returns
        -------
        res_it : Iterator
            The fused Iterator.

        Notes
        -----
        If the iterators to be fused have stages attached at them(by compute_at), the fused
        result will become the new attach point.
        """
        self.state_object, res = _ffi_api.StateFuse(
            self.state_object, self._resolve_stage_id(stage), iters
        )
        return res

    def pragma(self, stage, iterator, pragma_type):
        """Schedule primitive corresponding to `te.Stage.pragma`.
        See also the `te.Stage` for more details.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to add pragma, which can be specified by the integer index, Operation,
            or output tensor of the stage.
        iterator : Iterator
            The iterator to add pragma.
        pragma_type : str
            The pragma string.
        """
        self.state_object = _ffi_api.StatePragma(
            self.state_object, self._resolve_stage_id(stage), iterator, pragma_type
        )

    def reorder(self, stage, order):
        """Schedule primitive corresponding to `te.Stage.reorder`.
        See also the `te.Stage` for more details.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to be reordered, which can be specified by the integer index, Operation,
            or output tensor of the stage.
        order : List[Iterator]
            Iterators in the expected order.
        """
        self.state_object = _ffi_api.StateReorder(
            self.state_object, self._resolve_stage_id(stage), order
        )

    def split(self, stage, iterator, lengths, inner_to_outer=True):
        """Schedule primitive corresponding to `te.Stage.split`.
        See also the `te.Stage` for more details.

        This API supports multiple split factors. (e.g. with 2 split factors, the original
        iterator will be split to 3 parts, use `inner_to_outer` to control the split order)

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to be split, which can be specified by the integer index, Operation,
            or output tensor of the stage.
        iterator : Iterator
            The iterator to be split.
        lengths: List[int]
            The multiple split factors. Can be None to be filled by search policy.
        inner_to_outer: boolean = True
            Whether the factor go from inner to outer, or from outer to inner.

        Returns
        -------
        res_its : List[Iterator]
            The splitted new Iterators.

        Notes
        -----
        If we do split on an iterator which has stages attached at it(by compute_at), the inner
        most iterator of split results will become the new attach point.
        """
        self.state_object, res = _ffi_api.StateSplit(
            self.state_object, self._resolve_stage_id(stage), iterator, lengths, inner_to_outer
        )
        return res

    def follow_split(self, stage, iterator, src_step_id, n_split):
        """The schedule primitive similar to split, but uses split factors from previous steps.

        This step splits the iterator by the same factors as the given SplitStep.

        Notes
        ------
        This step is useful in a scenario that we have subgraph Dense -> Relu,
        and we want to compute the Dense stage at ReLU. In this case, we need them to have
        the same tiling structure of common outer loops.
        The follow_split step could be used here to split the Dense stage and makes sure its
        splitting factors are the same as the given split step for the ReLU stage.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to be split, which can be specified by the integer index, Operation,
            or output tensor of the stage.
        iterator : Iterator
            The iterator to split.
        src_step_id : int
            The index of the split step to be followed in the history.
        n_split : int
            The number of split level.

        Returns
        -------
        res_its : List[Iterator]
            The splitted new Iterators.
        """
        self.state_object, res = _ffi_api.StateFollowSplit(
            self.state_object, self._resolve_stage_id(stage), iterator, src_step_id, n_split
        )
        return res

    def follow_fused_split(self, stage, iterator, src_step_ids, level, factor_or_nparts):
        """Schedule primitive extends to split step.

        This step is used to split an iterator by the same factors
        as the given list of SplitSteps and FuseSteps.

        Notes
        ------
        This step is useful in a scenario that we have a subgraph
        in GPU schedule: Input -> Dense
        for i.0@j.0 = ... : Bind to blockIdx.x
            for i.1@j.1 = ... : Bind to threadIdx.x
                for i.2@j.2 = ...
                    Input_shared = Input ...
                    for k = ...
                        Dense = ...
        We intend to apply cooperative fetching with the input stage, while the threadIdx.x
        axis is bound to an iterator generated by split & fuse step.
        The follow_fused_step is used split the iterator to 2 parts, while the split factor
        matches the final extent of the threadIdx.x bound iterator.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to be split, which can be specified by the integer index, Operation,
            or output tensor of the stage.
        iterator : Iterator
            The iterator to split.
        src_step_ids : List[int]
            The indices of the split steps to be followed in the history.
        level : int
            Use the length in this split level.
        factor_or_nparts : bool
            True to use `factor` for split from inner to outer,
            False to use `nparts` for split from outer to inner.

        Returns
        -------
        res_its : List[Iterator]
            The splitted new Iterators.
        """
        self.state_object, res = _ffi_api.StateFollowFusedSplit(
            self.state_object,
            self._resolve_stage_id(stage),
            iterator,
            src_step_ids,
            level,
            factor_or_nparts,
        )
        return res

    def storage_align(self, stage, iterator, factor, offset):
        """Schedule primitive corresponding to `te.Stage.storage_align`.
        See also the `te.Stage` for more details.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to be storage aligned, which can be specified by the integer index,
            Operation, or output tensor of the stage.
        iterator : Iterator
            The iterator to be aligned.
        factor : int
            The factor in alignment specification.
        offset : int
            The offset in the alignment specification.
        """
        self.state_object = _ffi_api.StateStorageAlign(
            self.state_object, self._resolve_stage_id(stage), iterator, factor, offset
        )

    def compute_at(self, stage, target_stage, target_iter):
        """Schedule primitive corresponding to `te.Stage.compute_at`.
        See also the `te.Stage` for more details.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The source Stage of computed at, which can be specified by the integer index,
            Operation, or output tensor of the stage.
        target_stage : Union[int, Operation, Tensor]
            The target stage of compute_at, which can be specified by the integer index,
            Operation, or output tensor of the stage.
        target_iter : Iterator
            The target Iterator of compute_at.

        Notes
        -----
        After compute_at, we need careful dependency analysis to compute the accurate bound
        information. However, it is relatively expensive and complicated, so we just fill "None"
        as bound for the newly created iterators.
        Call ComputeDAG::InferBound on the returned state to get the complete bound information.
        """
        self.state_object = _ffi_api.StateComputeAt(
            self.state_object,
            self._resolve_stage_id(stage),
            self._resolve_stage_id(target_stage),
            target_iter,
        )

    def compute_inline(self, stage):
        """Schedule primitive corresponding to `te.Stage.compute_inline`, see also the `te.Stage`
        for more details.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to be marked compute inlined, which can be specified by the integer index,
            Operation, or output tensor of the stage.
        """
        self.state_object = _ffi_api.StateComputeInline(
            self.state_object, self._resolve_stage_id(stage)
        )

    def compute_root(self, stage):
        """Schedule primitive corresponding to `te.Stage.compute_root`.
        See also the `te.Stage` for more details.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to be marked compute at root, which can be specified by the integer index,
            Operation, or output tensor of the stage.

        Notes
        -----
        After compute_root, we need careful dependency analysis to compute the accurate bound
        information. However, it is relatively expensive and complicated, so we just fill "None"
        as bound for the newly created iterators.
        Call ComputeDAG::InferBound on the returned state to get the complete bound information.
        """
        self.state_object = _ffi_api.StateComputeRoot(
            self.state_object, self._resolve_stage_id(stage)
        )

    def cache_read(self, stage, scope_name, reader_stages):
        """Schedule primitive corresponding to `te.Schedule.cache_read`.
        See also the `te.Schedule` for more details.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to be cache_read, which can be specified by the integer index, Operation,
            or output tensor of the stage.
        scope_name : str
            The scope name of the newly added read stage.
        reader_stages : List[Union[int, Operation, Tensor]]
            The reader stages. Each of the list can be specified by the integer index, Operation,
            or output tensor of the stage.

        Returns
        -------
        new_stage_op : Operator
            The Operator of the new added stage.

        Notes
        -----
        Cache read step will insert an extra stage to the original ComputeDAG (at the back of the
        target stage).
        """
        reader_stage_ids = [self._resolve_stage_id(i) for i in reader_stages]
        self.state_object, new_stage_id = _ffi_api.StateCacheRead(
            self.state_object,
            self._resolve_stage_id(stage),
            scope_name,
            reader_stage_ids,
            self.compute_dag,
        )
        # Add a new stage will change all ops behind the added stage. But we still want to keep the
        # original ops map, apply stage id offset to stage_id_map to make them work.
        self._apply_stage_id_offset(int(new_stage_id))
        self._update_stage_id_map()
        return self.stages[int(new_stage_id)].op

    def cache_write(self, stage, scope_name):
        """Schedule primitive corresponding to `te.Schedule.cache_write`.
        See also the `te.Schedule` for more details.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to be cache_write, which can be specified by the integer index, Operation,
            or output tensor of the stage.
        scope_name : str
            The scope name of the newly added compute stage.

        Returns
        -------
        new_stage_op : Operator
            The Operator of the new added stage.

        Notes
        -----
        Cache write step will insert an extra stage to the original ComputeDAG (in the front of the
        target stage).
        This step will cache write all output tensors of the target stage.
        """
        self.state_object, new_stage_id = _ffi_api.StateCacheWrite(
            self.state_object, self._resolve_stage_id(stage), scope_name, self.compute_dag
        )
        # Add a new stage will change all ops behind the added stage. But we still want to keep the
        # original ops map, apply stage id offset to stage_id_map to make them work.
        self._apply_stage_id_offset(int(new_stage_id))
        self._update_stage_id_map()
        return self.stages[int(new_stage_id)].op

    def rfactor(self, stage, iterator, factor_iter_id):
        """Schedule primitive corresponding to `te.Schedule.rfactor`.
        See also the `te.Schedule` for more details.

        Parameters
        ----------
        stage : Union[int, Operation, Tensor]
            The Stage to be factored, which can be specified by the integer index, Operation,
            or output tensor of the stage.
        iterator : Iterator
            The reduction iterator to be factored.
        factor_iter_id : int
            The position where the new iterator is placed.

        Returns
        -------
        new_stage_op : Operator
            The Operator of the new added stage.

        Notes
        -----
        Rfactor step will insert an extra stage to the original ComputeDAG (in the front of the
        target stage).
        """
        self.state_object, new_stage_id = _ffi_api.StateRfactor(
            self.state_object,
            self._resolve_stage_id(stage),
            iterator,
            factor_iter_id,
            self.compute_dag,
        )
        # Add a new stage will change all ops behind the added stage. But we still want to keep the
        # original ops map, apply stage id offset to stage_id_map to make them work.
        self._apply_stage_id_offset(int(new_stage_id))
        self._update_stage_id_map()
        return self.stages[int(new_stage_id)].op

    def copy(self):
        """Do deep copy of this State."""
        state = State(self.state_object, self.compute_dag)
        state.stage_id_map = self.stage_id_map.copy()
        return state

    def _resolve_stage_id(self, stage_id):
        """Normalize a stage specifier (int index, Operation, or Tensor) to an int index."""
        # Plain integers are already stage indices; check them first.
        if isinstance(stage_id, int):
            return stage_id
        if isinstance(stage_id, Operation):
            return self.stage_id_map[stage_id]
        if isinstance(stage_id, Tensor):
            return self.stage_id_map[stage_id.op]
        # Bug fix: the previous `"..." + stage_id` raised TypeError (cannot
        # concatenate str with a non-str) instead of the intended ValueError.
        raise ValueError(
            f"Invalid stage: {stage_id} . Expect to be a int, Operation or Tensor"
        )

    def _update_stage_id_map(self):
        # Rebuild the Operation -> stage index map from the current stages.
        for index, stage in enumerate(self.stages):
            self.stage_id_map[stage.op] = index

    def _apply_stage_id_offset(self, start_id, offset=1):
        # Shift indices of all stages at or after `start_id` (a stage was inserted there).
        for key, value in self.stage_id_map.items():
            if value >= start_id:
                self.stage_id_map[key] = value + offset

    def __getitem__(self, key):
        if isinstance(key, Tensor):
            key = key.op
        if isinstance(key, Operation):
            return self.stages[self.stage_id_map[key]]
        # Bug fix: same TypeError-on-concatenation issue as in _resolve_stage_id.
        raise ValueError(f"Invalid item: {key} . Expect to be a Operation or Tensor")

    def __str__(self):
        return str(self.state_object)

    def __eq__(self, other):
        return _ffi_api.StateEqual(self.state_object, other.state_object)
| 24,583 | 38.71567 | 99 | py |
tvm | tvm-main/python/tvm/auto_scheduler/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Register FFI APIs from C++ for the namespace tvm.auto_scheduler. """
import tvm._ffi

# Expose every C++ PackedFunc registered under the "auto_scheduler" global
# prefix as a module-level attribute here (e.g. _ffi_api.StateSplit).
tvm._ffi._init_api("auto_scheduler", __name__)
| 924 | 39.217391 | 72 | py |
tvm | tvm-main/python/tvm/auto_scheduler/measure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Distributed measurement infrastructure to measure the runtime costs of tensor programs.
These functions are responsible for building the tvm module, uploading it to
remote devices, recording the running time costs, and checking the correctness of the output.
We separate the measurement into two steps: build and run.
A builder builds the executable binary files and a runner runs the binary files to
get the measurement results. The flow of data structures is
. `ProgramBuilder` `ProgramRunner`
`MeasureInput` -----------------> `BuildResult` ----------------> `MeasureResult`
We implement these in python to utilize python's multiprocessing and error handling.
"""
import logging
import multiprocessing
import os
import shutil
import tempfile
import time
import tvm._ffi
from tvm.autotvm.env import AutotvmGlobalScope, reset_global_scope
from tvm.contrib import ndk, tar
from tvm.contrib.popen_pool import PopenPoolExecutor, PopenWorker, StatusKind
from tvm.driver import build_module
from tvm.ir import transform
from tvm.runtime import Object, module, ndarray
from tvm.target import Target
from . import _ffi_api
from .loop_state import StateObject
from .utils import (
call_func_with_timeout,
check_remote,
get_const_tuple,
get_func_name,
make_traceback_info,
request_remote,
)
from .workload_registry import (
deserialize_workload_registry_entry,
serialize_workload_registry_entry,
)
# pylint: disable=invalid-name
# Module-level logger shared by the measurement infrastructure in this file.
logger = logging.getLogger("auto_scheduler")
# The time cost for measurements with errors
# We use 1e10 instead of sys.float_info.max for better readability in log
MAX_FLOAT = 1e10
class BuildFunc:
    """Store build_func name and callable to class variable.

    `LocalBuilder.__init__` writes these class attributes and
    `local_builder_build` reads them back (asserting the names agree),
    because only the string name crosses the FFI boundary.

    name: str = "default"
        The name of registered build function.
    build_func: callable = tar.tar
        The callable of registered build function.
    """
    name = "default"
    build_func = tar.tar
@tvm._ffi.register_object("auto_scheduler.MeasureCallback")
class MeasureCallback(Object):
    """The base class of measurement callback functions.

    Callbacks of this type are invoked by `ProgramMeasurer` after each
    measurement batch (see `ProgramMeasurer`'s `callbacks` parameter).
    """
@tvm._ffi.register_object("auto_scheduler.PythonBasedMeasureCallback")
class PythonBasedMeasureCallback(MeasureCallback):
    """Base class for measure callbacks implemented in python.

    Subclasses override `callback`; the closure registered in the
    constructor forwards invocations from the C++ side to that method.
    """
    def __init__(self):
        # Bridge function: the FFI object holds this closure and calls it,
        # which in turn dispatches to the (overridden) `callback` method.
        def callback_func(policy, inputs, results):
            self.callback(policy, inputs, results)
        self.__init_handle_by_constructor__(_ffi_api.PythonBasedMeasureCallback, callback_func)
    def callback(self, policy, inputs, results):
        """The callback function.

        Subclasses must override this; the base implementation raises.

        Parameters
        ----------
        policy: auto_scheduler.search_policy.SearchPolicy
            The search policy.
        inputs : List[auto_scheduler.measure.MeasureInput]
            The measurement inputs
        results : List[auto_scheduler.measure.MeasureResult]
            The measurement results
        """
        raise NotImplementedError
@tvm._ffi.register_object("auto_scheduler.MeasureInput")
class MeasureInput(Object):
    """Store the input of a measurement.

    Parameters
    ----------
    task : SearchTask
        The SearchTask of this measurement.
    state : Union[State, StateObject]
        The State to be measured.
    """
    def __init__(self, task, state):
        # Accept either a high-level State (unwrap to its state_object) or a
        # raw StateObject directly.
        state = state if isinstance(state, StateObject) else state.state_object
        self.__init_handle_by_constructor__(_ffi_api.MeasureInput, task, state)
    def serialize(self):
        """Custom serialization to workaround MeasureInput not exposing all its
        members to the TVM ffi interface.
        Note that we do not implement __getstate__ as it does not seem to work
        with initialization of the workload registry (maybe because of
        initialization order?).

        Returns a two-element list: the FFI-serialized input plus the
        workload-registry entry for the task, which `deserialize` restores.
        """
        return [
            _ffi_api.SerializeMeasureInput(self),
            serialize_workload_registry_entry(self.task.workload_key),
        ]
    @staticmethod
    def deserialize(data):
        """Inverse of `serialize`: restore the FFI object, re-register the
        workload entry, then rebuild fields that were not serialized."""
        inp = _ffi_api.DeserializeMeasureInput(data[0])
        deserialize_workload_registry_entry(data[1])
        return recover_measure_input(inp)
@tvm._ffi.register_object("auto_scheduler.BuildResult")
class BuildResult(Object):
    """Store the result of a build.

    Parameters
    ----------
    filename : Optional[str]
        The filename of built binary file.
    args : List[Tensor]
        The arguments.
    error_no : int
        The error code.
    error_msg : Optional[str]
        The error message if there is any error.
    time_cost : float
        The time cost of build.
    """
    def __init__(self, filename, args, error_no, error_msg, time_cost):
        # The FFI constructor does not accept None, so map falsy filename /
        # error message values to empty strings.
        self.__init_handle_by_constructor__(
            _ffi_api.BuildResult,
            filename or "",
            args,
            error_no,
            error_msg or "",
            time_cost,
        )
@tvm._ffi.register_object("auto_scheduler.MeasureResult")
class MeasureResult(Object):
    """Store the results of a measurement.

    Parameters
    ----------
    costs : List[float]
        The time costs of execution.
    error_no : int
        The error code.
    error_msg : Optional[str]
        The error message if there is any error.
    all_cost : float
        The time cost of build and run.
    timestamp : float
        The time stamps of this measurement.
    """
    def __init__(self, costs, error_no, error_msg, all_cost, timestamp):
        # The FFI constructor cannot take None for the message field.
        self.__init_handle_by_constructor__(
            _ffi_api.MeasureResult,
            costs,
            error_no,
            error_msg or "",
            all_cost,
            timestamp,
        )
def recover_measure_input(inp, rebuild_state=False):
    """
    Recover a deserialized MeasureInput by rebuilding the missing fields.
    1. Rebuild the compute_dag in inp.task
    2. (Optional) Rebuild the stages in inp.state

    Parameters
    ----------
    inp: MeasureInput
        The deserialized MeasureInput
    rebuild_state: bool = False
        Whether rebuild the stages in MeasureInput.State

    Returns
    -------
    new_input: MeasureInput
        The fully recovered MeasureInput with all fields rebuilt.
    """
    # pylint: disable=import-outside-toplevel
    from .search_task import SearchTask  # lazily import to avoid recursive dependency

    old_task = inp.task
    # Normalize target/target_host in place before reconstructing the task.
    old_task.target, old_task.target_host = Target.canon_target_and_host(
        old_task.target, old_task.target_host
    )
    recovered_task = SearchTask(
        workload_key=old_task.workload_key,
        target=old_task.target,
        hardware_params=old_task.hardware_params,
        layout_rewrite_option=old_task.layout_rewrite_option,
        task_inputs=list(old_task.task_input_names),
    )
    recovered_state = (
        recovered_task.compute_dag.infer_bound_from_state(inp.state)
        if rebuild_state
        else inp.state
    )
    return MeasureInput(recovered_task, recovered_state)
@tvm._ffi.register_object("auto_scheduler.ProgramBuilder")
class ProgramBuilder(Object):
    """The base class of ProgramBuilders."""
    def build(self, measure_inputs, verbose=1):
        """Build programs and return results.

        Parameters
        ----------
        measure_inputs : List[MeasureInput]
            A List of MeasureInput.
        verbose: int = 1
            Verbosity level. 0 for silent, 1 to output information during program building.

        Returns
        -------
        res : List[BuildResult]
        """
        # Delegate to the FFI implementation of the concrete builder.
        return _ffi_api.ProgramBuilderBuild(self, measure_inputs, verbose)
@tvm._ffi.register_object("auto_scheduler.ProgramRunner")
class ProgramRunner(Object):
    """The base class of ProgramRunners."""
    def run(self, measure_inputs, build_results, verbose=1):
        """Run measurement and return results.

        Parameters
        ----------
        measure_inputs : List[MeasureInput]
            A List of MeasureInput.
        build_results : List[BuildResult]
            A List of BuildResult to be ran.
        verbose: int = 1
            Verbosity level. 0 for silent, 1 to output information during program running.

        Returns
        -------
        res : List[MeasureResult]
        """
        # Delegate to the FFI implementation of the concrete runner.
        return _ffi_api.ProgramRunnerRun(self, measure_inputs, build_results, verbose)
@tvm._ffi.register_object("auto_scheduler.ProgramMeasurer")
class ProgramMeasurer(Object):
    """
    Measurer that measures the time costs of tvm programs
    This class combines ProgramBuilder and ProgramRunner, and provides a simpler API.

    Parameters
    ----------
    builder : ProgramBuilder
        The ProgramBuilder to build programs
    runner : ProgramRunner
        The ProgramRunner to measure programs.
    callbacks : List[MeasureCallback]
        Callbacks to be called after each measurement batch
    verbose : int
        The Verbosity level: 0 for silent, 1 to output information during program
    max_continuous_error : Optional[int]
        The number of allowed maximum continuous error before stop the tuning
    """
    def __init__(self, builder, runner, callbacks, verbose, max_continuous_error=None):
        # -1 tells the C++ side to use its built-in default threshold.
        max_continuous_error = max_continuous_error or -1  # -1 means using the default value
        self.__init_handle_by_constructor__(
            _ffi_api.ProgramMeasurer, builder, runner, callbacks, verbose, max_continuous_error
        )
@tvm._ffi.register_object("auto_scheduler.LocalBuilder")
class LocalBuilder(ProgramBuilder):
    """LocalBuilder use local CPU cores to build programs in parallel.

    Parameters
    ----------
    timeout : int = 15
        The timeout limit (in second) for each build thread.
        This is used in a wrapper of the multiprocessing.Process.join().
    n_parallel : int = multiprocessing.cpu_count()
        Number of threads used to build in parallel.
    build_func: callable or str = "default"
        If is 'default', use default build function
        If is 'ndk', use function for android ndk
        If is callable, use it as custom build function, expect lib_format field.
    """
    def __init__(self, timeout=15, n_parallel=multiprocessing.cpu_count(), build_func="default"):
        # Record the chosen build function in the BuildFunc class variables:
        # only the *name* crosses the FFI boundary; `local_builder_build`
        # later looks the callable back up from BuildFunc.
        if build_func == "default":
            BuildFunc.name = "default"
            BuildFunc.build_func = tar.tar
        elif build_func == "ndk":
            BuildFunc.name = "ndk"
            BuildFunc.build_func = ndk.create_shared
        elif callable(build_func):
            BuildFunc.name = "custom"
            BuildFunc.build_func = build_func
        else:
            # Fix: the original `"Invalid build_func" + build_func` raised a
            # masking TypeError whenever build_func was not a str — which is
            # exactly the kind of value that reaches this branch.
            raise ValueError(f"Invalid build_func: {build_func!r}")
        self.__init_handle_by_constructor__(
            _ffi_api.LocalBuilder, timeout, n_parallel, BuildFunc.name
        )
@tvm._ffi.register_object("auto_scheduler.LocalRunner")
class LocalRunner(ProgramRunner):
    """LocalRunner that uses local CPU/GPU to measures the time cost of programs.

    Parameters
    ----------
    timeout : int = 10
        The timeout limit (in second) for each run.
        This is used in a wrapper of the multiprocessing.Process.join().
    number : int = 3
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int = 1
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first "1" is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms : int = 100
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval : float = 0.0
        The cool down interval between two measurements in seconds.
    enable_cpu_cache_flush: bool = False
        Whether to flush cache on CPU between repeated measurements.
        Flushing cache can make the measured latency of one operator closer to
        its actual latency during end-to-end inference.
        To make this option effective, the argument `number` should also be set to 1.
        This only has effect on CPU tasks.
    device: int = 0
        Which device to run on if multiple are available.
    """
    def __init__(
        self,
        timeout=10,
        number=3,
        repeat=1,
        min_repeat_ms=100,
        cooldown_interval=0.0,
        enable_cpu_cache_flush=False,
        device=0,
    ):
        if enable_cpu_cache_flush:
            # Cache flushing is only meaningful with a single run per repeat
            # (see the docstring), so force number=1 and disable the
            # min_repeat_ms-based auto-adjustment of `number`.
            number = 1
            min_repeat_ms = 0
        self.__init_handle_by_constructor__(
            _ffi_api.LocalRunner,
            timeout,
            number,
            repeat,
            min_repeat_ms,
            cooldown_interval,
            enable_cpu_cache_flush,
            device,
        )
@tvm._ffi.register_object("auto_scheduler.RPCRunner")
class RPCRunner(ProgramRunner):
    """RPCRunner that uses RPC call to measures the time cost of programs on remote devices.
    Or sometime we may need to use RPC even in local running to insulate the thread environment.
    (e.g. running CUDA programs)

    Parameters
    ----------
    key : str
        The key of the device registered in the RPC tracker.
    host : str
        The host address of the RPC Tracker.
    port : int
        The port of RPC Tracker.
    priority : int = 1
        The priority of this run request, larger is more prior.
    n_parallel : int = 1
        The number of tasks run in parallel.
    timeout : int = 10
        The timeout limit (in second) for each run.
        This is used in a wrapper of the multiprocessing.Process.join().
    number : int = 3
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int = 1
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first "1" is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms : int = 100
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval : float = 0.0
        The cool down interval between two measurements in seconds.
    enable_cpu_cache_flush: bool = False
        Whether to flush cache on CPU between repeated measurements.
        Flushing cache can make the measured latency of one operator closer to
        its actual latency during end-to-end inference.
        To make this option effective, the argument `number` should also be set to 1.
        This only has effect on CPU tasks.
    device: int = 0
        Which device to run on if multiple are available.
    """
    def __init__(
        self,
        key,
        host,
        port,
        priority=1,
        n_parallel=1,
        timeout=10,
        number=3,
        repeat=1,
        min_repeat_ms=100,
        cooldown_interval=0.0,
        enable_cpu_cache_flush=False,
        device=0,
    ):
        self.__init_handle_by_constructor__(
            _ffi_api.RPCRunner,
            key,
            host,
            port,
            priority,
            n_parallel,
            timeout,
            number,
            repeat,
            min_repeat_ms,
            cooldown_interval,
            enable_cpu_cache_flush,
            device,
        )
        # Fail fast: verify a device matching `key` can actually be obtained
        # from the tracker before any tuning starts.
        if check_remote(key, host, port, priority, timeout):
            print("Get devices for measurement successfully!")
        else:
            raise RuntimeError(
                "Cannot get remote devices from the tracker. "
                "Please check the status of tracker by "
                "'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' "
                "and make sure you have free devices on the queue status."
            )
class LocalRPCMeasureContext:
    """A context wrapper for running RPCRunner locally.
    This will launch a local RPC Tracker and local RPC Server.

    Parameters
    ----------
    priority : int = 1
        The priority of this run request, larger is more prior.
    n_parallel : int = 1
        The number of tasks run in parallel.
    timeout : int = 10
        The timeout limit (in second) for each run.
        This is used in a wrapper of the multiprocessing.Process.join().
    number : int = 3
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int = 1
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first "1" is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms : int = 0
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval : float = 0.0
        The cool down interval between two measurements in seconds.
    enable_cpu_cache_flush: bool = False
        Whether to flush cache on CPU between repeated measurements.
        Flushing cache can make the measured latency of one operator closer to
        its actual latency during end-to-end inference.
        To make this option effective, the argument `number` should also be set to 1.
        This only has effect on CPU tasks.
    device: int = 0
        Which device to run on if multiple are available.
    """
    def __init__(
        self,
        priority=1,
        n_parallel=1,
        timeout=10,
        number=3,
        repeat=1,
        min_repeat_ms=0,
        cooldown_interval=0.0,
        enable_cpu_cache_flush=False,
        device=0,
    ):
        # pylint: disable=import-outside-toplevel
        from tvm.rpc.server import Server
        from tvm.rpc.tracker import Tracker
        # Start a local tracker on the first free port in [9000, 10000).
        self.tracker = Tracker(port=9000, port_end=10000, silent=True)
        # Embed the tracker port in the key so concurrent contexts on the
        # same machine register distinct device keys.
        device_key = f"$local$device${self.tracker.port}"
        self.server = Server(
            port=self.tracker.port,
            port_end=10000,
            key=device_key,
            silent=True,
            tracker_addr=("127.0.0.1", self.tracker.port),
        )
        self.runner = RPCRunner(
            device_key,
            "127.0.0.1",
            self.tracker.port,
            priority,
            n_parallel,
            timeout,
            number,
            repeat,
            min_repeat_ms,
            cooldown_interval,
            enable_cpu_cache_flush,
            device,
        )
        # Wait for the processes to start
        time.sleep(0.5)
    def __del__(self):
        # Close the tracker and server before exit
        self.tracker.terminate()
        self.server.terminate()
        time.sleep(0.5)
class MeasureErrorNo(object):
    """Error type for MeasureResult.

    Integer error codes stored in MeasureResult.error_no; NO_ERROR (0)
    marks a successful measurement.
    """

    NO_ERROR = 0  # No error
    INSTANTIATION_ERROR = 1  # Errors happen when apply transform steps from init state
    COMPILE_HOST = 2  # Errors happen when compiling code on host (e.g., tvm.build)
    # Errors happen when compiling code on device (e.g. OpenCL JIT on the device)
    COMPILE_DEVICE = 3
    RUNTIME_DEVICE = 4  # Errors happen when run program on device
    WRONG_ANSWER = 5  # Answer is wrong when compared to a reference output
    BUILD_TIMEOUT = 6  # Timeout during compilation
    RUN_TIMEOUT = 7  # Timeout during run
    UNKNOWN_ERROR = 8  # Unknown error
def _local_build_worker(inp_serialized, build_func, verbose):
    """Build one serialized MeasureInput into a shared library.

    Runs inside a worker process. Returns the fields of a BuildResult as a
    tuple: (filename, args, error_no, error_msg, time_cost).
    """
    tic = time.time()
    inp = MeasureInput.deserialize(inp_serialized)
    task = inp.task
    task.target, task.target_host = Target.canon_target_and_host(task.target, task.target_host)
    error_no = MeasureErrorNo.NO_ERROR
    error_msg = None
    args = []
    # Step 1: apply the schedule transform steps; failures here are
    # instantiation errors, not compile errors.
    try:
        sch, args = task.compute_dag.apply_steps_from_state(
            inp.state, layout_rewrite=task.layout_rewrite_option
        )
    # pylint: disable=broad-except
    except Exception:
        error_no = MeasureErrorNo.INSTANTIATION_ERROR
        error_msg = make_traceback_info()
    # Step 2: compile and export to a temp directory. NOTE: the directory is
    # deliberately not removed here — the runner deletes it after measuring.
    if error_no == 0:
        dirname = tempfile.mkdtemp()
        filename = os.path.join(dirname, "tmp_func." + build_func.output_format)
        try:
            with transform.PassContext().current():
                func = build_module.build(sch, args, target=task.target)
            func.export_library(filename, build_func)
        # pylint: disable=broad-except
        except Exception:
            error_no = MeasureErrorNo.COMPILE_HOST
            error_msg = make_traceback_info()
    else:
        filename = ""
    if verbose >= 1:
        if error_no == MeasureErrorNo.NO_ERROR:
            print(".", end="", flush=True)
        else:
            print(".E", end="", flush=True)  # Build error
    return filename, args, error_no, error_msg, time.time() - tic
def local_build_worker(args):
    """
    Build function of LocalBuilder to be ran in the Builder thread pool.

    Parameters
    ----------
    args: Tuple[MeasureInput, callable, int]
        inputs, build-func, verbose args passed to local_builder_build

    Returns
    -------
    res : BuildResult
        The build result of this Builder thread.
    """
    serialized_input, builder_func, verbosity = args
    return _local_build_worker(serialized_input, builder_func, verbosity)
@tvm._ffi.register_func("auto_scheduler.local_builder.build")
def local_builder_build(inputs, timeout, n_parallel, build_func="default", verbose=1):
    """
    Build function of LocalBuilder to build the MeasureInputs to runnable modules.

    Parameters
    ----------
    inputs : List[MeasureInput]
        The MeasureInputs to be built.
    timeout : int
        The timeout limit (in second) for each build thread.
        This is used in a wrapper of the multiprocessing.Process.join().
    n_parallel : int
        Number of threads used to build in parallel.
    build_func : str = 'default'
        The name of build function to process the built module.
    verbose: int = 1
        Verbosity level. 0 for silent, 1 to output information during program building.

    Returns
    -------
    res : List[BuildResult]
        The build results of these MeasureInputs.
    """
    # Only the name crossed the FFI boundary; the callable must already be
    # stored in BuildFunc (by LocalBuilder.__init__).
    assert build_func == BuildFunc.name, (
        "BuildFunc.name: " + BuildFunc.name + ", but args is: " + build_func
    )
    executor = PopenPoolExecutor(
        n_parallel, timeout, reset_global_scope, (AutotvmGlobalScope.current,)
    )
    # Inputs are serialized so they survive the pickle to worker processes.
    tuple_res = executor.map_with_error_catching(
        local_build_worker, [(i.serialize(), BuildFunc.build_func, verbose) for i in inputs]
    )
    # Translate each worker status into a BuildResult.
    results = []
    for res in tuple_res:
        if res.status == StatusKind.COMPLETE:
            results.append(BuildResult(*res.value))
        elif res.status == StatusKind.TIMEOUT:
            if verbose >= 1:
                print(".T", end="", flush=True)  # Build timeout
            results.append(BuildResult(None, [], MeasureErrorNo.BUILD_TIMEOUT, None, timeout))
        elif res.status == StatusKind.EXCEPTION:
            if verbose >= 1:
                print(".E", end="", flush=True)  # Build error
            results.append(
                BuildResult(None, [], MeasureErrorNo.COMPILE_HOST, repr(res.value), timeout)
            )
        else:
            raise ValueError("Result status is not expected. Unreachable branch")
    return results
# Global registry mapping check-function name -> callable. Populated via
# `register_task_input_check_func` and consulted by `prepare_input_map`.
TASK_INPUT_CHECK_FUNC_REGISTRY = {}
def register_task_input_check_func(func_name, f=None, override=False):
    """Register a function that checks the input buffer map.

    The input function should take a list of Tensor which indicate the
    Input/output Tensor of a TVM subgraph and return a Map from the input
    Tensor to its buffer name.

    Parameters
    ----------
    func_name : Union[Function, str]
        The check function that returns the compute declaration Tensors or its function name.
    f : Optional[Function]
        The check function to be registered.
    override : boolean = False
        Whether to override existing entry.

    Examples
    --------
    .. code-block:: python

      @auto_scheduler.register_task_input_check_func
      def check_task_input_by_placeholder_name(args : List[Tensor]):
          tensor_input_map = {}
          for arg in args:
              if isinstance(arg.op, tvm.te.PlaceholderOp):
                  if arg.op.name != "placeholder":
                      tensor_input_map[arg] = arg.op.name
          return tensor_input_map
    """
    # Bare-decorator usage: the decorated callable arrives as `func_name`,
    # and the registry key is derived from the function itself.
    if callable(func_name):
        f = func_name
        func_name = get_func_name(f)
    if not isinstance(func_name, str):
        raise ValueError("expect string function name")

    def _do_register(check_func):
        """internal register function"""
        if func_name in TASK_INPUT_CHECK_FUNC_REGISTRY and not override:
            raise RuntimeError(f"{func_name} has been registered already")
        TASK_INPUT_CHECK_FUNC_REGISTRY[func_name] = check_func
        return check_func

    # Direct call with a function registers immediately; otherwise return
    # the registrar for decorator-with-arguments usage.
    return _do_register(f) if f else _do_register
def prepare_input_map(args, workload_key=None):
    """This function deals with special task inputs. Map the input Tensor of a TVM subgraph
    to a specific buffer name in the global buffer map.

    Parameters
    ----------
    args : List[Tensor]
        Input/output Tensor of a TVM subgraph.
    workload_key: Optional[str]
        The workload for which these inputs are being prepared. This
        is used to identify if an input is being provided by (see
        `register_task_input_buffer`).

    Returns
    -------
    Dict[Tensor, str] :
        Map from the input Tensor to its buffer name.

    Notes
    -----
    The buffer name is specially designed, and these buffer should be provided in
    `SearchTask(..., task_inputs={...})`.
    """
    # pylint: disable=import-outside-toplevel
    global TASK_INPUT_CHECK_FUNC_REGISTRY
    from .search_task import TASK_INPUT_BUFFER_TABLE
    # A dict that maps the input tensor arg to a buffer name
    tensor_input_map = {}
    # Case 0: Check placeholder name
    # A placeholder counts only if a buffer with the same name was registered
    # for this workload in TASK_INPUT_BUFFER_TABLE.
    for arg in args:
        if isinstance(arg.op, tvm.te.PlaceholderOp):
            if (
                workload_key
                and workload_key in TASK_INPUT_BUFFER_TABLE
                and arg.op.name in TASK_INPUT_BUFFER_TABLE[workload_key]
            ):
                tensor_input_map[arg] = arg.op.name
    # Case 1: Check specific tensor inputs
    # Registered check functions may add (or overwrite) more entries.
    for func_name in TASK_INPUT_CHECK_FUNC_REGISTRY:
        func = TASK_INPUT_CHECK_FUNC_REGISTRY[func_name]
        tensor_input_map.update(func(args))
    return tensor_input_map
def prepare_runner_args(inp, build_res):
    """This function prepares the pre-defined arguments in `TASK_INPUT_BUFFER_TABLE` for local/rpc
    runner in main process

    Parameters
    ----------
    inp : MeasureInput
        Measure input to be measured.
    build_res : BuildResult
        Build result to be measured.

    Returns
    -------
    List[Optional[numpy.ndarray]] :
        List of arguments for running the program. If the argument does not have a pre-defined input
        buffer, None is added to the list as a placeholder.
    """
    # pylint: disable=import-outside-toplevel
    from .search_task import get_task_input_buffer  # lazily import to avoid recursive dependency
    task_input_names = inp.task.task_input_names
    tensor_input_map = prepare_input_map(build_res.args, inp.task.workload_key)
    # If the task declares no special inputs, ignore any detected buffers.
    if not task_input_names:
        tensor_input_map = {}
    args = []
    task_inputs_count = 0
    for arg in build_res.args:
        if arg in tensor_input_map:
            tensor_name = tensor_input_map[arg]
            if tensor_name in task_input_names:
                task_input_buffer = get_task_input_buffer(inp.task.workload_key, tensor_name)
                # convert tvm.NDArray to picklable numpy.ndarray
                args.append(task_input_buffer.numpy())
                task_inputs_count += 1
            else:
                raise ValueError(
                    f"{tensor_name} not found in task_inputs, "
                    f"should provide with `SearchTask(..., task_inputs={{...}})`"
                )
        else:
            # No pre-defined buffer: the runner fills this slot with random data.
            args.append(None)
    if task_inputs_count != len(task_input_names):
        raise RuntimeError("task_inputs not fully matched, check if there's any unexpected error")
    return args
def _timed_eval_func(
    inp_serialized,
    build_res,
    args,
    number,
    repeat,
    min_repeat_ms,
    cooldown_interval,
    enable_cpu_cache_flush,
    verbose,
    device,
):
    """Measure one BuildResult on a local device (runs in a worker process).

    Returns the fields of a MeasureResult as a tuple:
    (costs, error_no, error_msg, all_cost, timestamp).
    """
    inp = MeasureInput.deserialize(inp_serialized)
    tic = time.time()
    error_no = 0
    error_msg = None
    # Phase 1: load the built module and create the time evaluator.
    try:
        func = module.load_module(build_res.filename)
        dev = ndarray.device(str(inp.task.target), device)
        # Limitation:
        # We can not get PackFunction directly in the remote mode as it is wrapped
        # under the std::function. We could lift the restriction later once we fold
        # the PackedFunc as an object. Currently, we pass function name to work
        # around it.
        f_prepare = "cache_flush_cpu_non_first_arg" if enable_cpu_cache_flush else ""
        time_f = func.time_evaluator(
            func.entry_name,
            dev,
            number=number,
            repeat=repeat,
            min_repeat_ms=min_repeat_ms,
            f_preproc=f_prepare,
        )
    # pylint: disable=broad-except
    except Exception:
        costs = (MAX_FLOAT,)
        error_no = MeasureErrorNo.COMPILE_DEVICE
        error_msg = make_traceback_info()
    # Phase 2: prepare arguments (random data for None placeholders) and run.
    if error_no == 0:
        try:
            random_fill = tvm.get_global_func("tvm.contrib.random.random_fill", True)
            assert random_fill, "Please make sure USE_RANDOM is ON in the config.cmake"
            assert len(args) == len(build_res.args)
            loc_args = []
            # pylint: disable=consider-using-enumerate
            for idx in range(len(args)):
                if args[idx] is None:
                    build_res_arg = build_res.args[idx]
                    empty_array = ndarray.empty(
                        get_const_tuple(build_res_arg.shape), build_res_arg.dtype, dev
                    )
                    random_fill(empty_array)
                    loc_args.append(empty_array)
                else:
                    loc_args.append(ndarray.array(args[idx], dev))
            dev.sync()
            costs = time_f(*loc_args).results
        # pylint: disable=broad-except
        except Exception:
            costs = (MAX_FLOAT,)
            error_no = MeasureErrorNo.RUNTIME_DEVICE
            error_msg = make_traceback_info()
    # The build worker left its temp directory for us; clean it up now.
    shutil.rmtree(os.path.dirname(build_res.filename))
    toc = time.time()
    time.sleep(cooldown_interval)
    if verbose >= 1:
        if error_no == MeasureErrorNo.NO_ERROR:
            print("*", end="", flush=True)
        else:
            print("*E", end="", flush=True)  # Run error
    return costs, error_no, error_msg, toc - tic + build_res.time_cost, toc
@tvm._ffi.register_func("auto_scheduler.local_runner.run")
def local_run(
    inputs,
    build_results,
    timeout=10,
    number=3,
    repeat=1,
    min_repeat_ms=0,
    cooldown_interval=0,
    enable_cpu_cache_flush=False,
    verbose=1,
    device=0,
):
    """
    Run function of LocalRunner to test the performance of the input BuildResults.

    Parameters
    ----------
    inputs : List[MeasureInput]
        The MeasureInputs to be measured.
    build_results : List[BuildResult]
        The BuildResults to be measured.
    timeout : int = 10
        The timeout limit (in second) for each run.
        This is used in a wrapper of the multiprocessing.Process.join().
    number : int = 3
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int = 1
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first "1" is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms : int = 0
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval : float = 0.0
        The cool down interval between two measurements in seconds.
    enable_cpu_cache_flush: bool = False
        Whether to flush cache on CPU between repeated measurements.
        Flushing cache can make the measured latency of one operator closer to
        its actual latency during end-to-end inference.
        To make this option effective, the argument `number` should also be set to 1.
        This only has effect on CPU tasks.
    verbose: int = 1
        Verbosity level. 0 for silent, 1 to output information during program measuring.
    device: int = 0
        Which device to run on if multiple are available.

    Returns
    -------
    res : List[MeasureResult]
        The measure results of these MeasureInputs.
    """
    measure_results = []
    assert len(inputs) == len(build_results), "Measure input size should be equal to build results"
    worker = PopenWorker()
    for inp, build_res in zip(inputs, build_results):
        if build_res.error_no != 0:
            # Build already failed: propagate the error without running.
            res = (
                (MAX_FLOAT,),
                build_res.error_no,
                build_res.error_msg,
                build_res.time_cost,
                time.time(),
            )
        else:
            args = prepare_runner_args(inp, build_res)
            # Run the measurement in the worker process so a hang can be
            # interrupted after `timeout` seconds.
            res = call_func_with_timeout(
                worker,
                timeout,
                _timed_eval_func,
                args=(
                    inp.serialize(),
                    build_res,
                    args,
                    number,
                    repeat,
                    min_repeat_ms,
                    cooldown_interval,
                    enable_cpu_cache_flush,
                    verbose,
                    device,
                ),
            )
            if isinstance(res, TimeoutError):
                if verbose >= 1:
                    print("*T", end="", flush=True)  # Run timeout
                res = (
                    (MAX_FLOAT,),
                    MeasureErrorNo.RUN_TIMEOUT,
                    None,
                    build_res.time_cost + timeout,
                    time.time(),
                )
            elif isinstance(res, Exception):
                if verbose >= 1:
                    print("*E", end="", flush=True)  # Run error
                res = (
                    (MAX_FLOAT,),
                    MeasureErrorNo.RUNTIME_DEVICE,
                    str(res),
                    build_res.time_cost + timeout,
                    time.time(),
                )
        measure_results.append(MeasureResult(*res))
    if verbose >= 1:
        print("", flush=True)
    return measure_results
def _rpc_run(
    inp_serialized,
    build_res,
    args,
    key,
    host,
    port,
    priority,
    timeout,
    number,
    repeat,
    min_repeat_ms,
    cooldown_interval,
    enable_cpu_cache_flush,
    verbose,
    device,
):
    """Measure one BuildResult on a device obtained through the RPC tracker.

    Returns the fields of a MeasureResult as a tuple:
    (costs, error_no, error_msg, all_cost, timestamp).
    """
    inp = MeasureInput.deserialize(inp_serialized)
    tic = time.time()
    error_no = 0
    error_msg = None
    # Phase 1: connect, upload the built module, and create the evaluator.
    try:
        # upload built module
        remote = request_remote(key, host, port, priority, timeout)
        remote.upload(build_res.filename)
        func = remote.load_module(os.path.split(build_res.filename)[1])
        dev = remote.device(str(inp.task.target), device)
        # Limitation:
        # We can not get PackFunction directly in the remote mode as it is wrapped
        # under the std::function. We could lift the restriction later once we fold
        # the PackedFunc as an object. Currently, we pass function name to work
        # around it.
        f_prepare = "cache_flush_cpu_non_first_arg" if enable_cpu_cache_flush else ""
        time_f = func.time_evaluator(
            func.entry_name,
            dev,
            number=number,
            repeat=repeat,
            min_repeat_ms=min_repeat_ms,
            f_preproc=f_prepare,
        )
    # pylint: disable=broad-except
    except Exception:
        costs = (MAX_FLOAT,)
        error_no = MeasureErrorNo.COMPILE_DEVICE
        error_msg = make_traceback_info()
    # Phase 2: prepare arguments, verify the kernel runs, then time it.
    if error_no == 0:
        # Fix: bind `stream` before the try block. Previously, if
        # dev.create_raw_stream() itself raised, the except branch referenced
        # an unbound `stream` and the resulting NameError masked the real error.
        stream = None
        try:
            stream = dev.create_raw_stream()
            dev.set_raw_stream(stream)
            random_fill = remote.get_function("tvm.contrib.random.random_fill")
            assert (
                random_fill
            ), "Please make sure USE_RANDOM is ON in the config.cmake on the remote devices"
            assert len(args) == len(build_res.args)
            loc_args = []
            # pylint: disable=consider-using-enumerate
            for idx in range(len(args)):
                if args[idx] is None:
                    build_res_arg = build_res.args[idx]
                    empty_array = ndarray.empty(
                        get_const_tuple(build_res_arg.shape), build_res_arg.dtype, dev
                    )
                    random_fill(empty_array)
                    loc_args.append(empty_array)
                else:
                    loc_args.append(ndarray.array(args[idx], dev))
            dev.sync()
            # First run for check that the kernel is correct
            func.entry_func(*loc_args)
            dev.sync()
            costs = time_f(*loc_args).results
            # clean up remote files
            remote.remove(build_res.filename)
            remote.remove(os.path.splitext(build_res.filename)[0] + ".so")
            remote.remove("")
            dev.free_raw_stream(stream)
        # pylint: disable=broad-except
        except Exception:
            # Only release the stream if it was actually created.
            if stream is not None:
                dev.free_raw_stream(stream)
            costs = (MAX_FLOAT,)
            error_no = MeasureErrorNo.RUNTIME_DEVICE
            error_msg = make_traceback_info()
    # The build worker left its temp directory for us; clean it up now.
    shutil.rmtree(os.path.dirname(build_res.filename))
    toc = time.time()
    time.sleep(cooldown_interval)
    if verbose >= 1:
        if error_no == MeasureErrorNo.NO_ERROR:
            print("*", end="")
        else:
            print("*E", end="")  # Run error
    return costs, error_no, error_msg, toc - tic + build_res.time_cost, toc
def _rpc_run_worker(args):
    """Function to be ran in the RPCRunner thread pool.

    Parameters
    ----------
    args : Tuple[MeasureInput, BuildResult, ...]
        Single input and build result plus the rest of the arguments to `rpc_runner_run`.

    Returns
    -------
    res : MeasureResult
        The measure result of this Runner thread.
    """
    # Only three of the fifteen packed arguments are needed up-front.
    (_, build_res, _, _, _, _, _, timeout, _, _, _, _, _, verbose, _) = args
    if build_res.error_no != MeasureErrorNo.NO_ERROR:
        # The build already failed: report it without attempting a run.
        return (
            (MAX_FLOAT,),
            build_res.error_no,
            build_res.error_msg,
            build_res.time_cost,
            time.time(),
        )
    try:
        return _rpc_run(*args)
    # pylint: disable=broad-except
    except Exception:
        if verbose >= 1:
            print("*E", end="")  # Run error
        return (
            (MAX_FLOAT,),
            MeasureErrorNo.RUNTIME_DEVICE,
            make_traceback_info(),
            build_res.time_cost + timeout,
            time.time(),
        )
@tvm._ffi.register_func("auto_scheduler.rpc_runner.run")
def rpc_runner_run(
    inputs,
    build_results,
    key,
    host,
    port,
    priority=1,
    n_parallel=1,
    timeout=10,
    number=3,
    repeat=1,
    min_repeat_ms=0,
    cooldown_interval=0.0,
    enable_cpu_cache_flush=False,
    verbose=1,
    device=0,
):
    """Run function of RPCRunner to test the performance of the input BuildResults.
    Parameters
    ----------
    inputs : List[MeasureInput]
        The MeasureInputs to be measured.
    build_results : List[BuildResult]
        The BuildResults to be measured.
    key : str
        The key of the device registered in the RPC tracker.
    host : str
        The host address of the RPC Tracker.
    port : int
        The port of RPC Tracker.
    priority : int = 1
        The priority of this run request, larger is more prior.
    n_parallel : int = 1
        The number of tasks run in parallel.
    timeout : int = 10
        The timeout limit (in second) for each run.
        This is used in a wrapper of the multiprocessing.Process.join().
    number : int = 3
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int = 1
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first "1" is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms : int = 0
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval : float = 0.0
        The cool down interval between two measurements in seconds.
    enable_cpu_cache_flush: bool = False
        Whether to flush cache on CPU between repeated measurements.
        Flushing cache can make the measured latency of one operator closer to
        its actual latency during end-to-end inference.
        To make this option effective, the argument `number` should also be set to 1.
        This only has an effect on CPU tasks.
    verbose: int = 1
        Verbosity level. 0 for silent, 1 to output information during program measuring.
    device: int = 0
        Which device to run on if multiple are available.
    Returns
    -------
    res : List[MeasureResult]
        The measure results of these MeasureInputs, in the same order as `inputs`.
    """
    assert len(inputs) == len(build_results), "Measure input size should be equal to build results"
    # The workers only orchestrate RPC calls, so little CPU work happens here.
    # NOTE(review): PopenPoolExecutor is subprocess-based even though the
    # original comment mentioned threads -- confirm which is intended.
    executor = PopenPoolExecutor(n_parallel)
    tuple_res = executor.map_with_error_catching(
        _rpc_run_worker,
        [
            (
                inp.serialize(),  # serialized so it can cross the process boundary
                build_res,
                prepare_runner_args(inp, build_res),
                key,
                host,
                port,
                priority,
                timeout,
                number,
                repeat,
                min_repeat_ms,
                cooldown_interval,
                enable_cpu_cache_flush,
                verbose,
                device,
            )
            for inp, build_res in zip(inputs, build_results)
        ],
    )
    # Convert each worker outcome into a MeasureResult; a worker that never
    # returned is reported as a run timeout.
    results = []
    for i, res in enumerate(tuple_res):
        if res.status == StatusKind.COMPLETE:
            results.append(MeasureResult(*res.value))
        else:
            assert res.status == StatusKind.TIMEOUT
            if verbose >= 1:
                print("*T", end="")  # Run timeout
            build_res = build_results[i]
            results.append(
                MeasureResult(
                    (MAX_FLOAT,),
                    MeasureErrorNo.RUN_TIMEOUT,
                    None,
                    build_res.time_cost + timeout,
                    time.time(),
                )
            )
    if verbose >= 1:
        print("")
    return results
| 45,878 | 33.366292 | 100 | py |
tvm | tvm-main/python/tvm/auto_scheduler/search_task.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" The definiton of SearchTask """
import json
import os
import logging
import numpy as np
import tvm._ffi
from tvm.runtime import Object, ndarray
from tvm.driver.build_module import build
from tvm.target import Target
from .measure import LocalBuilder, LocalRunner
from .measure_record import load_best_record
from .workload_registry import make_workload_key
from .compute_dag import ComputeDAG, LayoutRewriteOption
from .cost_model import XGBModel
from .search_policy import SketchPolicy
from .workload_registry import WORKLOAD_FUNC_REGISTRY, register_workload_tensors
from . import _ffi_api
# pylint: disable=invalid-name
logger = logging.getLogger("auto_scheduler")
@tvm._ffi.register_object("auto_scheduler.HardwareParams")
class HardwareParams(Object):
    """The parameters of target hardware used to guide the search policy.
    Every parameter left as None is filled in from the current machine's
    defaults, provided a ``target`` is specified.
    TODO(jcf94): This is considered to be merged with the new Target specification:
    https://discuss.tvm.apache.org/t/rfc-tvm-target-specification/6844
    Parameters
    ----------
    num_cores : int, optional
        The number of device cores.
    vector_unit_bytes : int, optional
        The width of vector units in bytes.
    cache_line_bytes : int, optional
        The size of a cache line in bytes.
    max_shared_memory_per_block : int, optional
        The max shared memory per block in bytes.
    max_local_memory_per_block : int, optional
        The max local memory per block in bytes.
    max_threads_per_block : int, optional
        The max number of threads per block.
    max_vthread_extent : int, optional
        The max vthread extent.
    warp_size : int, optional
        The number of threads in a warp.
    target : str or Target, optional
        The compilation target. Used to look up default values when provided.
    target_host : str or Target, optional
        The compilation target host. Used to look up default values when provided.
    """
    def __init__(
        self,
        num_cores=None,
        vector_unit_bytes=None,
        cache_line_bytes=None,
        max_shared_memory_per_block=None,
        max_local_memory_per_block=None,
        max_threads_per_block=None,
        max_vthread_extent=None,
        warp_size=None,
        target=None,
        target_host=None,
    ):
        if target is not None:
            # Canonicalize string targets, then query the backend for this
            # machine's defaults and substitute them for any missing field.
            if isinstance(target, str):
                target = tvm.target.Target(target)
            if isinstance(target_host, str):
                target_host = tvm.target.Target(target_host)
            defaults = _ffi_api.GetDefaultHardwareParams(target, target_host)

            def _fill(value, field):
                # Keep an explicitly supplied value; otherwise take the default.
                return getattr(defaults, field) if value is None else value

            num_cores = _fill(num_cores, "num_cores")
            vector_unit_bytes = _fill(vector_unit_bytes, "vector_unit_bytes")
            cache_line_bytes = _fill(cache_line_bytes, "cache_line_bytes")
            max_shared_memory_per_block = _fill(
                max_shared_memory_per_block, "max_shared_memory_per_block"
            )
            max_local_memory_per_block = _fill(
                max_local_memory_per_block, "max_local_memory_per_block"
            )
            max_threads_per_block = _fill(max_threads_per_block, "max_threads_per_block")
            max_vthread_extent = _fill(max_vthread_extent, "max_vthread_extent")
            warp_size = _fill(warp_size, "warp_size")
        self.__init_handle_by_constructor__(
            _ffi_api.HardwareParams,
            num_cores,
            vector_unit_bytes,
            cache_line_bytes,
            max_shared_memory_per_block,
            max_local_memory_per_block,
            max_threads_per_block,
            max_vthread_extent,
            warp_size,
        )
    def __str__(self):
        """Human-readable dump of all hardware parameters."""
        lines = [
            "HardwareParams:",
            f"  num_cores: {self.num_cores}",
            f"  vector_unit_bytes: {self.vector_unit_bytes}",
            f"  cache_line_bytes: {self.cache_line_bytes}",
            f"  max_shared_memory_per_block: {self.max_shared_memory_per_block}",
            f"  max_local_memory_per_block: {self.max_local_memory_per_block}",
            f"  max_threads_per_block: {self.max_threads_per_block}",
            f"  max_vthread_extent: {self.max_vthread_extent}",
            f"  warp_size: {self.warp_size}",
        ]
        return "\n".join(lines) + "\n"
@tvm._ffi.register_object("auto_scheduler.TuningOptions")
class TuningOptions(Object):
    """This controls the options of performance tuning.
    Parameters
    ----------
    num_measure_trials: int = 0
        The number of measurement trials.
        The search policy measures `num_measure_trials` schedules in total and returns the best one
        among them.
        With `num_measure_trials` == 0, the policy will do the schedule search but won't involve
        measurement. This can be used to get a runnable schedule quickly without auto-tuning.
    early_stopping: Optional[int]
        Stop the tuning early if getting no improvement after n measurements.
    num_measures_per_round: int = 64
        The number of schedules to be measured at each search round.
        The whole schedule search process will try a total number of `num_measure_trials` in several
        rounds.
    verbose: int = 1
        Verbosity level. 0 for silent, 1 to output information during schedule search.
    builder: Union[ProgramBuilder, str] = 'local'
        ProgramBuilder which builds the program.
    runner: Union[ProgramRunner, str] = 'local'
        ProgramRunner which runs the program and measures time costs.
    measure_callbacks: Optional[List[MeasureCallback]]
        Callback functions called after each measurement.
        Candidates:
          - auto_scheduler.RecordToFile
    Raises
    ------
    ValueError
        If `builder`/`runner` is neither a recognized string nor an instance
        of ProgramBuilder/ProgramRunner.
    """
    def __init__(
        self,
        num_measure_trials=0,
        early_stopping=None,
        num_measures_per_round=64,
        verbose=1,
        builder="local",
        runner="local",
        measure_callbacks=None,
    ):
        if isinstance(builder, str):
            if builder == "local":
                builder = LocalBuilder()
            else:
                raise ValueError("Invalid builder: " + builder)
        elif not isinstance(builder, tvm.auto_scheduler.measure.ProgramBuilder):
            # Bug fix: the old `"..." + builder` concatenation raised a
            # TypeError (not the intended ValueError) for non-string builders.
            raise ValueError(
                f"Invalid builder: {builder} . TuningOptions expects a ProgramBuilder or string."
            )
        if isinstance(runner, str):
            if runner == "local":
                runner = LocalRunner()
            else:
                raise ValueError("Invalid runner: " + runner)
        elif not isinstance(runner, tvm.auto_scheduler.measure.ProgramRunner):
            # Same fix as for `builder`: format instead of concatenating.
            raise ValueError(
                f"Invalid runner: {runner} . TuningOptions expects a ProgramRunner or string."
            )
        self.__init_handle_by_constructor__(
            _ffi_api.TuningOptions,
            num_measure_trials,
            # NOTE(review): `or -1` maps early_stopping == 0 to -1 (disabled);
            # confirm 0 is not a meaningful value before changing.
            early_stopping or -1,
            num_measures_per_round,
            verbose,
            builder,
            runner,
            measure_callbacks,
        )
# The map stores specially registered buffers for measurement.
# This can be used for sparse workloads when we cannot use random tensors for measurement.
# Populated lazily by `register_task_input_buffer` / `get_task_input_buffer`:
# {
#     "workload_key_0": {
#         "task_input_0": Tensor(...),
#         "task_input_1": Tensor(...)
#     },
#     "workload_key_1": {
#         "task_input_2": Tensor(...),
#         "task_input_3": Tensor(...)
#     },
#     ...
# }
TASK_INPUT_BUFFER_TABLE = {}
def _save_buffer_to_file(buffer_name, buffer_data):
"""Save the current Tensor buffer to a numpy file.
File name will be: {buffer_name}.{buffer_shape}_{buffer_data_type}.npy
"""
np_data = buffer_data.numpy()
buffer_name += "."
for i in np_data.shape:
buffer_name += f"{i}_"
buffer_name += f"{np_data.dtype}.npy"
np_data.tofile(buffer_name, " ")
def _try_load_buffer_from_file(buffer_name):
"""Try to load buffer from a numpy file, if not found, return None.
File name has a same format as `_save_buffer_to_file`.
"""
filelist = os.listdir()
for file in filelist:
if file.startswith(buffer_name + "."):
meta_info = file.split(".")[-2].split("_")
shape = [int(i) for i in meta_info[:-1]]
dtype = meta_info[-1]
buffer_data = np.fromfile(file, dtype=dtype, sep=" ")
buffer_data = buffer_data.reshape(shape)
return ndarray.array(buffer_data)
return None
def register_task_input_buffer(
    workload_key, input_name, input_data, overwrite=False, save_to_file=False
):
    """Register special buffer for measurement.
    Parameters
    ----------
    workload_key : str
        The workload key of the SearchTask.
    input_name : str
        The name of input buffer.
    input_data : tvm.nd.NDArray
        The input Tensor data.
    overwrite : bool = False
        Whether to overwrite the data if a name has already registered.
    save_to_file : bool = False
        Whether to save the data to a local file as well. This can be reused to resume the last
        tuning process.
    Returns
    -------
    tvm.nd.NDArray
        The actual registered Tensor data of this input_name. With `overwrite` set to False, will
        return the original one if the name has already registered before.
    """
    global TASK_INPUT_BUFFER_TABLE
    if workload_key not in TASK_INPUT_BUFFER_TABLE:
        # Lazily create the per-workload sub-table.
        TASK_INPUT_BUFFER_TABLE[workload_key] = {}
    input_table = TASK_INPUT_BUFFER_TABLE[workload_key]
    if not overwrite:
        if input_name not in input_table.keys():
            # Try to load buffer data from local file
            tensor_from_file = _try_load_buffer_from_file(input_name)
            if tensor_from_file:
                input_table[input_name] = tensor_from_file
        elif input_name in input_table.keys():
            # NOTE(review): this `elif` is always True when reached (the name
            # is already in the table), so a duplicate registration raises
            # here, which contradicts the docstring's "will return the
            # original one" -- confirm intended behavior before changing.
            raise RuntimeError(
                "Tensor %s exists in TASK_INPUT_BUFFER_TABLE, %s"
                % (input_name, "set overwrite to True or this Tensor will not be registered")
            )
    # NOTE(review): this unconditionally stores `input_data`, clobbering any
    # tensor just loaded from file above -- verify that is intentional.
    input_table[input_name] = input_data
    if save_to_file:
        _save_buffer_to_file(input_name, input_data)
    return input_data
def get_task_input_buffer(workload_key, input_name):
    """Get special buffer for measurement.
    The buffers are registered by `register_task_input_buffer`.
    Parameters
    ----------
    workload_key : str
        The workload key of the SearchTask.
    input_name : str
        The name of input buffer.
    Returns
    -------
    tvm.nd.NDArray
        The registered input buffer.
    Raises
    ------
    ValueError
        If no buffer named `input_name` is registered for this workload,
        found on disk, or present in the "default" table.
    """
    global TASK_INPUT_BUFFER_TABLE
    if workload_key not in TASK_INPUT_BUFFER_TABLE:
        TASK_INPUT_BUFFER_TABLE[workload_key] = {}
    input_table = TASK_INPUT_BUFFER_TABLE[workload_key]
    if input_name not in input_table:
        # Try to load buffer data from a local file dumped by a previous run.
        tensor_from_file = _try_load_buffer_from_file(input_name)
        if tensor_from_file:
            input_table[input_name] = tensor_from_file
    # Then check the default table: input names extracted from a relay model
    # are stored there because the workload_key is unknown at that time.
    if input_name not in input_table:
        # Bug fix: use .get() so a missing "default" entry still raises the
        # documented ValueError below instead of an unexpected KeyError.
        input_table = TASK_INPUT_BUFFER_TABLE.get("default", {})
    if input_name in input_table:
        return input_table[input_name]
    raise ValueError(
        f"{input_name} not found in TASK_INPUT_BUFFER_TABLE, "
        f"should provide with `SearchTask(..., task_inputs={{...}})`"
    )
@tvm._ffi.register_object("auto_scheduler.SearchTask")
class SearchTask(Object):
    """The computation information and hardware parameters for a schedule search task.
    Parameters
    ----------
    func : Union[Function, str]
        The function that returns the compute declaration Tensors.
        Can be a function or the function name.
    args : Union[Tuple[Any, ...], List[Any]]
        The args of the function.
    compute_dag : ComputeDAG
        The ComputeDAG for the corresponding compute declaration.
    workload_key : str
        The workload key for the corresponding compute declaration.
    target : any target-like object, see Target.canon_target
        The target device of this search task.
    target_host : None or any target-like object, see Target.canon_target
        The target host device of this search task.
    hardware_params : Optional[HardwareParams]
        Hardware parameters used in this search task.
    layout_rewrite_option : Optional[LayoutRewriteOption]
        The layout rewrite option used for measuring programs. If None, the default value will be
        set depending on the specified target.
        Auto_scheduler will find a better schedule for the specified layout rewrite option.
        The NO_REWRITE and INSERT_TRANSFORM_STAGE are expected to be used when tuning a standalone
        op, and the REWRITE_FOR_PRE_TRANSFORMED is expected to be used when tuning ops inside a
        network.
    task_inputs : Union[Dict[str, tvm.nd.NDArray], List[str]]
        A dict that maps input names to input tensors, or a list of input names.
        Some special Tensors are used as inputs in program measuring. Usually we do not need to
        care about it, but for special workloads like Sparse computation the Sparse Tensor inputs
        are meaningful and we cannot use random input directly.
    task_inputs_overwrite : bool = False
        Whether to overwrite the data if a name is already in the global table.
    task_inputs_save_to_file : bool = False
        Whether to save the data to a local file as well. This can be reused to resume the last
        tuning process.
    desc: str = ""
        The description string of this task.
    Examples
    --------
    .. code-block:: python
        # We support two ways to create a search task
        # Way 1: create a task by a workload generation function.
        # The `workload_func` is a function decorated by @auto_scheduler.register_workload
        task = SearchTask(func=workload_func, args=args, target=target)
        # Way 2: create a task by a workload_key.
        # The `workload_key` is a string, which can be either a hash key or a json-serialized
        # tuple(func, args).
        task = SearchTask(workload_key=workload_key, target=target)
    """
    def __init__(
        self,
        func=None,
        args=None,
        compute_dag=None,
        workload_key=None,
        target=None,
        target_host=None,
        hardware_params=None,
        layout_rewrite_option=None,
        task_inputs=None,
        task_inputs_overwrite=False,
        task_inputs_save_to_file=False,
        desc="",
    ):
        assert (
            func is not None or workload_key is not None
        ), "Either a workload generation function or a workload key should be provided"
        if func is not None:
            # Derive the workload key from the registered function and its args.
            workload_key = make_workload_key(func, args)
        if compute_dag is None:
            # Rebuild the DAG from the workload key when not given explicitly.
            compute_dag = ComputeDAG(workload_key)
        assert target is not None, "Must specify a target."
        target, target_host = Target.canon_target_and_host(target, target_host)
        if layout_rewrite_option is None:
            layout_rewrite_option = LayoutRewriteOption.get_target_default(target)
        task_input_names = []
        if isinstance(task_inputs, list):
            task_input_names = task_inputs
        elif isinstance(task_inputs, dict):
            # Register each provided tensor in the global input-buffer table.
            for input_name in task_inputs:
                register_task_input_buffer(
                    workload_key,
                    input_name,
                    task_inputs[input_name],
                    task_inputs_overwrite,
                    task_inputs_save_to_file,
                )
                task_input_names.append(input_name)
        elif task_inputs is not None:
            raise ValueError("task_inputs should be a dict or a list.")
        self.__init_handle_by_constructor__(
            _ffi_api.SearchTask,
            compute_dag,
            workload_key,
            target,
            target_host,
            hardware_params,
            layout_rewrite_option,
            task_input_names,
            desc,
        )
    def tune(self, tuning_options, search_policy=None, adaptive_training=False):
        """Run auto scheduling search for a task
        Parameters
        ----------
        tuning_options : TuningOptions
            Tuning and measurement options.
        search_policy : Optional[SearchPolicy]
            The search policy to be used for schedule search.
        adaptive_training : bool = False
            Forwarded to the default XGBModel cost model when no
            `search_policy` is given; ignored otherwise.
        """
        if search_policy is None:
            cost_model = XGBModel(adaptive_training=adaptive_training)
            search_policy = SketchPolicy(self, cost_model)
        _ffi_api.AutoSchedule(search_policy, tuning_options)
    def apply_best(self, log_file, include_compatible=False, layout_rewrite_option=None):
        """Apply the history best from a log file and return the schedule.
        Parameters
        ----------
        log_file : str
            The name of the log file.
        include_compatible: bool
            When set to True, all compatible records in the log file will be considered.
        layout_rewrite_option : Optional[LayoutRewriteOption]
            The layout rewrite option. Defaults to this task's own option.
        Returns
        -------
        A `te.Schedule` and a list of `te.Tensor` to be used in `tvm.lower` or `tvm.build`.
        Raises
        ------
        RuntimeError
            If the log file contains no valid schedule for this task.
        """
        inp, _ = load_best_record(
            log_file, self.workload_key, include_compatible=include_compatible
        )
        if inp is None:
            raise RuntimeError(
                f"Cannot find any valid schedule for {self.workload_key} in file {log_file}"
            )
        sch, args = self.compute_dag.apply_steps_from_state(
            inp.state, layout_rewrite_option or self.layout_rewrite_option
        )
        return sch, args
    def print_best(self, log_file, print_mode="schedule"):
        """Print the best schedule as python schedule API code or CUDA source code.
        Parameters
        ----------
        log_file : str
            The name of the log file
        print_mode: str
            if "schedule", print the best schedule as python schedule API code.
            if "cuda", print the best schedule as CUDA source code.
        Returns
        -------
        code: str
            The best schedule code in python API or CUDA source code
        """
        inp, _ = load_best_record(log_file, self.workload_key)
        if inp is None:
            raise RuntimeError(
                f"Cannot find any valid schedule for {self.workload_key} in file {log_file}"
            )
        if print_mode == "schedule":
            return self.compute_dag.print_python_code_from_state(inp.state)
        if print_mode == "cuda":
            # CUDA source can only be extracted for cuda targets.
            assert self.target.kind.name == "cuda"
            sch, args = self.compute_dag.apply_steps_from_state(inp.state)
            func = build(sch, args, "cuda")
            return func.imported_modules[0].get_source()
        raise ValueError(f"Invalid print_mode: {print_mode}")
    def __getstate__(self):
        # Canonicalize targets before pickling so the stored state is
        # self-consistent and can be restored on another machine.
        self.target, self.target_host = Target.canon_target_and_host(self.target, self.target_host)
        return {
            "compute_dag": self.compute_dag,
            "workload_key": self.workload_key,
            "target": self.target,
            "target_host": self.target_host,
            "hardware_params": self.hardware_params,
            "layout_rewrite_option": self.layout_rewrite_option,
            "task_input_names": self.task_input_names,
            "desc": self.desc,
        }
    def __setstate__(self, state):
        # Register the workload if needed
        try:
            workload = json.loads(state["workload_key"])
        except Exception:  # pylint: disable=broad-except
            raise RuntimeError(f"Invalid workload key {state['workload_key']}")
        # workload[0] is either the compute function name or the ComputeDAG hash.
        # The compute functions are already registered when importing TVM, so here
        # we only register the ComputeDAG workloads. If the same workload has
        # already been registered, the later registration overrides the previous one.
        if workload[0] not in WORKLOAD_FUNC_REGISTRY:
            register_workload_tensors(state["workload_key"], state["compute_dag"].tensors)
        state["target"], state["target_host"] = Target.canon_target_and_host(
            state["target"], state["target_host"]
        )
        self.__init_handle_by_constructor__(
            _ffi_api.SearchTask,
            state["compute_dag"],
            state["workload_key"],
            state["target"],
            state["target"].host,
            state["hardware_params"],
            state["layout_rewrite_option"],
            state["task_input_names"],
            state["desc"],
        )
def create_task(func, args, target, target_host=None, hardware_params=None):
    """THIS API IS DEPRECATED.
    Create a search task.
    Parameters
    ----------
    func : Union[Function, str]
        The function that returns the compute declaration Tensors.
        Can be a function or the function name.
    args : Union[Tuple[Any, ...], List[Any]]
        The args of the function.
    target : Union[tvm.target.Target, str]
        The target device of this search task.
    target_host : Optional[Union[tvm.target.Target, str]]
        The target host device of this search task.
    hardware_params : Optional[HardwareParams]
        Hardware parameters used in this search task.
    Raises
    ------
    ValueError
        Always; this API was removed. Use `SearchTask(...)` instead.
    """
    # Bug fix: the two adjacent string literals were concatenated without a
    # separator, producing "...deprecated.See https..." in the message.
    raise ValueError(
        'The API "auto_scheduler.create_task" is deprecated. '
        "See https://github.com/apache/tvm/pull/7028 for the upgrade guide"
    )
def auto_schedule(task, search_policy=None, tuning_options=TuningOptions()):
    """THIS API IS DEPRECATED.
    Run auto scheduling search for a task.
    Parameters
    ----------
    task : SearchTask
        The SearchTask for the computation declaration.
    search_policy : Optional[SearchPolicy]
        The search policy to be used for schedule search.
    tuning_options : Optional[TuningOptions]
        Tuning and measurement options.
    Raises
    ------
    ValueError
        Always; this API was removed. Use `SearchTask.tune(...)` instead.
    """
    # NOTE(review): the mutable default `TuningOptions()` is evaluated at
    # import time; it is kept unchanged to preserve the public signature of
    # this deprecated stub.
    # Bug fixes: the message previously named "auto_scheduler.create_task"
    # (copy-paste from the stub above) and was missing a space before "See".
    raise ValueError(
        'The API "auto_scheduler.auto_schedule" is deprecated. '
        "See https://github.com/apache/tvm/pull/7028 for the upgrade guide."
    )
| 23,856 | 35.703077 | 100 | py |
tvm | tvm-main/python/tvm/auto_scheduler/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-import, redefined-builtin
""" Namespace for TVM Auto-scheduler. """
from . import (
compute_dag,
dispatcher,
feature,
loop_state,
measure,
measure_record,
relay_integration,
search_policy,
search_task,
task_scheduler,
utils,
workload_registry,
)
# Shortcut
from .compute_dag import (
ComputeDAG,
LayoutRewriteOption,
get_shape_from_rewritten_layout,
)
from .cost_model import RandomModel, XGBModel
from .dispatcher import ApplyHistoryBest, ApplyHistoryBestOrSample, DispatchContext
from .measure import (
LocalBuilder,
LocalRPCMeasureContext,
LocalRunner,
MeasureInput,
MeasureResult,
RPCRunner,
register_task_input_check_func,
)
from .measure_record import (
RecordReader,
RecordToFile,
load_best_record,
load_records,
save_records,
)
from .relay_integration import (
extract_tasks,
is_auto_scheduler_enabled,
remove_index_check,
rewrite_compute_body,
rewrite_tensor_shape,
)
from .search_policy import (
EmptyPolicy,
PreloadCustomSketchRule,
PreloadMeasuredStates,
SketchPolicy,
)
from .search_task import (
HardwareParams,
SearchTask,
TuningOptions,
auto_schedule,
create_task,
)
from .task_scheduler import TaskScheduler
from .workload_registry import make_workload_key, register_workload
| 2,169 | 25.790123 | 83 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.