repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/python/tvm/relay/op/contrib/tensorrt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, logging-format-interpolation
"""TensorRT supported operators."""
import logging
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np # type: ignore
import tvm
from tvm import relay
from tvm.ir import Op
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.dataflow_pattern import (
is_constant,
is_op,
is_tuple,
is_tuple_get_item,
wildcard,
)
from tvm.relay.expr import Call, Constant, TupleGetItem
from tvm.relay.expr_functor import ExprMutator, ExprVisitor
from tvm.relay.op.contrib.register import register_pattern_table
logger = logging.getLogger("TensorRT")
def is_tensorrt_compiler_enabled() -> bool:
    """Return True if this build of TVM registered the TensorRT probe global function
    (i.e. was compiled with TensorRT codegen support)."""
    return tvm.get_global_func("relay.ext.tensorrt.is_runtime_enabled", True) is not None
def is_tensorrt_runtime_enabled() -> bool:
    """Check if the TensorRT graph executor is present.
    Returns
    -------
    ret: bool
        True if present, False if not.
    """
    # Probe for the global function; it is only registered when TVM was built
    # against the TensorRT runtime.
    probe = tvm.get_global_func("relay.ext.tensorrt.is_runtime_enabled", True)
    return probe() if probe else False
def get_tensorrt_target() -> tvm.target.Target:
    """Returns the current Target, which must be of kind "tensorrt"."""
    current = tvm.target.Target.current()
    if current is not None and current.kind.name == "tensorrt":
        return current
    # No suitable target in scope: fall back to a default "tensorrt" target.
    return tvm.target.Target("tensorrt")
def get_tensorrt_version() -> Tuple[int, int, int]:
    """Returns the version of TensorRT to assume during compilation.
    In order of preference this is taken from:
    - The current "tensorrt" target's "tensorrt_version" attribute string.
    - The version linked to the TVM runtime.
    - (6, 0, 1)
    Returns
    -------
    ret: Tuple[int, int, int]
        TensorRT version as a tuple of (major, minor, patch).
    """
    # cf logic in tensorrt/codegen.cc::SaveGlobalAttributes
    # First check for version in target.
    target = get_tensorrt_target()
    version = target.attrs["tensorrt_version"]
    if len(version) == 3:
        return int(version[0]), int(version[1]), int(version[2])
    # The attribute is either fully specified (major, minor, patch) or empty.
    assert len(version) == 0
    # Next, ask runtime for its version.
    if is_tensorrt_runtime_enabled():
        get_version = tvm.get_global_func("relay.ext.tensorrt.get_version")
        version = get_version()
        assert len(version) == 3
        return int(version[0]), int(version[1]), int(version[2])
    # Finally, use default.
    # BUGFIX: the two concatenated string literals previously had no separating
    # space, producing "...target.Defaulting to 6.0.1." in the log.
    logger.warning(
        "TVM was not built against TensorRT and no version was provided in the 'tensorrt' target. "
        "Defaulting to 6.0.1."
    )
    return (6, 0, 1)
def get_tensorrt_use_implicit_batch_mode() -> bool:
    """Returns the "use_implicit_batch" attribute of the current "tensorrt" target."""
    return get_tensorrt_target().attrs["use_implicit_batch"]
def get_tensorrt_remove_no_mac_subgraphs() -> bool:
    """Returns the "remove_no_mac_subgraphs" attribute of the current "tensorrt" target."""
    return get_tensorrt_target().attrs["remove_no_mac_subgraphs"]
def get_tensorrt_use_fp16() -> bool:
    """Returns the "use_fp16" attribute of the current "tensorrt" target."""
    return get_tensorrt_target().attrs["use_fp16"]
def partition_for_tensorrt(
    mod: tvm.IRModule,
    params: Optional[Dict[str, tvm.nd.NDArray]] = None,
    # CAUTION: Can't use default Target("tensorrt") here since the target kind is only available
    # if is_tensorrt_compiler_enabled() == True.
    target: Optional[tvm.target.Target] = None,
) -> tvm.IRModule:
    """Partition all functions in mod to greedily offload supported operators to TensorRT.
    Parameters
    ----------
    mod : tvm.IRModule
        The module to partition.
    params : Optional[Dict[str, tvm.nd.NDArray]]
        Constant input parameters.
    target : tvm.target.Target
        A target of kind "tensorrt" describing additional partitioning and compilation options.
    Returns
    -------
    partitioned_mod : tvm.IRModule
        The partitioned module.
    """
    assert is_tensorrt_compiler_enabled(), "Can only partition for TensorRT if it is enabled"
    if params:
        # Bind constants into the body so FoldConstant below can simplify them away.
        mod["main"] = bind_params_by_name(mod["main"], params)
    if target is None:
        # Use a default target. The get_tensorrt_target() function will similarly create an
        # equivalent default target when compilation continues after partitioning.
        target = tvm.target.Target("tensorrt")
    seq = tvm.transform.Sequential(
        [
            transform.InferType(),
            RemoveDropoutPass(),
            transform.RemoveUnusedFunctions(),
            # TensorRT expects channel-first layouts; convert before pattern matching.
            transform.ConvertLayout(
                {
                    "nn.conv1d": ["NCW", "default"],
                    "nn.conv2d": ["NCHW", "default"],
                    "nn.conv3d": ["NCDHW", "default"],
                    "nn.conv2d_transpose": ["NCHW", "default"],
                }
            ),
            transform.FoldConstant(),
            transform.MergeComposite(pattern_table()),
            transform.AnnotateTarget("tensorrt"),
            transform.MergeCompilerRegions(),
            transform.PartitionGraph(),
            transform.InferType(),
        ]
    )
    # Run the passes with the "tensorrt" target current so the predicates can query it.
    with target:
        mod = seq(mod)
        mod = prune_tensorrt_subgraphs(mod)
    return mod
def is_supported_trt_type(typ: Union[tvm.ir.TensorType, tvm.ir.TupleType], op_name: str) -> bool:
    """Check whether a type is supported by TensorRT."""
    supported_dtypes = ["float32"]
    if get_tensorrt_use_fp16():
        supported_dtypes.append("float16")
    if isinstance(typ, tvm.ir.TensorType):
        if typ.dtype not in supported_dtypes:
            logger.info(f"{op_name}: Only {supported_dtypes} tensor dtypes are supported.")
            return False
        dims = typ.shape
        if get_tensorrt_use_implicit_batch_mode():
            # The first dimension can be Any.
            dims = dims[1:]
        if any(isinstance(dim, tvm.tir.expr.Any) for dim in dims):
            logger.info(f"{op_name}: Only statically known tensor shapes are supported.")
            return False
        return True
    if isinstance(typ, tvm.ir.TupleType):
        # A tuple is supported only when every field is.
        return all(is_supported_trt_type(field_type, op_name) for field_type in typ.fields)
    logger.info(f"{op_name}: Type {typ} is not supported.")
    return False
def get_op_name(expr: relay.expr.Expr) -> str:
    """Get the operator name from an expression."""
    # Walk into the expression until an Op node is reached; unknown nodes map to "".
    for node_kind, extract in (
        (Op, lambda e: e.name),
        (Call, lambda e: get_op_name(e.op)),
        (TupleGetItem, lambda e: get_op_name(e.tuple_value)),
        (relay.Tuple, lambda e: get_op_name(e.fields[0])),
    ):
        if isinstance(expr, node_kind):
            return extract(expr)
    return ""
def get_args(expr: relay.expr.Expr) -> List[relay.expr.Expr]:
    """Get the arguments from an expression."""
    if isinstance(expr, Call):
        return expr.args
    if isinstance(expr, TupleGetItem):
        return get_args(expr.tuple_value)
    if isinstance(expr, relay.Tuple):
        # Flatten the arguments of every tuple field into one list.
        flattened: List[relay.expr.Expr] = []
        for field in expr.fields:
            flattened.extend(get_args(field))
        return flattened
    return []
def get_attrs(expr: relay.expr.Expr) -> Any:
    """Get the attributes from an expression."""
    # Projections delegate to the underlying call; anything else has no attrs.
    if isinstance(expr, TupleGetItem):
        return get_attrs(expr.tuple_value)
    if isinstance(expr, Call):
        return expr.attrs
    return {}
# Signature shared by all op-specific checkers: (attrs, args, op_name) -> supported?
CheckFunc = Callable[[Any, List[relay.expr.Expr], str], bool]
def make_predicate(checker: CheckFunc) -> Callable[[relay.expr.Expr], bool]:
    """Returns the pattern predicate which performs the standard checks, then invokes the
    more primitive checker."""

    def predicate(expr: relay.expr.Expr) -> bool:
        op_name = get_op_name(expr)
        args = get_args(expr)
        # Generic type/dtype/shape checks apply to every argument first.
        for arg in args:
            if not is_supported_trt_type(arg.checked_type, op_name):
                return False
        # Then defer to the op-specific rules.
        if not checker(get_attrs(expr), args, op_name):
            return False
        logger.info(f"{op_name}: Predicate passes")
        return True

    return predicate
# Predicate that applies only the generic type checks (no op-specific rules).
standard_predicate = make_predicate(lambda attrs, args, op_name: True)
def make_trt_version_checker(version: Tuple[int, int, int]) -> CheckFunc:
    """Helper for ops which require a minimum TRT version"""

    def checker(attrs: Any, args: List[relay.expr.Expr], op_name: str) -> bool:
        # Tuple comparison gives lexicographic (major, minor, patch) ordering.
        if get_tensorrt_version() >= version:
            return True
        logger.info(
            f"{op_name}: requires TensorRT version {'.'.join(map(str, version))} or higher."
        )
        return False

    return checker
def make_and_checker(*checkers: CheckFunc) -> CheckFunc:
    """Combine several checkers into one that passes only when all of them pass."""

    def checker(attrs: Any, args: List[relay.expr.Expr], op_name: str) -> bool:
        # Deliberately evaluate every checker (no short-circuit) so each one
        # gets a chance to log its own rejection reason.
        results = [c(attrs, args, op_name) for c in checkers]
        return all(results)

    return checker
def multiply_checker(attrs: Any, args: List[relay.expr.Expr], op_name: str) -> bool:
    """Helper for multiply operations."""
    # Normalize each argument's shape to ints, using -1 for dynamic (Any) dims.
    shapes = [
        [int(x) if not isinstance(x, tvm.tir.expr.Any) else -1 for x in arg.checked_type.shape]
        for arg in args
    ]
    # TODO(mbs): Follow up
    # Batched multiply operations don't work in implicit batch mode. The following shapes
    # have been excluded because they occur in PT MaskRCNN model. The long term solution is
    # to switch to explicit batch mode after performance regressions are solved.
    # NOTE(review): this exclusion is applied unconditionally, not only when
    # get_tensorrt_use_implicit_batch_mode() is set as the comment suggests — confirm intent.
    if all([list(map(int, shape)) in [[300, 64, 7, 7], [300, 1, 1, 1]] for shape in shapes]):
        logger.info(f"{op_name}: Excluding since problematic in implicit batch mode")
        return False
    return True
def reduce_checker(attrs: Any, args: List[relay.expr.Expr], op_name: str) -> bool:
    """Helper for reduce operations."""
    implicit_batch = get_tensorrt_use_implicit_batch_mode()
    # Reducing over all axes yields a scalar, which implicit batch mode cannot express.
    if implicit_batch and (not attrs.axis or len(attrs.axis) == 0):
        logger.info(f"{op_name}: cannot reduce to scalar.")
        return False
    if attrs.exclude:
        logger.info(f"{op_name}: exclude not supported.")
        return False
    # Axis 0 is the (implicit) batch dimension and must be left alone.
    if implicit_batch and any(int(x) == 0 for x in attrs.axis):
        logger.info(f"{op_name}: can't modify batch dimension.")
        return False
    return True
def add_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if add is supported by TensorRT."""
    # Normalize each argument's shape to ints, using -1 for dynamic (Any) dims.
    shapes = [
        [int(x) if not isinstance(x, tvm.tir.expr.Any) else -1 for x in arg.checked_type.shape]
        for arg in args
    ]
    # Scalars require explicit batch mode.
    if get_tensorrt_use_implicit_batch_mode() and any([len(shape) < 1 for shape in shapes]):
        logger.info(f"{op_name}: Scalars not supported in implicit batch mode")
        return False
    # Work around a TensorRT defect: adding a constant whose leading (batch)
    # dimension matches the data's and is > 1, with rank > 3, miscompiles in
    # explicit batch mode.
    if (
        not get_tensorrt_use_implicit_batch_mode()
        and (isinstance(args[0], Constant) or isinstance(args[1], Constant))
        and len(shapes[0]) > 0
        and len(shapes[1]) > 0
        and shapes[0][0] == shapes[1][0]
        and shapes[0][0] != 1
        and (len(shapes[0]) > 3 or len(shapes[1]) > 3)
    ):
        logger.info(f"{op_name}: bug in TRT with adding batched constants.")
        return False
    return True
def batch_norm_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.batch_norm is supported by TensorRT."""
    input_rank = len(args[0].checked_type.shape)
    # Rank-5 inputs need TRT >= 6.0.1; higher ranks are never supported.
    if input_rank == 5 and get_tensorrt_version() < (6, 0, 1):
        logger.info(f"{op_name}: TensorRT 6.0.1 or higher is required for rank 5 inputs.")
        return False
    if input_rank > 5:
        logger.info(f"{op_name}: Input rank must be 5 or less.")
        return False
    axis = int(attrs.axis)
    if axis not in (1, 3):
        logger.info(f"{op_name}: axis is {axis} but must be 1 or 3.")
        return False
    return True
def softmax_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.softmax is supported by TensorRT."""
    if get_tensorrt_use_implicit_batch_mode():
        # Softmax over axis 0 would operate across the implicit batch dimension.
        if int(attrs.axis) == 0:
            logger.info(f"{op_name}: can't modify batch dimension.")
            return False
    return True
def conv1d_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.conv1d is supported by TensorRT."""
    # TRT convolutions require the weights at engine-build time.
    if not isinstance(args[1], Constant):
        logger.info(f"{op_name}: kernel argument must be constant.")
        return False
    data_layout = attrs.data_layout
    if data_layout != "NCW":
        logger.info(f"{op_name}: data_layout is {data_layout} but must be NCW.")
        return False
    kernel_layout = attrs.kernel_layout
    if kernel_layout != "OIW":
        logger.info(f"{op_name}: kernel_layout is {kernel_layout} but must be OIW.")
        return False
    return True
def conv2d_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.conv2d is supported by TensorRT."""
    assert len(args) == 2
    # TRT convolutions require the weights at engine-build time.
    if not isinstance(args[1], Constant):
        logger.info(f"{op_name}: kernel argument must be constant.")
        return False
    data_layout = attrs.data_layout
    if data_layout != "NCHW":
        logger.info(f"{op_name}: data_layout is {data_layout} but must be NCHW.")
        return False
    kernel_layout = attrs.kernel_layout
    if kernel_layout != "OIHW":
        logger.info(f"{op_name}: kernel_layout is {kernel_layout} but must be OIHW.")
        return False
    out_layout = attrs.out_layout
    if out_layout and out_layout != "NCHW":
        logger.info(f"{op_name}: out_layout is {out_layout} but must be NCHW.")
        return False
    return True
def dense_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if dense is supported by TensorRT."""
    # The weight tensor must be known at engine-build time.
    if not isinstance(args[1], Constant):
        logger.info(f"{op_name}: weight must be constant")
        return False
    input_rank = len(args[0].checked_type.shape)
    if input_rank not in (2, 3, 4):
        logger.info(f"{op_name}: input has rank {input_rank} but must be 2, 3 or 4.")
        return False
    weight_rank = len(args[1].checked_type.shape)
    if weight_rank != 2:
        logger.info(f"{op_name}: weight has rank {weight_rank} but must be 2.")
        return False
    return True
def batch_matmul_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.batch_matmul is supported by TensorRT."""
    # Mismatched operand ranks (broadcast batch dims) only work in explicit batch mode.
    if get_tensorrt_use_implicit_batch_mode() and len(args[0].checked_type.shape) != len(
        args[1].checked_type.shape
    ):
        logger.info(f"{op_name}: requires use_implict_batch=False.")
        return False
    return True
def layer_norm_checker(attrs: Any, args: List[relay.expr.Expr], op_name: str) -> bool:
    """Check if nn.layer_norm is supported by TensorRT."""
    # Normalizing over axis 0 would touch the implicit batch dimension.
    if get_tensorrt_use_implicit_batch_mode() and int(attrs.axis) == 0:
        logger.info(f"{op_name}: requires use_implict_batch=False.")
        return False
    return True
def bias_add_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.bias_add is supported by TensorRT."""
    rank = len(args[0].checked_type.shape)
    if rank not in (2, 3, 4):
        logger.info(f"{op_name}: input rank is {rank} but must be 2, 3 or 4.")
        return False
    return True
def max_pool_2d_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.max_pool2d is supported by TensorRT."""
    layout = attrs.layout
    if layout != "NCHW":
        logger.info(f"{op_name}: layout is {layout} but must be NCHW.")
        return False
    # Ceil rounding for output size was added in TRT 5.1.5.
    if attrs.ceil_mode and get_tensorrt_version() < (5, 1, 5):
        logger.info(f"{op_name}: ceil_mode=True requires TensorRT 5.1.5 or greater.")
        return False
    return True
def avg_pool_2d_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.avg_pool2d is supported by TensorRT."""
    layout = attrs.layout
    if layout != "NCHW":
        logger.info(f"{op_name}: layout is {layout} but must be NCHW.")
        return False
    padding = attrs.padding
    # (top, left, bottom, right): asymmetric when top != bottom or left != right.
    asymmetric_padding = len(padding) == 4 and (
        int(padding[0]) != int(padding[2]) or int(padding[1]) != int(padding[3])
    )
    if attrs.count_include_pad and asymmetric_padding:
        logger.info(
            f"{op_name}: inclusive-counted blended or average "
            "pooling is not supported in combination with asymmetric padding"
        )
        return False
    # Ceil rounding for output size was added in TRT 5.1.5.
    if attrs.ceil_mode and get_tensorrt_version() < (5, 1, 5):
        logger.info(f"{op_name}: ceil_mode=True requires TensorRT 5.1.5 or greater.")
        return False
    return True
def global_max_pool_2d_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.global_max_pool2d is supported by TensorRT."""
    layout = attrs.layout
    if layout != "NCHW":
        logger.info(f"{op_name}: layout is {layout} but must be NCHW.")
        return False
    return True
def global_avg_pool_2d_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.global_avg_pool2d is supported by TensorRT."""
    layout = attrs.layout
    if layout != "NCHW":
        logger.info(f"{op_name}: layout is {layout} but must be NCHW.")
        return False
    return True
def expand_dims_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if expand_dims is supported by TensorRT."""
    if get_tensorrt_use_implicit_batch_mode():
        # Inserting a dim at position 0 would displace the implicit batch dim.
        if int(attrs.axis) == 0:
            logger.info(f"{op_name}: can't modify batch dimension.")
            return False
    return True
def squeeze_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if squeeze is supported by TensorRT."""
    if not attrs.axis:
        logger.info(f"{op_name}: must explicitly set axis.")
        return False
    if get_tensorrt_use_implicit_batch_mode():
        # Squeezing axis 0 would remove the implicit batch dimension.
        if any(int(axis) == 0 for axis in attrs.axis):
            logger.info(f"{op_name}: can't modify batch dimension.")
            return False
    return True
def concatenate_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if concatenate is supported by TensorRT."""
    if get_tensorrt_use_implicit_batch_mode():
        if int(attrs.axis) == 0:
            logger.info(f"{op_name}: can't modify batch dimension.")
            return False
        if not isinstance(args[0], relay.Tuple):
            # BUGFIX: the "f" prefix was previously inside the string literal
            # ("f{op_name}: ..."), so the op name was never interpolated.
            logger.info(f"{op_name}: concatenate must be applied to a literal tuple")
            return False
        # Mixing constants into the concatenated tuple is not supported.
        for tuple_input in args[0].fields:
            if isinstance(tuple_input, Constant):
                logger.info(f"{op_name}: can't concatenate tensors with constants.")
                return False
    return True
def split_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if split is supported by TensorRT."""
    if get_tensorrt_use_implicit_batch_mode():
        # Splitting along axis 0 would cut across the implicit batch dimension.
        if int(attrs.axis) == 0:
            logger.info(f"{op_name}: can't modify batch dimension.")
            return False
    return True
def conv2d_transpose_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.conv2d_transpose is supported by TensorRT."""
    data_layout = attrs.data_layout
    if data_layout != "NCHW":
        logger.info(f"{op_name}: data_layout is {data_layout} but must be NCHW.")
        return False
    kernel_layout = attrs.kernel_layout
    if kernel_layout != "OIHW":
        logger.info(f"{op_name}: kernel_layout is {kernel_layout} but must be OIHW.")
        return False
    out_layout = attrs.out_layout
    if out_layout and out_layout != "NCHW":
        logger.info(f"{op_name}: out_layout is {out_layout} but must be NCHW.")
        return False
    # TRT deconvolution does not support dilation.
    if attrs.dilation and any(int(rate) != 1 for rate in attrs.dilation):
        logger.info(f"{op_name}: dilation rate must be 1.")
        return False
    return True
def transpose_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if transpose is supported by TensorRT."""
    if get_tensorrt_use_implicit_batch_mode():
        # The batch dimension must stay first in the permutation.
        if int(attrs.axes[0]) != 0:
            logger.info(f"{op_name}: can't modify batch dimension.")
            return False
    return True
def layout_transform_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if layout_transform is supported by TensorRT."""
    src, dst = attrs.src_layout, attrs.dst_layout
    # Only 4D/5D channel-first <-> channel-last conversions are supported.
    supported_conversions = (
        ("NCHW", "NHWC"),
        ("NHWC", "NCHW"),
        ("NDHWC", "NCDHW"),
        ("NCDHW", "NDHWC"),
    )
    if (src, dst) not in supported_conversions:
        logger.info(f"{op_name}: {src} to {dst} is not supported.")
        return False
    return True
def reshape_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if reshape is supported by TensorRT."""
    # Special values < -1 (e.g. -2, -3, -4 in Relay's reshape) are not supported.
    if any([x < -1 for x in map(int, attrs.newshape)]):
        logger.info(f"{op_name}: new shape dims must be explicit.")
        return False
    if get_tensorrt_use_implicit_batch_mode():
        shape = args[0].checked_type.shape
        new_shape = attrs.newshape
        if len(new_shape) == 0 or len(shape) == 0:
            logger.info(f"{op_name}: Can't reshape to or from scalar.")
            return False
        dynamic_reshape = any([isinstance(x, tvm.tir.expr.Any) for x in shape])
        if dynamic_reshape:
            # Make sure that the batch dim is unmodified.
            if int(new_shape[0]) < 0:
                # Batch dim is inferred (-1): safe only if all trailing dims are
                # statically equal, so the inferred dim can only be the batch dim.
                for shape_val, new_shape_val in zip(shape[1:], new_shape[1:]):
                    if not (
                        isinstance(shape_val, (int, tvm.tir.expr.IntImm))
                        and isinstance(new_shape_val, (int, tvm.tir.expr.IntImm))
                        and int(shape_val) == int(new_shape_val)
                    ):
                        logger.info(f"{op_name}: can't modify batch dimension")
                        return False
            elif int(new_shape[0]) > 0:
                # Currently we only allow dim[0] to be Any, so this branch will always be False
                if not (
                    isinstance(shape[0], (int, tvm.tir.expr.IntImm))
                    and isinstance(new_shape[0], (int, tvm.tir.expr.IntImm))
                    and int(shape[0]) == int(new_shape[0])
                ):
                    logger.info(f"{op_name}: can't modify batch dimension")
                    return False
        else:
            # Fully static shapes: materialize both and compare batch dims directly.
            shape = list(map(int, shape))
            new_shape = list(map(int, new_shape))
            # TRT cannot modify batch dimension.
            original_volume = np.prod(shape)
            # First, resolve 0.
            for i, value in enumerate(new_shape):
                if value == 0:
                    new_shape[i] = shape[i]
            # Resolve -1.
            for i, value in enumerate(new_shape):
                if value == -1:
                    new_shape[i] = original_volume // np.prod([x for x in new_shape if x != -1])
            # Remove batch dimension and see if volumes match
            if shape[0] != new_shape[0]:
                logger.info(f"{op_name}: can't modify batch dimension.")
                return False
    return True
def pad_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.pad is supported by TensorRT."""
    pad_value = args[1]
    # The pad value must be known at engine-build time.
    if not isinstance(pad_value, relay.Constant):
        logger.info(f"{op_name}: pad argument must be constant")
        return False
    pad_value = pad_value.data.numpy().item()
    if attrs.pad_mode != "constant":
        logger.info(f"{op_name}: pad mode is {attrs.pad_mode} but must be constant.")
        return False
    # BUGFIX: previously `pad_value > 0.0`, which let negative pad values slip
    # through even though (as the message states) only zero padding is supported.
    if pad_value != 0.0:
        logger.info(f"{op_name}: pad value is {pad_value} but must be 0.0.")
        return False
    if len(attrs.pad_width) not in [4, 5]:
        logger.info(f"{op_name}: can only pad 4D or 5D inputs")
        return False
    # Batch (dim 0) and channel (dim 1) padding is unsupported.
    if any([x != 0 for x in attrs.pad_width[0]]) or any([x != 0 for x in attrs.pad_width[1]]):
        logger.info(f"{op_name}: can't pad batch or channel dimensions.")
        return False
    if len(attrs.pad_width) == 5 and any([x != 0 for x in attrs.pad_width[2]]):
        logger.info(f"{op_name}: can only pad last two dimensions for 5D inputs.")
        return False
    return True
def strided_slice_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if strided_slice is supported by TensorRT."""
    if get_tensorrt_use_implicit_batch_mode():
        # The slice may not touch the batch dimension: begin[0] must be 0 (or
        # unset) and end[0] must cover the whole batch (-1 or the full extent).
        batch_dim_begin_modified = attrs.begin[0] is not None and int(attrs.begin[0]) != 0
        batch_dim_end_modified = (
            attrs.end[0] is not None
            and int(attrs.end[0]) != -1
            and int(attrs.end[0]) != int(args[0].checked_type.shape[0])
        )
        if batch_dim_begin_modified or batch_dim_end_modified:
            logger.info(f"{op_name}: can't modify batch dimension.")
            return False
    # Negative or zero strides (reversed slices) are not supported.
    if any([x is not None and x <= 0 for x in attrs.strides]):
        logger.info(f"{op_name}: stride must be positive")
        return False
    # When `axes` is set, only those axes are sliced; otherwise all dims are.
    length: int = len(attrs.axes) if attrs.axes is not None else len(args[0].checked_type.shape))
    for i in range(0, length):
        begin = int(attrs.begin[i])
        if attrs.slice_mode == "end":
            # In "end" mode, end[i] is an absolute index; -1/None means "to the end".
            end = (
                int(attrs.end[i])
                if attrs.end[i] is not None and int(attrs.end[i]) != -1
                else args[0].checked_type.shape[i]
            )
            size = int(end) - int(begin)
        elif attrs.slice_mode == "size":
            # In "size" mode, end[i] is the slice length; -1/None means "to the end".
            size = (
                int(attrs.end[i])
                if attrs.end[i] is not None and int(attrs.end[i]) != -1
                else args[0].checked_type.shape[i] - begin
            )
        else:
            logger.warning(f"{op_name}: unknown slice mode encountered")
            size = 1
        if int(size) < 1:
            logger.info(f"{op_name}: size of slice must be at least 1")
            return False
    return True
def adaptive_max_pool2d_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.adaptive_max_pool2d is supported by TensorRT."""
    output_size = attrs.output_size
    # Only the global-pooling case (output size (1, 1)) maps onto TensorRT.
    if len(output_size) == 0 or any(int(size) != 1 for size in output_size):
        logger.info(f"{op_name}: output size must be (1, 1).")
        return False
    return True
def adaptive_avg_pool2d_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.adaptive_avg_pool2d is supported by TensorRT."""
    output_size = attrs.output_size
    # Only the global-pooling case (output size (1, 1)) maps onto TensorRT.
    if len(output_size) == 0 or any(int(size) != 1 for size in output_size):
        logger.info(f"{op_name}: output size must be (1, 1).")
        return False
    return True
def conv3d_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.conv3d is supported by TensorRT."""
    # TRT convolutions require the weights at engine-build time.
    if not isinstance(args[1], Constant):
        logger.info(f"{op_name}: kernel argument must be constant.")
        return False
    data_layout = attrs.data_layout
    if data_layout != "NCDHW":
        logger.info(f"{op_name}: data_layout is {data_layout} but must be NCDHW.")
        return False
    kernel_layout = attrs.kernel_layout
    if kernel_layout != "OIDHW":
        logger.info(f"{op_name}: kernel_layout is {kernel_layout} but must be OIDHW.")
        return False
    out_layout = attrs.out_layout
    if out_layout and out_layout != "NCDHW":
        logger.info(f"{op_name}: out_layout is {out_layout} but must be NCDHW.")
        return False
    return True
def max_pool_3d_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.max_pool3d is supported by TensorRT."""
    layout = attrs.layout
    if layout != "NCDHW":
        logger.info(f"{op_name}: layout is {layout} but must be NCDHW.")
        return False
    return True
def avg_pool_3d_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.avg_pool3d is supported by TensorRT."""
    layout = attrs.layout
    if layout != "NCDHW":
        logger.info(f"{op_name}: layout is {layout} but must be NCDHW.")
        return False
    return True
def conv3d_transpose_checker(
    attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool:  # pylint: disable=unused-variable
    """Check if nn.conv3d_transpose is supported by TensorRT."""
    data_layout = attrs.data_layout
    if data_layout != "NCDHW":
        logger.info(f"{op_name}: data_layout is {data_layout} but must be NCDHW.")
        return False
    kernel_layout = attrs.kernel_layout
    if kernel_layout != "OIDHW":
        logger.info(f"{op_name}: kernel_layout is {kernel_layout} but must be OIDHW.")
        return False
    out_layout = attrs.out_layout
    if out_layout and out_layout != "NCDHW":
        logger.info(f"{op_name}: out_layout is {out_layout} but must be NCDHW.")
        return False
    # TRT deconvolution supports neither dilation nor output padding.
    if attrs.dilation and any(int(rate) != 1 for rate in attrs.dilation):
        logger.info(f"{op_name}: dilation rate must be 1.")
        return False
    if attrs.output_padding and any(int(x) != 0 for x in attrs.output_padding):
        logger.info(f"{op_name}: output padding is not supported.")
        return False
    return True
def unary_op_pattern(op: relay.expr.Expr) -> relay.dataflow_pattern.DFPattern:
    """Matches unary operation"""
    return is_op(op)(wildcard())
def unary_op_pattern_with_any_tuple(op: relay.expr.Expr) -> relay.dataflow_pattern.DFPattern:
    """Matches unary operation with literal tuple argument"""
    # is_tuple(None) matches a tuple of any arity.
    return is_op(op)(is_tuple(None))
def binary_op_pattern(op: relay.expr.Expr) -> relay.dataflow_pattern.DFPattern:
    """Matches binary operation"""
    return is_op(op)(wildcard(), wildcard())
def binary_op_pattern_with_const(op: relay.expr.Expr) -> relay.dataflow_pattern.DFPattern:
    """Matches binary operation with rhs arg a constant"""
    return is_op(op)(wildcard(), is_constant())
def proj_five_op_pattern_with_const(op: relay.expr.Expr) -> relay.dataflow_pattern.DFPattern:
    """Matches the first projection of a five-argument op whose trailing four
    arguments are constants (e.g. nn.batch_norm's data output)."""
    return is_tuple_get_item(
        is_op(op)(wildcard(), is_constant(), is_constant(), is_constant(), is_constant()), 0
    )
@register_pattern_table("tensorrt")
def pattern_table() -> List[
    Tuple[str, relay.dataflow_pattern.DFPattern, Callable[[relay.expr.Call], bool]]
]:
    """Get the Tensorrt compiler pattern table for supported ops.

    Entries are tried in order, so each op must appear exactly once: an earlier
    duplicate entry would shadow the one carrying the real predicate.
    """
    return [
        (
            "tensorrt.nn.conv3d",
            binary_op_pattern_with_const("nn.conv3d"),
            make_predicate(make_and_checker(make_trt_version_checker((6, 0, 1)), conv3d_checker)),
        ),
        (
            "tensorrt.nn.conv2d",
            binary_op_pattern_with_const("nn.conv2d"),
            make_predicate(conv2d_checker),
        ),
        (
            "tensorrt.nn.conv1d",
            binary_op_pattern_with_const("nn.conv1d"),
            make_predicate(conv1d_checker),
        ),
        (
            "tensorrt.nn.conv2d_transpose",
            binary_op_pattern("nn.conv2d_transpose"),
            make_predicate(conv2d_transpose_checker),
        ),
        # NOTE(review): squeeze takes a single input; binary_op_pattern looks
        # suspicious here — confirm against the dataflow pattern matcher.
        ("tensorrt.squeeze", binary_op_pattern("squeeze"), make_predicate(squeeze_checker)),
        ("tensorrt.add", binary_op_pattern("add"), make_predicate(add_checker)),
        (
            "tensorrt.nn.dense",
            binary_op_pattern_with_const("nn.dense"),
            make_predicate(dense_checker),
        ),
        (
            "tensorrt.nn.bias_add",
            binary_op_pattern("nn.bias_add"),
            make_predicate(bias_add_checker),
        ),
        (
            "tensorrt.nn.batch_matmul",
            binary_op_pattern("nn.batch_matmul"),
            make_predicate(batch_matmul_checker),
        ),
        ("tensorrt.divide", binary_op_pattern("divide"), standard_predicate),
        ("tensorrt.multiply", binary_op_pattern("multiply"), make_predicate(multiply_checker)),
        ("tensorrt.subtract", binary_op_pattern("subtract"), standard_predicate),
        ("tensorrt.power", binary_op_pattern("power"), standard_predicate),
        ("tensorrt.maximum", binary_op_pattern("maximum"), standard_predicate),
        ("tensorrt.minimum", binary_op_pattern("minimum"), standard_predicate),
        ("tensorrt.nn.relu", unary_op_pattern("nn.relu"), standard_predicate),
        (
            "tensorrt.nn.leaky_relu",
            unary_op_pattern("nn.leaky_relu"),
            make_predicate(make_trt_version_checker((5, 1, 5))),
        ),
        # BUGFIX: a duplicate "tensorrt.nn.pad" entry with standard_predicate used
        # to appear here, shadowing the pad_checker entry further down.
        ("tensorrt.sigmoid", unary_op_pattern("sigmoid"), standard_predicate),
        ("tensorrt.tanh", unary_op_pattern("tanh"), standard_predicate),
        ("tensorrt.exp", unary_op_pattern("exp"), standard_predicate),
        ("tensorrt.log", unary_op_pattern("log"), standard_predicate),
        ("tensorrt.sqrt", unary_op_pattern("sqrt"), standard_predicate),
        ("tensorrt.abs", unary_op_pattern("abs"), standard_predicate),
        ("tensorrt.negative", unary_op_pattern("negative"), standard_predicate),
        ("tensorrt.nn.batch_flatten", unary_op_pattern("nn.batch_flatten"), standard_predicate),
        ("tensorrt.clip", unary_op_pattern("clip"), standard_predicate),
        (
            "tensorrt.sin",
            unary_op_pattern("sin"),
            make_predicate(make_trt_version_checker((5, 1, 5))),
        ),
        (
            "tensorrt.cos",
            unary_op_pattern("cos"),
            make_predicate(make_trt_version_checker((5, 1, 5))),
        ),
        (
            "tensorrt.atan",
            unary_op_pattern("atan"),
            make_predicate(make_trt_version_checker((5, 1, 5))),
        ),
        (
            "tensorrt.ceil",
            unary_op_pattern("ceil"),
            make_predicate(make_trt_version_checker((5, 1, 5))),
        ),
        ("tensorrt.floor", unary_op_pattern("floor"), standard_predicate),
        (
            "tensorrt.erf",
            unary_op_pattern("erf"),
            make_predicate(make_trt_version_checker((7, 0, 0))),
        ),
        ("tensorrt.sum", unary_op_pattern("sum"), make_predicate(reduce_checker)),
        ("tensorrt.prod", unary_op_pattern("prod"), make_predicate(reduce_checker)),
        # BUGFIX: "tensorrt.max" was listed twice; the duplicate has been removed.
        ("tensorrt.max", unary_op_pattern("max"), make_predicate(reduce_checker)),
        ("tensorrt.min", unary_op_pattern("min"), make_predicate(reduce_checker)),
        ("tensorrt.mean", unary_op_pattern("mean"), make_predicate(reduce_checker)),
        (
            "tensorrt.concatenate",
            unary_op_pattern_with_any_tuple("concatenate"),
            make_predicate(concatenate_checker),
        ),
        (
            "tensorrt.expand_dims",
            unary_op_pattern("expand_dims"),
            make_predicate(expand_dims_checker),
        ),
        (
            "tensorrt.layout_transform",
            unary_op_pattern("layout_transform"),
            make_predicate(layout_transform_checker),
        ),
        ("tensorrt.transpose", unary_op_pattern("transpose"), make_predicate(transpose_checker)),
        ("tensorrt.reshape", unary_op_pattern("reshape"), make_predicate(reshape_checker)),
        ("tensorrt.split", unary_op_pattern("split"), make_predicate(split_checker)),
        ("tensorrt.nn.pad", unary_op_pattern("nn.pad"), make_predicate(pad_checker)),
        (
            "tensorrt.strided_slice",
            unary_op_pattern("strided_slice"),
            make_predicate(
                make_and_checker(make_trt_version_checker((5, 1, 5)), strided_slice_checker)
            ),
        ),
        (
            "tensorrt.nn.adaptive_avg_pool2d",
            unary_op_pattern("nn.adaptive_avg_pool2d"),
            make_predicate(adaptive_avg_pool2d_checker),
        ),
        (
            "tensorrt.nn.adaptive_max_pool2d",
            unary_op_pattern("nn.adaptive_max_pool2d"),
            make_predicate(adaptive_max_pool2d_checker),
        ),
        (
            "tensorrt.nn.max_pool3d",
            unary_op_pattern("nn.max_pool3d"),
            make_predicate(
                make_and_checker(make_trt_version_checker((6, 0, 1)), max_pool_3d_checker)
            ),
        ),
        (
            "tensorrt.nn.avg_pool3d",
            unary_op_pattern("nn.avg_pool3d"),
            make_predicate(
                make_and_checker(make_trt_version_checker((6, 0, 1)), avg_pool_3d_checker)
            ),
        ),
        (
            "tensorrt.nn.conv3d_transpose",
            unary_op_pattern("nn.conv3d_transpose"),
            make_predicate(
                make_and_checker(make_trt_version_checker((6, 0, 1)), conv3d_transpose_checker)
            ),
        ),
        ("tensorrt.nn.softmax", unary_op_pattern("nn.softmax"), make_predicate(softmax_checker)),
        (
            "tensorrt.nn.layer_norm",
            unary_op_pattern("nn.layer_norm"),
            make_predicate(layer_norm_checker),
        ),
        (
            "tensorrt.nn.max_pool2d",
            unary_op_pattern("nn.max_pool2d"),
            make_predicate(max_pool_2d_checker),
        ),
        (
            "tensorrt.nn.avg_pool2d",
            unary_op_pattern("nn.avg_pool2d"),
            make_predicate(avg_pool_2d_checker),
        ),
        (
            "tensorrt.nn.global_max_pool2d",
            unary_op_pattern("nn.global_max_pool2d"),
            make_predicate(global_max_pool_2d_checker),
        ),
        (
            "tensorrt.nn.global_avg_pool2d",
            unary_op_pattern("nn.global_avg_pool2d"),
            make_predicate(global_avg_pool_2d_checker),
        ),
        (
            "tensorrt.nn.batch_norm",
            proj_five_op_pattern_with_const("nn.batch_norm"),
            make_predicate(batch_norm_checker),
        ),
    ]
class IsComputeIntensiveGraph(ExprVisitor):
    """Recursively walks a Relay expression and records whether it contains a
    compute-heavy operator: convolutions (and their transposes), dense,
    batch matmul, or reductions (sum/prod/max/min/mean).
    """

    def __init__(self) -> None:
        super().__init__()
        # Set to True the first time a compute-heavy op is encountered.
        self.is_compute_intensive = False

    def visit_call(self, call: relay.expr.Call) -> None:
        heavy_ops = {
            "nn.conv1d",
            "nn.conv2d",
            "nn.conv2d_transpose",
            "nn.conv3d",
            "nn.conv3d_transpose",
            "nn.dense",
            "nn.batch_matmul",
            "sum",
            "prod",
            "max",
            "min",
            "mean",
        }
        if isinstance(call.op, tvm.tir.op.Op) and str(call.op.name) in heavy_ops:
            self.is_compute_intensive = True
        return super().visit_call(call)

    def is_graph_compute_intensive(self, subgraph: relay.expr.Expr) -> bool:
        """Visit ``subgraph`` and report whether any compute-heavy op was seen."""
        self.visit(subgraph)
        return self.is_compute_intensive
def is_valid_subgraph(params: List[relay.expr.Var], body: relay.expr.Expr) -> bool:
    """Final check on whether the subgraph is valid and should be offloaded to TensorRT."""
    if get_tensorrt_use_implicit_batch_mode():
        # In implicit batch mode every (non-dynamic) input must share one batch
        # size, and scalar inputs are rejected outright.
        # TODO: (codeislife99) : Fix different dynamic batch size inputs
        batch_sizes = []
        for var in params:
            if isinstance(var.checked_type, relay.TupleType):
                tensor_types = list(var.checked_type.fields)
            else:
                tensor_types = [var.checked_type]
            for ttype in tensor_types:
                # Scalar inputs not allowed
                if len(ttype.shape) == 0:
                    logger.info("tensorrt: scalar inputs not supported")
                    return False
                if not isinstance(ttype.shape[0], tvm.tir.expr.Any):
                    batch_sizes.append(int(ttype.shape[0]))
        if len(batch_sizes) > 1 and len(set(batch_sizes)) != 1:
            logger.info("tensorrt: inputs have different batch sizes: %s", batch_sizes)
            return False
    if get_tensorrt_remove_no_mac_subgraphs():
        if not IsComputeIntensiveGraph().is_graph_compute_intensive(body):
            logger.info("tensorrt: not a compute-intensize sub-graph")
            return False
    return True
def prune_tensorrt_subgraphs(mod: tvm.IRModule) -> tvm.IRModule:
    """
    Un-partition those partitions which:
    - have no multiply-accumulates (if remove_no_mac_subgraphs is True)
    - can't actually be supported by TensorRT now that we see the whole partition.
    """
    to_inline = []
    for gvar in mod.get_global_vars():
        func = mod[gvar]
        # Only inspect functions already marked for the TensorRT compiler.
        if not func.attrs or func.attrs["Compiler"] != "tensorrt":
            continue
        if not is_valid_subgraph(func.params, func.body):
            to_inline.append(gvar)
    return relay.transform.InlineCompilerFunctionsBoundTo(to_inline)(mod)
class RemoveDropout(ExprMutator):
    """
    Removes all nn.dropout from an expr.

    A ``nn.dropout`` call produces a tuple whose first field is (at inference
    time) just its input; projecting that field is rewritten to the dropout's
    input, eliding the op entirely.
    """

    def visit_tuple_getitem(self, op: TupleGetItem) -> relay.expr.Expr:
        visit = super().visit_tuple_getitem(op)
        # Only the first tuple field of a dropout is its pass-through output.
        if visit.index != 0:
            return visit
        # NOTE: the original code re-checked visit.index == 0 here, which is
        # redundant after the early return above.
        if (
            isinstance(visit.tuple_value, Call)
            and isinstance(visit.tuple_value.op, Op)
            and visit.tuple_value.op.name == "nn.dropout"
        ):
            return visit.tuple_value.args[0]
        return visit
@transform.function_pass(opt_level=0)
class RemoveDropoutPass:
    # Function-level pass wrapper that applies RemoveDropout to each function.
    def transform_function(
        self, func: relay.function.Function, mod: tvm.IRModule, _: tvm.transform.PassContext
    ) -> relay.function.Function:
        """Return ``func`` rewritten with all nn.dropout ops removed."""
        return RemoveDropout().visit(func)
| 43,978 | 37.209383 | 100 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/libtorch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, no-else-return, E1102
"""Torch codegen operators"""
from tvm import relay
from tvm.relay.op.annotation import compiler_begin, compiler_end
def torchop(script_fn, *params):
    """Create a Relay op that executes ``script_fn`` in the PyTorch JIT.

    Every input is wrapped in a "torch" compiler_begin annotation and the
    result in a matching compiler_end, so the op is offloaded to the torch
    backend. Currently only tensors are supported; shape inference assumes
    that input shapes (and not values) determine output shapes.
    """
    annotated_inputs = []
    for param in params:
        annotated_inputs.append(compiler_begin(param, "torch"))
    raw_op = relay.op._make.torchop(annotated_inputs, script_fn.save_to_buffer())
    return compiler_end(raw_op, "torch")
| 1,427 | 37.594595 | 84 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/vitis_ai.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, no-else-return, E1102
"""Vitis-AI codegen annotation of supported operators"""
import warnings
import numpy as np
from tvm import relay
import tvm._ffi
from tvm.relay import transform
from tvm.relay.expr import Tuple, TupleGetItem
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.annotation import compiler_begin, compiler_end
# Placeholder for PyXIR module
pyxir = None
def enabled():
    """Return whether Vitis-AI support is available"""
    if tvm.get_global_func("relay.ext.vitis_ai.available", True):
        return True
    print("Skip because Vitis-AI codegen is not available.")
    return False
@transform.function_pass(opt_level=0)
class VitisAIAnnotationPass:
    """Responsible for annotating Relay expressions for Vitis-AI DPU accelerators

    Parameters
    ----------
    compiler : str
        The compiler name used for annotations (`vitis_ai`).
    dpu_target : str
        The Vitis AI DPU target identifier.
    params : dict
        A dictionary containing the module's parameters.
    """
    def __init__(self, compiler, dpu_target, params):
        # PyXIR is imported lazily (module-level placeholder is None) so that
        # importing this module does not hard-require the pyxir package.
        global pyxir
        try:
            if pyxir is None:
                pyxir = __import__("pyxir")
            __import__("pyxir.frontend.tvm")
        except ImportError:
            # add "from None" to silence
            # "During handling of the above exception, another exception occurred"
            raise ImportError(
                "The pyxir package is required for the Vitis AI backend. "
                "Please install it first. "
                "Help: (https://tvm.apache.org/docs/deploy/vitis_ai.html) "
            ) from None
        self.compiler = compiler
        self.dpu_target = dpu_target
        self.params = params
    def transform_function(self, func, mod, ctx):
        """Transform function for annotating Relay module"""
        annotator = self
        class Annotator(tvm.relay.ExprMutator):
            """Annotator for Vitis-AI DPU accelerators"""
            def visit_tuple(self, tup):
                """Add compiler_begin and compiler_end annotations to Tuple"""
                field_list = []
                # hash() of the original node is how PyXIR layers refer back to
                # Relay nodes (see relay_ids collected below).
                cond = int(hash(tup))
                for field in tup.fields:
                    if cond in annotator.relay_ids:
                        field_list.append(compiler_begin(super().visit(field), annotator.compiler))
                    else:
                        field_list.append(super().visit(field))
                if cond in annotator.relay_ids:
                    return compiler_end(Tuple(field_list), annotator.compiler)
                else:
                    return Tuple(field_list)
            def visit_tuple_getitem(self, op):
                """Add compiler_begin and compiler_end annotations to TupleGetItem"""
                if int(hash(op.tuple_value)) in annotator.relay_ids:
                    tuple_value = compiler_begin(super().visit(op.tuple_value), annotator.compiler)
                    return compiler_end(TupleGetItem(tuple_value, op.index), annotator.compiler)
                else:
                    tuple_value = super().visit(op.tuple_value)
                    return TupleGetItem(tuple_value, op.index)
            def visit_call(self, call):
                """Add compiler_begin and compiler_end annotations to the Call expr"""
                if int(hash(call)) in annotator.relay_ids:
                    new_args = []
                    for arg in call.args:
                        ann = compiler_begin(super().visit(arg), annotator.compiler)
                        new_args.append(ann)
                    new_call = relay.Call(call.op, new_args, call.attrs, call.type_args)
                    return compiler_end(new_call, annotator.compiler)
                else:
                    return super().visit_call(call)
        # Let PyXIR decide which layers the DPU supports, then collect the
        # relay_id of every supported layer; Annotator wraps exactly those
        # nodes with compiler_begin/compiler_end.
        xgraph = pyxir.frontend.tvm.from_relay(mod, self.params, postprocessing=None)
        xgraph = pyxir.partition(xgraph, targets=[self.dpu_target])
        layers = xgraph.get_layers()
        relay_ids = [
            list(np.array(layer.attrs["relay_id"]).flatten())
            for layer in layers
            if layer.target == self.dpu_target
        ]
        self.relay_ids = [item for sublist in relay_ids for item in sublist]
        return Annotator().visit(func)
def annotation(mod, params, target):
    """DEPRECATED
    Annotate Relay expression for offloading operators to Vitis AI DPU accelerators
    NOTE: This function does the same as the next one (`partition_for_vitis_ai`) but is
    still here for backward compatibility"""
    # We need type information for supporting models that contain operations that don't
    # have a Relay to XLayer translation
    warnings.warn(
        "tvm.relay.op.contrib.vitis_ai.annotation() is being deprecated."
        " Please use tvm.relay.op.contrib.vitis_ai.partition_for_vitis_ai() instead. "
        " Check out https://tvm.apache.org/docs/deploy/vitis_ai.html for documentation. "
    )
    # InferType first: the annotation pass relies on checked types being populated.
    mod = relay.transform.InferType()(mod)
    mod = VitisAIAnnotationPass("vitis_ai", target, params)(mod)
    return mod
def partition_for_vitis_ai(mod, params=None, dpu=None, **opts):
    """Partition the Relay expression for offloading operators to Vitis AI DPU

    Parameters
    ----------
    mod : Module
        The module to run passes on.
    params : Optional[Dict[str, NDArray]]
        Constant input parameters.
    dpu : str
        The DPU identifier (e.g. DPUCZDX8G-zcu104, DPUCADF8H)

    Returns
    -------
    ret : Module
        The partitioned module.
    """
    if dpu is None:
        raise ValueError("Please pass Vitis AI DPU identifier to the partitioning function")
    if params:
        mod["main"] = bind_params_by_name(mod["main"], params)
    # Offloaded partitions are converted to NHWC, while whatever remains in
    # the main function is converted back to NCHW.
    desired_layouts_in_partition = {
        "nn.conv2d": ["NHWC", "default"],
        "nn.upsampling": ["NHWC"],
        "image.resize2d": ["NHWC"],
    }
    desired_layouts_in_main = {
        "nn.conv2d": ["NCHW", "default"],
        "nn.upsampling": ["NCHW"],
        "image.resize2d": ["NCHW"],
    }
    seq = tvm.transform.Sequential(
        [
            transform.RemoveUnusedFunctions(),
            transform.ConvertLayout(desired_layouts_in_partition),
            transform.FoldConstant(),
            transform.InferType(),
            VitisAIAnnotationPass("vitis_ai", dpu, params),
            transform.MergeCompilerRegions(),
            transform.PartitionGraph(),
            transform.RemoveUnusedFunctions(),
            transform.ConvertLayout(desired_layouts_in_main),
            transform.FoldConstant(),
        ]
    )
    with tvm.transform.PassContext(opt_level=3):
        return seq(mod)
| 7,525 | 36.63 | 99 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/_ethosn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Expose 'is supported' functions to Python."""
import tvm._ffi
# Register the C++-side Ethos(TM)-N predicates and backend helpers as
# module-level Python callables in this module's namespace.
tvm._ffi._init_api("relay.ethos-n.support", __name__)
tvm._ffi._init_api("relay.backend.contrib.ethos-n", __name__)
| 969 | 39.416667 | 62 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/dnnl.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, use-list-literal
"""DNNL library supported operators.
There are two ways to registering a function for an op to indicate if it is
supported by DNNL.
- The first and simplest way is to use the helper so that
users only need to provide the operator name and a boolean value to indicate if
it is supported. For example:
.. code-block:: python
add = _register_external_op_helper("add")
add = _register_external_op_helper("add", True)
add = _register_external_op_helper("add", False)
- The other way is to implement the function by themselves to
check the attributes of the op and decide if it should be offloaded to DNNL.
"""
import logging
from functools import reduce
import tvm.ir
from tvm import relay
from tvm.ir import Op
from tvm.relay import expr as _expr
from tvm.relay import transform
from tvm.relay.analysis import analysis as _analysis
from tvm.relay.expr import Call, GlobalVar, TupleGetItem, const
from tvm.relay.expr_functor import ExprMutator, ExprVisitor
from ... import _ffi_api
from ...dataflow_pattern import DFPatternCallback, is_constant, is_expr, is_op, rewrite, wildcard
from .register import register_pattern_table
logger = logging.getLogger("DNNL")
# Element-wise post-ops that may be fused after conv / conv_transpose / dense
# patterns; None means "no post-op attached".
supported_post_elts = ["nn.relu", "tanh", "sigmoid", "clip", "gelu", "swish", "mish", None]
def _register_external_op_helper(op_name, supported=True):
    """The helper function to indicate that a given operator can be supported
    by DNNL.

    Parameters
    ----------
    op_name : Str
        The name of operator that will be registered.
    supported : bool
        Whether the operator is supported by DNNL (default True).

    Returns
    -------
    f : callable
        A function that returns if the operator is supported by DNNL.
    """

    @tvm.ir.register_op_attr(op_name, "target.dnnl")
    def _func_wrapper(expr):
        # DNNL has no int64 kernels; reject calls with any int64 argument.
        # (generator instead of a materialized list: short-circuits on first hit)
        if any(arg.checked_type.dtype == "int64" for arg in expr.args):
            logger.info("DNNL does not support int64.")
            return False
        # DNNL does not support pooling with ceil_mode = True.
        if "pool" in op_name:
            attrs = dict(get_attrs(expr))
            if attrs.get("ceil_mode"):
                return False
        return supported

    return _func_wrapper
# Operators DNNL supports unconditionally (subject to the int64/ceil_mode
# checks inside the helper above).
_register_external_op_helper("nn.batch_norm")
_register_external_op_helper("nn.conv1d")
_register_external_op_helper("nn.conv2d")
_register_external_op_helper("nn.conv3d")
_register_external_op_helper("nn.conv2d_transpose")
_register_external_op_helper("nn.conv3d_transpose")
_register_external_op_helper("nn.dense")
_register_external_op_helper("nn.max_pool2d")
_register_external_op_helper("nn.avg_pool2d")
_register_external_op_helper("nn.global_avg_pool2d")
_register_external_op_helper("nn.max_pool3d")
_register_external_op_helper("nn.avg_pool3d")
_register_external_op_helper("abs")
_register_external_op_helper("clip")
_register_external_op_helper("exp")
_register_external_op_helper("log")
_register_external_op_helper("sqrt")
_register_external_op_helper("round")
_register_external_op_helper("nn.relu")
_register_external_op_helper("nn.leaky_relu")
_register_external_op_helper("tanh")
_register_external_op_helper("sigmoid")
_register_external_op_helper("nn.softmax")
_register_external_op_helper("add")
_register_external_op_helper("multiply")
_register_external_op_helper("nn.layer_norm")
_register_external_op_helper("nn.batch_matmul")
def append_eltwise_ops(op, eltwise):
    """Attach an element-wise post-op pattern to a conv / conv_transpose / dense pattern.

    Parameters
    ----------
    op : CallPattern
        Pattern of the op the post-op is fused onto.
    eltwise : str
        The attached elementwise post-op name (None for no post-op).

    Returns
    -------
    pattern : CallPattern
        Call node sequence.
    """
    if eltwise == "gelu":
        # x * (erf(x / c1) + c2) * c3 -- the erf-based GELU expansion.
        div_const = wildcard()
        add_const = wildcard()
        out_const = wildcard()
        erf_val = is_op("erf")(is_op("divide")(op, div_const))
        scaled = is_op("multiply")(op, is_op("add")(erf_val, add_const))
        return is_op("multiply")(scaled, out_const)
    if eltwise == "swish":
        # x * sigmoid(x)
        return is_op("multiply")(op, is_op("sigmoid")(op))
    if eltwise == "mish":
        # x * tanh(log(exp(x) + c)) -- mish via the softplus expansion.
        softplus = is_op("log")(is_op("add")(is_op("exp")(op), wildcard()))
        return is_op("multiply")(op, is_op("tanh")(softplus))
    if eltwise:
        return is_op(eltwise)(op)
    return op
def make_conv_pattern(conv_name, with_bias=True, with_eltwise=None):
    """Create patterns related to conv and conv_transpose.

    Parameters
    ----------
    conv_name : str
        The convolution op name (e.g. "nn.conv2d").
    with_bias : bool
        Whether attach `bias_add` to `conv / conv_transpose`.
    with_eltwise : str
        The attached elementwise post-op name.

    Returns
    -------
    conv_out : CallPattern
        Call node sequence.
    """
    if with_eltwise not in supported_post_elts:
        raise ValueError(f"Unsupported eltwise post-op: {with_eltwise}")
    conv = is_op(conv_name)(wildcard(), wildcard())
    conv_out = is_op("add")(conv, wildcard()) if with_bias else conv
    return append_eltwise_ops(conv_out, with_eltwise)
def make_conv_bias_sum_relu_pattern(conv_type, has_relu=True):
    """Create conv + bias + sum (+ relu) patterns.

    Parameters
    ----------
    conv_type : str
        Should be nn.conv1d / nn.conv2d / nn.conv3d.
    has_relu : bool
        Whether attach relu.

    Returns
    -------
    out : CallPattern
        Call node sequence.
    """
    conv = is_op(conv_type)(wildcard(), wildcard())
    biased = is_op("add")(conv, wildcard())
    summed = is_op("add")(biased, wildcard())
    return is_op("nn.relu")(summed) if has_relu else summed
def make_dense_bias_sum_pattern():
    """Create the dense + bias + sum pattern.

    Returns
    -------
    out : Tuple(pattern_name, CallPattern)
        Pattern name along with its call node sequence.
    """
    dense = is_op("nn.dense")(wildcard(), wildcard())
    biased = is_op("add")(dense, wildcard())
    summed = is_op("add")(biased, wildcard())
    return "dnnl.dense_bias_sum", summed
def get_op_name(expr):
    """Return the operator name carried by ``expr``, or "" if there is none."""
    if isinstance(expr, Op):
        return expr.name
    # Recurse into the node that actually carries the operator.
    for kind, extract in (
        (Call, lambda e: e.op),
        (TupleGetItem, lambda e: e.tuple_value),
        (relay.Tuple, lambda e: e.fields[0]),
    ):
        if isinstance(expr, kind):
            return get_op_name(extract(expr))
    return ""
def get_args(expr):
    """Return the call arguments reachable from ``expr`` (possibly flattened)."""
    if isinstance(expr, Call):
        return expr.args
    if isinstance(expr, TupleGetItem):
        return get_args(expr.tuple_value)
    if isinstance(expr, relay.Tuple):
        flattened = []
        for field in expr.fields:
            flattened.extend(get_args(field))
        return flattened
    return []
def get_attrs(expr):
    """Return the attributes of ``expr``, following tuple projections."""
    node = expr
    # Walk through any chain of TupleGetItem projections first.
    while isinstance(node, TupleGetItem):
        node = node.tuple_value
    if isinstance(node, Call):
        return node.attrs
    return {}
def make_sum_pattren_predicate(checker):
    """Check whether the conv_bias_add_sum pattern is as expected."""

    def predicate(expr):
        # Strip a trailing relu so the sum/bias adds are at the top.
        root = expr.args[0] if get_op_name(expr) == "nn.relu" else expr
        # The outer add is the sum, the inner add is the bias add.
        for node, op_name in ((root, "sum"), (root.args[0], "bias_add")):
            if not checker(get_attrs(node.args[0]), get_args(node), op_name):
                return False
        return True

    return predicate
def make_bias_add_pattren_predicate(checker):
    """Check whether the conv_bias pattern is as expected."""

    def predicate(expr):
        # Strip a trailing relu so the bias add (if any) is at the top.
        root = expr.args[0] if get_op_name(expr) == "nn.relu" else expr
        if get_op_name(root) != "add":
            return True
        return bool(checker(get_attrs(root.args[0]), get_args(root), "bias_add"))

    return predicate
def add_checker(attrs, args, op_name):
    """Check if add is aligned with elementwise_add and bias_add."""
    if op_name == "sum":
        producer = args[0]
        # The summed-into value must come from an "add" with matching shapes.
        if not isinstance(producer.op, tvm.ir.op.Op) or producer.op.name != "add":
            return False
        if tuple(get_shape(producer)) != tuple(get_shape(args[1])):
            return False
    if op_name == "bias_add":
        if attrs is None:
            return False
        producer = args[0]
        if not isinstance(producer.op, tvm.ir.op.Op) or producer.op.name != "nn.conv2d":
            return False
        # Bias must hold exactly one value per output channel.
        channel = dict(attrs)["channels"]
        bias_elems = reduce(lambda acc, dim: acc * dim, get_shape(args[1]))
        if channel != bias_elems:
            return False
    return True
def make_dense_pattern(with_bias=True, with_eltwise=None):
    """Create patterns related to nn.dense.

    Parameters
    ----------
    with_bias : bool
        Whether attach `bias_add` to `nn.dense`.
    with_eltwise : str
        The attached elementwise post-op name.

    Returns
    -------
    dense_out : CallPattern
        Call node sequence.
    """
    if with_eltwise not in supported_post_elts:
        raise ValueError(f"Unsupported eltwise post-op: {with_eltwise}")
    dense = is_op("nn.dense")(wildcard(), wildcard())
    dense_out = is_op("add")(dense, wildcard()) if with_bias else dense
    return append_eltwise_ops(dense_out, with_eltwise)
def make_dnnl_pattern(op_name, with_bias, with_eltwise):
    """Create dnnl patterns.

    Parameters
    ----------
    op_name : str
        The first call node's op name.
    with_bias : bool
        Whether attach `bias_add` to `nn.dense`.
    with_eltwise : str
        The attached elementwise post-op name.

    Returns
    -------
    pattern : Tuple(pattern_name, CallPattern)
        Created pattern name, along with its CallPattern (empty tuple when the
        op is unsupported).
    """
    pat_name = op_name.replace("nn", "dnnl")
    if "_transpose" in op_name:
        # e.g. "nn.conv2d_transpose" -> "dnnl.deconv2d"
        pat_name = "dnnl.deconv" + op_name.split("_")[0][-2::]
    if with_bias:
        pat_name += "_bias"
    if with_eltwise:
        pat_name += "_" + with_eltwise.split(".")[-1]
    if "conv" in op_name:
        return (
            pat_name,
            make_conv_pattern(op_name, with_bias, with_eltwise),
            make_bias_add_pattren_predicate(add_checker),
        )
    if op_name == "nn.dense":
        return (pat_name, make_dense_pattern(with_bias, with_eltwise))
    logger.warning(
        "Currently, only conv1d, conv2d, conv2d_transpose, conv3d_transpose, "
        "dense op are supported, but got %s.",
        op_name,
    )
    return ()
def make_qnn_conv2d_pattern():
    """Make qnn.conv2d based pattern supported by DNNL

    Returns
    -------
    pattern : Tuple(pattern_name, CallPattern)
        Created pattern name, along with its CallPattern.
    """
    inp = wildcard()
    kernel = is_constant()
    bias_c = is_constant()
    out_scale = is_constant()
    out_zp = is_constant()
    act_scale = is_constant()
    sum_scale = is_constant()
    sum_input = wildcard()
    zero = is_expr(const(0, dtype="int32"))
    pat = is_op("qnn.conv2d")(inp, kernel, zero, zero, is_constant(), is_constant())
    pat = is_op("cast")(pat)
    pat = is_op("add")(pat, bias_c) | pat  # bias is optional
    pat = is_op("multiply")(pat, out_scale)
    pat = is_op("clip")(pat)  # TBD, not only clip
    pat = is_op("multiply")(pat, act_scale) | pat  # optional, e.g. act scale == 1
    pat = is_op("add")(pat, sum_scale * is_op("cast")(sum_input)) | pat  # sum is optional
    pat = is_op("add")(pat, out_zp) | pat  # optional, dst zero-point may be 0
    pat = is_op("cast")(pat)
    return "dnnl.qnn.conv2d", pat
def make_qnn_dense_pattern():
    """Make qnn.dense based pattern supported by DNNL

    Returns
    -------
    pattern : Tuple(pattern_name, CallPattern)
        Created pattern name, along with its CallPattern.
    """
    inp = wildcard()
    kernel = is_constant()
    bias_c = is_constant()
    out_scale = is_constant()
    out_zp = is_constant()
    act_scale = is_constant()
    sum_scale = is_constant()
    sum_input = wildcard()
    zero = is_expr(const(0, dtype="int32"))
    pat = is_op("qnn.dense")(inp, kernel, zero, zero, is_constant(), is_constant())
    pat = is_op("cast")(pat)
    pat = is_op("add")(pat, bias_c) | pat  # bias is optional
    pat = is_op("multiply")(pat, out_scale)
    pat = is_op("clip")(pat)  # TBD, not only clip
    pat = is_op("multiply")(pat, act_scale) | pat  # optional, e.g. act scale == 1
    pat = is_op("add")(pat, sum_scale * is_op("cast")(sum_input)) | pat  # sum is optional
    pat = is_op("add")(pat, out_zp) | pat  # optional, dst zero-point may be 0
    pat = is_op("cast")(pat)
    return "dnnl.qnn.dense", pat
@register_pattern_table("dnnl")
def pattern_table():
    """Create dnnl patterns.

    Returns
    -------
    dnnl_patterns : List[dnnl_pattern]
        Created patterns.
    """
    # Hand-written fused patterns first; ordering matters for matching priority.
    dnnl_patterns = [
        make_qnn_conv2d_pattern(),
        make_qnn_dense_pattern(),
        make_dense_bias_sum_pattern(),
        (
            "dnnl.conv2d_bias_sum_relu",
            make_conv_bias_sum_relu_pattern("nn.conv2d"),
            make_sum_pattren_predicate(add_checker),
        ),
        (
            "dnnl.conv2d_bias_sum",
            make_conv_bias_sum_relu_pattern("nn.conv2d", False),
            make_sum_pattren_predicate(add_checker),
        ),
    ]
    elt_list = ["nn.relu", "tanh", "sigmoid", "clip", "gelu", "swish", "mish", None]
    conv_names = [
        "nn.conv1d",
        "nn.conv2d",
        "nn.conv3d",
        "nn.conv2d_transpose",
        "nn.conv3d_transpose",
    ]
    for with_bias in (True, False):
        for elt in elt_list:
            if not with_bias and not elt:
                # A bare conv/dense needs no dedicated fused pattern.
                continue
            for conv_name in conv_names:
                dnnl_patterns.append(make_dnnl_pattern(conv_name, with_bias, elt))
            dnnl_patterns.append(make_dnnl_pattern("nn.dense", with_bias, elt))
    return dnnl_patterns
def get_optimal_layout_for_conv(
    data_layout, kernel_layout, weight_shape, out_shape, paddings, strides, dilates, groups, dtype
):
    """Query DNNL for its preferred layouts for a convolution.

    Thin wrapper over the C++ FFI entry point. Shape/stride/padding arguments
    are comma-separated strings as produced by ``alter_conv``.

    Returns
    -------
    layouts : string
        Comma-separated "src,weight,dst" DNNL layout tags
        (split by the caller, see ``alter_conv``).
    """
    return _ffi_api.get_optimal_layout_for_conv(
        data_layout,
        kernel_layout,
        weight_shape,
        out_shape,
        paddings,
        strides,
        dilates,
        groups,
        dtype,
    )
def get_optimal_layout_for_conv_transpose(
    data_layout,
    kernel_layout,
    weight_shape,
    out_shape,
    paddings,
    output_paddings,
    strides,
    dilates,
    groups,
    dtype,
):
    """Query DNNL for its preferred layouts for a transposed convolution.

    Thin wrapper over the C++ FFI entry point. Shape/stride/padding arguments
    are comma-separated strings as produced by ``alter_conv_transpose``.

    Returns
    -------
    layouts : string
        Comma-separated "src,weight,dst" DNNL layout tags
        (split by the caller, see ``alter_conv_transpose``).
    """
    return _ffi_api.get_optimal_layout_for_conv_transpose(
        data_layout,
        kernel_layout,
        weight_shape,
        out_shape,
        paddings,
        output_paddings,
        strides,
        dilates,
        groups,
        dtype,
    )
def get_shape(tensor):
    """Return the shape of ``tensor`` for the node kinds DNNL handling needs."""
    if isinstance(tensor, relay.expr.Call):
        # A "multiply" output keeps the shape of its first type argument.
        source = tensor.type_args[0] if tensor.op.name == "multiply" else tensor.checked_type
        return source.shape
    if isinstance(tensor, relay.expr.Var):
        return tensor.type_annotation.concrete_shape
    if isinstance(tensor, relay.expr.Constant):
        return tensor.data.shape
    if isinstance(tensor, tvm.ir.tensor_type.TensorType):
        return tensor.concrete_shape
    if isinstance(tensor, tvm.ir.container.Array):
        return tensor[-1].shape
    raise TypeError(f"Unsupport data type: {type(tensor)}")
def get_dtype(tensor):
    """Return the element dtype of ``tensor`` for the node kinds DNNL handling needs."""
    if isinstance(tensor, relay.expr.Call):
        # A "multiply" output keeps the dtype of its first type argument.
        source = tensor.type_args[0] if tensor.op.name == "multiply" else tensor.checked_type
        return source.dtype
    if isinstance(tensor, relay.expr.Var):
        return tensor.type_annotation.dtype
    if isinstance(tensor, relay.expr.Constant):
        return tensor.data.dtype
    if isinstance(tensor, tvm.ir.tensor_type.TensorType):
        return tensor.dtype
    if isinstance(tensor, tvm.ir.container.Array):
        return tensor[-1].dtype
    raise TypeError(f"Unsupport data type: {type(tensor)}")
def tag2layout(input_data, is_weight=False, conv_type="Conv1D"):
    """Transfer layout, denoted with `a, b, c, d, e`,
    into valid layout (NCHW / OIHW) of TVM.

    Parameters
    ----------
    input_data : str
        DNNL layout tag, e.g. "Acdb32a". Lower-case letters map through the
        dimension dictionary; an upper-case letter marks a blocked dimension
        (later lower-case occurrences of it are emitted lower-case); digits
        are block sizes and are copied through unchanged.
    is_weight : bool
        Whether the tag describes a weight (OIHW-style) rather than data
        (NCHW-style) layout.
    conv_type : str
        Contains one of "Conv1D" / "Conv2D" / "Conv3D" (suffixes such as
        "Transpose" are allowed).

    Returns
    -------
    res : str
        The TVM layout string.

    Raises
    ------
    ValueError
        If ``conv_type`` is not recognized, or the tag contains an
        unsupported character.
    """
    if "Conv1D" in conv_type:
        data_dic = {"a": "N", "b": "C", "c": "W"}
        weight_dic = {"a": "O", "b": "I", "c": "W", "d": "G"}
    elif "Conv2D" in conv_type:
        data_dic = {"a": "N", "b": "C", "c": "H", "d": "W"}
        weight_dic = {"a": "O", "b": "I", "c": "H", "d": "W"}
        if "e" in input_data:
            weight_dic = {"a": "G", "b": "O", "c": "I", "d": "H", "e": "W"}
    elif "Conv3D" in conv_type:
        data_dic = {"a": "N", "b": "C", "c": "D", "d": "H", "e": "W"}
        weight_dic = {"a": "O", "b": "I", "c": "D", "d": "H", "e": "W", "f": "G"}
    else:
        # Previously an unknown conv_type fell through and crashed below with
        # an UnboundLocalError; fail fast with a clear message instead.
        raise ValueError(f"Unsupport conv type: {conv_type}")
    dic = weight_dic if is_weight else data_dic
    res = ""
    for i in input_data:
        if i.isupper():
            # Blocked dimension: emit upper-case once, then demote the entry
            # so later occurrences are emitted lower-case.
            i = i.lower()
            res += dic[i]
            dic[i] = dic[i].lower()
        elif i.islower():
            res += dic[i]
        elif i.isdigit():
            # Block size, copied through verbatim.
            res += i
        else:
            raise ValueError(f"Unsupport layout format: {input_data}")
    return res
def legalize_pad_avg_pool(attrs, inputs, types):
    """Legalize pad->avg_pool2d pattern.
    Fuse this pattern into one avg_pool2d with padding = (1, 1),
    and count_include_pad = True"""
    data = inputs[0]
    preceded_by_pad = isinstance(data, relay.expr.Call) and data.op.name == "nn.pad"
    if not preceded_by_pad:
        return relay.nn.avg_pool2d(data, **attrs)
    fused_attrs = dict(attrs)
    fused_attrs["padding"] = (1, 1)
    fused_attrs["count_include_pad"] = True
    # Drop the explicit pad and pool directly over its input.
    return relay.nn.avg_pool2d(data.args[0], **fused_attrs)
def legalize_group_conv(attrs, inputs, types):
    """Legalize group conv / conv_transpose calculation.
    Alter weight layout from OIHW to GOIHW / IOHW to GIOHW"""
    data, weight = inputs
    is_transpose = "Transpose" in type(attrs).__name__
    if attrs.groups == 1:
        if is_transpose:
            return relay.nn.conv2d_transpose(data, weight, **attrs)
        return relay.nn.conv2d(data, weight, **attrs)
    oc, ic, kh, kw = get_shape(weight)
    # Split the output-channel axis so the group dimension is explicit.
    weight = relay.reshape(weight, (attrs.groups, oc // attrs.groups, ic, kh, kw))
    new_attrs = dict(attrs)
    if is_transpose:
        new_attrs["kernel_layout"] = "GIOHW"
        return relay.nn.conv2d_transpose(data, weight, **new_attrs)
    new_attrs["kernel_layout"] = "GOIHW"
    return relay.nn.conv2d(data, weight, **new_attrs)
def alter_conv(attrs, inputs, tinfos, out_type):
    """The convolution's layout auto-query func for dnnl."""
    data, weight = inputs

    def csv(values):
        # Serialize shapes/tuples as the comma-separated strings the FFI expects.
        return ",".join(str(v) for v in values)

    conv_type = type(attrs).__name__.split("Attrs")[0]
    layouts = get_optimal_layout_for_conv(
        attrs["data_layout"],
        attrs["kernel_layout"],
        csv(get_shape(weight)),
        csv(get_shape(out_type)),
        csv(attrs.get_int_tuple("padding")),
        csv(attrs.get_int_tuple("strides")),
        csv(attrs.get_int_tuple("dilation")),
        str(attrs.groups),
        get_dtype(weight),
    )
    src_tag, weight_tag, dst_tag = layouts.split(",")
    new_attrs = dict(attrs)
    new_attrs["data_layout"] = tag2layout(src_tag, is_weight=False, conv_type=conv_type)
    new_attrs["kernel_layout"] = tag2layout(weight_tag, is_weight=True, conv_type=conv_type)
    new_attrs["out_layout"] = tag2layout(dst_tag, is_weight=False, conv_type=conv_type)
    if conv_type == "Conv1D":
        return relay.nn.conv1d(data, weight, **new_attrs)
    if conv_type == "Conv2D":
        return relay.nn.conv2d(data, weight, **new_attrs)
    return relay.nn.conv3d(data, weight, **new_attrs)
def alter_conv_transpose(attrs, inputs, tinfos, out_type):
    """The transposed convolution's layout auto-query func for dnnl."""
    data, weight = inputs

    def csv(values):
        # Serialize shapes/tuples as the comma-separated strings the FFI expects.
        return ",".join(str(v) for v in values)

    conv_type = type(attrs).__name__.split("Attrs")[0]
    layouts = get_optimal_layout_for_conv_transpose(
        attrs["data_layout"],
        attrs["kernel_layout"],
        csv(get_shape(weight)),
        csv(get_shape(out_type)),
        csv(attrs.get_int_tuple("padding")),
        csv(attrs.get_int_tuple("output_padding")),
        csv(attrs.get_int_tuple("strides")),
        csv(attrs.get_int_tuple("dilation")),
        str(attrs.groups),
        get_dtype(weight),
    )
    src_tag, weight_tag, dst_tag = layouts.split(",")
    new_attrs = dict(attrs)
    new_attrs["data_layout"] = tag2layout(src_tag, is_weight=False, conv_type=conv_type)
    new_attrs["kernel_layout"] = tag2layout(weight_tag, is_weight=True, conv_type=conv_type)
    new_attrs["out_layout"] = tag2layout(dst_tag, is_weight=False, conv_type=conv_type)
    if conv_type == "Conv1DTranspose":
        return relay.nn.conv1d_transpose(data, weight, **new_attrs)
    if conv_type == "Conv2DTranspose":
        return relay.nn.conv2d_transpose(data, weight, **new_attrs)
    return relay.nn.conv3d_transpose(data, weight, **new_attrs)
class IsComputeIntensiveGraph(ExprVisitor):
    """
    Visits the Graph recursively and checks if it contains compute heavy ops like convolutions and
    its transpose and dense.
    """

    # Hoisted to a class-level frozenset: the original rebuilt this set on
    # every visited call node, and `frozenset({...})` is the idiomatic form.
    _COMPUTE_INTENSIVE_OPS = frozenset(
        {
            "nn.conv1d",
            "nn.conv2d",
            "nn.conv2d_transpose",
            "nn.conv3d",
            "nn.conv3d_transpose",
            "nn.dense",
            "nn.layer_norm",
            "nn.batch_matmul",
            "nn.global_avg_pool2d",
        }
    )

    def __init__(self):
        ExprVisitor.__init__(self)
        # Latched to True once any compute-intensive op is seen.
        self.is_compute_intensive = False

    def visit_call(self, call):
        # Only operator calls are inspected (not calls to functions/global vars).
        if isinstance(call.op, tvm.tir.op.Op):
            if str(call.op.name) in self._COMPUTE_INTENSIVE_OPS:
                self.is_compute_intensive = True
        return super().visit_call(call)

    def is_graph_compute_intensive(self, subgraph) -> bool:
        """
        This function recursively visits the graph and checks if it's compute intensive.
        """
        self.visit(subgraph)
        return self.is_compute_intensive
def is_valid_subgraph(body):
    """Final check on whether the subgraph is valid and should be offloaded to DNNL."""
    checker = IsComputeIntensiveGraph()
    return checker.is_graph_compute_intensive(body)
def prune_dnnl_subgraphs(mod):
    """
    Removes invalid subgraphs, which does not contain compute intensive dnnl ops.

    Subgraphs partitioned for DNNL that contain no compute-intensive op are
    inlined back into the main function; a new module without the pruned
    functions is returned.
    """

    class SubgraphRemover(ExprMutator):
        """
        Reverts subgraphs in subgraphs_to_remove back to TVM instead of using an external codegen.
        """

        def __init__(self, subgraphs_to_remove, mod, new_mod):
            ExprMutator.__init__(self)
            # Name hints of partitioned functions that must be inlined back.
            self.subgraphs_to_remove = subgraphs_to_remove
            # Original module: the partitioned functions are looked up here.
            self.mod = mod
            self.new_mod = new_mod

        def visit_call(self, call):
            # NOTE(review): GlobalVar is assumed to be imported at module
            # level (not visible in this chunk) — confirm.
            if isinstance(call.op, GlobalVar):
                name = call.op.name_hint
                if name in self.subgraphs_to_remove:
                    # "Inline" the subgraph back into new main function.
                    func = self.mod[name]
                    var_map = {}
                    # Bind each parameter of the subgraph to the (visited)
                    # corresponding call argument.
                    for arg, param in zip(call.args, func.params):
                        var_map[param] = super().visit(arg)
                    new_body = relay.bind(func.body, var_map)
                    return new_body
                if name != "main":
                    # Keep calls to other global functions, but rewrite their
                    # arguments recursively.
                    args = []
                    for arg in call.args:
                        args.append(super().visit(arg))
                    return call.op(*args)
            return super().visit_call(call)

    subgraphs_to_remove = []
    # If only one subgraph, do nothing.
    if len(mod.get_global_vars()) <= 2:
        return mod
    # Remove invalid subgraphs
    for subgraph in mod.get_global_vars():
        name = subgraph.name_hint
        # Only DNNL-partitioned functions are candidates for pruning.
        if not mod[name].attrs or mod[name].attrs["Compiler"] != "dnnl":
            continue
        if not is_valid_subgraph(mod[name].body):
            subgraphs_to_remove.append(name)
    # Create new pruned module
    new_mod = tvm.IRModule(mod.functions, mod.type_definitions)
    new_mod["main"] = SubgraphRemover(subgraphs_to_remove, mod, new_mod).visit(mod["main"])
    # Drop the now-unreferenced partitioned functions.
    new_mod = transform.RemoveUnusedFunctions()(new_mod)
    return new_mod
class LayerNormRewrite(DFPatternCallback):
    """Fold a decomposed layer normalization into one ``nn.layer_norm`` call.

    Two decompositions are recognised:

    * mean -> subtract -> (optional cast) -> power(2) -> mean -> add(eps)
      -> sqrt -> divide -> multiply(gamma) -> add(beta)
    * mean & variance -> add(eps) -> sqrt (or rsqrt + multiply) -> divide
      -> multiply(gamma) -> add(beta)

    The epsilon constant must be 1e-5 or 1e-6 and the exponent constant may
    be the integer literal 2 or the float literal 2.0.
    """

    def __init__(self):
        super().__init__()
        self.data = wildcard()
        self.gamma = wildcard()
        self.beta = wildcard()
        mean_val = is_op("mean")(self.data)
        centered = is_op("subtract")(self.data, mean_val)
        # Some frontends insert a cast between subtract and power.
        centered_any = centered | is_op("cast")(centered)
        exponent_two = is_expr(relay.const(2)) | is_expr(relay.const(2.0))
        squared = is_op("power")(centered_any, exponent_two)
        # Variance either computed explicitly or as mean of squares.
        variance_like = is_op("mean")(squared) | is_op("variance")(self.data, mean_val)
        epsilon = is_expr(relay.const(1e-5)) | is_expr(relay.const(1e-6))
        var_plus_eps = is_op("add")(variance_like, epsilon)
        std_dev = is_op("sqrt")(var_plus_eps)
        normalized = is_op("divide")(centered, std_dev)
        normalized_rsqrt = centered * is_op("rsqrt")(var_plus_eps)
        scaled = is_op("multiply")(normalized | normalized_rsqrt, self.gamma)
        shifted = is_op("add")(scaled, self.beta)
        self.pattern = shifted

    def callback(self, pre, post, node_map):
        data = node_map[self.data][0]
        gamma = node_map[self.gamma][0]
        beta = node_map[self.beta][0]
        return relay.op.nn.layer_norm(data=data, gamma=gamma, beta=beta)
def rewrite_layer_norm(mod):
    """Collapse decomposed layer-normalization subgraphs in ``mod["main"]``
    into single TVM ``nn.layer_norm`` operators so they can be offloaded to
    the DNNL layer-normalization BYOC implementation.
    """
    mod["main"] = rewrite(LayerNormRewrite(), mod["main"])
    return mod
class DenseReshapeBiasGeluRewrite(DFPatternCallback):
    """
    A callback to reorder reshape operators when the patterns are as below:

    Pattern #1 (has_gelu=False, dense -> reshape -> bias add):
    1   %62 = nn.dense(%61, meta[relay.Constant][13] /* ty=Tensor[(64, 64), float32] */,
                units=None, out_dtype="float32") /* ty=Tensor[(3136, 64), float32] */;
    2   %63 = reshape(%62, newshape=[1, 3136, 64]) /* ty=Tensor[(1, 3136, 64), float32] */;
    3   %64 = add(meta[relay.Constant][4] /* ty=Tensor[(64), float32] */, %63)
            /* ty=Tensor[(1, 3136, 64), float32] */;

    Pattern #2 (has_gelu=True, dense -> reshape -> bias add -> erf-based GELU):
    1   %76 = nn.dense(%75, meta[relay.Constant][18] /* ty=Tensor[(512, 64), float32] */,
                units=None, out_dtype="float32") /*  ty=Tensor[(3136, 512), float32] */;
    2   %77 = reshape(%76, newshape=[1, 3136, 512]) /* ty=Tensor[(1, 3136, 512), float32] */;
    3   %78 = add(meta[relay.Constant][15] /* ty=Tensor[(512), float32] */, %77)
            /* ty=Tensor[(1, 3136, 512), float32] */;
    4   %79 = divide(%78, 1.41421f /* ty=float32 */) /* ty=Tensor[(1, 3136, 512), float32] */;
    5   %80 = erf(%79) /* ty=Tensor[(1, 3136, 512), float32] */;
    6   %81 = add(%80, 1f /* ty=float32 */) /* ty=Tensor[(1, 3136, 512), float32] */;
    7   %82 = multiply(%78, %81) /* ty=Tensor[(1, 3136, 512), float32] */;
    8   %83 = multiply(%82, 0.5f /* ty=float32 */) /* ty=Tensor[(1, 3136, 512), float32] */;

    The callback re-emits the chain with the reshape moved to the very end, so
    dense+bias(+gelu) become adjacent and fusable for the BYOC backend.
    """

    def __init__(self, has_gelu=True):
        super(DenseReshapeBiasGeluRewrite, self).__init__()
        self.data = wildcard()
        self.weight = wildcard()
        self.bias = wildcard()
        self.const1 = wildcard()
        self.const2 = wildcard()
        self.const3 = wildcard()
        # Attributes captured from the matched reshape op, keyed by op name.
        self.attr_map = {}
        # Whether the pattern also matches the trailing erf-based GELU chain.
        self.has_gelu = has_gelu
        den = is_op("nn.dense")(self.data, self.weight)
        re_den = is_op("reshape")(den)
        added = is_op("add")(self.bias, re_den)
        if self.has_gelu:
            # erf-based GELU: 0.5 * x * (1 + erf(x / sqrt(2))).
            divisor = is_op("divide")(added, self.const1)
            val_erf = is_op("erf")(divisor)
            added_erf = is_op("add")(val_erf, self.const2)
            mul1 = is_op("multiply")(added, added_erf)
            mul2 = is_op("multiply")(mul1, self.const3)
            self.pattern = mul2
        else:
            self.pattern = added

    def get_attr(self, pre):
        """Recursively retrieve attributes from reshape operator."""

        def visit_func(expr):
            # NOTE(review): `_expr` / `_analysis` are assumed to be
            # module-level aliases of relay.expr / relay.analysis — confirm.
            if isinstance(expr, _expr.Call) and expr.op == relay.op.get("reshape"):
                new_attrs = {}
                for k in expr.attrs.keys():
                    new_attrs[k] = expr.attrs[k]
                # Stash the reshape attrs for use by callback().
                self.attr_map["reshape"] = new_attrs

        _analysis.post_order_visit(pre, visit_func)

    def callback(self, pre, post, node_map):
        # Capture the original reshape's newshape before re-emitting.
        self.get_attr(pre)
        data = node_map[self.data][0]
        weight = node_map[self.weight][0]
        bias = node_map[self.bias][0]
        # Re-emit dense + bias-add on the un-reshaped tensor.
        den = relay.op.nn.dense(data, weight)
        added = relay.op.add(bias, den)
        if not self.has_gelu:
            # Apply the captured reshape last.
            return relay.op.reshape(added, self.attr_map["reshape"]["newshape"])
        const1 = node_map[self.const1][0]
        const2 = node_map[self.const2][0]
        const3 = node_map[self.const3][0]
        # Re-emit the erf-based GELU chain, then reshape last.
        divisor = relay.op.divide(added, const1)
        val_erf = relay.op.erf(divisor)
        added_erf = relay.op.add(val_erf, const2)
        mul1 = relay.op.multiply(added, added_erf)
        mul2 = relay.op.multiply(mul1, const3)
        return relay.op.reshape(mul2, self.attr_map["reshape"]["newshape"])
def rewrite_dense_bias_gelu_reshape_last(mod):
    """Rewrite the input graph to reorder reshape operators so that
    we can perform dense_bias_gelu/dense_bias fusion and then offload
    them to byoc part.
    """
    callbacks = [
        DenseReshapeBiasGeluRewrite(),
        DenseReshapeBiasGeluRewrite(has_gelu=False),
    ]
    mod["main"] = rewrite(callbacks, mod["main"])
    return mod
class ResNetV1Rewrite(DFPatternCallback):
    """
    A callback to advance downsize operation when the patterns are as pattern1,
    and the result is written in pattern2:
    Pattern #1:
    %26 = nn.conv2d(%25, ty=Tensor[(64, 256, 1, 1));
    %27 = add(%26, ty=Tensor[(64, 1, 1));
    %28 = nn.relu(%27);
    %29 = nn.conv2d(%28, ty=Tensor[(64, 64, 3, 3));
    %30 = add(%29, ty=Tensor[(64, 1, 1));
    %31 = nn.relu(%30);
    %32 = nn.conv2d(%31, ty=Tensor[(256, 64, 1, 1));
    %33 = add(%32, ty=Tensor[(256, 1, 1));
    %34 = add(%33, %25);
    %35 = nn.relu(%34);
    %36 = nn.conv2d(%35, ty=Tensor[(128, 256, 1, 1), strides=[2, 2]);
    %37 = add(%36, ty=Tensor[(128, 1, 1));
    %38 = nn.relu(%37);
    %39 = nn.conv2d(%38, ty=Tensor[(128, 128, 3, 3));
    %40 = add(%39, ty=Tensor[(128, 1, 1)]);
    %41 = nn.relu(%40);
    %42 = nn.conv2d(%41, ty=Tensor[(512, 128, 1, 1));
    %43 = nn.conv2d(%35, ty=Tensor[(512, 256, 1, 1), strides=[2, 2]);
    %44 = add(%42, ty=Tensor[(512, 1, 1));
    %45 = add(%43, ty=Tensor[(512, 1, 1));
    %46 = add(%44, %45);
    %47 = nn.relu(%46);
    Pattern #2:
    %26 = nn.conv2d(%25, ty=Tensor[(64, 256, 1, 1));
    %27 = add(%26, ty=Tensor[(64, 1, 1));
    %28 = nn.relu(%27);
    %29 = nn.conv2d(%28, ty=Tensor[(64, 64, 3, 3), strides=[2, 2]);
    %30 = add(%29, ty=Tensor[(64, 1, 1));
    %31 = nn.relu(%30);
    %32 = nn.conv2d(%31, ty=Tensor[(256, 64, 1, 1));
    %33 = add(%32, ty=Tensor[(256, 1, 1));
    %34 = nn.max_pool2d(%25, pool_size=[1, 1], strides=[2, 2], padding=[0, 0, 0, 0]);
    %35 = add(%33, %34);
    %36 = nn.relu(%35);
    %37 = nn.conv2d(%36, ty=Tensor[(128, 256, 1, 1));
    %38 = add(%37, ty=Tensor[(128, 1, 1));
    %39 = nn.relu(%38);
    %40 = nn.conv2d(%39, ty=Tensor[(128, 128, 3, 3));
    %41 = add(%40, ty=Tensor[(128, 1, 1));
    %42 = nn.relu(%41);
    %43 = nn.conv2d(%42, ty=Tensor[(512, 128, 1, 1));
    %44 = nn.conv2d(%36, ty=Tensor[(512, 256, 1, 1));
    %45 = add(%43, ty=Tensor[(512, 1, 1));
    %46 = add(%44, ty=Tensor[(512, 1, 1));
    %47 = add(%45, %46);
    %48 = nn.relu(%47);
    """

    def __init__(self):
        super(ResNetV1Rewrite, self).__init__()
        # Holds the attrs of every nn.conv2d matched in `pre`, filled by
        # get_attr() in post-order just before rewriting.
        self.attr_lst = []
        self.data = wildcard()
        # One (weight, bias) wildcard pair per convolution in the block.
        self.w1, self.b1 = wildcard(), wildcard()
        self.w2, self.b2 = wildcard(), wildcard()
        self.w3, self.b3 = wildcard(), wildcard()
        self.w4, self.b4 = wildcard(), wildcard()
        self.w5, self.b5 = wildcard(), wildcard()
        self.w6, self.b6 = wildcard(), wildcard()
        self.w7, self.b7 = wildcard(), wildcard()
        # Residual bottleneck: 1x1 -> 3x3 -> 1x1 with identity shortcut.
        conv1 = is_op("nn.conv2d")(self.data, self.w1).has_attr({"kernel_size": [1, 1]})
        conv1 = is_op("add")(conv1, self.b1)
        conv1 = is_op("nn.relu")(conv1)
        conv2 = is_op("nn.conv2d")(conv1, self.w2).has_attr({"kernel_size": [3, 3]})
        conv2 = is_op("add")(conv2, self.b2)
        conv2 = is_op("nn.relu")(conv2)
        conv3 = is_op("nn.conv2d")(conv2, self.w3).has_attr({"kernel_size": [1, 1]})
        conv3 = is_op("add")(conv3, self.b3)
        conv3 = is_op("add")(conv3, self.data)
        conv3 = is_op("nn.relu")(conv3)
        # Downsizing block: strided left branch plus strided projection shortcut.
        left_conv4 = is_op("nn.conv2d")(conv3, self.w4).has_attr({"strides": [2, 2]})
        left_conv4 = is_op("add")(left_conv4, self.b4)
        left_conv4 = is_op("nn.relu")(left_conv4)
        left_conv5 = is_op("nn.conv2d")(left_conv4, self.w5).has_attr({"kernel_size": [3, 3]})
        left_conv5 = is_op("add")(left_conv5, self.b5)
        left_conv5 = is_op("nn.relu")(left_conv5)
        left_conv6 = is_op("nn.conv2d")(left_conv5, self.w6).has_attr({"kernel_size": [1, 1]})
        left_conv6 = is_op("add")(left_conv6, self.b6)
        right_conv7 = is_op("nn.conv2d")(conv3, self.w7).has_attr({"strides": [2, 2]})
        right_conv7 = is_op("add")(right_conv7, self.b7)
        out = is_op("add")(left_conv6, right_conv7)
        out = is_op("nn.relu")(out)
        self.pattern = out

    def get_attr(self, pre):
        """Recursively retrieve attributes from reshape operator."""

        def visit_func(expr):
            # Collect the attrs of each conv2d in post-order.
            if isinstance(expr, _expr.Call) and expr.op == relay.op.get("nn.conv2d"):
                self.attr_lst.append(expr.attrs)

        _analysis.post_order_visit(pre, visit_func)

    def callback(self, pre, post, node_map):
        self.get_attr(pre)
        data = node_map[self.data][0]
        w1, b1 = node_map[self.w1][0], node_map[self.b1][0]
        w2, b2 = node_map[self.w2][0], node_map[self.b2][0]
        w3, b3 = node_map[self.w3][0], node_map[self.b3][0]
        w4, b4 = node_map[self.w4][0], node_map[self.b4][0]
        w5, b5 = node_map[self.w5][0], node_map[self.b5][0]
        w6, b6 = node_map[self.w6][0], node_map[self.b6][0]
        w7, b7 = node_map[self.w7][0], node_map[self.b7][0]
        # attr_lst holds conv attrs in post-order; the last seven correspond
        # to conv1..conv3, left_conv4..left_conv6, right_conv7 in that order
        # (TODO(review): confirm against post_order_visit traversal).
        new_attrs = self.attr_lst[-7]
        conv1 = relay.op.nn.conv2d(data, w1, **new_attrs)
        conv1 = relay.op.add(conv1, b1)
        conv1 = relay.op.nn.relu(conv1)
        # Move the stride-2 downsizing earlier, onto the 3x3 conv.
        new_attrs = dict(self.attr_lst[-6])
        new_attrs["strides"] = [2, 2]
        conv2 = relay.op.nn.conv2d(conv1, w2, **new_attrs)
        conv2 = relay.op.add(conv2, b2)
        conv2 = relay.op.nn.relu(conv2)
        new_attrs = self.attr_lst[-5]
        conv3 = relay.op.nn.conv2d(conv2, w3, **new_attrs)
        conv3 = relay.op.add(conv3, b3)
        # The identity shortcut must now be downsized too: a stride-2 1x1
        # max-pool subsamples without changing values.
        max_pool = relay.op.nn.max_pool2d(
            data, pool_size=(1, 1), strides=(2, 2), layout=new_attrs["data_layout"]
        )
        conv3 = relay.op.add(conv3, max_pool)
        conv3 = relay.op.nn.relu(conv3)
        # The original strided convs become stride-1 (input already downsized).
        new_attrs = dict(self.attr_lst[-4])
        new_attrs["strides"] = [1, 1]
        left_conv4 = relay.op.nn.conv2d(conv3, w4, **new_attrs)
        left_conv4 = relay.op.add(left_conv4, b4)
        left_conv4 = relay.op.nn.relu(left_conv4)
        new_attrs = self.attr_lst[-3]
        left_conv5 = relay.op.nn.conv2d(left_conv4, w5, **new_attrs)
        left_conv5 = relay.op.add(left_conv5, b5)
        left_conv5 = relay.op.nn.relu(left_conv5)
        new_attrs = self.attr_lst[-2]
        left_conv6 = relay.op.nn.conv2d(left_conv5, w6, **new_attrs)
        left_conv6 = relay.op.add(left_conv6, b6)
        new_attrs = dict(self.attr_lst[-1])
        new_attrs["strides"] = [1, 1]
        right_conv7 = relay.op.nn.conv2d(conv3, w7, **new_attrs)
        right_conv7 = relay.op.add(right_conv7, b7)
        out = relay.op.add(left_conv6, right_conv7)
        out = relay.op.nn.relu(out)
        # Reset so a subsequent match starts with a clean attribute list.
        self.attr_lst = []
        return out
def rewrite_resnetv1(mod):
    """Rewrite the ResNetV1 downsize block to reduce the computation complexity."""
    # (Fixed duplicated word "the the" in the original docstring.)
    mod["main"] = rewrite(ResNetV1Rewrite(), mod["main"])
    return mod
class LegalizeQnnOpForDnnl(DFPatternCallback):
    """Legalize QNN based patterns to match DNNL
    original pattern:
      OP = qnn.dense | qnn.conv2d
      %1 = OP<int>(SRC, WGH) - OP<int>(src_zp, WGH)       // qnn.conv2d
      %2 = %1 + orig_bias                                 // bias
      %2 = (%1 - rq_in_zp) * rq_in_scl / rq_out_scl + rq_out_zp  // qnn.requantize
      %3 = act(%2)                                        // activation == clip
      %4 = ((%3 - sum_lh_zp) * sum_lh_scl + (SRC2 - sum_rh_zp) * sum_rh_scl)  // qnn.add
           / sum_out_scl + sum_out_zp
    transform to DNNL compatible:
      %1 = OP<int>(SRC, WGH)
      %2 = cast(%1, dtype="float")
      %2 = (%1 + bias) * o_scl
      %3 = act(%2) * act_scl
      %4 = %3 + SRC2 * sum_scl
      %5 = %4 + dst_zp
      %6 = cast(%5, dtype="float")
    where:
      o_scl = rq_in_scl / rq_out_scl
      act_scl = sum_lhs_scl / sum_out_scl
      sum_scl = sum_rhs_scl / sum_out_scl
      bias = orig_bias - OP(src_zp, WGH) - rq_in_zp + rq_out_zp * rq_out_scl / rq_in_scl
      dst_zp = sum_out_zp - sum_lhs_zp * sum_lhs_scl / sum_out_scl -
               sum_rhs_zp * sum_rhs_scl / sum_out_scl
    """

    def __init__(self):
        super(LegalizeQnnOpForDnnl, self).__init__()
        # Main tensor inputs.
        self.src = wildcard()
        self.wgh = wildcard()
        self.bias = wildcard()
        self.sum_src = wildcard()
        # Quantization params of the root op; weight zero point must be 0
        # (symmetric weights) for DNNL.
        self.src_scl = is_constant()
        self.src_zp = is_constant()
        self.wgh_scl = is_constant()
        self.wgh_zp = is_expr(const(0))
        # qnn.requantize params.
        self.rq_in_scl = is_constant()
        self.rq_in_zp = is_constant()
        self.rq_out_scl = is_constant()
        self.rq_out_zp = is_constant()
        # qnn.add (residual sum) params.
        self.sum_lhs_scl = is_constant()
        self.sum_lhs_zp = is_constant()
        self.sum_rhs_scl = is_constant()
        self.sum_rhs_zp = is_constant()
        self.sum_out_scl = is_constant()
        self.sum_out_zp = is_constant()
        self.root = (is_op("qnn.conv2d") | is_op("qnn.dense"))(
            self.src, self.wgh, self.src_zp, self.wgh_zp, self.src_scl, self.wgh_scl
        )
        pat = is_op("add")(self.root, self.bias) | self.root  # optional bias
        pat = is_op("qnn.requantize")(
            pat, self.rq_in_scl, self.rq_in_zp, self.rq_out_scl, self.rq_out_zp
        )
        pat = is_op("clip")(pat)
        cast = is_op("cast")(pat)
        # The qnn.add tail is optional: the final pattern accepts either the
        # full chain or just the cast.
        pat = is_op("qnn.add")(
            cast,
            self.sum_src,
            self.sum_lhs_scl,
            self.sum_lhs_zp,
            self.sum_rhs_scl,
            self.sum_rhs_zp,
            self.sum_out_scl,
            self.sum_out_zp,
        )
        pat = is_op("clip")(pat)
        self.pattern = pat | cast

    def callback(self, pre, post, node_map):
        root = node_map[self.root][0]
        src = node_map[self.src][0]
        wgh = node_map[self.wgh][0]
        # Bias defaults to 0 when the optional "add" branch did not match.
        bias = node_map.get(self.bias, default=[relay.const(0, dtype="int32")])[0]
        src_zp = node_map[self.src_zp][0]
        rq_in_scl = node_map[self.rq_in_scl][0]
        rq_in_zp = node_map[self.rq_in_zp][0]
        rq_out_scl = node_map[self.rq_out_scl][0]
        rq_out_zp = node_map[self.rq_out_zp][0]
        final_dtype = node_map[self.pattern][0].checked_type.dtype
        if root.op == relay.op.get("qnn.conv2d"):
            dst_layout = root.attrs.out_layout
            dst_layout = root.attrs.data_layout if dst_layout == "" else dst_layout
            wgh_layout = root.attrs.kernel_layout
        else:
            # qnn.dense has no layout attributes. Assume that is plain
            dst_layout = "NC"
            wgh_layout = "OI"
        # TODO(@apeskov): dst_layout may be blocked
        bias_rank = len(dst_layout) - dst_layout.index("C")
        sum_src = node_map[self.sum_src][0] if self.sum_src in node_map else None
        # Default values if qnn.sum is not present
        sum_lhs_scl = node_map[self.sum_lhs_scl][0] if sum_src else relay.const(1, dtype="float32")
        sum_lhs_zp = node_map[self.sum_lhs_zp][0] if sum_src else relay.const(0, dtype="int32")
        sum_rhs_scl = node_map[self.sum_rhs_scl][0] if sum_src else relay.const(0, dtype="float32")
        sum_rhs_zp = node_map[self.sum_rhs_zp][0] if sum_src else relay.const(0, dtype="int32")
        sum_out_scl = node_map[self.sum_out_scl][0] if sum_src else relay.const(1, dtype="float32")
        sum_out_zp = node_map[self.sum_out_zp][0] if sum_src else relay.const(0, dtype="int32")

        def cast_fp(op):
            # Shorthand for casting to float32.
            return relay.op.cast(op, dtype="float32")

        # recalculate some factors (see formulas in the class docstring)
        o_scl = rq_in_scl / rq_out_scl
        act_scl = sum_lhs_scl / sum_out_scl
        sum_scl = sum_rhs_scl / sum_out_scl
        dst_zp = (
            cast_fp(sum_out_zp)
            - cast_fp(sum_lhs_zp) * sum_lhs_scl / sum_out_scl
            - cast_fp(sum_rhs_zp) * sum_rhs_scl / sum_out_scl
        )
        bias = self.squeeze_bias(bias, dst_layout)
        bias = (
            cast_fp(bias)
            - cast_fp(self.fake_op(src_zp, wgh, wgh_layout))
            - cast_fp(rq_in_zp)
            + cast_fp(rq_out_zp) * rq_out_scl / rq_in_scl
        )
        bias = self.broadcast_to_rank(bias, bias_rank)
        zero_zp = relay.const(0, dtype="int32")
        one_scl = relay.const(1.0, dtype="float32")
        # construct new graph with proper post op ordering
        gr = tvm.relay.Call(
            root.op,
            [src, wgh, zero_zp, zero_zp, one_scl, one_scl],
            root.attrs,
            root.type_args,
            root.span,
        )
        gr = relay.op.cast(gr, dtype="float32")
        gr = gr + bias
        gr = gr * o_scl
        gr = relay.op.clip(gr, 0, 255) * act_scl
        gr = gr + sum_scl * cast_fp(sum_src) if sum_src else gr
        gr = gr + dst_zp
        gr = relay.op.cast(gr, dtype=final_dtype)
        return gr

    @staticmethod
    def fake_op(zp, wgh, layout):
        """Fake operator implementation for zp broadcast input"""
        # Conv: reduce kernel {OC, IC, KH, KW} -> {OC} in case of group that is still correct
        # Dense: reduce kernel {OC, IC} -> {OC}
        wgh_int = relay.op.cast(wgh, dtype="int32")
        reduced_kernel = relay.op.sum(
            wgh_int, axis=[layout.index("O")], keepdims=False, exclude=True
        )
        return zp * reduced_kernel

    @staticmethod
    def squeeze_bias(bias, layout):
        """Drop every bias axis except the channel axis of `layout`."""
        shape = transform.InferTypeLocal(bias).concrete_shape
        c_position = layout.index("C") - len(layout) + len(shape)
        squeeze_idxs = [i for i in range(len(shape)) if i != c_position]
        return relay.op.squeeze(bias, squeeze_idxs)

    @staticmethod
    def broadcast_to_rank(op, rank):
        """Scalar or 1D tensor are supported"""
        shape = transform.InferTypeLocal(op).concrete_shape
        if len(shape) == 0:
            return op
        if len(shape) == 1:
            return relay.op.expand_dims(op, 1, rank - 1)
        raise ValueError("Unexpected bias rank to broadcast. Only 0 and 1 are supported.")
def legalize_qnn_for_dnnl(mod):
    """Transform qnn primitives to DNNL compatible form. Eliminate source zero point and apply
    strict sequence of post ops."""
    mod["main"] = rewrite(LegalizeQnnOpForDnnl(), mod["main"])
    passes = [
        transform.InferType(),
        # transform.SimplifyInference(), # TODO: this pass decompose nn.layer_norm
        # transform.FoldScaleAxis(), # TODO: fail inside TVM in case of grouped convolutions.
        transform.FoldConstant(),
    ]
    seq = tvm.transform.Sequential(passes)
    with tvm.transform.PassContext(opt_level=3):
        mod = seq(mod)
    return mod
| 48,265 | 34.256392 | 99 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/arm_compute_lib.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, dangerous-default-value
"""Arm Compute Library supported operators."""
import tvm
from tvm import relay
from tvm._ffi import register_func
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.expr import const
from ...dataflow_pattern import is_constant, is_expr, is_op, is_tuple, wildcard
from ..strategy.generic import is_depthwise_conv2d
from .register import register_pattern_table
def is_arm_compute_runtime_enabled():
    """Check if the ACL graph executor is present.

    Returns
    -------
    ret: bool
        True if present, False if not.
    """
    # allow_missing=True makes the lookup return None instead of raising.
    check_enabled = tvm.get_global_func("relay.op.is_arm_compute_runtime_enabled", True)
    return check_enabled() if check_enabled else False
def partition_for_arm_compute_lib(mod, params=None, disabled_ops=("concatenate",), **opts):
    """Partition the graph greedily offloading supported
    operators to Arm Compute Library.

    Parameters
    ----------
    mod : Module
        The module to run passes on.
    params : Optional[Dict[str, NDArray]]
        Constant input parameters.
    disabled_ops : Optional[Sequence[str]]
        Ops do not want to offload to ACL. An immutable tuple is used as the
        default to avoid the mutable-default-argument pitfall; any iterable
        of op names is accepted.
    opts : Any
        Additional options; currently unused, accepted for API compatibility.

    Returns
    -------
    ret : annotated and partitioned module.
    """
    if params:
        mod["main"] = bind_params_by_name(mod["main"], params)

    seq = tvm.transform.Sequential(
        [
            transform.InferType(),
            transform.MergeComposite(arm_compute_lib_pattern_table(disabled_ops)),
            transform.AnnotateTarget("arm_compute_lib", False),
            transform.PartitionGraph(),
        ]
    )

    return seq(mod)
@register_func("relay.ext.arm_compute_lib.optimize")
def preprocess_module(mod):
    """
    Pre-process a module containing functions ready for ACL codegen. For now we enforce OHWI
    kernel layout and fold the transforms away.

    Parameters
    ----------
    mod : Module
        The module to run passes on.

    Returns
    -------
    preprocessed_mod : The processed module.
    """

    def convert_layout_conv2d(conv2d_function):
        # Build a FTVMConvertOpLayout hook for the given conv2d constructor
        # (works for both nn.conv2d and qnn.conv2d).
        def convert_conv(attrs, inputs, tinfos, desired_layouts):
            new_attrs = dict(attrs)
            data_info = tinfos[0]
            weight_info = tinfos[1]
            desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
            new_attrs["data_layout"] = desired_data_layout
            new_attrs["kernel_layout"] = desired_kernel_layout

            if is_depthwise_conv2d(
                data_info.shape,
                attrs["data_layout"],
                weight_info.shape,
                attrs["kernel_layout"],
                attrs["groups"],
            ):
                # Depthwise kernels use a rotated layout: the last dimension
                # of the desired kernel layout is moved to the front.
                dkl = desired_kernel_layout
                new_attrs["kernel_layout"] = dkl[3] + dkl[1:3] + dkl[0]
            return conv2d_function(*inputs, **new_attrs)

        return convert_conv

    # NOTE(review): OpAttrContext is not among the imports visible in this
    # chunk — presumably brought in at module level elsewhere; confirm.
    with OpAttrContext(
        "nn.conv2d", "FTVMConvertOpLayout", convert_layout_conv2d(tvm.relay.nn.conv2d)
    ), OpAttrContext(
        "qnn.conv2d", "FTVMConvertOpLayout", convert_layout_conv2d(tvm.relay.qnn.op.conv2d)
    ):
        seq = tvm.transform.Sequential(
            [
                transform.ConvertLayout(
                    {"nn.conv2d": ["NHWC", "OHWI"], "qnn.conv2d": ["NHWC", "OHWI"]}
                ),
                transform.FoldConstant(),
            ]
        )
        preprocessed_mod = seq(mod)
    return preprocessed_mod
@register_pattern_table("arm_compute_lib")
def arm_compute_lib_pattern_table(disabled_ops=("concatenate",)):
    """Get the ACL pattern table.

    Parameters
    ----------
    disabled_ops : Sequence[str]
        Operator names that must not be offloaded to ACL. An immutable tuple
        is used as the default to avoid the mutable-default-argument pitfall.

    Returns
    -------
    list of (str, Pattern, Callable)
        (composite name, pattern, predicate) triples for MergeComposite.
    """

    def conv_pattern():
        """Create a convolution pattern.

        Returns
        -------
        pattern : dataflow_pattern.AltPattern
            Denotes the convolution pattern.
        """
        pattern = is_op("nn.pad")(wildcard(), wildcard()) | wildcard()
        pattern = is_op("nn.conv2d")(pattern, is_constant())
        pattern = pattern.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
        pattern = pattern.optional(is_op("nn.relu"))
        return pattern

    def qnn_conv_pattern():
        """Create a quantized convolution pattern.

        Returns
        -------
        pattern : dataflow_pattern.AltPattern
            Denotes the quantized convolution pattern.
        """
        pattern = is_op("nn.pad")(wildcard(), wildcard()) | wildcard()
        pattern = is_op("qnn.conv2d")(
            pattern, is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
        )
        pattern = pattern.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
        pattern = pattern.optional(is_op("nn.relu"))
        pattern = is_op("qnn.requantize")(
            pattern, wildcard(), wildcard(), is_constant(), is_constant()
        )
        return pattern

    def dense_pattern():
        """Create a dense (fully-connected) pattern.

        Returns
        -------
        pattern : dataflow_pattern.AltPattern
            Denotes the dense pattern.
        """
        pattern = is_op("nn.dense")(wildcard(), is_constant())
        pattern = pattern.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
        return pattern

    def qnn_dense_pattern():
        """Create a quantized dense (fully-connected) pattern.

        Returns
        -------
        pattern : dataflow_pattern.AltPattern
            Denotes the quantized dense pattern.
        """
        pattern = is_op("qnn.dense")(
            wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
        )
        pattern = pattern.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
        pattern = is_op("qnn.requantize")(
            pattern, wildcard(), wildcard(), is_constant(), is_constant()
        )
        return pattern

    def avg_pool2d_pattern():
        """Creates a pattern that matches either quantized
        avg_pool2d or quantized global_avg_pool2d.

        Returns
        -------
        pattern : dataflow_pattern.AltPattern
            Denotes the (global) average pool pattern.
        """
        pattern = is_op("cast")(wildcard())
        pattern = is_op("nn.avg_pool2d")(pattern) | is_op("nn.global_avg_pool2d")(pattern)
        pattern = is_op("cast")(pattern)
        return pattern

    def l2_pool2d_pattern():
        """Create an l2 pooling pattern from equivalent relay operators.

        Returns
        -------
        pattern : dataflow_pattern.AltPattern
            Denotes the l2 pooling pattern.
        """
        pattern = is_op("power")(wildcard(), is_expr(const(2.0)))
        pattern = is_op("nn.avg_pool2d")(pattern)
        pattern = is_op("sqrt")(pattern)
        return pattern

    def concatenate_pattern():
        """Create an concatenate pattern from equivalent relay operators.

        Returns
        -------
        pattern : dataflow_pattern.AltPattern
            Denotes the concatenate pattern.
        """
        pattern = is_op("concatenate")(is_tuple(None))
        return pattern

    def check_conv(extract):
        """Check conv pattern is supported by ACL."""
        call = extract
        # Walk down through the optional relu/bias_add wrappers.
        while call.op.name != "nn.conv2d":
            call = call.args[0]
        return conv2d(call)

    def check_qnn_conv(extract):
        """Check qnn conv pattern is supported by ACL."""
        if extract.attrs.out_dtype not in ("uint8", "int8"):
            return False
        call = extract
        while call.op.name != "qnn.conv2d":
            call = call.args[0]
        return qnn_conv2d(call)

    def check_dense(extract):
        """Check dense pattern is supported by ACL."""
        call = extract
        while call.op.name != "nn.dense":
            call = call.args[0]
        return dense(call)

    def check_qnn_dense(extract):
        """Check qnn dense pattern is supported by ACL."""
        if extract.attrs.out_dtype not in ("uint8", "int8"):
            return False
        call = extract
        while call.op.name != "qnn.dense":
            call = call.args[0]
        return qnn_dense(call)

    def check_avg_pool2d(extract):
        """Check average pool2d pattern is supported by ACL."""
        if extract.attrs.dtype not in ("uint8", "int8"):
            return False
        pool = extract.args[0]
        if pool.args[0].attrs.dtype != "int32":
            return False
        return avg_pool2d(pool, from_quantized_composite=True)

    def check_l2_pool2d(extract):
        """Check l2 pool2d pattern is supported by ACL."""
        pool = extract.args[0]
        return avg_pool2d(pool)

    def check_concatenate(expr):
        """Check concatenate pattern is supported by ACL."""
        if "concatenate" in disabled_ops:
            return False
        attrs, type_args = expr.attrs, expr.type_args
        for idx in range(len(type_args[0].fields)):
            if type_args[0].fields[idx].dtype not in ["float32", "uint8", "int8"]:
                return False
        # ACL concatenate only supports maximum 4 dimensions input tensor
        if attrs.axis not in [-4, -3, -2, -1, 0, 1, 2, 3]:
            return False
        return True

    return [
        ("arm_compute_lib.conv2d", conv_pattern(), check_conv),
        ("arm_compute_lib.qnn_conv2d", qnn_conv_pattern(), check_qnn_conv),
        ("arm_compute_lib.dense", dense_pattern(), check_dense),
        ("arm_compute_lib.qnn_dense", qnn_dense_pattern(), check_qnn_dense),
        # NOTE: a second, identical ("arm_compute_lib.qnn_conv2d", ...) entry
        # was registered here in the original; the duplicate has been removed.
        ("arm_compute_lib.avg_pool2d", avg_pool2d_pattern(), check_avg_pool2d),
        ("arm_compute_lib.l2_pool2d", l2_pool2d_pattern(), check_l2_pool2d),
        ("arm_compute_lib.concatenate", concatenate_pattern(), check_concatenate),
    ]
def _register_external_op_helper(op_name, supported=True):
    """Register `op_name` as statically supported (or not) by the ACL codegen.

    Attaches a "target.arm_compute_lib" attribute function to the operator
    that always returns `supported`, and returns that function.
    """

    @tvm.ir.register_op_attr(op_name, "target.arm_compute_lib")
    def _func_wrapper(expr):
        # The decision is static: every call to `op_name` is (un)supported.
        return supported

    return _func_wrapper


# reshape is always offloadable to ACL.
_register_external_op_helper("reshape")
@tvm.ir.register_op_attr("nn.conv2d", "target.arm_compute_lib")
def conv2d(expr):
    """Check if the external ACL codegen for conv2d should be used."""
    attrs, args = expr.attrs, expr.args
    if attrs.data_layout != "NHWC":
        return False
    if attrs.out_dtype not in ("float32", ""):
        return False
    data_type = args[0].checked_type
    # Only batch-1 float32 4D inputs are supported.
    if len(data_type.shape) != 4 or data_type.shape[0] != 1 or data_type.dtype != "float32":
        return False
    kernel_type = args[1].checked_type
    if len(kernel_type.shape) != 4 or kernel_type.dtype != "float32":
        return False
    if is_depthwise_conv2d(
        data_type.shape,
        attrs["data_layout"],
        kernel_type.shape,
        attrs["kernel_layout"],
        attrs["groups"],
    ):
        # Depthwise has its own, stricter checks.
        return depthwise_conv2d(attrs, args)
    # ACL doesn't support grouped convolution
    if attrs.groups != 1:
        return False
    return True
def qnn_conv2d(expr):
    """Check if the external ACL codegen for qnn.conv2d should be used."""
    attrs, args = expr.attrs, expr.args
    allowed_dtypes = ("uint8", "int8")
    if attrs.data_layout != "NHWC":
        return False
    if attrs.out_dtype not in ("int32", ""):
        return False
    data_type = args[0].checked_type
    # Only batch-1, 4D, (u)int8 inputs are supported.
    if len(data_type.shape) != 4 or data_type.shape[0] != 1 or data_type.dtype not in allowed_dtypes:
        return False
    kernel_type = args[1].checked_type
    if len(kernel_type.shape) != 4 or kernel_type.dtype not in allowed_dtypes:
        return False
    # ACL only supports per-tensor quantization for both input and kernel.
    if is_per_channel_quantization(
        zero_point=args[2], scale=args[4]
    ) or is_per_channel_quantization(zero_point=args[3], scale=args[5]):
        return False
    if is_depthwise_conv2d(
        data_type.shape,
        attrs["data_layout"],
        kernel_type.shape,
        attrs["kernel_layout"],
        attrs["groups"],
    ):
        # Depthwise has its own, stricter checks.
        return depthwise_conv2d(attrs, args)
    # ACL doesn't support grouped convolution
    if attrs.groups != 1:
        return False
    return True
def depthwise_conv2d(attrs, args):
    """Check if the external ACL codegen for depthwise convolution should be used.

    Note
    ----
    Relay does not have a depthwise conv2d operator whilst ACL does. We simply
    separate the checks for depthwise for clarity.
    """
    kernel_shape = args[1].checked_type.shape
    # ACL only supports square 3x3 or 5x5 depthwise kernels.
    if kernel_shape[0] != kernel_shape[1] or kernel_shape[0] not in [3, 5]:
        return False
    # ACL only supports stride (1, 1) or (2, 2).
    if (attrs.strides[0], attrs.strides[1]) not in [(1, 1), (2, 2)]:
        return False
    return True
@tvm.ir.register_op_attr("nn.dense", "target.arm_compute_lib")
def dense(expr):
    """Check if the external ACL codegen for dense should be used."""
    attrs, args = expr.attrs, expr.args
    input_type = args[0].checked_type
    if input_type.dtype != "float32":
        return False
    weight_type = args[1].checked_type
    # Weights must be a 2D float32 matrix.
    if len(weight_type.shape) != 2 or weight_type.dtype != "float32":
        return False
    if attrs.out_dtype not in ("float32", ""):
        return False
    return True
def qnn_dense(expr):
    """Check if the external ACL codegen for qnn.dense should be used."""
    attrs, args = expr.attrs, expr.args
    valid_dtypes = ("uint8", "int8")
    if args[0].checked_type.dtype not in valid_dtypes:
        return False
    kernel_typ = args[1].checked_type
    if len(kernel_typ.shape) != 2 or kernel_typ.dtype not in valid_dtypes:
        return False
    if attrs.out_dtype != "int32":
        return False
    # Per-channel quantization params on either operand are not supported.
    per_channel = is_per_channel_quantization(
        zero_point=args[2], scale=args[4]
    ) or is_per_channel_quantization(zero_point=args[3], scale=args[5])
    return not per_channel
def check_dilation(attrs):
    """Prevents offloading if dilation other than (1, 1)"""
    if isinstance(attrs, relay.op.op_attrs.GlobalPool2DAttrs):
        # Global pooling attrs carry no dilation to validate.
        return True
    dilation_is_unit = (
        len(attrs.dilation) == 2 and attrs.dilation[0] == 1 and attrs.dilation[1] == 1
    )
    if not dilation_is_unit:
        return False
    return True
@tvm.ir.register_op_attr("nn.max_pool2d", "target.arm_compute_lib")
def max_pool2d(expr):
    """Check if the external ACL codegen for maxpool2d should be used."""
    attrs, args = expr.attrs, expr.args
    if attrs.layout != "NHWC":
        return False
    input_type = args[0].checked_type
    # Float and 8-bit quantized inputs are accepted.
    if input_type.dtype not in ("float32", "uint8", "int8"):
        return False
    return check_dilation(attrs)
@tvm.ir.register_op_attr("nn.avg_pool2d", "target.arm_compute_lib")
def avg_pool2d(expr, from_quantized_composite=False):
    """Check if the external ACL codegen for avgpool2d should be used."""
    attrs, args = expr.attrs, expr.args
    input_type = args[0].checked_type
    # int32 input is only accepted when this check is invoked from a
    # quantized composite; otherwise only float32 is offloaded.
    expected_dtypes = ("int32",) if from_quantized_composite else ("float32",)
    if input_type.dtype not in expected_dtypes:
        return False
    if attrs.layout != "NHWC":
        return False
    return check_dilation(attrs)
@tvm.ir.register_op_attr("nn.global_max_pool2d", "target.arm_compute_lib")
def global_max_pool2d(expr):
    """Check if the external ACL codegen for global_maxpool2d should be used.

    Requires NHWC layout and a float32, uint8 or int8 input tensor.
    """
    attrs, args = expr.attrs, expr.args
    typ = args[0].checked_type
    # Float and 8-bit quantized inputs are accepted.
    if typ.dtype not in ["float32", "uint8", "int8"]:
        return False
    if attrs.layout != "NHWC":
        return False
    return True
@tvm.ir.register_op_attr("nn.global_avg_pool2d", "target.arm_compute_lib")
def global_avg_pool2d(expr):
    """Check if the external ACL codegen for global_avgpool2d should be used."""
    attrs, args = expr.attrs, expr.args
    input_type = args[0].checked_type
    # Only float32 inputs are accepted, unlike global max pooling.
    if input_type.dtype != "float32":
        return False
    if attrs.layout != "NHWC":
        return False
    return True
@tvm.ir.register_op_attr("maximum", "target.arm_compute_lib")
def maximum(expr):
    """Check if the external ACL codegen for maximum should be used.

    Both operands must be float32 tensors.
    """
    args = expr.args
    type_a = args[0].checked_type
    # BUG FIX: this previously read args[0] again, so the dtype of the
    # second operand was never actually checked.
    type_b = args[1].checked_type
    return (type_a.dtype == "float32") and (type_b.dtype == "float32")
@tvm.ir.register_op_attr("add", "target.arm_compute_lib")
def add(expr):
    """Check if the external ACL codegen for add should be used."""
    args = expr.args
    lhs_type = args[0].checked_type
    rhs_type = args[1].checked_type
    # Both addends must be float32 for the ACL kernel.
    return lhs_type.dtype == "float32" and rhs_type.dtype == "float32"
@tvm.ir.register_op_attr("qnn.add", "target.arm_compute_lib")
def qnn_add(expr):
    """Check if the external ACL codegen for add should be used."""
    args = expr.args
    for operand in (args[0], args[1]):
        if operand.checked_type.dtype not in ("int8", "uint8"):
            return False
    # The (zero_point, scale) pairs of lhs, rhs and output must all be
    # per-tensor for ACL.
    quant_params = ((args[3], args[2]), (args[5], args[4]), (args[7], args[6]))
    for zero_point, scale in quant_params:
        if is_per_channel_quantization(zero_point=zero_point, scale=scale):
            return False
    return True
def is_per_channel_quantization(zero_point, scale):
    """Check if the quantization is per-channel"""
    # A scalar (rank 0) or single-element tensor is per-tensor quantization;
    # anything longer along axis 0 is treated as per-channel.
    for qparam in (zero_point, scale):
        shape = qparam.checked_type.shape
        if len(shape) != 0 and shape[0] != 1:
            return True
    return False
class OpAttrContext(object):
    """Temporarily changes the attr of an op."""
    def __init__(self, op_name, attr_key, attr_value):
        """Saves the required info for RAII pattern usage.
        Parameters
        ----------
        op_name : str
            The op name.
        attr_key : str
            The attribute name.
        attr_value : object
            The attribute value.
        """
        self.op = relay.op.get(op_name)
        self.attr_key = attr_key
        self.attr_value = attr_value
    def __enter__(self):
        # Remember the current attribute so it can be restored on exit,
        # then install the temporary value.
        self.older_attr = self.op.get_attr(self.attr_key)
        self.op.reset_attr(self.attr_key)
        self.op.set_attr(self.attr_key, self.attr_value)
        return self
    def __exit__(self, ptype, value, trace):
        # Drop the temporary attribute and restore the saved one.
        self.op.reset_attr(self.attr_key)
        # NOTE(review): a falsy-but-present original attribute (e.g. 0 or "")
        # would not be restored here -- confirm that is acceptable.
        if self.older_attr:
            self.op.set_attr(self.attr_key, self.older_attr)
| 19,162 | 32.385017 | 97 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/coreml.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""CoreML codegen supported operators."""
import tvm.ir
from tvm.contrib.target.coreml import _convert_map
from ...expr import Constant
def _register_coreml_op(op_name):
    """Register a function to check the given operator is supported by Core ML.
    Parameters
    ---------
    op_name : Str
        The name of operator that will be registered.
    """
    def _check_supported(expr):
        # Returns True when a call to `op_name` can be offloaded to the
        # Core ML compiler.
        attrs, args = expr.attrs, expr.args
        if op_name == "nn.conv2d":
            # Core ML conversion requires a constant weight tensor and one
            # of the two kernel layouts handled by the converter.
            if not isinstance(args[1], Constant):
                return False
            if attrs["kernel_layout"] not in ["HWIO", "OIHW"]:
                return False
        return True
    tvm.ir.register_op_attr(op_name, "target.coremlcompiler", _check_supported)
# Register a Core ML support checker for every operator the converter
# in tvm.contrib.target.coreml knows how to translate.
for op in _convert_map:
    _register_coreml_op(op)
| 1,633 | 33.041667 | 79 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/cublas.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""cuBLAS Relay integration."""
from typing import Callable, List, Tuple, Dict, Optional
import tvm
import tvm.ir
from tvm import relay
from tvm import te
from tvm.relay import transform
from tvm.contrib import cublas
from ...dataflow_pattern import is_op, wildcard
from .te_target import lower_composite, relay_to_runtime
from .register import register_pattern_table
# Register the cuBLAS BYOC compilation entry point for CUDA targets.
tvm._ffi.register_func("relay.ext.cublas", relay_to_runtime(tvm.target.cuda()))
def partition_for_cublas(
    mod: tvm.IRModule, params: Optional[Dict[str, tvm.runtime.NDArray]] = None
) -> tvm.IRModule:
    """Partition the graph to offload for cuBLAS.
    Parameters
    ----------
    mod : tvm.IRModule
        The module to partition.
    params : Optional[Dict[str, tvm.runtime.NDArray]]
        Constant input parameters.
    Returns
    -------
    tvm.IRModule
        The partitioned module.
    """
    # Merge cuBLAS-compatible patterns, annotate and carve them out into
    # separate functions for the "cublas" external codegen.
    passes = [
        transform.InferType(),
        transform.MergeComposite(pattern_table()),
        transform.AnnotateTarget("cublas"),
        transform.PartitionGraph(),
        transform.InferType(),
    ]
    return tvm.transform.Sequential(passes)(mod)
@register_pattern_table("cublas")
def pattern_table() -> List[Tuple[str, relay.Pattern, Callable[[relay.Call], bool]]]:
    """Get the cuBLAS pattern table.

    Returns (name, pattern, predicate) triples for the matmul-like
    operators that can be offloaded to cuBLAS.
    """
    def matmul_pattern() -> relay.Pattern:
        """Create pattern for matmul."""
        return is_op("nn.matmul")(wildcard(), wildcard())
    def batch_matmul_pattern() -> relay.Pattern:
        """Create pattern for batch_matmul."""
        return is_op("nn.batch_matmul")(wildcard(), wildcard())
    def dense_pattern() -> relay.Pattern:
        """Create pattern for dense."""
        return is_op("nn.dense")(wildcard(), wildcard())
    def check_matmul_like(matched: relay.Call) -> bool:
        """Check if matmul is supported by cuBLAS."""
        # Input data types can't be mixed
        if matched.args[0].checked_type.dtype != matched.args[1].checked_type.dtype:
            return False
        in_dtype = matched.args[0].checked_type.dtype
        out_dtype = matched.checked_type.dtype
        # Only the following data type combinations are supported
        if (in_dtype, out_dtype) not in [
            ("float32", "float32"),
            ("float16", "float16"),
            ("float16", "float32"),
            ("int8", "int32"),
            ("float64", "float64"),
            ("int8", "float32"),
        ]:
            return False
        # If inputs are int8, input column strides must be a multiple of 4
        if in_dtype == "int8":
            if (
                matched.args[0].checked_type.shape[-1] % 4 != 0
                or matched.args[1].checked_type.shape[-1] % 4 != 0
            ):
                return False
        return True
    # The same dtype/stride predicate applies to all three matmul variants.
    return [
        ("cublas.matmul", matmul_pattern(), check_matmul_like),
        ("cublas.batch_matmul", batch_matmul_pattern(), check_matmul_like),
        ("cublas.dense", dense_pattern(), check_matmul_like),
    ]
@lower_composite("cublas.matmul")
def _lower_matmul(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
    """Lower a matmul using cuBLAS."""
    lhs, rhs = inputs[0], inputs[1]
    # Transpose flags come straight from the matched nn.matmul attributes.
    return cublas.matmul(
        lhs,
        rhs,
        transa=op.attrs["transpose_a"],
        transb=op.attrs["transpose_b"],
        dtype=op.checked_type.dtype,
    )
@lower_composite("cublas.batch_matmul")
def _lower_batch_matmul(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
    """Lower a batch_matmul using cuBLAS."""
    lhs, rhs = inputs[0], inputs[1]
    # Transpose flags come straight from the matched nn.batch_matmul attributes.
    return cublas.batch_matmul(
        lhs,
        rhs,
        transa=op.attrs["transpose_a"],
        transb=op.attrs["transpose_b"],
        dtype=op.checked_type.dtype,
    )
@lower_composite("cublas.dense")
def _lower_dense(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
    """Lower a dense using cuBLAS."""
    # nn.dense is a matmul whose weight operand is stored transposed,
    # hence transb=True.
    return cublas.matmul(
        inputs[0],
        inputs[1],
        transa=False,
        transb=True,
        dtype=op.checked_type.dtype,
    )
| 4,822 | 31.587838 | 85 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/te_target.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Support a Relay partitioning target using Tensor Expressions."""
from typing import Callable, List, Dict
import tvm
import tvm.ir
from tvm import relay
from tvm import te
_LowerFunc = Callable[[relay.Call, List[te.Tensor]], te.Tensor]
_LOWER_MAP: Dict[str, _LowerFunc] = {}
def lower_composite(comp_name: str) -> Callable[[_LowerFunc], _LowerFunc]:
    """Register a lowering function for a given composite function name."""
    def _decorator(lower_func: _LowerFunc) -> _LowerFunc:
        # Record the lowering function and hand it back unchanged so the
        # decorator is transparent to the decorated function.
        _LOWER_MAP[comp_name] = lower_func
        return lower_func
    return _decorator
def relay_to_runtime(target: tvm.target.Target) -> Callable[[relay.Function], tvm.runtime.Module]:
    """Create a Relay to runtime module lowering function using Tensor Expressions for lowering."""
    def _relay_to_runtime(partition: relay.Function) -> tvm.runtime.Module:
        """Compile Relay functions to a runtime module using Tensor Expressions."""
        # The partition must be a function whose body is a single call to a
        # composite function.
        assert isinstance(partition, relay.Function)
        assert isinstance(partition.body, relay.Call)
        assert isinstance(partition.body.op, relay.Function)
        global_name = str(partition.attrs.global_symbol)
        comp_func = partition.body.op
        comp_name = comp_func.attrs["Composite"]
        # A lowering function must have been registered for this composite
        # name via @lower_composite.
        assert comp_name in _LOWER_MAP
        assert isinstance(comp_func.body, relay.Call)
        op = comp_func.body
        # Create one TE placeholder per composite parameter, mirroring its
        # inferred shape and dtype.
        inputs = []
        for i, param in enumerate(comp_func.params):
            inputs.append(
                te.placeholder(
                    param.checked_type.shape,
                    name=f"input_{i}",
                    dtype=param.checked_type.dtype,
                )
            )
        # Lower the composite call to a TE tensor and build a runtime
        # module for the requested target.
        output = _LOWER_MAP[comp_name](op, inputs)
        prim_func = te.create_prim_func(inputs + [output])
        return tvm.build(prim_func, target=target, name=global_name)
    return _relay_to_runtime
| 2,648 | 36.309859 | 99 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/cmsisnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Arm(R) CMSIS-NN supported operators for Cortex-M."""
import tvm.ir
from tvm.target import Target
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from ...dataflow_pattern import is_constant, is_op, wildcard
from .register import register_pattern_table
tvm._ffi._init_api("relay.ext.cmsisnn.transform", __name__)
def enabled():
    """Return True when the "cmsis-nn" target kind is registered."""
    target_kinds = Target.list_kinds()
    return "cmsis-nn" in target_kinds
def partition_for_cmsisnn(mod, params=None, mod_name="default", **opts):
    """Partition the graph greedily offloading supported
    operators on Cortex-M using CMSIS-NN
    Parameters
    ----------
    mod : Module
        The module to run passes on.
    params : Optional[Dict[str, NDArray]]
        Constant input parameters.
    mod_name: str, optional
        The module name
    Returns
    -------
    ret : Module
        annotated and partitioned module.
    """
    if params:
        mod["main"] = bind_params_by_name(mod["main"], params)
    # Partition supported patterns out of "main" and then run the
    # CMSIS-NN-specific post-processing passes over the partitions.
    cmsisnn_passes = [
        transform.InferType(),
        transform.MergeComposite(pattern_table()),
        transform.AnnotateTarget("cmsis-nn"),
        transform.PartitionGraph(mod_name=mod_name),
        GenerateCMSISNNConstants(),
        CMSISNNFusePads(),
        ScalarToTensorConstants(),
        ExtractConstantsFromPartitionedFunction(),
        transform.InferType(),
    ]
    return tvm.transform.Sequential(cmsisnn_passes)(mod)
@register_pattern_table("cmsis-nn")
def pattern_table():
    """Get the CMSIS-NN compiler pattern table."""
    def qnn_softmax_pattern():
        """Create pattern for quantized softmax"""
        pattern = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
        pattern = is_op("nn.softmax")(pattern)
        pattern = is_op("qnn.quantize")(pattern, is_constant(), is_constant())
        return pattern
    def check_qnn_softmax(pattern):
        """Check if softmax is supported by CMSIS-NN."""
        dequantize_call = pattern.args[0].args[0]
        # Output quantization must be exactly scale 1/256, zero point -128.
        scale = pattern.args[1].data.numpy().item(0)
        zero_point = pattern.args[2].data.numpy().item(0)
        # check for dtypes of quantize and dequantize
        return (
            (scale == 1.0 / 256 and zero_point == -128)
            and pattern.attrs.out_dtype == "int8"
            and dequantize_call.args[0].checked_type.dtype == "int8"
        )
    def qnn_conv2d_pattern(with_pad):
        """Create pattern for qnn.conv2D with optional pad and/or optional fused relu."""
        conv2d_input = wildcard()
        if with_pad:
            conv2d_input = is_op("nn.pad")(wildcard(), is_constant())
        qnn_conv2d = is_op("qnn.conv2d")(
            conv2d_input,
            is_constant(),
            is_constant(),
            is_constant(),
            is_constant(),
            is_constant(),
        )
        bias_add = is_op("nn.bias_add")(qnn_conv2d, is_constant())
        req = is_op("qnn.requantize")(
            qnn_conv2d | bias_add, is_constant(), is_constant(), is_constant(), is_constant()
        )
        clip_or_req = req.optional(is_op("clip"))
        return clip_or_req
    def check_qnn_conv2d(pattern):
        """Check if the Conv2D is supported by CMSIS-NN."""
        # Peel the optional clip (relu) to reach the requantize node.
        if str(pattern.op.name) == "clip":
            relu = pattern
            requantize = relu.args[0]
        else:
            requantize = pattern
        requantize_input = requantize.args[0]
        bias_add = None
        if str(requantize_input.op.name) == "nn.bias_add":
            bias_add = requantize_input
            conv2d = bias_add.args[0]
        else:
            conv2d = requantize_input
        conv2d_input = conv2d.args[0]
        conv2d_weight = conv2d.args[1]
        # check if depthwise Conv2D
        kernel_layout = conv2d.attrs.kernel_layout
        # position of the output-channel dimension in the kernel layout string
        pos_o = kernel_layout.index("O")
        groups = conv2d.attrs.groups
        is_depthwise = False
        if groups == int(conv2d_input.checked_type.shape[3]) and groups == int(
            conv2d_weight.checked_type.shape[pos_o]
        ):
            is_depthwise = True
        # check if dtypes are supported for the following entities
        # (input_dtype, weight_dtype, bias_dtype, out_dtype, pattern_dtype)
        are_dtypes_valid = False
        conv2d_input_dtype = conv2d_input.checked_type.dtype
        if bias_add:
            bias_dtype = bias_add.args[1].checked_type.dtype
        else:
            # this is only to enable to following check that validates all sorts of dtypes
            bias_dtype = "int32" if conv2d_input_dtype == "int8" else "int64"
        valid_dtypes = None
        if conv2d_input_dtype == "int8":
            valid_dtypes = ("int8", "int8", "int32", "int32", "int8")
        elif conv2d_input_dtype == "int16":
            valid_dtypes = ("int16", "int8", "int64", "int64", "int16")
        if (
            conv2d_input_dtype,
            conv2d_weight.checked_type.dtype,
            bias_dtype,
            conv2d.attrs.out_dtype,
            pattern.checked_type.dtype,
        ) == valid_dtypes:
            are_dtypes_valid = True
        # input_zero_point should be 0 when int16
        valid_input_zp = True
        if conv2d_input_dtype == "int16" and conv2d.args[2].data.numpy().item(0) != 0:
            valid_input_zp = False
        # kernel zero_point should be 0
        kernel_zp = conv2d.args[3].data.numpy()
        # normalize scalar zero points into a list for the all() check below
        kernel_zp = [kernel_zp] if kernel_zp.ndim == 0 else kernel_zp
        # combination of all checks to decide if pattern is eligible for partitioning
        ret = (
            are_dtypes_valid
            and valid_input_zp
            and all([zp == 0 for zp in kernel_zp])
            and (not is_depthwise or bias_add is not None)
        )
        return ret
    def check_qnn_conv2d_pad(pattern):
        """Check if the Pad followed by Conv2D is supported by CMSIS-NN."""
        # Peel the optional clip (relu) to reach the requantize node.
        if str(pattern.op.name) == "clip":
            relu = pattern
            requantize = relu.args[0]
        else:
            requantize = pattern
        requantize_input = requantize.args[0]
        if str(requantize_input.op.name) == "nn.bias_add":
            bias_add = requantize_input
            conv2d = bias_add.args[0]
        else:
            conv2d = requantize_input
        conv2d_input = conv2d.args[0]
        # check if sum of paddings from pad() and conv2d() satisfies CMSIS-NN constraints
        can_pad_be_fused = True
        if isinstance(conv2d_input, tvm.relay.expr.Call) and str(conv2d_input.op.name) == "nn.pad":
            pad_top, pad_left, pad_bottom, pad_right = GetEffectiveConv2DPadding(
                conv2d, conv2d_input
            )
            # check if difference in the side paddings is 1 along each dimension
            pad_w_diff = int(pad_right - pad_left)
            pad_h_diff = int(pad_bottom - pad_top)
            can_pad_be_fused = pad_w_diff in [0, 1] and pad_h_diff in [0, 1]
        ret = check_qnn_conv2d(pattern) and can_pad_be_fused
        return ret
    def qnn_fully_connected_pattern():
        """Create pattern for qnn.dense with optional Relu."""
        qnn_fc = is_op("qnn.dense")(
            wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
        )
        bias_add = is_op("nn.bias_add")(qnn_fc, is_constant())
        req = is_op("qnn.requantize")(
            qnn_fc | bias_add, is_constant(), is_constant(), is_constant(), is_constant()
        )
        clip_or_req = req.optional(is_op("clip"))
        return clip_or_req
    def check_qnn_fully_connected(pattern):
        """Check if the fully connected is supported by CMSIS-NN."""
        # Peel the optional clip (relu) to reach the requantize node.
        if str(pattern.op.name) == "clip":
            relu = pattern
            requantize = relu.args[0]
        else:
            requantize = pattern
        requantize_input = requantize.args[0]
        bias_add = None
        if str(requantize_input.op.name) == "nn.bias_add":
            bias_add = requantize_input
            fc = bias_add.args[0]
        else:
            fc = requantize_input
        fc_input = fc.args[0]
        fc_weight = fc.args[1]
        # Validate (input, weight, bias, out, pattern) dtypes as a tuple,
        # mirroring the conv2d check above.
        are_dtypes_valid = False
        fc_input_dtype = fc_input.checked_type.dtype
        if bias_add:
            bias_dtype = bias_add.args[1].checked_type.dtype
        else:
            bias_dtype = "int32" if fc_input_dtype == "int8" else "int64"
        valid_dtypes = None
        if fc_input_dtype == "int8":
            valid_dtypes = ("int8", "int8", "int32", "int32", "int8")
        elif fc_input_dtype == "int16":
            valid_dtypes = ("int16", "int8", "int64", "int64", "int16")
        if (
            fc_input_dtype,
            fc_weight.checked_type.dtype,
            bias_dtype,
            fc.attrs.out_dtype,
            pattern.checked_type.dtype,
        ) == valid_dtypes:
            are_dtypes_valid = True
        # kernel zero_point should be 0
        kernel_zp = fc.args[3].data.numpy().item(0)
        return are_dtypes_valid and kernel_zp == 0
    def qnn_avg_pool2d_pattern():
        """Matches average pooling with optional Relu"""
        pattern = is_op("cast")(wildcard())
        pattern = is_op("nn.avg_pool2d")(pattern)
        pattern = is_op("cast")(pattern)
        pattern = pattern.optional(is_op("clip"))
        return pattern
    def check_qnn_avg_pool2d(pattern):
        """Check if avg pool2d is supported by CMSIS-NN."""
        output = pattern
        if str(pattern.op.name) == "clip":
            pooling = pattern.args[0].args[0]
        else:
            pooling = pattern.args[0]
        input_op = pooling.args[0].args[0]
        # NHWC, batch size 1, and matching int8/int16 input/output dtypes.
        return (
            pooling.attrs.layout == "NHWC"
            and int(input_op.checked_type.shape[0]) == 1
            and (
                (input_op.checked_type.dtype == "int8" and output.checked_type.dtype == "int8")
                or (input_op.checked_type.dtype == "int16" and output.checked_type.dtype == "int16")
            )
        )
    def qnn_max_pool2d_pattern():
        """Matches max pool2d with optional Relu"""
        pattern = is_op("nn.max_pool2d")(wildcard())
        pattern = pattern.optional(is_op("clip"))
        return pattern
    def check_qnn_max_pool2d(pattern):
        """Check if max pool2d is supported by CMSIS-NN."""
        output = pattern
        if str(pattern.op.name) == "clip":
            pooling = pattern.args[0]
        else:
            pooling = pattern
        input_op = pooling.args[0]
        # NHWC, batch size 1, and matching int8/int16 input/output dtypes.
        return (
            pooling.attrs.layout == "NHWC"
            and int(input_op.checked_type.shape[0]) == 1
            and (
                (input_op.checked_type.dtype == "int8" and output.checked_type.dtype == "int8")
                or (input_op.checked_type.dtype == "int16" and output.checked_type.dtype == "int16")
            )
        )
    def binary_op_pattern(op):
        """Matches QNN binary operation"""
        pattern = is_op(f"qnn.{op}")(
            wildcard(),
            wildcard(),
            is_constant(),
            is_constant(),
            is_constant(),
            is_constant(),
            is_constant(),
            is_constant(),
        )
        return pattern.optional(is_op("clip"))
    def check_qnn_binary_op(pattern):
        """Check if binary op is supported by CMSIS-NN."""
        binary_op = pattern
        if str(pattern.op.name) == "clip":
            binary_op = pattern.args[0]
        arg0 = binary_op.args[0]
        arg1 = binary_op.args[1]
        # Check arguments are not scalar.
        if (
            isinstance(arg0, tvm.relay.expr.Constant)
            and len(arg0.checked_type.shape) == 0
            and isinstance(arg1, tvm.relay.expr.Constant)
            and len(arg1.checked_type.shape) == 0
        ):
            return False
        arg0_type = arg0.checked_type.dtype
        arg1_type = arg1.checked_type.dtype
        # Check arguments are of valid type.
        if arg0_type not in ["int8", "int16"]:
            return False
        # Check arguments are the same type.
        if arg0_type != arg1_type:
            return False
        # Check zero points are non-zero (arm_elementwise_(add|mul)_s16 does not
        # handle non-zero zero points).
        if arg0_type == "int16" and str(binary_op.op.name) in ["qnn.add", "qnn.mul"]:
            arg_0_zero_point = binary_op.args[3].data.numpy()
            arg_1_zero_point = binary_op.args[5].data.numpy()
            output_zero_point = binary_op.args[7].data.numpy()
            if any([arg_0_zero_point, arg_1_zero_point, output_zero_point]):
                return False
        return True
    # Padded conv2d comes first so the pad is folded into the composite
    # when its predicate allows it.
    return [
        ("cmsis-nn.qnn_conv2d", qnn_conv2d_pattern(with_pad=True), check_qnn_conv2d_pad),
        ("cmsis-nn.qnn_conv2d", qnn_conv2d_pattern(with_pad=False), check_qnn_conv2d),
        ("cmsis-nn.qnn_fully_connected", qnn_fully_connected_pattern(), check_qnn_fully_connected),
        ("cmsis-nn.qnn_avg_pool2d", qnn_avg_pool2d_pattern(), check_qnn_avg_pool2d),
        ("cmsis-nn.qnn_max_pool2d", qnn_max_pool2d_pattern(), check_qnn_max_pool2d),
        ("cmsis-nn.qnn_mul", binary_op_pattern("mul"), check_qnn_binary_op),
        ("cmsis-nn.qnn_add", binary_op_pattern("add"), check_qnn_binary_op),
        ("cmsis-nn.qnn_softmax", qnn_softmax_pattern(), check_qnn_softmax),
    ]
| 14,170 | 35.807792 | 100 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/bnns.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""BNNS library supported operators.
Is a part of Accelerate framework on macOS/iOS platforms. Apple provide several APIs
to handle tensor processing. Particularly:
* BNNS (basic neural )
* vDSP (1D and 2D tensor processing)
"""
import math
import tvm.ir
from tvm.relay import transform
from tvm.relay.expr import const
from tvm.relay.build_module import bind_params_by_name
from .register import register_pattern_table, get_pattern_table
from ...dataflow_pattern import wildcard, is_op, is_expr
def partition_for_bnns(mod, params=None):
    """Partition the graph greedily offloading supported
    operators to BNNS.
    Parameters
    ----------
    mod : Module
        The module to run passes on.
    params : Optional[Dict[str, NDArray]]
        Constant input parameters.
    Returns
    -------
    ret : annotated and partitioned module.
    """
    if params:
        mod["main"] = bind_params_by_name(mod["main"], params)
    # Graph preparation passes run before pattern matching.
    prepare_passes = [
        transform.InferType(),
        transform.FoldConstant(),
        transform.FoldScaleAxis(),
        transform.DynamicToStatic(),
        transform.AlterOpLayout(),
        # TODO(apeskov): WA. AlterOpLayout call lead to constants shape transformation
        # Some expand_dims op may appears after constants. It breaks BNNS fusing.
        # So we have to call FoldConstant right before bnns composite passes.
        transform.FoldConstant(),
    ]
    partition_passes = [
        transform.MergeComposite(get_pattern_table("bnns")),
        transform.AnnotateTarget("bnns"),
        # If you no need in per layer performance statistic you can
        # uncomment next line
        # transform.MergeCompilerRegions(),
        transform.PartitionGraph(),
    ]
    return tvm.transform.Sequential(prepare_passes + partition_passes)(mod)
def _register_external_op_helper(op_name, supported=True):
    """The helper function to indicate that a given operator can be supported
    by BNNS.
    Parameters
    ----------
    op_name : Str
        The name of supported operator that will be registered.
    Returns
    -------
    f : callable
        A function that returns if the operator is supported by BNNS.
    """
    def _checker(expr):
        # Unconditionally report the configured support status.
        return supported
    tvm.ir.register_op_attr(op_name, "target.bnns")(_checker)
    return _checker
# nn.batch_matmul is always offloadable to BNNS.
_register_external_op_helper("nn.batch_matmul")
@tvm.ir.register_op_attr("nn.max_pool2d", "target.bnns")
def max_pool2d_check(expr):
    """Check if the nn.max_pool2d can be executed in BNNS"""
    attrs, args = expr.attrs, expr.args
    data_typ = args[0].checked_type
    # BNNS pooling expects a float32 tensor of rank 3 or 4 in NCHW layout.
    rank = len(data_typ.shape)
    if data_typ.dtype != "float32" or rank not in (3, 4):
        return False
    return attrs.layout == "NCHW"
@tvm.ir.register_op_attr("nn.avg_pool2d", "target.bnns")
def avg_pool2d_check(expr):
    """Check if the nn.avg_pool2d can be executed in BNNS"""
    attrs, args = expr.attrs, expr.args
    data_typ = args[0].checked_type
    # BNNS pooling expects a float32 tensor of rank 3 or 4 in NCHW layout.
    rank = len(data_typ.shape)
    if data_typ.dtype != "float32" or rank not in (3, 4):
        return False
    return attrs.layout == "NCHW"
@tvm.ir.register_op_attr("nn.global_max_pool2d", "target.bnns")
def global_max_pool2d_check(expr):
    """Check if the nn.global_max_pool2d can be executed in BNNS"""
    attrs, args = expr.attrs, expr.args
    data_typ = args[0].checked_type
    # BNNS pooling expects a float32 tensor of rank 3 or 4 in NCHW layout.
    rank = len(data_typ.shape)
    if data_typ.dtype != "float32" or rank not in (3, 4):
        return False
    return attrs.layout == "NCHW"
@tvm.ir.register_op_attr("nn.global_avg_pool2d", "target.bnns")
def global_avg_pool2d_check(expr):
    """Check if the nn.global_avg_pool2d can be executed in BNNS"""
    attrs, args = expr.attrs, expr.args
    data_typ = args[0].checked_type
    # BNNS pooling expects a float32 tensor of rank 3 or 4 in NCHW layout.
    rank = len(data_typ.shape)
    if data_typ.dtype != "float32" or rank not in (3, 4):
        return False
    return attrs.layout == "NCHW"
def dtype_is_supported(dtype):
    """Check if data type is supported by BNNS backend"""
    # The empty string means "unspecified" and is treated as supported.
    supported_dtypes = ("", "float32")
    return dtype in supported_dtypes
@tvm.ir.register_op_attr("nn.conv2d", "target.bnns")
def conv2d_check(expr):
    """Check if the conv2d can be executed in BNNS"""
    attrs, args = expr.attrs, expr.args
    data_typ = args[0].checked_type
    if data_typ.dtype != "float32" or len(data_typ.shape) != 4:
        return False
    # Weights must be a compile-time constant for BNNS offload.
    if not isinstance(args[1], tvm.relay.expr.Constant):
        return False
    kernel_typ = args[1].checked_type
    if kernel_typ.dtype != "float32" or len(kernel_typ.shape) != 4:
        return False
    if attrs.data_layout != "NCHW":
        return False
    return dtype_is_supported(attrs.out_dtype)
def bias_check(expr):
    """Check is bias added through the correct dimension"""
    attrs, args = expr.attrs, expr.args
    if not isinstance(args[1], tvm.relay.expr.Constant):
        return False
    op_name = expr.op.name
    if op_name == "nn.bias_add":
        return attrs.axis == 1
    if op_name == "add":
        b_shape = args[1].checked_type.shape
        # A broadcastable add acts as a bias only when every non-channel
        # dimension of the constant is 1.
        if len(b_shape) == 4:
            return bool(b_shape[0] == 1 and b_shape[2] == 1 and b_shape[3] == 1)
        if len(b_shape) == 3:
            return bool(b_shape[1] == 1 and b_shape[2] == 1)
    return False
@tvm.ir.register_op_attr("nn.dense", "target.bnns")
def dense(expr):
    """Check if the dense can be used in BNNS."""
    attrs, args = expr.attrs, expr.args
    if args[0].checked_type.dtype != "float32":
        return False
    # BNNS requires constant, rank-2, float32 weights.
    if not isinstance(args[1], tvm.relay.expr.Constant):
        return False
    kernel_typ = args[1].checked_type
    if len(kernel_typ.shape) != 2 or kernel_typ.dtype != "float32":
        return False
    return attrs.out_dtype in ("float32", "")
def make_conv_pattern(with_bias=True, activation="none"):
    """Make pattern for bnns.conv2d primitive"""
    pat = is_op("nn.conv2d")(wildcard(), wildcard())
    if with_bias:
        bias = wildcard()
        pat = is_op("add")(pat, bias) | is_op("nn.bias_add")(pat, bias)
    # Optionally fuse a supported activation on top of the convolution.
    if activation == "relu":
        return is_op("nn.relu")(pat)
    if activation == "sigmoid":
        return is_op("sigmoid")(pat)
    return pat
def check_conv(extract):
    """Check conv pattern is supported by BNNS."""
    bias_is_ok = True
    node = extract
    # Walk down the fused chain until the conv2d itself, validating any
    # bias-style adds encountered along the way.
    while node.op.name != "nn.conv2d":
        if node.op.name in ("nn.bias_add", "add"):
            bias_is_ok &= bias_check(node)
        node = node.args[0]
    return conv2d_check(node) and bias_is_ok
def make_dense_bias_pattern():
    """Make pattern for bnns.dense primitive"""
    dense_pat = is_op("nn.dense")(wildcard(), wildcard())
    # The bias is matched as a plain elementwise add on top of the dense.
    return is_op("add")(dense_pat, wildcard())
def make_dense_bias_gelu_pattern():
    """Make pattern for bnns.dense primitive with fused bias and gelu activation"""
    dense_bias = make_dense_bias_pattern()
    # Constants of the tanh-based GELU approximation:
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    const1 = is_expr(const(0.044715))
    const2 = is_expr(const(math.sqrt(2 / math.pi)))
    # Build the approximation step by step around the dense+bias output.
    gelu = is_op("power")(dense_bias, is_expr(const(3, dtype="float32")))
    gelu = is_op("multiply")(gelu, const1)
    gelu = is_op("add")(gelu, dense_bias)
    gelu = is_op("multiply")(gelu, const2)
    gelu = is_op("tanh")(gelu)
    gelu = is_op("add")(gelu, is_expr(const(1, dtype="float32")))
    gelu = is_op("multiply")(gelu, is_expr(const(0.5)))
    gelu = is_op("multiply")(gelu, dense_bias)
    return gelu
def check_dense(extract):
    """Check dense pattern is supported by BNNS."""
    node = extract
    # Skip fused elementwise ops until the dense itself is reached.
    while node.op.name != "nn.dense":
        node = node.args[0]
    return dense(node)
@tvm.ir.register_op_attr("nn.instance_norm", "target.bnns")
def instance_norm_check(expr):
    """Check if the nn.instance_norm can be executed in BNNS"""
    attrs, args = expr.attrs, expr.args
    data_type = args[0].checked_type
    rank = len(data_type.shape)
    # BNNS handles only fp32 tensors of rank 3 or 4.
    if data_type.dtype != "float32" or not 3 <= rank <= 4:
        return False
    # gamma and beta must be compile-time constants.
    if not all(isinstance(args[i], tvm.relay.expr.Constant) for i in (1, 2)):
        return False
    # Channel axis must match the layout: CHW (axis 0) or NCHW (axis 1).
    if (attrs.axis == 0 and rank == 3) or (attrs.axis == 1 and rank == 4):
        return True
    return False
@register_pattern_table("bnns")
def pattern_table():
    """Get BNNS specific fusing patterns collection."""

    def conv_entry(suffix, **kwargs):
        # Build one (name, pattern, predicate) table entry for a conv variant.
        return ("bnns.conv2d" + suffix, make_conv_pattern(**kwargs), check_conv)

    # Order matters: more specific (longer) patterns are listed first so the
    # pattern matcher prefers them over their shorter prefixes.
    return [
        conv_entry("_bias_relu", with_bias=True, activation="relu"),
        conv_entry("_relu", with_bias=False, activation="relu"),
        conv_entry("_bias_sigmoid", with_bias=True, activation="sigmoid"),
        conv_entry("_sigmoid", with_bias=False, activation="sigmoid"),
        conv_entry("_bias", with_bias=True),
        ("bnns.dense_bias_gelu", make_dense_bias_gelu_pattern(), check_dense),
        ("bnns.dense_bias", make_dense_bias_pattern(), check_dense),
    ]
| 10,485 | 30.969512 | 91 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Contrib modules."""
from .register import get_pattern_table, register_pattern_table
from .arm_compute_lib import *
from .dnnl import *
from .bnns import *
from .coreml import *
from .ethosn import *
from .libtorch import *
from .tensorrt import *
from .cutlass import *
from .clml import *
| 1,113 | 36.133333 | 63 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/clml.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, pointless-exception-statement
"""CLML Library supported operators."""
import json
from string import Template
import tvm
from tvm import relay
from tvm.ir import Op
from tvm._ffi import register_func
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay import function as _function
from tvm.relay.expr_functor import ExprMutator
from tvm.relay.expr import Call, TupleGetItem
from ...dataflow_pattern import wildcard, is_op, is_constant, is_tuple_get_item, is_tuple
from .register import register_pattern_table
from ..strategy.generic import is_depthwise_conv2d
def clml_sdk_version():
    """Return the CLML SDK version baked into this TVM build (default: 2)."""
    version = tvm.support.libinfo().get("TVM_CLML_VERSION", 2)
    return int(version)
def is_clml_runtime_enabled():
    """Check if the CLML graph runtime is present.

    Returns
    -------
    ret: bool
        True if present, False if not.
    """
    # The global func is only registered when TVM was built with CLML support.
    checker = tvm.get_global_func("relay.op.is_clml_runtime_enabled", True)
    if checker is None:
        return False
    return checker()
class RemoveDropout(ExprMutator):
    """
    Removes all nn.dropout from an expr.
    """

    def visit_tuple_getitem(self, op: TupleGetItem) -> relay.expr.Expr:
        new_getitem = super().visit_tuple_getitem(op)
        if new_getitem.index != 0:
            return new_getitem
        producer = new_getitem.tuple_value
        is_dropout = (
            isinstance(producer, Call)
            and isinstance(producer.op, Op)
            and producer.op.name == "nn.dropout"
        )
        if is_dropout:
            # Dropout is identity at inference time: forward its input.
            return producer.args[0]
        return new_getitem
@transform.function_pass(opt_level=0)
class RemoveDropoutPass:
    """Function pass applying the RemoveDropout mutator to every function."""

    def transform_function(
        self, func: relay.function.Function, mod: tvm.IRModule, _: tvm.transform.PassContext
    ) -> relay.function.Function:
        mutator = RemoveDropout()
        return mutator.visit(func)
class BroadcastInputs(ExprMutator):
    """
    Binary operators need broadcasting for CLML.

    Rewrites elementwise binary ops so both operands are explicitly
    broadcast to the op's output shape before the call.
    """
    def visit_call(self, call):
        # Only elementwise binary ops are rewritten; everything else falls
        # through to the default mutator behaviour.
        if call.op.name in ["add", "subtract", "multiply", "divide", "maximum", "minimum"]:
            new_fn = self.visit(call.op)
            # Shape of the (type-inferred) result of the binary op.
            call_shape = call.checked_type.shape
            lhs = call.args[0]
            rhs = call.args[1]
            lhs_shape = lhs.checked_type.shape
            rhs_shape = rhs.checked_type.shape
            # Insert an explicit broadcast_to for any operand whose shape
            # differs from the output shape, so CLML sees equal-shaped inputs.
            if list(call_shape) != list(lhs_shape):
                lhs = relay.broadcast_to(self.visit(lhs), call_shape)
            if list(call_shape) != list(rhs_shape):
                rhs = relay.broadcast_to(self.visit(rhs), call_shape)
            # NOTE(review): an operand whose shape already matches the output
            # is passed through here without self.visit() -- presumably its
            # subtree is rewritten when the mutator reaches it separately;
            # verify nested binary ops under such operands are still handled.
            args = [lhs, rhs]
            return Call(new_fn, args, call.attrs)
        return super().visit_call(call)
@transform.function_pass(opt_level=0)
class BinaryOpBroadcaster:
    """Function pass applying the BroadcastInputs mutator to every function."""

    def transform_function(
        self, func: relay.function.Function, mod: tvm.IRModule, _: tvm.transform.PassContext
    ) -> relay.function.Function:
        rewriter = BroadcastInputs()
        return rewriter.visit(func)
def partition_for_clml(mod, params=None, **opts):
    """Partition the graph greedily offloading supported
    operators to CLML Library.

    Parameters
    ----------
    mod : Module
        The module to run passes on.
    params : Optional[Dict[str, NDArray]]
        Constant input parameters.

    Returns
    -------
    ret : annotated and partitioned module.
    """
    if params:
        mod["main"] = bind_params_by_name(mod["main"], params)
    # Pipeline: clean the graph, fuse CLML-supported patterns, then carve
    # the annotated "clml" regions out into separate functions.
    pipeline = [
        transform.InferType(),
        RemoveDropoutPass(),
        BinaryOpBroadcaster(),
        transform.FoldConstant(),
        transform.MergeComposite(clml_pattern_table()),
        transform.AnnotateTarget("clml", False),
        transform.MergeCompilerRegions(),
        transform.PartitionGraph(),
    ]
    return tvm.transform.Sequential(pipeline)(mod)
@register_func("relay.ext.clml.optimize")
def preprocess_module(mod):
    """
    Pre-process a module containing functions ready for CLML codegen. For now we enforce OIHW
    kernel layout and fold the transforms away.

    Parameters
    ----------
    mod : Module
        The module to run passes on.

    Returns
    -------
    preprocessed_mod : The processed module.
    """

    def convert_conv_layout(attrs, inputs, tinfos, out_type):
        # Force NCHW data / OIHW kernel; depthwise convs additionally swap
        # the first two kernel axes (OIHW -> IOHW).
        updated_attrs = dict(attrs)
        data_info, weight_info = tinfos[0], tinfos[1]
        data_layout, kernel_layout = "NCHW", "OIHW"
        updated_attrs["data_layout"] = data_layout
        updated_attrs["kernel_layout"] = kernel_layout
        if is_depthwise_conv2d(
            data_info.shape,
            attrs["data_layout"],
            weight_info.shape,
            attrs["kernel_layout"],
            attrs["groups"],
        ):
            updated_attrs["kernel_layout"] = (
                kernel_layout[1] + kernel_layout[0] + kernel_layout[2] + kernel_layout[3]
            )
        return relay.nn.conv2d(*inputs, **updated_attrs)

    with OpAttrContext("nn.conv2d", "FTVMAlterOpLayout", convert_conv_layout):
        layout_passes = tvm.transform.Sequential(
            [
                transform.ConvertLayout({"nn.conv2d": ["NCHW", "OIHW"]}),
                transform.ConvertLayout({"nn.conv2d_transpose": ["NCHW", "OIHW"]}),
                transform.AlterOpLayout(),
                transform.FoldConstant(),
            ]
        )
        with tvm.transform.PassContext(opt_level=3):
            preprocessed_mod = layout_passes(mod)
    return preprocessed_mod
def preprocess_for_clml(mod):
    """Preprocessing pass to alter the layouts for CLML compiler target"""
    for gvar in mod.get_global_vars():
        name = gvar.name_hint
        if name == "main":
            continue
        func = mod[name]
        if "Compiler" not in func.attrs.keys() or func.attrs["Compiler"] != "clml":
            continue
        # Run the CLML layout pipeline on the partitioned function body and
        # splice the converted body back into the module.
        clml_mod = tvm.IRModule.from_expr(func.body)
        with tvm.transform.PassContext(opt_level=3):
            clml_mod = preprocess_module(clml_mod)
        mod[name] = _function.Function(
            func.params, clml_mod["main"].body, func.ret_type, func.type_params, func.attrs
        )
    return mod
@register_pattern_table("clml")
def clml_pattern_table():
    """Get the CLML pattern table.

    Returns a list of (name, pattern, predicate) triples consumed by
    MergeComposite; each predicate validates a matched subgraph.
    """
    def conv_pattern():
        """Create a convolution pattern."""
        # conv2d -> optional bias -> optional batch_norm -> optional relu/clip
        pattern = is_op("nn.conv2d")(wildcard(), is_constant())
        pattern = pattern.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
        pattern = pattern.optional(lambda x: is_op("add")(x, is_constant()))
        pattern = pattern.optional(
            lambda x: is_tuple_get_item(
                is_op("nn.batch_norm")(
                    x, is_constant(), is_constant(), is_constant(), is_constant()
                )
            )
        )
        pattern = pattern.optional(is_op("nn.relu"))
        pattern = pattern.optional(is_op("clip"))
        return pattern
    def conv_transpose_pattern():
        """Create a transposed convolution pattern."""
        pattern = is_op("nn.conv2d_transpose")(wildcard(), is_constant())
        pattern = pattern.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
        pattern = pattern.optional(lambda x: is_op("add")(x, is_constant()))
        pattern = pattern.optional(
            lambda x: is_tuple_get_item(
                is_op("nn.batch_norm")(
                    x, is_constant(), is_constant(), is_constant(), is_constant()
                )
            )
        )
        pattern = pattern.optional(is_op("nn.relu"))
        pattern = pattern.optional(is_op("clip"))
        return pattern
    def pad_conv_pattern():
        """Create a pad with convolution pattern."""
        pattern = is_op("nn.pad")(wildcard(), is_constant())
        pattern = is_op("nn.conv2d")(pattern, is_constant())
        pattern = pattern.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
        pattern = pattern.optional(lambda x: is_op("add")(x, is_constant()))
        pattern = pattern.optional(
            lambda x: is_tuple_get_item(
                is_op("nn.batch_norm")(
                    x, is_constant(), is_constant(), is_constant(), is_constant()
                )
            )
        )
        pattern = pattern.optional(is_op("nn.relu"))
        pattern = pattern.optional(is_op("clip"))
        return pattern
    def batch_norm_pattern():
        """Create a batch norm pattern."""
        pattern = is_op("nn.batch_norm")(
            wildcard(), is_constant(), is_constant(), is_constant(), is_constant()
        )
        # Only the first output (normalized data) of batch_norm is used.
        pattern = is_tuple_get_item(pattern)
        return pattern
    def concat_pattern():
        """Create a concat pattern.
        Returns
        -------
        pattern : dataflow_pattern.AltPattern
            Denotes the concat pattern.
        """
        pattern = is_tuple(None)
        pattern = is_op("concatenate")(pattern)
        return pattern
    def dense_pattern():
        """Create a dense pattern."""
        pattern = is_op("nn.dense")(wildcard(), is_constant())
        return pattern
    def pad_pattern():
        """Create a pad pattern."""
        pattern = is_op("nn.pad")(wildcard(), is_constant())
        return pattern
    def check_conv(extract):
        """Check conv pattern is supported by CLML."""
        call = extract
        clip_found = False
        # Peel the optional tail (tuple_get_item / relu / clip) to reach the
        # conv node, validating the activation on the way.
        if isinstance(call, tvm.relay.expr.TupleGetItem):
            call = call.tuple_value
        elif call.op.name == "nn.relu":
            call = call.args[0]
            if isinstance(call, tvm.relay.expr.TupleGetItem):
                call = call.tuple_value
        elif call.op.name == "clip":
            clip_found = True
            # Only a relu6-style clip (0..6) is supported.
            if call.attrs["a_min"] != 0.0 or call.attrs["a_max"] != 6.0:
                return False
            call = call.args[0]
            if isinstance(call, tvm.relay.expr.TupleGetItem):
                call = call.tuple_value
        while call.op.name != "nn.conv2d":
            call = call.args[0]
        attrs, args = call.attrs, call.args
        if attrs.data_layout != "NCHW":
            return False
        # NOTE(review): this rejects a dilated 3x3 depthwise conv only when
        # ALL conditions hold simultaneously -- confirm the `and` chain
        # (rather than `or`) matches the intended restriction.
        if (
            (not clip_found)
            and (attrs.kernel_size[0] == 3)
            and (attrs.dilation[0] != 1)
            and (attrs.groups != 1)
            and (attrs.channels == attrs.groups)
        ):
            return False
        data_typ = args[0].checked_type
        kernel_typ = args[1].checked_type
        is_depthwise = is_depthwise_conv2d(
            data_typ.shape,
            attrs["data_layout"],
            kernel_typ.shape,
            attrs["kernel_layout"],
            attrs["groups"],
        )
        # Grouped convolutions are supported only in depthwise form.
        if attrs.groups != 1 and not is_depthwise:
            return False
        return True
    def check_conv_transpose(extract):
        """Check transposed conv pattern is supported by CLML."""
        call = extract
        if isinstance(call, tvm.relay.expr.TupleGetItem):
            call = call.tuple_value
        elif call.op.name == "nn.relu":
            call = call.args[0]
            if isinstance(call, tvm.relay.expr.TupleGetItem):
                call = call.tuple_value
        elif call.op.name == "clip":
            if call.attrs["a_min"] != 0.0 or call.attrs["a_max"] != 6.0:
                return False
            call = call.args[0]
            if isinstance(call, tvm.relay.expr.TupleGetItem):
                call = call.tuple_value
        while call.op.name != "nn.conv2d_transpose":
            call = call.args[0]
        attrs = call.attrs
        if attrs.data_layout != "NCHW":
            return False
        return True
    def check_binary_op(extract):
        # Binary elementwise op predicate.
        call = extract
        # Scalars are not supported
        if len(call.args[1].checked_type.shape) == 0:
            return False
        for arg in call.args:
            # Avoid any operators with dtype Int64
            if arg.checked_type.dtype == "int64":
                return False
            # No support for batch> 1
            if arg.checked_type.shape[0] > 1:
                return False
        return True
    def check_pad_op(extract):
        call = extract
        # Only 4-D (rank 4) padding is supported.
        if len(call.attrs["pad_width"]) != 4:
            return False
        # CLML can't process Tensor padding with out knowing layout.
        # Pad layers before any convolution are not guarenteed to be NCHW.
        if isinstance(call.args[0], tvm.relay.expr.Var):
            return False
        return True
    def check_softmax_op(extract):
        call = extract
        # Softmax on tensors of rank > 2 is not supported.
        if len(call.args[0].checked_type.shape) > 2:
            return False
        return True
    def check_upsampling_op(extract):
        call = extract
        # Only bilinear interpolation is supported.
        if call.attrs["method"] != "bilinear":
            return False
        return True
    def check_concat_op(extract):
        call = extract
        # Only concatenation along the channel axis (1) is supported.
        if call.attrs["axis"] != 1:
            return False
        return True
    def check_default_op(extract):
        call = extract
        if isinstance(call, tvm.relay.expr.TupleGetItem):
            call = call.tuple_value
        # Avoid any operators with dtype Int64
        for arg in call.args:
            if arg.checked_type.dtype == "int64":
                return False
        return True
    def check_batch_matmul_op(extract):
        call = extract
        # Only support single Matmul
        if call.args[0].checked_type.shape[0] > 1:
            return False
        if call.args[1].checked_type.shape[0] > 1:
            return False
        return True
    # More specific patterns (e.g. pad+conv) are listed before their prefixes
    # so the matcher prefers them.
    return [
        ("clml.pad_conv2d", pad_conv_pattern(), check_conv),
        ("clml.conv2d", conv_pattern(), check_conv),
        ("clml.conv2d_transpose", conv_transpose_pattern(), check_conv_transpose),
        ("clml.dense", dense_pattern(), check_default_op),
        ("clml.pad", pad_pattern(), check_pad_op),
        ("clml.concat", concat_pattern(), check_concat_op),
        ("clml.batch_norm", batch_norm_pattern(), check_default_op),
        ("clml.add", is_op("add")(wildcard(), wildcard()), check_binary_op),
        ("clml.subtract", is_op("subtract")(wildcard(), wildcard()), check_binary_op),
        ("clml.multiply", is_op("multiply")(wildcard(), wildcard()), check_binary_op),
        ("clml.divide", is_op("divide")(wildcard(), wildcard()), check_binary_op),
        ("clml.minimum", is_op("minimum")(wildcard(), wildcard()), check_binary_op),
        ("clml.maximum", is_op("maximum")(wildcard(), wildcard()), check_binary_op),
        ("clml.softmax", is_op("nn.softmax")(wildcard()), check_softmax_op),
        # ("clml.reshape", is_op("reshape")(wildcard()), check_default_op),
        ("clml.avg_pool2d", is_op("nn.avg_pool2d")(wildcard()), check_default_op),
        ("clml.max_pool2d", is_op("nn.max_pool2d")(wildcard()), check_default_op),
        ("clml.global_avg_pool2d", is_op("nn.global_avg_pool2d")(wildcard()), check_default_op),
        ("clml.global_max_pool2d", is_op("nn.global_max_pool2d")(wildcard()), check_default_op),
        ("clml.relu", is_op("nn.relu")(wildcard()), check_default_op),
        ("clml.clip", is_op("clip")(wildcard()), check_default_op),
        ("clml.batch_flatten", is_op("nn.batch_flatten")(wildcard()), check_default_op),
        ("clml.depth_to_space", is_op("nn.depth_to_space")(wildcard()), check_default_op),
        ("clml.upsampling", is_op("nn.upsampling")(wildcard()), check_upsampling_op),
        (
            "clml.batch_matmul",
            is_op("nn.batch_matmul")(wildcard(), wildcard()),
            check_batch_matmul_op,
        ),
    ]
def _register_external_op_helper(op_name, supported=True):
    """Register ``op_name`` as offloadable to CLML with a fixed verdict."""

    @tvm.ir.register_op_attr(op_name, "target.clml")
    def _func_wrapper(expr):
        # Constant predicate: support does not depend on the call's inputs.
        return supported

    return _func_wrapper


_register_external_op_helper("minimum")
_register_external_op_helper("maximum")
class OpAttrContext(object):
    """Temporarily changes the attr of an op.

    Context manager (RAII-style): on entry the op's attribute is replaced
    with the supplied value; on exit the previous value is restored.
    """
    def __init__(self, op_name, attr_key, attr_value):
        """Saves the required info for RAII pattern usage.
        Parameters
        ----------
        op_name : str
            The op name.
        attr_key : str
            The attribute name.
        attr_value : object
            The attribute value.
        """
        self.op = relay.op.get(op_name)
        self.attr_key = attr_key
        self.attr_value = attr_value
    def __enter__(self):
        # Remember the current attribute so it can be restored on exit.
        self.older_attr = self.op.get_attr(self.attr_key)
        self.op.reset_attr(self.attr_key)
        self.op.set_attr(self.attr_key, self.attr_value)
        return self
    def __exit__(self, ptype, value, trace):
        self.op.reset_attr(self.attr_key)
        # Restore the saved attribute only if one existed before entry.
        if self.older_attr:
            self.op.set_attr(self.attr_key, self.older_attr)
class CLMLGetSubModuleSrc:
    """Generates CLML API source for one CLML sub module out of the global TVM module.

    Walks the JSON codegen graph of a partitioned CLML sub module and emits
    C++ source lines (via string templates) that rebuild the network with
    the CLMLRunner API.
    """
    def __init__(self, cmod):
        """Initialize
        Parameters
        ----------
        cmod : Module
            The CLML sub module from TVM module
        """
        self.cmod = cmod
        # Parsed JSON graph; filled in by get_src().
        self.codegen = None
        self.nodes = None
        # node index -> generated tensor variable name
        self.node_map = {}
        self.input_meta = []
        self.output_meta = []
        # Accumulated C++ source lines.
        self.clml_code = []
        self.sub_module_name = None
        # C++ code templates for each CLMLRunner API call.
        self.MakeCLMLTensor = Template(
            """auto $name = runner.MakeCLMLTensor
        (std::vector<size_t>({$shape}), "$dtype", $layout);"""
        )
        self.MapInsert = Template("""runner.storage_map.insert({"$nid", $tensor_desc});""")
        self.MakeConv2D = Template(
            """
        // Convolution / Depthwise Convolution
        runner.MakeConv2D($input_tensor,
           $weight_tensor,
           $bias_tensor,
           $output_tensor,
           std::vector<cl_uint>({$padding}),
           std::vector<cl_uint>({$dilation}),
           std::vector<cl_uint>({$strides}),
           $groups,
           $mode,
           $activation,
           $has_bias,
           $has_act,
           "$dtype");"""
        )
        self.MakeConv2DWithBN = Template(
            """
        // Batchnorm
        runner.MakeConv2DWithBN($input_tensor,
                 $weight_tensor,
                 $bias_tensor,
                 $output_tensor,
                 $bn_scale_tensor,
                 $bn_bias_tensor,
                 $bn_mean_tensor,
                 $bn_var_tensor,
                 std::vector<float> ({$bn_attrs}),
                 std::vector<cl_uint> ({$padding}),
                 std::vector<cl_uint> ({$dilation}),
                 std::vector<cl_uint> ({$strides}),
                 $groups,
                 $mode,
                 $activation,
                 $has_bias,
                 $has_act,
                 "$dtype");"""
        )
        self.MakeRelu = Template(
            """
        // Relu / Relu6
        runner.MakeRelu($input_tensor, $output_tensor, $relu_type, "$dtype");
        """
        )
        self.MakeBN = Template(
            """
        // Batchnorm
        runner.MakeBatchNorm($input_tensor,
              $output_tensor,
              $bn_scale_tensor,
              $bn_bias_tensor,
              $bn_mean_tensor,
              $bn_var_tensor,
              std::vector<float> ({$bn_attrs}), "$dtype");"""
        )
        self.MakePool2D = Template(
            """
        // Pool2D
        runner.MakePool2D($input_tensor,
           $output_tensor,
           std::vector<cl_uint> ({$pool_size}),
           std::vector<cl_uint> ({$strides}),
           std::vector<cl_uint> ({$padding}),
           "$pool_type", "$dtype");"""
        )
        self.MakeGlobalPool2D = Template(
            """
        // GlobalPool2D
        runner.MakeGlobalPool2D($input_tensor,
                 $output_tensor,
                 std::vector<cl_uint> ({$in_shape}),
                 "$pool_type", "$dtype");"""
        )
        self.MakeReshape = Template(
            """
        // Reshape
        runner.MakeReshape($input_tensor,
            $output_tensor, "$dtype");"""
        )
        self.MakeConcatenate = Template(
            """
        // Concatinate
        runner.MakeConcatenate(
                std::vector<std::shared_ptr<cl_ml_tensor_memory_desc_qcom>> ({$in_list}),
                $output_tensor,
                $axis, "$dtype");"""
        )
        self.MakeDense = Template(
            """
        // Dense
        runner.MakeDense($input_tensor,
          $weight_tensor,
          $output_tensor,
          std::vector<cl_uint> ({$in_shape}),
          std::vector<cl_uint> ({$wt_shape}),
          "$dtype");"""
        )
        self.MakeSoftMax = Template(
            """
        // Softmax
        runner.MakeSoftMax($input_tensor,
            $output_tensor, "$dtype");"""
        )
        self.MakePad = Template(
            """
        // Pad
        runner.MakePad($input_tensor,
        $output_tensor,
        "$pad_mode",
        std::vector<cl_uint> ({$padding}), "$dtype");"""
        )
        self.MakeBatchFlatten = Template(
            """
        // BatchFlatten
        runner.MakeBatchFlatten($input_tensor,
                 $output_tensor, "$dtype");"""
        )
        self.MakeClip = Template(
            """
        // Clip
        runner.MakeClip($input_tensor,
         $output_tensor,
         $a_max,
         $a_min,
         "$dtype");"""
        )
        self.MakeBinaryOp = Template(
            """
        // BinaryOp
        runner.MakeBinaryOp($input_a,
             $input_b,
             $output_tensor,
             "$op", "$dtype");"""
        )
        self.MakeHeader = Template(
            """
        CLMLRunner $module(std::string name,
                   ToolArgs& args,
                   cl_platform_id arg_platform_id,
                   cl_context arg_context,
                   cl_device_id arg_device_id,
                   cl_command_queue arg_queue) {
        CLMLRunner runner = CLMLRunner(name,
                           args,
                           arg_platform_id,
                           arg_context,
                           arg_device_id,
                           arg_queue);
        runner.MakeUnusedTensor();
        """
        )
        self.MakeFooter = Template(
            """
            return runner;
        }
        """
        )
        self.MakeMetaInfo = Template(
            "runner.SetMetaInfo("
            '"Subgraph Name: $name\\n Input Count : $input_count\\n'
            " Output Count : $output_count\\n"
            ' Input MetaInfo\\n$input_meta\\n Output MetaInfo\\n$output_meta");'
        )
        self.MakeInputMetaInfo = Template(
            " Input: $in_name\\n Dtype : $dtype\\n Shape : [$shape]\\n"
        )
        self.MakeOutputMetaInfo = Template(
            " Output: $out_name\\n Dtype : $dtype\\n Shape : [$shape]\\n"
        )
    def get_src(self):
        """Returns pair of sub module name and the generated source"""
        self.codegen = json.loads(self.cmod.get_source("json"))
        self.sub_module_name = self.codegen["symbol"]
        self.nodes = self.codegen["nodes"]
        self.clml_code.append(self.MakeHeader.substitute(module=self.sub_module_name))
        def get_tensor_from_map(
            node_seq, shape=None, layout="CL_TENSOR_LAYOUT_OPTIMAL_QCOM", dtype="float32"
        ):
            # Return (creating on first use) the C++ tensor variable name for
            # graph node `node_seq`; also records input/const bookkeeping.
            if node_seq in self.node_map:
                return self.node_map[node_seq]
            else:
                node = self.nodes[node_seq]
                dtype = str(node["attrs"]["dtype"][0][0])
                if node["op"] == "input":
                    self.clml_code.append("// Input Node")
                    node_out_name = self.sub_module_name + "_" + "input_" + str(node_seq)
                else:
                    node_out_name = node["name"]
                if shape is None:
                    shape = str(tuple(node["attrs"]["shape"][0][0]))[1:-1]
                self.clml_code.append(
                    self.MakeCLMLTensor.substitute(
                        name=node_out_name, shape=shape, dtype=dtype, layout=layout
                    )
                )
                self.clml_code.append(
                    self.MapInsert.substitute(nid=node_out_name, tensor_desc=node_out_name)
                )
                if node["op"] == "input":
                    self.clml_code.append(
                        Template("runner.inputs.push_back($clml_input);").substitute(
                            clml_input=node_out_name
                        )
                    )
                    self.input_meta.append(
                        self.MakeInputMetaInfo.substitute(
                            in_name=node_out_name, dtype=dtype, shape=shape
                        )
                    )
                if self.nodes[node_seq]["op"] == "const":
                    self.clml_code.append(
                        Template('runner.consts.push_back("$nid");').substitute(nid=node["name"])
                    )
                self.node_map[node_seq] = node_out_name
                return node_out_name
        def make_output_tensor(
            node, node_seq, shape=None, layout="CL_TENSOR_LAYOUT_OPTIMAL_QCOM", dtype="float32"
        ):
            # Emit a fresh output tensor declaration for a kernel node.
            if dtype is None:
                dtype = str(node["attrs"]["dtype"][0][0])
            if shape is None:
                shape = str(tuple(node["attrs"]["shape"][0][0]))[1:-1]
            node_out_name = self.sub_module_name + "_" + "layer_out_" + str(node_seq)
            self.clml_code.append(
                self.MakeCLMLTensor.substitute(
                    name=node_out_name,
                    shape=shape,
                    dtype=dtype,
                    layout=layout,
                )
            )
            return node_out_name
        # Walk the graph in topological (node index) order, emitting one
        # CLMLRunner call per kernel node.
        for node_seq, node in enumerate(self.nodes):
            if node["op"] == "kernel":
                self.clml_code.append("// Kernel Node : " + node["name"])
                if node["name"] == "nn.conv2d" or node["name"] == "nn.depthwise_conv2d":
                    if "padding" in node["attrs"]:
                        padding = str(tuple(int(x) for x in node["attrs"]["padding"][0]))[1:-1]
                    else:
                        padding = "0, 0, 0, 0"
                    dilation = str(tuple(int(x) for x in node["attrs"]["dilation"][0]))[1:-1]
                    strides = str(tuple(int(x) for x in node["attrs"]["strides"][0]))[1:-1]
                    groups = node["attrs"]["groups"][0][0]
                    if node["name"] == "nn.conv2d":
                        mode = "CL_CONVOLUTION_MODE_CONVOLUTION_QCOM"
                    else:
                        mode = "CL_CONVOLUTION_MODE_DEPTHWISE_QCOM"
                    activation = "CL_ACTIVATION_RELU"
                    has_act = False
                    if "activation_type" in node["attrs"]:
                        has_act = True
                        activation = node["attrs"]["activation_type"][0][0]
                        if activation == "relu":
                            activation = "CL_ACTIVATION_RELU"
                        elif activation == "relu6":
                            activation = "CL_ACTIVATION_RELU6"
                        else:
                            # NOTE(review): RuntimeError is constructed but never
                            # raised here -- unknown activations pass silently.
                            RuntimeError("Unknown activation:" + activation)
                    # NOTE(review): node["inputs"] is a list, so comparing it
                    # to an int is always False -- should this be
                    # len(node["inputs"])? Verify against the JSON graph shape.
                    has_bias = bool((node["inputs"] == 3) or (node["inputs"] == 7))
                    has_bn = bool((node["inputs"] == 6) or (node["inputs"] == 7))
                    input_tensor = get_tensor_from_map(node["inputs"][0][0])
                    weight_tensor = get_tensor_from_map(node["inputs"][1][0])
                    if not has_bias:
                        bias_tensor = "runner.unusedTensor"
                    else:
                        bias_tensor = get_tensor_from_map(node["inputs"][2][0])
                    node_out_name = make_output_tensor(node, node_seq)
                    if not has_bn:
                        self.clml_code.append(
                            self.MakeConv2D.substitute(
                                input_tensor=input_tensor,
                                weight_tensor=weight_tensor,
                                bias_tensor=bias_tensor,
                                output_tensor=node_out_name,
                                padding=padding,
                                dilation=dilation,
                                strides=strides,
                                groups=groups,
                                mode=mode,
                                activation=activation,
                                has_bias="true" if has_bias else "false",
                                has_act="true" if has_act else "false",
                                dtype=node["attrs"]["dtype"][0][0],
                            )
                        )
                    else:
                        bn_index = 3 if has_bias else 2
                        bn_attrs = tuple(node["attrs"]["batchnorm"][0][0])
                        axis = bn_attrs[0]
                        bn_shape = [1, 1, 1, 1]
                        bn_node = self.nodes[node["inputs"][bn_index][0]]
                        bn_shape[axis] = bn_node["attrs"]["shape"][0][0]
                        dtype = bn_node["attrs"]["dtype"][0][0]
                        bn_scale_tensor = get_tensor_from_map(
                            node["inputs"][bn_index][0],
                            shape=str(tuple(bn_shape))[1:-1],
                            dtype=dtype,
                        )
                        bn_bias_tensor = get_tensor_from_map(
                            node["inputs"][bn_index + 1][0],
                            shape=str(tuple(bn_shape))[1:-1],
                            dtype=dtype,
                        )
                        bn_mean_tensor = get_tensor_from_map(
                            node["inputs"][bn_index + 2][0],
                            shape=str(tuple(bn_shape))[1:-1],
                            dtype=dtype,
                        )
                        bn_var_tensor = get_tensor_from_map(
                            node["inputs"][bn_index + 3][0],
                            shape=str(tuple(bn_shape))[1:-1],
                            dtype=dtype,
                        )
                        self.clml_code.append(
                            self.MakeConv2DWithBN.substitute(
                                input_tensor=input_tensor,
                                weight_tensor=weight_tensor,
                                bias_tensor=bias_tensor,
                                output_tensor=node_out_name,
                                bn_scale_tensor=bn_scale_tensor,
                                bn_bias_tensor=bn_bias_tensor,
                                bn_mean_tensor=bn_mean_tensor,
                                bn_var_tensor=bn_var_tensor,
                                bn_attrs=str(bn_attrs)[1:-1],
                                padding=padding,
                                dilation=dilation,
                                strides=strides,
                                groups=groups,
                                mode=mode,
                                activation=activation,
                                has_bias="true" if has_bias else "false",
                                has_act="true" if has_act else "false",
                                dtype=node["attrs"]["dtype"][0][0],
                            )
                        )
                elif node["name"] == "nn.relu6" or node["name"] == "nn.relu":
                    input_tensor = get_tensor_from_map(node["inputs"][0][0])
                    node_out_name = make_output_tensor(node, node_seq)
                    relu_type = (
                        "CL_ACTIVATION_RELU" if node["name"] == "nn.relu" else "CL_ACTIVATION_RELU6"
                    )
                    self.clml_code.append(
                        self.MakeRelu.substitute(
                            input_tensor=input_tensor,
                            output_tensor=node_out_name,
                            relu_type=relu_type,
                            dtype=node["attrs"]["dtype"][0][0],
                        )
                    )
                elif node["name"] == "nn.batch_norm":
                    bn_attrs = tuple(node["attrs"]["batchnorm"][0][0])
                    axis = bn_attrs[0]
                    bn_shape = [1, 1, 1, 1]
                    bn_node = self.nodes[node["inputs"][0][0]]
                    bn_shape[axis] = bn_node["attrs"]["shape"][0][0]
                    dtype = bn_node["attrs"]["dtype"][0][0]
                    bn_scale_tensor = get_tensor_from_map(
                        node["inputs"][0][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype
                    )
                    bn_bias_tensor = get_tensor_from_map(
                        node["inputs"][1][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype
                    )
                    bn_mean_tensor = get_tensor_from_map(
                        node["inputs"][2][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype
                    )
                    bn_var_tensor = get_tensor_from_map(
                        node["inputs"][3][0], shape=str(tuple(bn_shape))[1:-1], dtype=dtype
                    )
                    input_tensor = get_tensor_from_map(node["inputs"][0][0])
                    node_out_name = make_output_tensor(node, node_seq)
                    self.clml_code.append(
                        self.MakeBN.substitute(
                            input_tensor=input_tensor,
                            output_tensor=node_out_name,
                            bn_scale_tensor=bn_scale_tensor,
                            bn_bias_tensor=bn_bias_tensor,
                            bn_mean_tensor=bn_mean_tensor,
                            bn_var_tensor=bn_var_tensor,
                            bn_attrs=str(bn_attrs)[1:-1],
                            dtype=node["attrs"]["dtype"][0][0],
                        )
                    )
                elif node["name"] in ["nn.max_pool2d", "nn.avg_pool2d", "nn.l2_pool2d"]:
                    input_tensor = get_tensor_from_map(node["inputs"][0][0])
                    node_out_name = make_output_tensor(node, node_seq)
                    pool_size = str(tuple(int(x) for x in node["attrs"]["pool_size"][0]))[1:-1]
                    strides = str(tuple(int(x) for x in node["attrs"]["strides"][0]))[1:-1]
                    padding = str(tuple(int(x) for x in node["attrs"]["padding"][0]))[1:-1]
                    self.clml_code.append(
                        self.MakePool2D.substitute(
                            input_tensor=input_tensor,
                            output_tensor=node_out_name,
                            pool_size=pool_size,
                            strides=strides,
                            padding=padding,
                            pool_type=node["name"],
                            dtype=node["attrs"]["dtype"][0][0],
                        )
                    )
                elif node["name"] in ["nn.global_max_pool2d", "nn.global_avg_pool2d"]:
                    input_tensor = get_tensor_from_map(node["inputs"][0][0])
                    node_out_name = make_output_tensor(node, node_seq)
                    in_node = self.nodes[node["inputs"][0][0]]
                    in_shape = str(tuple(in_node["attrs"]["shape"][0][0]))[1:-1]
                    self.clml_code.append(
                        self.MakeGlobalPool2D.substitute(
                            input_tensor=input_tensor,
                            output_tensor=node_out_name,
                            in_shape=in_shape,
                            pool_type=node["name"],
                            dtype=node["attrs"]["dtype"][0][0],
                        )
                    )
                elif node["name"] == "reshape":
                    input_tensor = get_tensor_from_map(node["inputs"][0][0])
                    node_out_name = make_output_tensor(node, node_seq)
                    self.clml_code.append(
                        self.MakeReshape.substitute(
                            input_tensor=input_tensor,
                            output_tensor=node_out_name,
                            dtype=node["attrs"]["dtype"][0][0],
                        )
                    )
                elif node["name"] == "concatenate":
                    input_len = len(node["inputs"])
                    in_list = str(
                        [get_tensor_from_map(node["inputs"][x][0]) for x in range(input_len)]
                    )[1:-1]
                    node_out_name = make_output_tensor(node, node_seq)
                    axis = node["attrs"]["axis"][0][0]
                    self.clml_code.append(
                        self.MakeConcatenate.substitute(
                            in_list=in_list,
                            output_tensor=node_out_name,
                            axis=axis,
                            dtype=node["attrs"]["dtype"][0][0],
                        )
                    )
                elif node["name"] == "nn.dense":
                    in_node = self.nodes[node["inputs"][0][0]]
                    in_shape = tuple(in_node["attrs"]["shape"][0][0])
                    # NOTE(review): wt_shape is read from in_node (the data
                    # input), not from the weight node at inputs[1] -- verify
                    # this is intended and not a copy-paste slip.
                    wt_shape = tuple(in_node["attrs"]["shape"][0][0])
                    input_tensor = get_tensor_from_map(
                        node["inputs"][0][0], layout="CL_TENSOR_LAYOUT_NCHW_QCOM"
                    )
                    weight_tensor = get_tensor_from_map(
                        node["inputs"][1][0],
                        shape=str(tuple([1, 1, wt_shape[0], wt_shape[1]]))[1:-1],
                        layout="CL_TENSOR_LAYOUT_NCHW_QCOM",
                    )
                    node_out_name = make_output_tensor(
                        node,
                        node_seq,
                        shape=str(tuple([in_shape[0], wt_shape[0], 1, 1]))[1:-1],
                        layout="CL_TENSOR_LAYOUT_NCHW_QCOM",
                    )
                    self.clml_code.append(
                        self.MakeDense.substitute(
                            input_tensor=input_tensor,
                            weight_tensor=weight_tensor,
                            output_tensor=node_out_name,
                            in_shape=str(in_shape)[1:-1],
                            wt_shape=str(wt_shape)[1:-1],
                            dtype=node["attrs"]["dtype"][0][0],
                        )
                    )
                elif node["name"] == "nn.softmax":
                    input_tensor = get_tensor_from_map(node["inputs"][0][0])
                    node_out_name = make_output_tensor(node, node_seq)
                    self.clml_code.append(
                        self.MakeSoftMax.substitute(
                            input_tensor=input_tensor,
                            output_tensor=node_out_name,
                            dtype=node["attrs"]["dtype"][0][0],
                        )
                    )
                elif node["name"] == "nn.pad":
                    input_tensor = get_tensor_from_map(node["inputs"][0][0])
                    node_out_name = make_output_tensor(node, node_seq)
                    pad_mode = node["attrs"]["pad_mode"][0][0]
                    padding = str(tuple(int(x) for x in node["attrs"]["pad_width"][0]))[1:-1]
                    self.clml_code.append(
                        self.MakePad.substitute(
                            input_tensor=input_tensor,
                            output_tensor=node_out_name,
                            pad_mode=pad_mode,
                            padding=padding,
                            dtype=node["attrs"]["dtype"][0][0],
                        )
                    )
                elif node["name"] == "nn.batch_flatten":
                    input_tensor = get_tensor_from_map(node["inputs"][0][0])
                    node_out_name = make_output_tensor(node, node_seq)
                    self.clml_code.append(
                        self.MakeBatchFlatten.substitute(
                            input_tensor=input_tensor,
                            output_tensor=node_out_name,
                            dtype=node["attrs"]["dtype"][0][0],
                        )
                    )
                elif node["name"] == "clip":
                    input_tensor = get_tensor_from_map(node["inputs"][0][0])
                    node_out_name = make_output_tensor(node, node_seq)
                    a_max = node["attrs"]["a_max"][0][0]
                    a_min = node["attrs"]["a_min"][0][0]
                    self.clml_code.append(
                        self.MakeClip.substitute(
                            input_tensor=input_tensor,
                            output_tensor=node_out_name,
                            a_max=a_max,
                            a_min=a_min,
                            dtype=node["attrs"]["dtype"][0][0],
                        )
                    )
                elif node["name"] in [
                    "add",
                    "subtract",
                    "multiply",
                    "minimum",
                    "maximum",
                    "divide",
                ]:
                    input_a = get_tensor_from_map(node["inputs"][0][0])
                    input_b = get_tensor_from_map(node["inputs"][1][0])
                    node_out_name = make_output_tensor(node, node_seq)
                    self.clml_code.append(
                        self.MakeBinaryOp.substitute(
                            input_a=input_a,
                            input_b=input_b,
                            output_tensor=node_out_name,
                            op=node["name"],
                            dtype=node["attrs"]["dtype"][0][0],
                        )
                    )
                else:
                    # NOTE(review): RuntimeError is constructed but never
                    # raised -- unsupported ops pass through silently.
                    RuntimeError("Unsupported Op:" + node["name"])
                self.clml_code.append(
                    self.MapInsert.substitute(nid=node_out_name, tensor_desc=node_out_name)
                )
                self.node_map[node_seq] = node_out_name
            elif node["op"] not in ["const", "input"]:
                print("Unknown Node type:", node["op"])
        # Populate outputs
        out_nodes = self.codegen["heads"]
        self.clml_code.append("// Populate outputs")
        for nid_triple in out_nodes:
            nid = nid_triple[0]
            out_node = self.nodes[nid]
            dtype = str(out_node["attrs"]["dtype"][0][0])
            shape = str(tuple(out_node["attrs"]["shape"][0][0]))[1:-1]
            out_name = self.sub_module_name + "_" + "layer_out_" + str(nid)
            self.clml_code.append(
                Template(
                    'runner.outputs.insert({"$out_name", runner.storage_map["$out_name"]});'
                ).substitute(out_name=out_name)
            )
            self.clml_code.append(
                Template('runner.outputs_dtypes.insert({"$out_name", "$dtype"});').substitute(
                    out_name=out_name, dtype=dtype
                )
            )
            self.clml_code.append(
                Template(
                    "runner.outputs_shapes.insert" '({"$out_name", std::vector<size_t>({$shape})});'
                ).substitute(out_name=out_name, shape=shape)
            )
            self.output_meta.append(
                self.MakeOutputMetaInfo.substitute(out_name=out_name, dtype=dtype, shape=shape)
            )
        # Mem allocation & Param copy
        self.clml_code.append("// Allocate Tensor Memory and copy params")
        self.clml_code.append("runner.AllocateMemAndPopulateParams();")
        # Meta data preparation
        self.clml_code.append(
            self.MakeMetaInfo.substitute(
                name=self.sub_module_name,
                input_count=len(self.input_meta),
                output_count=len(self.output_meta),
                input_meta="\\\n".join(self.input_meta),
                output_meta="\\\n".join(self.output_meta),
            )
        )
        self.clml_code.append(self.MakeFooter.substitute())
        return (self.sub_module_name, self.clml_code)
class CLMLGenSrc:
"""Generates CLML API source given a TVM compiled mod"""
def __init__(self, libm):
"""Initialize
Parameters
----------
libm : Module
Compiled relay module
"""
self.libm = libm
self.gen_src = []
self.clml_modules = None
self.clml_builds = {}
self.codegen = None
self.nodes = None
self.MakeFileHeader = Template(
"""/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \\file clml_models.cc
* \\brief CLML models for all subgraph in given TVM module.
*/
// AUTO GENERATED BY TOOL (clml_codegen.py), PLEASE DO NOT CHANGE THIS FILE!
// =========================================================================
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <algorithm>
#include <math.h>
#include <list>
// Project includes
#include "CL/cl.h"
#include "CL/cl_qcom_ml_ops.h"
#include "clml_runner.h"
using namespace tvm::runtime;
"""
)
def get_clml_params(self):
"""Returns parameters from the TVM module"""
clml_params = {}
if self.libm.get_lib().type_key == "const_loader":
params = self.libm.get_lib().get_function("get_const_var_ndarray")()
clml_params.update(params)
for mod in self.libm.get_lib().imported_modules:
if mod.type_key == "const_loader":
params = mod.get_const_var_ndarray()
clml_params.update(params)
clml_params_save = {}
for key, val in clml_params.items():
clml_params_save[str(key)] = val.numpy()
return clml_params_save
def get_artifacts(self):
"""Function that returns params as dict and source as list of cource code lines"""
self.clml_modules = list(
filter(lambda mod: mod.type_key == "clml", self.libm.get_lib().imported_modules)
)
self.clml_builds["file_header"] = [self.MakeFileHeader.substitute()]
for cmod in self.clml_modules:
(sub_module_name, clml_code) = CLMLGetSubModuleSrc(cmod).get_src()
self.clml_builds[sub_module_name] = clml_code
main_code = []
main_code.append(
"""
std::vector<CLMLRunner> BuildModules(ToolArgs& args,
cl_platform_id arg_platform,
cl_context arg_context,
cl_device_id arg_device_id,
cl_command_queue arg_queue) {
std::vector<CLMLRunner> runners;"""
)
for key, val in self.clml_builds.items():
if key != "file_header":
main_code.append(
"runners.push_back("
+ key
+ '("'
+ key
+ '", args, arg_platform, arg_context, arg_device_id, arg_queue));'
)
main_code.append("return runners;}")
self.clml_builds["MainBuild"] = main_code
for key, val in self.clml_builds.items():
self.gen_src.extend(val)
return (self.get_clml_params(), self.gen_src)
| 49,674 | 37.991366 | 100 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/register.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Register utilities for external codegen."""
# Registry mapping external compiler name -> its materialized pattern table.
_PATTERN_TABLES = {}


def register_pattern_table(compiler, table=None):
    """Register a pattern table for an external compiler.

    Pattern tables are used to create composite functions.
    See the MergeComposite pass.

    Parameters
    ----------
    compiler : str
        The name of compiler

    table : function, optional
        A function that returns the pattern table

    Returns
    -------
    fregister : function
        Register function if value is not specified.
    """

    def _register(table_func):
        """Store the evaluated table and hand the function back unchanged."""
        _PATTERN_TABLES[compiler] = table_func()
        return table_func

    if table is None:
        # Used as a decorator: @register_pattern_table("name")
        return _register
    return _register(table)


def get_pattern_table(compiler):
    """Get the pattern table associated with a compiler (if it's registered)."""
    return _PATTERN_TABLES.get(compiler)
| 1,708 | 31.865385 | 80 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/ethosu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=ungrouped-imports, import-outside-toplevel
"""Arm(R) Ethos(TM)-U NPU supported operators."""
import functools
from typing import Callable, Dict, List, Optional, Tuple
import numpy as np # type: ignore
import tvm # type: ignore
from tvm import relay
from tvm.ir import Op
from tvm.relay.build_module import bind_params_by_name # type: ignore
from tvm.relay.dataflow_pattern import ( # type: ignore
is_constant,
is_op,
is_tuple,
wildcard,
)
from tvm.relay.expr import Call, Constant # type: ignore
from tvm.relay.op.contrib.register import register_pattern_table # type: ignore
try:
    # As ethos-u-vela package is an optional TVM dependency, we want to lazy load it
    # and check whether it is installed or not.
    #
    # In order to show the appropriate error messages when we try to invoke code that
    # rely on imports from ethos-u-vela, we protect them with the decorator @requires_vela
    # implemented below.
    from ethosu.vela import api as vapi  # type: ignore
except ImportError:
    # Sentinel checked by @requires_vela before any Vela API use.
    vapi = None
def requires_vela(func):
    """Decorator guarding *func* behind the optional ethos-u-vela dependency.

    Raises ImportError at call time when the package is not installed.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if vapi is None:
            # Vela failed to import at module load; refuse to run.
            raise ImportError(
                "The 'ethos-u-vela' python package is required for the Arm(R) Ethos(TM)-U NPU "
                "backend. Please install the dependency using your Python package manager."
            ) from None
        return func(*args, **kwargs)

    return wrapper
class TensorParams:
    """
    This class will parse a tvm Expr along with quantization scale
    and zero point to populate parameters that are required
    for the creation of tensors in Vela.
    """

    @requires_vela
    def __init__(self, tensor, layout=None, scale=None, zero_point=None):
        self.tensor = tensor
        # Constant tensors expose their data; everything else has no values.
        self.values = tensor.data.asnumpy() if isinstance(tensor, Constant) else None
        self.dtype = tensor.checked_type.dtype
        self.shape = [int(dim) for dim in tensor.checked_type.shape]
        self.layout = layout

        if scale is None or zero_point is None:
            # No quantization info supplied: fall back to identity quantization.
            self.q_params = vapi.NpuQuantization(1.0, 0)
        else:
            self.q_params = vapi.NpuQuantization(
                scale.data.asnumpy().astype("float32"),
                zero_point.data.asnumpy().astype(self.dtype),
            )
def check_strides(strides: List[int], stride_range=None) -> bool:
    """This function checks whether strides are within the limits supported by the NPU"""
    # Default NPU stride limits.
    smin, smax = (1, 3) if stride_range is None else stride_range
    return smin <= strides[0] <= smax and smin <= strides[1] <= smax
def check_same_ifm_and_kernel_shape(padding, ifm_shape, pool_shape):
    """
    This function checks whether AvgPool2D or MaxPool2D could be legalized as ethosu_pooling
    supported by the NPU.

    We consider only specific case: when there is no AvgPool2D padding, the spatial
    dimensions of ifm and the shape of pooling are equal, but stride size exceed 3
    by any of dimensions, e.g:
      ifm: (1, 8, 8, _), strides: (8, 8), pool_shape: (8, 8)
      ifm: (1, 25, 5, _), strides: (25, 5), pool_shape: (25, 5)
    """
    no_padding = list(padding) == [0, 0, 0, 0]
    spatial_dims_match = list(pool_shape) == [ifm_shape[1], ifm_shape[2]]
    return no_padding and spatial_dims_match
def check_valid_dtypes(tensor_params: List[TensorParams], supported_dtypes: List[type]) -> bool:
    """This function checks whether dtypes are supported by the NPU"""
    for param in tensor_params:
        # Reject unsupported element types.
        if np.dtype(param.dtype) not in supported_dtypes:
            return False
        # Reject axes longer than the NPU can address.
        if any(dim > 65536 for dim in param.shape):
            return False
    return True
def check_weights(weights: TensorParams, dilation: List[int]):
    """This function checks whether weight tensor is compatible with the NPU"""
    from tvm.relay.backend.contrib.ethosu.util import get_dim_value

    # NPU limits: dilated kernel height in [1, 64], dilated H*W in [1, 64*64],
    # and a saturation bound on the accumulated absolute weight sum.
    dilated_height_range = (1, 64)
    dilated_hxw_range = (1, 64 * 64)
    weights_limit = 127 * 65536
    # NOTE(review): dilation[0] is paired with the W axis and dilation[1] with
    # the H axis here — confirm this matches the attrs' (dilation_h, dilation_w)
    # ordering used by the callers.
    dilated_width = (weights.shape[get_dim_value(weights.layout, "W")] - 1) * dilation[0] + 1
    dilated_height = (weights.shape[get_dim_value(weights.layout, "H")] - 1) * dilation[1] + 1
    dh_min, dh_max = dilated_height_range
    if not dh_min <= dilated_height <= dh_max:
        return False
    dilated_hxw = dilated_height * dilated_width
    dhxw_min, dhxw_max = dilated_hxw_range
    if not dhxw_min <= dilated_hxw <= dhxw_max:
        return False
    # A saturation upper bound check for accumulators
    # NOTE: this zero-point shift mutates weights.values in place.
    weights.values = weights.values - weights.q_params.zero_point
    axis = (
        get_dim_value(weights.layout, "H"),
        get_dim_value(weights.layout, "W"),
        get_dim_value(weights.layout, "I"),
    )
    # Max (over the remaining axis) of the sum of absolute weights per filter.
    sum_weights = np.amax(np.sum(np.absolute(weights.values), axis=axis))
    return sum_weights <= weights_limit
def check_bias(bias: TensorParams):
    """This function checks whether the bias values fit in 40 bits.

    ``int.bit_length`` measures the magnitude of each (possibly negative)
    bias; the previous ``len(bin(v)[2:])`` form miscounted negative values
    because ``bin(-5)`` is ``'-0b101'``, so slicing off two characters left a
    stray ``'b'`` in the measured string.
    """
    if bias and bias.dtype == np.dtype("int64"):
        # numpy int64 scalars are converted to Python ints for bit_length().
        return all(int(bias_value).bit_length() <= 40 for bias_value in bias.values)
    return True
def check_batch_size(ifm: TensorParams):
    """This function checks for the number of batches vela currently supports"""
    # Only a single batch is supported.
    batch_dim = ifm.shape[0]
    return batch_dim == 1
def check_dilation(dilation: List[int], dilation_range=None):
    """This function checks whether dilation is within the limits supported by the NPU"""
    # Default NPU dilation limits.
    dmin, dmax = (1, 2) if dilation_range is None else dilation_range
    return dmin <= dilation[0] <= dmax and dmin <= dilation[1] <= dmax
def check_padding(padding: List[int], bounds: List[int]):
    """This function checks whether padding is within the limits supported by the NPU"""
    # Both padding and its bounds must be (top, left, bottom, right).
    if len(padding) != 4 or len(bounds) != 4:
        return False
    return all(value <= bound for value, bound in zip(padding, bounds))
def check_pool_shape(pool_shape: tvm.ir.container.Array) -> bool:
    """Check that a 2-D pooling window is within the limits supported by the NPU."""
    if len(pool_shape) != 2:
        return False
    height, width = pool_shape
    # Width is capped at 256; the window area at 256*256.
    return width <= 256 and height * width <= 256 * 256
def check_dimensions(tensor: TensorParams):
    """This function checks that the tensor has no more than 4 dimensions"""
    rank = len(tensor.shape)
    return rank <= 4
class QnnConv2DParams:
    """
    This class will parse a Call to a ethosu.qnn_conv2d composite function
    and extract quantization information of all the associated tensors.
    """

    composite_name = "ethos-u.qnn_conv2d"
    # The NPU only supports padding upto the numbers as follows
    padding_bounds = [31, 31, 32, 32]
    # Relay op name -> NPU activation name for activations fused into the composite.
    activation_map = {"clip": "CLIP"}

    @requires_vela
    def __init__(self, func_body: tvm.relay.Function):
        # Expected composite body, outermost first:
        #   [clip] -> qnn.requantize -> nn.bias_add -> qnn.conv2d -> [nn.pad]
        from tvm.relay.backend.contrib.ethosu.util import QConv2DArgs  # type: ignore
        from tvm.relay.backend.contrib.ethosu.util import BiasAddArgs, RequantArgs

        activation = None
        separate_padding = None
        # Peel the optional fused activation to reach the requantize op.
        if str(func_body.op.name) in self.activation_map.keys():
            activation = func_body
            requantize_op = activation.args[0]
        else:
            requantize_op = func_body
        bias_add = requantize_op.args[0]
        qnn_conv2d = bias_add.args[0]
        # A standalone nn.pad feeding the convolution may be foldable into the
        # convolution's own padding attribute (see extract_padding below).
        if (
            isinstance(qnn_conv2d.args[0], relay.Call)
            and isinstance(qnn_conv2d.args[0].op, Op)
            and str(qnn_conv2d.args[0].op.name) == "nn.pad"
        ):
            separate_padding = qnn_conv2d.args[0]
        data_layout = qnn_conv2d.attrs.data_layout
        self.kernel_layout = qnn_conv2d.attrs.kernel_layout
        # We consider the weights & biases as params as it should be a Constant
        self.weights = TensorParams(
            qnn_conv2d.args[QConv2DArgs.WEIGHTS.value],
            self.kernel_layout,
            qnn_conv2d.args[QConv2DArgs.WEIGHTS_SCALE.value],
            qnn_conv2d.args[QConv2DArgs.WEIGHTS_ZERO_POINT.value],
        )

        self.biases = TensorParams(
            bias_add.args[BiasAddArgs.BIASES.value],
            data_layout,
            requantize_op.args[RequantArgs.IFM_SCALE.value],
            requantize_op.args[RequantArgs.IFM_ZERO_POINT.value],
        )
        # When padding is a separate nn.pad, the true IFM is the pad's input.
        ifm_tensor = (
            separate_padding.args[0] if separate_padding else qnn_conv2d.args[QConv2DArgs.IFM.value]
        )
        self.ifm = TensorParams(
            ifm_tensor,
            data_layout,
            qnn_conv2d.args[QConv2DArgs.IFM_SCALE.value],
            qnn_conv2d.args[QConv2DArgs.IFM_ZERO_POINT.value],
        )
        # OFM expression is the whole composite body (includes any fused
        # activation); its quantization comes from the requantize op.
        self.ofm = TensorParams(
            func_body,
            data_layout,
            requantize_op.args[RequantArgs.OFM_SCALE.value],
            requantize_op.args[RequantArgs.OFM_ZERO_POINT.value],
        )
        attrs = qnn_conv2d.attrs
        # The NPU pads with the IFM zero point, so a separate nn.pad can only
        # be folded in when it pads with that same value.
        pad_value = int(qnn_conv2d.args[QConv2DArgs.IFM_ZERO_POINT.value].data.asnumpy())
        self.padding = self.extract_padding(attrs.padding, separate_padding, pad_value)

        self.strides = attrs.strides
        self.dilation = attrs.dilation
        self.activation = activation
        self.channels = attrs.channels

        # If groups are equal to channel, its a depthwise_conv2d
        self.groups = attrs.groups
        self.is_depthwise = False
        channels_axis = {"HWIO": 3, "HWOI": 2}
        if self.groups == self.weights.shape[channels_axis[self.kernel_layout]]:
            self.is_depthwise = True

    @staticmethod
    def extract_padding(
        operator_padding: Tuple[int, int, int, int],
        separate_padding: relay.Call,
        pad_value: int,
    ) -> Optional[Tuple[int, int, int, int]]:
        """
        Convolution operations can sometimes have padding represented as a separate
        padding operation before the convolution operation itself. Here we can check
        whether these representations can be combined into a single padding attribute
        as part of the NPU convolution itself. If the padding specified by the separate
        nn.pad operation is not supported, None will be returned. This will cause the
        nn.pad to be offloaded separately.
        """
        if separate_padding is None:
            return operator_padding
        # Only a pad using the convolution's IFM zero point can be absorbed.
        if pad_value != int(separate_padding.args[1].data.asnumpy()):
            return None
        pad_width = separate_padding.attrs["pad_width"]
        if len(pad_width) != 4:
            return None
        # No padding is allowed on the batch (N) or channel (C) axes of NHWC.
        if list(pad_width[0]) != [0, 0] or list(pad_width[3]) != [0, 0]:
            return None

        # Combine the nn.pad H/W padding with the operator's own padding.
        top, left, bottom, right = operator_padding
        return [
            top + pad_width[1][0],
            left + pad_width[2][0],
            bottom + pad_width[1][1],
            right + pad_width[2][1],
        ]

    def is_valid(self) -> bool:
        """
        This function checks whether QnnConv2D has compatible attributes with the NPU
        """
        tensor_params = [self.weights, self.ifm, self.ofm]
        if not check_valid_dtypes(tensor_params, supported_dtypes=[np.uint8, np.int8]):
            return False
        if not check_weights(self.weights, self.dilation):
            return False
        if not check_bias(self.biases):
            return False
        if not check_strides(self.strides):
            return False
        if not check_batch_size(self.ifm):
            return False
        if not check_dilation(self.dilation):
            return False
        # self.padding is None when a separate nn.pad could not be folded in.
        if not self.padding or not check_padding(self.padding, self.padding_bounds):
            return False
        legal_groups = [1, self.ofm.shape[3]]
        if self.groups not in legal_groups:
            return False
        # This should be a valid QnnDepthwiseConv2DParams, not QnnConv2DParams
        return not self.is_depthwise
class QnnConv2DTransposeParams:
    """
    This class will parse a Call to a ethosu.qnn_conv2d_transpose composite
    function and extract quantization information of all the associated tensors.
    """

    composite_name = "ethos-u.qnn_conv2d_transpose"
    # The NPU only supports padding upto the numbers as follows
    padding_bounds = [31, 31, 32, 32]

    @requires_vela
    def __init__(self, func_body: tvm.relay.Function):
        # Expected composite body, outermost first:
        #   qnn.requantize -> [nn.bias_add] -> qnn.conv2d_transpose
        from tvm.relay.backend.contrib.ethosu.util import (
            QConv2DTransposeArgs,  # type: ignore
        )
        from tvm.relay.backend.contrib.ethosu.util import BiasAddArgs, RequantArgs

        requantize = func_body
        call = func_body.args[0]
        # The bias_add is optional in this composite.
        if str(call.op.name) == "nn.bias_add":
            bias_add = call
            call = call.args[0]
        else:
            bias_add = None
        qnn_conv2d_transpose = call

        data_layout = qnn_conv2d_transpose.attrs.data_layout
        self.kernel_layout = qnn_conv2d_transpose.attrs.kernel_layout

        self.weights = TensorParams(
            qnn_conv2d_transpose.args[QConv2DTransposeArgs.WEIGHTS.value],
            self.kernel_layout,
            qnn_conv2d_transpose.args[QConv2DTransposeArgs.WEIGHTS_SCALE.value],
            qnn_conv2d_transpose.args[QConv2DTransposeArgs.WEIGHTS_ZERO_POINT.value],
        )
        # Biases only exist when the optional bias_add is present.
        self.biases = (
            TensorParams(
                bias_add.args[BiasAddArgs.BIASES.value],
                data_layout,
                requantize.args[RequantArgs.IFM_SCALE.value],
                requantize.args[RequantArgs.IFM_ZERO_POINT.value],
            )
            if bias_add
            else None
        )
        self.ifm = TensorParams(
            qnn_conv2d_transpose.args[QConv2DTransposeArgs.IFM.value],
            data_layout,
            qnn_conv2d_transpose.args[QConv2DTransposeArgs.IFM_SCALE.value],
            qnn_conv2d_transpose.args[QConv2DTransposeArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            func_body,
            data_layout,
            requantize.args[RequantArgs.OFM_SCALE.value],
            requantize.args[RequantArgs.OFM_ZERO_POINT.value],
        )

        attrs = qnn_conv2d_transpose.attrs
        self.strides = attrs.strides
        self.dilation = attrs.dilation
        self.padding = attrs.padding
        self.channels = attrs.channels
        self.groups = attrs.groups
        self.output_padding = attrs.output_padding

        # Spatial kernel size (H, W) per supported kernel layout.
        kernel_size_map = {
            "IOHW": self.weights.shape[2:4],
        }
        self.kernel_shape = kernel_size_map[str(self.weights.layout)]

        # Different padding is used in the legalization from conv2d_transpose
        # to conv2d, so we calculate it here to check that the new size fits
        # within the bounds of the NPU before offloading.
        pad_top = int(self.kernel_shape[0]) - 1 - int(self.padding[0])
        pad_left = int(self.kernel_shape[1]) - 1 - int(self.padding[1])
        pad_bottom = int(self.kernel_shape[0]) - 1 - int(self.padding[2])
        pad_right = int(self.kernel_shape[1]) - 1 - int(self.padding[3])
        if self.strides == [2, 2]:
            pad_bottom -= 1
            pad_right -= 1
        self.legalize_padding = [pad_top, pad_left, pad_bottom, pad_right]

    def is_valid(self) -> bool:
        """
        This function checks whether QnnConv2D has compatible attributes with the NPU
        """

        def check_compatible_output_size(ifm_shape, ofm_shape, padding, strides, kernel_shape):
            # The OFM size implied by the IFM size, strides and (for zero
            # padding) kernel must match the actual OFM for the NPU lowering.
            is_valid_padding = padding == [0, 0, 0, 0]
            if is_valid_padding:
                expected_height = ifm_shape[1] * strides[0] + (kernel_shape[0] - strides[0])
                expected_width = ifm_shape[2] * strides[1] + (kernel_shape[1] - strides[1])
            else:
                expected_height = ifm_shape[1] * strides[0]
                expected_width = ifm_shape[2] * strides[1]
            return ofm_shape[1] == expected_height and ofm_shape[2] == expected_width

        tensor_params = [self.weights, self.ifm, self.ofm]
        if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):
            return False
        if not check_weights(self.weights, self.dilation):
            return False
        if self.biases and not check_bias(self.biases):
            return False
        # Only stride 2x2 transpose convolutions are supported.
        if not check_strides(self.strides, stride_range=(2, 2)):
            return False
        if not check_batch_size(self.ifm):
            return False
        if not check_dilation(self.dilation, dilation_range=(1, 1)):
            return False
        if not check_compatible_output_size(
            self.ifm.shape,
            self.ofm.shape,
            [int(x) for x in self.padding],
            self.strides,
            self.kernel_shape,
        ):
            return False
        if not check_padding(self.legalize_padding, self.padding_bounds):
            return False
        # The legalized conv2d kernel must still cover the padded region.
        if self.kernel_shape[0] - 2 - int(self.padding[2]) < 0:
            return False
        if self.kernel_shape[1] - 2 - int(self.padding[3]) < 0:
            return False
        if self.groups != 1:
            return False
        if list(self.output_padding) != [0, 0]:
            return False
        return True
class QnnDepthwiseConv2DParams(QnnConv2DParams):
    """
    This class will parse a call to a ethosu.depthwise_conv2d composite function
    and extract the parameter information.
    """

    composite_name = "ethos-u.depthwise_conv2d"
    # The hardware only supports padding upto the numbers as follows
    padding_bounds = [31, 31, 32, 32]

    def __init__(self, func_body: tvm.relay.expr.Call):
        super().__init__(func_body)

    def is_valid(self):
        """
        Checks whether QnnDepthwiseConv2D + activation function has compatible attributes with HW
        """
        # The `and` chain short-circuits exactly like the original guard list,
        # so later checks (which may touch weight values) are skipped as soon
        # as one fails.
        return bool(
            check_valid_dtypes(
                [self.weights, self.ifm, self.ofm], supported_dtypes=[np.uint8, np.int8]
            )
            and check_weights(self.weights, self.dilation)
            and check_bias(self.biases)
            and check_strides(self.strides)
            and check_batch_size(self.ifm)
            and check_dilation(self.dilation)
            and bool(self.padding)
            and check_padding(self.padding, self.padding_bounds)
            and self.weights.layout == "HWOI"
            # only depth multiplier of size 1 is supported
            and self.weights.shape[3] == 1
            and self.is_depthwise
        )
def qnn_conv2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    This function creates the pattern for qnn.conv2D with optional fused RELU activation.
    """
    # The IFM may optionally be fed through a separate nn.pad.
    pad = is_op("nn.pad")(wildcard(), is_constant())
    conv = is_op("qnn.conv2d")(
        pad | wildcard(),
        is_constant(),
        is_constant(),
        is_constant(),
        is_constant(),
        is_constant(),
    ).has_attr({"kernel_layout": "HWIO"})
    biased = is_op("nn.bias_add")(conv, is_constant())
    requantized = is_op("qnn.requantize")(
        biased, is_constant(), is_constant(), is_constant(), is_constant()
    )
    return requantized.optional(is_op("clip"))
def qnn_depthwise_conv2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    This function creates the pattern for depthwise qnn.conv2D with optional fused RELU activation.
    """
    # The IFM may optionally be fed through a separate nn.pad.
    pad = is_op("nn.pad")(wildcard(), is_constant())
    conv = is_op("qnn.conv2d")(
        pad | wildcard(),
        is_constant(),
        is_constant(),
        is_constant(),
        is_constant(),
        is_constant(),
    ).has_attr({"kernel_layout": "HWOI"})
    biased = is_op("nn.bias_add")(conv, is_constant())
    requantized = is_op("qnn.requantize")(
        biased, is_constant(), is_constant(), is_constant(), is_constant()
    )
    return requantized.optional(is_op("clip"))
def qnn_conv2d_transpose_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    This function creates the pattern for qnn.conv2d_transpose.
    """
    conv_t = is_op("qnn.conv2d_transpose")(
        wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
    ).has_attr({"kernel_layout": "IOHW"})
    # The bias_add is optional.
    with_bias = is_op("nn.bias_add")(conv_t, is_constant())
    return is_op("qnn.requantize")(
        with_bias | conv_t, is_constant(), is_constant(), is_constant(), is_constant()
    )
class MaxPool2DParams:
    """
    This class will parse a call to a ethos-u.maxpool2d composite function
    and extract the parameter information.
    """

    composite_name = "ethos-u.maxpool2d"
    # The hardware only supports padding upto the numbers as follows
    padding_bounds = [127, 127, 128, 128]

    def __init__(self, func_body: Call):
        # Composite body: [clip] -> nn.max_pool2d
        if str(func_body.op.name) == "clip":
            clip = func_body
            pool_op = clip.args[0]
        else:
            clip = None
            pool_op = func_body

        attrs = pool_op.attrs
        self.ifm = TensorParams(pool_op.args[0], attrs.layout)
        self.ofm = TensorParams(pool_op, attrs.layout)
        self.pool_shape = attrs.pool_size
        self.strides = attrs.strides
        self.padding = attrs.padding
        self.activation = clip
        self.pooling_type = "MAX"

    def is_valid(self):
        """
        This function checks whether MaxPool2D has compatible attributes with the NPU
        """
        if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.uint8, np.int8]):
            return False
        if self.ifm.dtype != self.ofm.dtype:
            return False
        # Either the strides are in range, or the whole-IFM pooling special
        # case applies (pool window equals the IFM spatial dims, no padding).
        strides_ok = check_strides(self.strides) or check_same_ifm_and_kernel_shape(
            self.padding, self.ifm.shape, self.pool_shape
        )
        if not strides_ok:
            return False
        if not check_batch_size(self.ifm):
            return False
        if not check_padding(self.padding, self.padding_bounds):
            return False
        return check_pool_shape(self.pool_shape)
def qnn_maxpool2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    This function creates the pattern for nn.max_pool2d with optional fused RELU activation.
    """
    max_pool = is_op("nn.max_pool2d")(wildcard())
    return max_pool.optional(is_op("clip"))
class AvgPool2DParams:
    """
    This class will parse a call to a ethos-u.avgpool2d composite function
    and extract the parameter information.
    """

    composite_name = "ethos-u.avgpool2d"
    # The hardware only supports padding upto the numbers as follows
    padding_bounds = [3, 3, 4, 4]

    def __init__(self, func_body: Call):
        # Composite body: [clip] -> cast -> nn.avg_pool2d -> cast
        clip = None
        if str(func_body.op.name) == "clip":
            clip = func_body
            cast2 = clip.args[0]
        else:
            cast2 = func_body
        avgpool = cast2.args[0]
        cast1 = avgpool.args[0]

        attrs = avgpool.attrs
        # IFM is the input of the leading cast; OFM is the trailing cast
        # (back in the quantized dtype), not the avg_pool itself.
        self.ifm = TensorParams(cast1.args[0], attrs.layout)
        self.ofm = TensorParams(cast2, attrs.layout)
        self.pool_shape = attrs.pool_size
        self.strides = attrs.strides
        self.padding = attrs.padding
        self.count_include_pad = attrs.count_include_pad
        self.activation = clip
        self.pooling_type = "AVG"

    def is_valid(self):
        """
        This function checks whether AvgPool2D has compatible attributes with the NPU
        """
        tensor_params = [self.ifm, self.ofm]
        if not check_valid_dtypes(tensor_params, supported_dtypes=[np.uint8, np.int8]):
            return False
        if self.ifm.dtype != self.ofm.dtype:
            return False
        # Either the strides are in range, or the whole-IFM pooling special
        # case applies (pool window equals the IFM spatial dims, no padding).
        if not check_strides(self.strides) and not check_same_ifm_and_kernel_shape(
            self.padding, self.ifm.shape, self.pool_shape
        ):
            return False
        if not check_batch_size(self.ifm):
            return False
        # The NPU average divisor never includes padded values.
        if self.count_include_pad:
            return False
        if not check_padding(self.padding, self.padding_bounds):
            return False
        if not check_pool_shape(self.pool_shape):
            return False
        # Average pool with padding only supports 1 <= pool_shape <= 8
        if list(self.padding) != [0, 0, 0, 0] and (
            self.pool_shape[0] > 8 or self.pool_shape[1] > 8
        ):
            return False
        return True
def qnn_avgpool2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    This function creates the pattern for nn.avg_pool2d with optional fused RELU activation.
    """
    # Pooling happens in a wider dtype, hence the surrounding casts.
    cast_in = is_op("cast")(wildcard())
    pooled = is_op("nn.avg_pool2d")(cast_in)
    cast_out = is_op("cast")(pooled)
    return cast_out.optional(is_op("clip"))
class BinaryElementwiseParams:
    """
    This class will parse a call to a ethosu.binary_elementwise composite function
    and extract the parameter information.
    """

    def __init__(self, func_body: Call, operator_type: str, is_quantized_operation: bool):
        from tvm.relay.backend.contrib.ethosu.util import (
            BinaryElementwiseArgs,
            RequantArgs,
        )

        current_call = func_body
        clip = None
        requantize = None

        # Peel an optional trailing clip, or a requantize(clip(...)) pair, to
        # reach the underlying binary operator.
        if str(current_call.op.name) == "clip":
            clip = current_call
            current_call = clip.args[0]
        elif str(current_call.op.name) == "qnn.requantize":
            requantize = current_call
            clip = current_call.args[0]
            current_call = clip.args[0]
        binary_op = current_call

        layout = "NHWC"

        if is_quantized_operation:
            # qnn ops carry their own scale/zero-point arguments.
            self.ifm = TensorParams(
                binary_op.args[BinaryElementwiseArgs.IFM.value],
                layout,
                binary_op.args[BinaryElementwiseArgs.IFM_SCALE.value],
                binary_op.args[BinaryElementwiseArgs.IFM_ZERO_POINT.value],
            )
            self.ifm2 = TensorParams(
                binary_op.args[BinaryElementwiseArgs.IFM2.value],
                layout,
                binary_op.args[BinaryElementwiseArgs.IFM2_SCALE.value],
                binary_op.args[BinaryElementwiseArgs.IFM2_ZERO_POINT.value],
            )
            self.ofm = TensorParams(
                binary_op,
                layout,
                binary_op.args[BinaryElementwiseArgs.OFM_SCALE.value],
                binary_op.args[BinaryElementwiseArgs.OFM_ZERO_POINT.value],
            )
        else:
            # Non-quantized ops take quantization info (if any) from the
            # surrounding qnn.requantize.
            self.ifm = TensorParams(
                binary_op.args[BinaryElementwiseArgs.IFM.value],
                layout,
                requantize.args[RequantArgs.IFM_SCALE.value] if requantize else None,
                requantize.args[RequantArgs.IFM_ZERO_POINT.value] if requantize else None,
            )
            self.ifm2 = TensorParams(
                binary_op.args[BinaryElementwiseArgs.IFM2.value],
                layout,
                requantize.args[RequantArgs.IFM_SCALE.value] if requantize else None,
                requantize.args[RequantArgs.IFM_ZERO_POINT.value] if requantize else None,
            )
            self.ofm = TensorParams(
                func_body,
                layout,
                requantize.args[RequantArgs.OFM_SCALE.value] if requantize else None,
                requantize.args[RequantArgs.OFM_ZERO_POINT.value] if requantize else None,
            )
        self.activation = clip
        self.operator_type = operator_type

        def can_broadcast(ifm, ifm2):
            # ifm2 may broadcast into ifm: ifm2 must not have more dims, and
            # no trailing dim of ifm may be 1 where ifm2's differs.
            if len(ifm.shape) < len(ifm2.shape):
                return False
            for m, n in zip(ifm.shape[::-1], ifm2.shape[::-1]):
                if m != n and m == 1:
                    return False
            return True

        if can_broadcast(self.ifm, self.ifm2):
            self.reversed_operands = False
            self.valid_broadcast = True
        elif can_broadcast(self.ifm2, self.ifm):
            # Swap so the broadcastable operand is always ifm2.
            self.reversed_operands = True
            self.ifm, self.ifm2 = self.ifm2, self.ifm
            self.valid_broadcast = True
        else:
            self.valid_broadcast = False

    def is_valid(self):
        """
        This function checks whether BinaryElementwise has compatible attributes with the NPU
        """
        # Compare the tensor's dtype, not the TensorParams object itself:
        # np.dtype(<TensorParams>) raises TypeError.
        if np.dtype(self.ofm.dtype) == np.int32 and self.activation is not None:
            return False
        # Due to identity operator requiring ofm != int32 for now
        if np.dtype(self.ofm.dtype) == np.int32 and len(self.ofm.shape) < 4:
            return False
        if len(self.ifm.shape) > 4 or len(self.ifm2.shape) > 4:
            return False
        # 4D inputs must have a batch size of 1.
        if len(self.ifm.shape) == 4 and self.ifm.shape[0] != 1:
            return False
        if len(self.ifm2.shape) == 4 and self.ifm2.shape[0] != 1:
            return False
        if not self.valid_broadcast:
            return False
        return True
class AddParams(BinaryElementwiseParams):
    """
    This class will parse a call to a ethosu.binary_elementwise Add composite function
    and extract the parameter information.
    """

    composite_name = "ethos-u.add"

    def __init__(self, func_body: Call):
        super().__init__(func_body, "ADD", True)

    def is_valid(self):
        """
        This function checks whether Add has compatible attributes with the NPU
        """
        return super().is_valid() and check_valid_dtypes(
            [self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.uint8, np.int8, np.int32]
        )
def qnn_add_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    This function creates the pattern for qnn.add with optional fused RELU activation.
    """
    # Two data operands followed by six quantization constants
    # (lhs/rhs/output scale and zero point).
    quant_params = [is_constant() for _ in range(6)]
    add = is_op("qnn.add")(wildcard(), wildcard(), *quant_params)
    return add.optional(is_op("clip"))
class SubParams(BinaryElementwiseParams):
    """
    This class will parse a call to a ethosu.binary_elementwise Sub composite function
    and extract the parameter information.
    """

    composite_name = "ethos-u.sub"

    def __init__(self, func_body: Call):
        super().__init__(func_body, "SUB", True)

    def is_valid(self):
        """
        This function checks whether Sub has compatible attributes with the NPU
        """
        return super().is_valid() and check_valid_dtypes(
            [self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.uint8, np.int8, np.int32]
        )
def qnn_subtract_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """
    This function creates the pattern for qnn.subtract with optional fused RELU activation.
    """
    # Two data operands followed by six quantization constants.
    quant_params = [is_constant() for _ in range(6)]
    sub = is_op("qnn.subtract")(wildcard(), wildcard(), *quant_params)
    return sub.optional(is_op("clip"))
class MulParams(BinaryElementwiseParams):
    """
    This class will parse a call to a ethosu.binary_elementwise Mul composite function
    and extract the parameter information.
    """

    composite_name = "ethos-u.mul"

    def __init__(self, func_body: Call):
        super().__init__(func_body, "MUL", True)

    def is_valid(self):
        """
        This function checks whether Mul has compatible attributes with the NPU
        """
        return super().is_valid() and check_valid_dtypes(
            [self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.uint8, np.int8, np.int32]
        )
def qnn_mul_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Match qnn.mul (two data inputs plus six quantization constants),
    optionally followed by a fused clip (RELU) activation."""
    qnn_params = [is_constant() for _ in range(6)]
    mul = is_op("qnn.mul")(wildcard(), wildcard(), *qnn_params)
    return mul.optional(is_op("clip"))
class MinParams(BinaryElementwiseParams):
    """
    Parses a call to an ethosu.binary_elementwise Min composite function
    and extracts the parameter information.
    """

    composite_name = "ethos-u.min"

    def __init__(self, func_body: Call):
        super().__init__(func_body, "MIN", False)

    def is_valid(self):
        """Check whether Min has attributes compatible with the NPU."""
        if not super().is_valid():
            return False
        if self.ifm.dtype != self.ifm2.dtype:
            return False
        tensors = [self.ifm, self.ifm2, self.ofm]
        if not check_valid_dtypes(tensors, supported_dtypes=[np.uint8, np.int8]):
            return False
        # MIN with different scales is not supported on the NPU
        # (see the NPU_SET_OFM_SCALE register description:
        # https://developer.arm.com/documentation/102420/0200/Programmers-model/Command-stream/cmd1-commands-).
        if self.ifm.q_params.scale_f32 != self.ofm.q_params.scale_f32:
            return False
        return True
# This pattern is for case when there are different scales for requantize and
# minimum + clip + qnn.requantize can't be offloaded to NPU by one operation
# due to hardware constraints.
# It's offloaded by two operations ethosu_binary_elementwise + ethosu_identity.
def minimum_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Match minimum, with or without a fused RELU (clip) activation, and
    without a trailing requantize."""
    base = is_op("minimum")(wildcard(), wildcard())
    clipped = is_op("clip")(base)
    return base | clipped
def minimum_clip_requantize_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Match minimum followed by a fused RELU (clip) and a qnn.requantize."""
    clipped_min = is_op("clip")(is_op("minimum")(wildcard(), wildcard()))
    return is_op("qnn.requantize")(
        clipped_min, is_constant(), is_constant(), is_constant(), is_constant()
    )
class MaxParams(BinaryElementwiseParams):
    """
    Parses a call to an ethosu.binary_elementwise Max composite function
    and extracts the parameter information.
    """

    composite_name = "ethos-u.max"

    def __init__(self, func_body: Call):
        super().__init__(func_body, "MAX", False)

    def is_valid(self):
        """Check whether Max has attributes compatible with the NPU."""
        if not super().is_valid():
            return False
        if self.ifm.dtype != self.ifm2.dtype:
            return False
        tensors = [self.ifm, self.ifm2, self.ofm]
        if not check_valid_dtypes(tensors, supported_dtypes=[np.uint8, np.int8]):
            return False
        # MAX with different scales is not supported on the NPU
        # (see the NPU_SET_OFM_SCALE register description:
        # https://developer.arm.com/documentation/102420/0200/Programmers-model/Command-stream/cmd1-commands-).
        if self.ifm.q_params.scale_f32 != self.ofm.q_params.scale_f32:
            return False
        return True
# This pattern is for case when there are different scales for requantize and
# maximum + clip + qnn.requantize can't be offloaded to NPU by one operation due to
# hardware constraints.
# It's offloaded by two operations ethosu_binary_elementwise + ethosu_identity.
def maximum_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Match maximum, with or without a fused RELU (clip) activation, and
    without a trailing requantize."""
    base = is_op("maximum")(wildcard(), wildcard())
    clipped = is_op("clip")(base)
    return base | clipped
def maximum_clip_requantize_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Match maximum followed by a fused RELU (clip) and a qnn.requantize."""
    clipped_max = is_op("clip")(is_op("maximum")(wildcard(), wildcard()))
    return is_op("qnn.requantize")(
        clipped_max, is_constant(), is_constant(), is_constant(), is_constant()
    )
class ShlParams(BinaryElementwiseParams):
    """
    Parses a call to an ethosu.binary_elementwise Shl composite function
    and extracts the parameter information.
    """

    composite_name = "ethos-u.shl"

    def __init__(self, func_body: Call):
        super().__init__(func_body, "SHL", False)

    def is_valid(self):
        """Check whether Shl has attributes compatible with the NPU."""
        if not super().is_valid():
            return False
        # Both operands and the result must be int32
        tensors = [self.ifm, self.ifm2, self.ofm]
        if not check_valid_dtypes(tensors, supported_dtypes=[np.int32]):
            return False
        return True
def shl_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Match left_shift, optionally followed by a fused clip (RELU) activation."""
    shift = is_op("left_shift")(wildcard(), wildcard())
    return shift.optional(is_op("clip"))
class ReshapeParams:
    """
    Parses a call to an ethosu.reshape composite function and extracts
    the parameter information.
    """

    composite_name = "ethos-u.reshape"

    def __init__(self, func_body: Call):
        self.new_shape = func_body.attrs.newshape
        self.ifm = TensorParams(func_body.args[0])
        self.ofm = TensorParams(func_body)

    def is_valid(self):
        """Check whether reshape has attributes compatible with the NPU."""
        for tensor in (self.ifm, self.ofm):
            if not check_dimensions(tensor):
                return False
        if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]):
            return False
        return True
def reshape_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create pattern for reshape"""
    return is_op("reshape")(wildcard())
class StridedSliceParams:
    """
    This class will parse a call to a ethosu.strided_slice composite function
    and extract the parameter information.
    """

    composite_name = "ethos-u.strided_slice"

    def __init__(self, func_body: Call):
        self.ifm = TensorParams(func_body.args[0])
        self.ofm = TensorParams(func_body)

        attrs = func_body.attrs
        # The indices where we begin the slice
        self.begin = attrs.begin
        # The indices where we end the slice
        self.end = attrs.end
        self.strides = attrs.strides
        self.axes = attrs.axes
        self.slice_mode = attrs.slice_mode

    def is_valid(self):
        """
        This function checks whether strided_slice has compatible attributes
        with the NPU.
        """
        if not check_dimensions(self.ifm) or not check_dimensions(self.ofm):
            return False
        if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]):
            return False
        # begin and end must describe the same number of axes
        if len(self.begin) != len(self.end):
            return False
        # A slice must not start after it ends
        if any(begin_idx > end_idx for begin_idx, end_idx in zip(self.begin, self.end)):
            return False
        # Only strides of 1 are supported
        if self.strides and not all(i == 1 for i in self.strides):
            return False
        return True
def strided_slice_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create pattern for strided_slice"""
    return is_op("strided_slice")(wildcard())
class AbsParams:
    """
    Parses a call to an ethosu.unary_elementwise Abs composite function
    and extracts the parameter information.
    """

    composite_name = "ethos-u.abs"

    def __init__(self, func_body: Call):
        from tvm.relay.backend.contrib.ethosu.util import DequantizeArgs, QuantizeArgs

        layout = "NHWC"
        # Pattern shape: qnn.quantize(abs(qnn.dequantize(ifm)))
        quant = func_body
        dequant = quant.args[0].args[0]
        self.ifm = TensorParams(
            dequant.args[DequantizeArgs.IFM.value],
            layout,
            dequant.args[DequantizeArgs.IFM_SCALE.value],
            dequant.args[DequantizeArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            quant,
            layout,
            quant.args[QuantizeArgs.OFM_SCALE.value],
            quant.args[QuantizeArgs.OFM_ZERO_POINT.value],
        )
        self.operator_type = "ABS"
        self.activation = None

    def is_valid(self):
        """Check whether Abs has attributes compatible with the hardware."""
        if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8, np.uint8]):
            return False
        if self.ifm.dtype != self.ofm.dtype:
            return False
        if not check_dimensions(self.ifm):
            return False
        # Only a unit batch dimension is supported for 4D inputs
        if len(self.ifm.shape) == 4 and self.ifm.shape[0] != 1:
            return False
        if self.ifm.shape != self.ofm.shape:
            return False
        return True
def abs_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Match qnn.quantize(abs(qnn.dequantize(ifm)))."""
    dequant = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
    abs_op = is_op("abs")(dequant)
    return is_op("qnn.quantize")(abs_op, is_constant(), is_constant())
class LutActivationParams:
    """
    Parent class for LUT-based activation functions; extracts the input and
    output tensors and checks whether they are valid.
    """

    def __init__(self, func_body: Call):
        from tvm.relay.backend.contrib.ethosu.util import DequantizeArgs, QuantizeArgs

        layout = "NHWC"
        # Pattern shape: qnn.quantize(activation(qnn.dequantize(ifm)))
        quant = func_body
        dequant = quant.args[0].args[0]
        self.ifm = TensorParams(
            dequant.args[0],
            layout=layout,
            scale=dequant.args[DequantizeArgs.IFM_SCALE.value],
            zero_point=dequant.args[DequantizeArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            quant,
            layout=layout,
            scale=quant.args[QuantizeArgs.OFM_SCALE.value],
            zero_point=quant.args[QuantizeArgs.OFM_ZERO_POINT.value],
        )

    def is_valid(self):
        """Check whether the activation has attributes compatible with the NPU."""
        if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]):
            return False
        return True
class TanhParams(LutActivationParams):
    """
    This class will parse a call to a ethos-u.tanh composite function
    and extract the parameter information.
    """

    # Parameter extraction and validation are inherited from LutActivationParams.
    composite_name = "ethos-u.tanh"
def tanh_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create pattern for tanh"""
    inner = is_op("tanh")(is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant()))
    return is_op("qnn.quantize")(inner, is_constant(), is_constant())
class SigmoidParams(LutActivationParams):
    """
    This class will parse a call to a ethos-u.sigmoid composite function
    and extract the parameter information.
    """

    # Parameter extraction and validation are inherited from LutActivationParams.
    composite_name = "ethos-u.sigmoid"
def sigmoid_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create pattern for sigmoid"""
    inner = is_op("sigmoid")(is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant()))
    return is_op("qnn.quantize")(inner, is_constant(), is_constant())
class LeakyReLUParams(LutActivationParams):
    """
    Parses a call to an ethos-u.leaky_relu composite function and extracts
    the parameter information.
    """

    composite_name = "ethos-u.leaky_relu"

    def __init__(self, func_body: Call):
        super().__init__(func_body)
        # func_body is the quantize call; its first arg is nn.leaky_relu
        leaky_relu_call = func_body.args[0]
        self.alpha = leaky_relu_call.attrs.alpha
def leaky_relu_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Match qnn.quantize(nn.leaky_relu(qnn.dequantize(ifm)))."""
    dequant = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
    activation = is_op("nn.leaky_relu")(dequant)
    return is_op("qnn.quantize")(activation, is_constant(), is_constant())
class MeanParams:
    """
    This class will parse a call to ethosu.mean composite function
    and extract the parameter information.
    """

    composite_name = "ethos-u.mean"

    def __init__(self, func_body: Call):
        """Extract IFM/OFM tensors and reduction attributes from the matched
        qnn.requantize(mean(cast(ifm))) pattern."""
        from tvm.relay.backend.contrib.ethosu.util import RequantArgs

        requantize = func_body
        mean_op = requantize.args[0]
        attrs = mean_op.attrs
        cast = mean_op.args[0]
        layout = "NHWC"
        self.ifm = TensorParams(
            cast.args[0],
            layout,
            requantize.args[RequantArgs.IFM_SCALE.value],
            requantize.args[RequantArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            requantize,
            layout,
            requantize.args[RequantArgs.OFM_SCALE.value],
            requantize.args[RequantArgs.OFM_ZERO_POINT.value],
        )
        ifm_shape = self.ifm.shape
        # Spatial dims: 2D/3D shapes are treated as (H, W[, C]);
        # 4D shapes as (N, H, W, C)
        self.height = ifm_shape[0] if len(ifm_shape) in (2, 3) else ifm_shape[1]
        self.width = ifm_shape[1] if len(ifm_shape) in (2, 3) else ifm_shape[2]
        self.keepdims = attrs.keepdims
        self.axis = list(sorted(attrs.axis))
        if attrs.exclude:
            # 'exclude' inverts the axis selection: reduce over all the other axes
            self.axis = [i for i in range(len(self.ifm.shape)) if i not in self.axis]

    def is_valid(self) -> bool:
        """
        Checks whether Mean has compatible attributes with HW.
        """

        def check_axis(num_dims, axis):
            # The reduced axes must be the height/width axes for the given rank
            if num_dims in (2, 3):
                return axis in ([0], [1], [0, 1])
            return axis in ([1], [2], [1, 2])

        def check_single_axis_across_height(num_dims, axis):
            # NOTE: 'and' binds tighter than 'or', so this reads as
            # (num_dims in (2, 3) and axis == [0]) or axis == [1]
            return len(axis) == 1 and (num_dims in (2, 3) and axis == [0] or axis == [1])

        same_quantization = (
            self.ifm.q_params.scale_f32 == self.ofm.q_params.scale_f32
            and self.ifm.q_params.zero_point == self.ofm.q_params.zero_point
        )
        # IFM must be int8 or uint8
        if not check_valid_dtypes([self.ifm], [np.int8, np.uint8]):
            return False
        # OFM must be int8, uint8 or int16
        if not check_valid_dtypes([self.ofm], [np.int8, np.uint8, np.int16]):
            return False
        # Input tensor must be at least 2D
        if not len(self.ifm.shape) in [2, 3, 4]:
            return False
        # Axis indices must correspond to height and width axes
        if not check_axis(len(self.ifm.shape), self.axis):
            return False
        input_size = self.height * self.width
        # Product of height and width must be no greater than 65536
        if input_size > 65536:
            return False
        # Product of height and width must be no greater than 4096 when:
        # IFM and OFM have different scale or zero point; or
        # 'keep_dims' is True
        if input_size > 4096 and (not same_quantization or self.keepdims):
            return False
        # For single axis averages across the height dimension:
        if check_single_axis_across_height(len(self.ifm.shape), self.axis):
            # IFM height must be no greater than 256 if the IFM and OFM scale and zero point match
            if self.height > 256 and same_quantization:
                return False
            # IFM height must be no greater than 64 if the IFM and OFM scale or zero point
            # do not match
            if self.height > 64 and not same_quantization:
                return False
        return True
def mean_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Match qnn.requantize(mean(cast(ifm)))."""
    reduced = is_op("mean")(is_op("cast")(wildcard()))
    quant_args = [is_constant() for _ in range(4)]
    return is_op("qnn.requantize")(reduced, *quant_args)
class SumParams:
    """
    This class will parse a call to ethosu.sum composite function
    and extract the parameter information.
    """

    composite_name = "ethos-u.sum"

    def __init__(self, func_body: Call):
        """Extract parameters from the matched
        (clip?)(qnn.requantize(sum(cast(ifm)))) pattern."""
        from tvm.relay.backend.contrib.ethosu.util import RequantArgs

        clip = None
        # The pattern may optionally end in a fused clip activation
        if str(func_body.op.name) == "clip":
            clip = func_body
            requantize = clip.args[0]
        else:
            requantize = func_body
        sum_op = requantize.args[0]
        attrs = sum_op.attrs
        cast = sum_op.args[0]
        layout = "NHWC"
        self.ifm = TensorParams(
            cast.args[0],
            layout,
            requantize.args[RequantArgs.IFM_SCALE.value],
            requantize.args[RequantArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            requantize,
            layout,
            requantize.args[RequantArgs.OFM_SCALE.value],
            requantize.args[RequantArgs.OFM_ZERO_POINT.value],
        )
        # The fused activation (clip) call, or None
        self.activation = clip
        ifm_shape = self.ifm.shape
        # Spatial dims: 2D/3D shapes are treated as (H, W[, C]);
        # 4D shapes as (N, H, W, C)
        self.height = ifm_shape[0] if len(ifm_shape) in (2, 3) else ifm_shape[1]
        self.width = ifm_shape[1] if len(ifm_shape) in (2, 3) else ifm_shape[2]
        self.keepdims = attrs.keepdims
        self.axis = list(sorted(attrs.axis))
        if attrs.exclude:
            # 'exclude' inverts the axis selection: reduce over all the other axes
            self.axis = [i for i in range(len(self.ifm.shape)) if i not in self.axis]

    def is_valid(self) -> bool:
        """
        Checks whether Sum has compatible attributes with HW.
        """
        ifm_shape_len = len(self.ifm.shape)
        if not check_valid_dtypes([self.ifm], [np.uint8, np.int8, np.int16, np.int32]):
            return False
        if not check_valid_dtypes([self.ofm], [np.int8]):
            return False
        # Only 3D or 4D inputs are supported
        if not ifm_shape_len in (3, 4):
            return False
        # Only a reduction over the innermost (channel) axis is supported
        if ifm_shape_len == 3 and self.axis not in [[2]]:
            return False
        if ifm_shape_len == 4 and self.axis not in [[3]]:
            return False
        return True
def sum_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Match qnn.requantize(sum(cast(ifm))) with an optional fused clip."""
    reduced = is_op("sum")(is_op("cast")(wildcard()))
    requant = is_op("qnn.requantize")(
        reduced, is_constant(), is_constant(), is_constant(), is_constant()
    )
    return requant.optional(is_op("clip"))
class ConcatParams:
    """
    This class will parse a call to a ethos-u.concat composite function
    and extract the parameter information.
    """

    composite_name = "ethos-u.concat"

    def __init__(self, func_body):
        """Extract parameters from a matched concatenate or qnn.concatenate call."""
        self.concat = func_body
        self.is_qnn_variant = self.concat.op.name == "qnn.concatenate"
        self.input_tensors = [TensorParams(tensor) for tensor in list(func_body.args[0])]
        self.axis = func_body.attrs.axis
        if self.is_qnn_variant:
            # qnn.concatenate carries per-input scales/zero points as tuple args
            self.input_scales = [s.data.asnumpy() for s in list(func_body.args[1])]
            self.input_zero_points = [zp.data.asnumpy() for zp in list(func_body.args[2])]

    def is_valid(self):
        """Checks whether Concatenate has compatible attributes with the hardware"""
        if not check_valid_dtypes(self.input_tensors, supported_dtypes=[np.int8]):
            return False
        # Check that the scales and zero points of input tensors are the same
        # (list == ndarray broadcasts elementwise here)
        if self.is_qnn_variant and not all(self.input_scales == self.input_scales[0]):
            return False
        if self.is_qnn_variant and not all(self.input_zero_points == self.input_zero_points[0]):
            return False
        # All inputs must have the same rank as the first input
        input_dim = len(self.input_tensors[0].shape)
        for tensor in self.input_tensors:
            if len(tensor.shape) != input_dim:
                return False
        # The concatenation axis must be an explicit, non-negative, in-range index
        if self.axis is None:
            return False
        if self.axis < 0:
            return False
        if self.axis >= input_dim:
            return False
        output_shape = self.concat.checked_type.shape
        if len(output_shape) != input_dim:
            return False
        # Only a unit batch dimension is supported for outputs of rank > 3
        if len(output_shape) > 3 and output_shape[0] != 1:
            return False
        return True
def concat_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create pattern for concat"""
    operands = is_tuple(None)
    qnn_variant = is_op("qnn.concatenate")(
        operands, is_tuple(None), is_tuple(None), is_constant(), is_constant()
    )
    plain_variant = is_op("concatenate")(operands)
    return plain_variant | qnn_variant
class SplitParams:
    """
    Parses a call to an ethos-u.split composite function and extracts
    the parameter information.
    """

    composite_name = "ethos-u.split"

    def __init__(self, func_body):
        self.split = func_body
        self.input = TensorParams(func_body.args[0])
        self.axis = func_body.attrs.axis
        self.indices_or_sections = self.convert_indices_or_sections(
            func_body.attrs.indices_or_sections
        )

    def convert_indices_or_sections(self, indices_or_sections):
        """Normalize the attribute to plain Python values: a list of split
        points (split_v) or a single section count (split)."""
        if isinstance(indices_or_sections, tvm.ir.container.Array):
            # split_v: an array of split indices
            return [i.value for i in indices_or_sections]
        # split: a single number of sections
        return indices_or_sections.value

    def is_valid(self):
        """Check whether split has attributes compatible with the hardware."""
        if not check_valid_dtypes([self.input], supported_dtypes=[np.int8]):
            return False
        return True
def split_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create the pattern for split"""
    return is_op("split")(wildcard())
class RequantizeParams:
    """
    Parses a call to an ethos-u.requantize composite function and extracts
    the parameter information.
    """

    composite_name = "ethos-u.requantize"

    def __init__(self, func_body: Call):
        from tvm.relay.backend.contrib.ethosu.util import RequantArgs

        layout = "NHWC"
        requantize = func_body
        self.ifm = TensorParams(
            requantize.args[0],
            layout=layout,
            scale=requantize.args[RequantArgs.IFM_SCALE.value],
            zero_point=requantize.args[RequantArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            requantize,
            layout=layout,
            scale=requantize.args[RequantArgs.OFM_SCALE.value],
            zero_point=requantize.args[RequantArgs.OFM_ZERO_POINT.value],
        )
        self.out_dtype = requantize.attrs.out_dtype

    def is_valid(self) -> bool:
        """Check whether qnn.requantize has attributes compatible with the hardware."""
        tensors = [self.ifm, self.ofm]
        if not check_valid_dtypes(tensors, supported_dtypes=[np.int8]):
            return False
        if not check_dimensions(self.ifm) or not check_dimensions(self.ofm):
            return False
        # Only int8 output (or an unset out_dtype) is supported
        if self.out_dtype and self.out_dtype != "int8":
            return False
        return True
def requantize_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Match a standalone qnn.requantize."""
    quant_args = [is_constant() for _ in range(4)]
    return is_op("qnn.requantize")(wildcard(), *quant_args)
class Resize2dParams:
    """
    This class will parse a call to ethos-u.resize2d composite function
    and extract the parameter information.
    """

    composite_name = "ethos-u.resize2d"

    def __init__(self, func_body: Call):
        """Extract parameters from image.resize2d, optionally wrapped as
        qnn.quantize(image.resize2d(qnn.dequantize(ifm)))."""
        layout = "NHWC"

        resize_2d = func_body
        in_var = func_body.args[0]
        # Unwrap the quantize/dequantize pair of the 'linear' variant, if present
        if (
            isinstance(resize_2d, tvm.relay.expr.Call)
            and isinstance(resize_2d.op, tvm.ir.Op)
            and resize_2d.op.name == "qnn.quantize"
        ):
            resize_2d = resize_2d.args[0]
            in_var = in_var.args[0].args[0]
        out_var = func_body

        self.ifm = TensorParams(in_var, layout=layout)
        self.ofm = TensorParams(out_var, layout=layout)

        attrs = resize_2d.attrs
        self.size = attrs.size
        self.method = attrs.method
        self.roi = attrs.roi
        self.coordinate_transformation_mode = attrs.coordinate_transformation_mode
        self.rounding_method = attrs.rounding_method
        self.out_dtype = attrs.out_dtype

    def is_valid(self) -> bool:
        """
        Checks whether image.resize2d has compatible attributes with HW.
        """

        def check_compatible_size(mode, method, upscale_size, ifm_size):
            """Checking the provided upscale_size is compatible with the NPU. The NPU only
            supports upsampling when the upsampling size is 2 * input_size, or when there is
            no upsampling to be done, so check that this is the case. In the special case of
            resize_bilinear with align_corners=True, the NPU only supports an upsampling
            size of 2 * input_size - 1."""
            delta = 1 if mode == "align_corners" and method == "linear" else 0
            upscale_size = np.array(upscale_size)
            ifm_size = np.array(ifm_size)
            ifm_upscaled = ifm_size * 2 - delta
            return (ifm_upscaled == upscale_size).all() or (ifm_size == upscale_size).all()

        tensor_params = [self.ifm, self.ofm]
        if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):
            return False
        if len(self.ifm.shape) != 4 or len(self.ofm.shape) != 4:
            return False
        # Non-trivial regions of interest are not supported
        if list(float(x) for x in self.roi) != [0.0] * 4:
            return False
        if self.method not in ("nearest_neighbor", "linear"):
            return False
        if self.coordinate_transformation_mode not in (
            "asymmetric",
            "align_corners",
            "half_pixel",
        ):
            return False
        # half_pixel requires round_prefer_ceil; every other mode requires an
        # unset rounding method. NOTE: 'and' binds tighter than 'or' below.
        if (
            self.coordinate_transformation_mode == "half_pixel"
            and self.rounding_method != "round_prefer_ceil"
            or self.coordinate_transformation_mode != "half_pixel"
            and self.rounding_method != ""
        ):
            return False
        if not check_compatible_size(
            self.coordinate_transformation_mode,
            self.method,
            self.size,
            self.ifm.shape[1:3],
        ):
            return False
        if self.out_dtype and self.out_dtype != "int8":
            return False
        return True
def resize2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Match image.resize2d: either a quantize/dequantize-wrapped 'linear'
    variant or a bare 'nearest_neighbor' variant."""
    dequant = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
    linear = is_op("image.resize2d")(dequant).has_attr({"method": "linear"})
    quantized_linear = is_op("qnn.quantize")(linear, is_constant(), is_constant())
    nearest = is_op("image.resize2d")(wildcard()).has_attr({"method": "nearest_neighbor"})
    return quantized_linear | nearest
class ExpandDimsParams:
    """
    Parses a call to an ethos-u.expand_dims composite function and extracts
    the parameter information.
    """

    composite_name = "ethos-u.expand_dims"

    def __init__(self, func_body):
        self.expand_dims = func_body
        self.input = TensorParams(func_body.args[0])
        self.output = TensorParams(func_body)

    def is_valid(self):
        """Check whether expand_dims has attributes compatible with the hardware."""
        for tensor in (self.input, self.output):
            if not check_dimensions(tensor):
                return False
        if not check_valid_dtypes([self.input, self.output], supported_dtypes=[np.int8]):
            return False
        return True
def expand_dims_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create the pattern for expand_dims."""
    pattern = is_op("expand_dims")(wildcard())
    return pattern
class SqueezeParams:
    """
    Parses a call to an ethos-u.squeeze composite function and extracts
    the parameter information.
    """

    composite_name = "ethos-u.squeeze"

    def __init__(self, func_body):
        self.squeeze = func_body
        self.input = TensorParams(func_body.args[0])
        self.output = TensorParams(func_body)

    def is_valid(self):
        """Check whether squeeze has attributes compatible with the hardware."""
        # Only the output dimensions are constrained here
        if not check_dimensions(self.output):
            return False
        if not check_valid_dtypes([self.input, self.output], supported_dtypes=[np.int8]):
            return False
        return True
def squeeze_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create the pattern for squeeze."""
    pattern = is_op("squeeze")(wildcard())
    return pattern
class FullyConnectedParams:
    """
    This class will parse a call to an ethos-u.fully_connected composite
    function and extract the parameter information.
    """

    composite_name = "ethos-u.fully_connected"

    @requires_vela
    def __init__(self, func_body):
        """Extract parameters from the matched
        (clip?)(qnn.requantize((nn.bias_add?)(qnn.dense(...)))) pattern."""
        from tvm.relay.backend.contrib.ethosu.util import QDenseArgs  # type: ignore
        from tvm.relay.backend.contrib.ethosu.util import BiasAddArgs, RequantArgs

        self.activation = None
        # The pattern may optionally end in a fused clip activation
        if str(func_body.op.name) == "clip":
            self.activation = func_body
            requantize_op = self.activation.args[0]
        else:
            requantize_op = func_body

        call = requantize_op.args[0]
        # The bias_add between dense and requantize is optional
        if str(requantize_op.args[0].op.name) == "nn.bias_add":
            bias_add = call
            qnn_dense = call.args[0]
        else:
            bias_add = None
            qnn_dense = call

        # weights & biases are params as they should be constant
        self.weights = TensorParams(
            qnn_dense.args[QDenseArgs.WEIGHTS.value],
            None,
            qnn_dense.args[QDenseArgs.WEIGHTS_SCALE.value],
            qnn_dense.args[QDenseArgs.WEIGHTS_ZERO_POINT.value],
        )
        self.biases = (
            TensorParams(
                bias_add.args[BiasAddArgs.BIASES.value],
                None,
                requantize_op.args[RequantArgs.IFM_SCALE.value],
                requantize_op.args[RequantArgs.IFM_ZERO_POINT.value],
            )
            if bias_add
            else None
        )
        self.ifm = TensorParams(
            qnn_dense.args[QDenseArgs.IFM.value],
            None,
            qnn_dense.args[QDenseArgs.IFM_SCALE.value],
            qnn_dense.args[QDenseArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            func_body,
            None,
            requantize_op.args[RequantArgs.OFM_SCALE.value],
            requantize_op.args[RequantArgs.OFM_ZERO_POINT.value],
        )

    def is_valid(self) -> bool:
        """
        Checks whether Fully Connected has compatible attributes with HW
        """

        def check_weights_fc(weights):
            """Checks whether weight tensor is compatible with HW"""
            weights_limit = 127 * 65536
            # A saturation upper bound check for accumulators
            # NOTE(review): this subtracts the zero point from weights.values
            # in place — validation has a side effect on the weights object.
            weights.values = weights.values - weights.q_params.zero_point
            axis = 1
            sum_weights = np.amax(np.sum(np.absolute(weights.values), axis=axis))
            if sum_weights > weights_limit:
                return False
            return True

        if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]):
            return False
        if not check_weights_fc(self.weights):
            return False
        if not check_bias(self.biases):
            return False
        if not check_batch_size(self.ifm):
            return False
        # Check input shape
        if not len(self.ifm.shape) == 2:
            return False
        # Check output shape
        if not len(self.ofm.shape) == 2:
            return False
        return True
def qnn_fc_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Match qnn.dense with optional bias_add, a requantize, and an
    optional fused clip activation."""
    dense = is_op("qnn.dense")(
        wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
    )
    bias = is_op("nn.bias_add")(dense, is_constant())
    requant = is_op("qnn.requantize")(
        dense | bias, is_constant(), is_constant(), is_constant(), is_constant()
    )
    return requant.optional(is_op("clip"))
class HardSwishParams:
    """
    This class will parse a call to a ethos-u.hard_swish composite function
    and extract the parameter information.
    """

    composite_name = "ethos-u.hard_swish"

    def __init__(self, func_body):
        """Walk the matched quantize(divide(multiply(dequant, clip(add)), c)) chain."""
        from tvm.relay.backend.contrib.ethosu.util import DequantizeArgs, QuantizeArgs

        quantize = func_body
        divide = quantize.args[0]
        multiply = divide.args[0]
        # multiply's args are (dequantize, clip) — see hard_swish_pattern
        clip = multiply.args[1]
        add = clip.args[0]
        dequantize = add.args[0]

        self.ifm = TensorParams(
            dequantize.args[0],
            scale=dequantize.args[DequantizeArgs.IFM_SCALE.value],
            zero_point=dequantize.args[DequantizeArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            quantize,
            scale=quantize.args[QuantizeArgs.OFM_SCALE.value],
            zero_point=quantize.args[QuantizeArgs.OFM_ZERO_POINT.value],
        )

    def is_valid(self):
        """Checks whether hard swish has compatible attributes with HW."""
        tensor_params = [self.ifm, self.ofm]
        if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):
            return False
        return True
def hard_swish_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create the pattern for hard swish:
    quantize(divide(multiply(dequant, clip(add(dequant, c))), c))."""
    dequant = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
    clipped_add = is_op("clip")(is_op("add")(dequant, is_constant()))
    scaled = is_op("divide")(is_op("multiply")(dequant, clipped_add), is_constant())
    return is_op("qnn.quantize")(scaled, is_constant(), is_constant())
class PadParams:
    """
    This class will parse a call to a ethosu.pad2d composite function
    and extract the parameter information.
    """

    composite_name = "ethos-u.pad2d"
    # The ethos-u.pad2d composite function will be transformed to the
    # ethosu_depthwise_conv2d operator.
    # For the ethosu_depthwise_conv2d the hardware only supports padding
    # upto the numbers as follows, so we define such padding limits
    padding_bounds = [31, 31, 32, 32]

    def __init__(self, func_body: Call):
        from tvm.relay.backend.contrib.ethosu.util import QPadArgs

        # there is no 'layout' attribute in nn.pad
        layout = "NHWC"
        self.ifm = TensorParams(
            tensor=func_body.args[QPadArgs.IFM.value],
            layout=layout,
            # nn.pad carries no quantization info: use a unit scale and the
            # pad value's zero point for both IFM and OFM
            scale=tvm.relay.Constant(tvm.nd.array(np.array(1.0, dtype="float32"))),
            zero_point=func_body.args[QPadArgs.IFM_ZERO_POINT.value],
        )

        self.padding = self.extract_padding(func_body)
        self.ofm = TensorParams(
            tensor=func_body,
            layout=layout,
            scale=tvm.relay.Constant(tvm.nd.array(np.array(1.0, dtype="float32"))),
            zero_point=func_body.args[QPadArgs.IFM_ZERO_POINT.value],
        )

    @staticmethod
    def extract_padding(
        padding: relay.Call,
    ) -> Optional[Tuple[int, int, int, int]]:
        """
        Here we check whether a separate spatial-dimension padding operation can be
        rewritten as NPU depthwise convolution. If the padding specified by the
        separate nn.pad operation is not supported by NPU depthwise convolution,
        None will be returned. This will cause the nn.pad not to be offloaded to NPU.
        """
        pad_width = padding.attrs["pad_width"]
        if len(pad_width) != 4:
            return None
        # Only spatial (H, W) padding is supported; the batch and channel
        # dimensions must be unpadded
        if list(pad_width[0]) != [0, 0] or list(pad_width[3]) != [0, 0]:
            return None
        # Returned as (top, left, bottom, right)
        return [
            pad_width[1][0],
            pad_width[2][0],
            pad_width[1][1],
            pad_width[2][1],
        ]

    def is_valid(self):
        """
        This function checks whether pad has compatible attributes
        with the NPU depthwise convolution
        """
        tensor_params = [self.ifm, self.ofm]
        if not check_valid_dtypes(tensor_params, supported_dtypes=[np.uint8, np.int8]):
            return False
        if self.ifm.dtype != self.ofm.dtype:
            return False
        if not check_batch_size(self.ifm):
            return False
        if not self.padding or not check_padding(self.padding, self.padding_bounds):
            return False
        if not check_dimensions(self.ifm) or not check_dimensions(self.ofm):
            return False
        return True
class ChannelPadParams:
    """
    This class will parse a call to a ethos-u.channel-pad composite function
    and extract the parameter information.
    """

    composite_name = "ethos-u.channel-pad"
    # The ethos-u.channel-pad composite function will be transformed
    # to the Relay concatenate operation.

    def __init__(self, func_body: Call):
        from tvm.relay.backend.contrib.ethosu.util import QPadArgs

        # there is no 'layout' attribute in nn.pad
        layout = "NHWC"
        self.ifm = TensorParams(
            tensor=func_body.args[QPadArgs.IFM.value],
            layout=layout,
            # nn.pad carries no quantization info: use a unit scale and the
            # pad value's zero point for both IFM and OFM
            scale=tvm.relay.Constant(tvm.nd.array(np.array(1.0, dtype="float32"))),
            zero_point=func_body.args[QPadArgs.IFM_ZERO_POINT.value],
        )

        self.ch_padding = self.extract_ch_padding(func_body)
        self.ofm = TensorParams(
            tensor=func_body,
            layout=layout,
            scale=tvm.relay.Constant(tvm.nd.array(np.array(1.0, dtype="float32"))),
            zero_point=func_body.args[QPadArgs.IFM_ZERO_POINT.value],
        )

    @staticmethod
    def extract_ch_padding(
        padding: relay.Call,
    ) -> Optional[Tuple[int, int]]:
        """
        Here we check whether a separate channel-dimension padding operation can be
        rewritten as Relay concatenate operation. If the padding specified by the
        separate nn.pad operation is not supported by NPU, None will be returned.
        This will cause the nn.pad not to be offloaded to NPU.
        """
        pad_width = padding.attrs["pad_width"]
        if len(pad_width) != 4:
            return None
        # Only channel padding is supported; batch and spatial (H, W)
        # dimensions must be unpadded
        if (
            list(pad_width[0]) != [0, 0]
            or list(pad_width[1]) != [0, 0]
            or list(pad_width[2]) != [0, 0]
        ):
            return None
        # Returned as (before, after) along the channel axis
        return [
            pad_width[3][0],
            pad_width[3][1],
        ]

    def is_valid(self):
        """
        This function checks whether pad has compatible attributes
        with the Relay concatenate operation
        """
        tensor_params = [self.ifm, self.ofm]
        if not check_valid_dtypes(tensor_params, supported_dtypes=[np.uint8, np.int8]):
            return False
        if self.ifm.dtype != self.ofm.dtype:
            return False
        if not check_batch_size(self.ifm):
            return False
        if not self.ch_padding:
            return False
        if not check_dimensions(self.ifm) or not check_dimensions(self.ofm):
            return False
        return True
def pad_pattern():
    """Create the dataflow pattern for nn.pad: any input, constant pad value."""
    pad_value = is_constant()
    return is_op("nn.pad")(wildcard(), pad_value)
class SoftMaxParams:
    """
    This class will parse a call to an ethos-u.softmax composite function
    and extract the parameter information.
    """

    composite_name = "ethos-u.softmax"

    def __init__(self, func_body: Call):
        from tvm.relay.backend.contrib.ethosu.util import DequantizeArgs, QuantizeArgs

        # The composite has the shape quantize(softmax(dequantize(ifm))).
        quantize_call = func_body
        dequantize_call = quantize_call.args[0].args[0]
        layout = "NHWC"
        self.ifm = TensorParams(
            dequantize_call.args[DequantizeArgs.IFM.value],
            layout,
            dequantize_call.args[DequantizeArgs.IFM_SCALE.value],
            dequantize_call.args[DequantizeArgs.IFM_ZERO_POINT.value],
        )
        self.ofm = TensorParams(
            quantize_call,
            layout,
            quantize_call.args[QuantizeArgs.OFM_SCALE.value],
            quantize_call.args[QuantizeArgs.OFM_ZERO_POINT.value],
        )
        self.operator_type = "SOFTMAX"

    def is_valid(self):
        """Checks whether Softmax has compatible attributes with HW"""
        if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]):
            return False
        return bool(
            self.ifm.dtype == self.ofm.dtype
            and check_dimensions(self.ifm)
            and self.ifm.shape == self.ofm.shape
        )
def softmax_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
    """Create the pattern for Softmax: qnn.quantize(nn.softmax(qnn.dequantize(x)))."""
    dequant = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
    softmax = is_op("nn.softmax")(dequant)
    return is_op("qnn.quantize")(softmax, is_constant(), is_constant())
@register_pattern_table("ethos-u")
def pattern_table() -> List[Tuple[str, tvm.relay.dataflow_pattern.DFPattern, Callable]]:
    """Return the (name, pattern, predicate) triples for every composite
    function supported by the ethos-u target.
    """

    def _entry(params_cls, pattern):
        # Each predicate parses the matched composite with its Params class and
        # asks it whether the extracted attributes are legal for the NPU.
        return (params_cls.composite_name, pattern, lambda pat: params_cls(pat).is_valid())

    # The ordering below is preserved from the original table; do not reorder,
    # as pattern-table order can affect which composite a subgraph matches
    # (e.g. the clip+requantize min/max variants are listed before the plain ones).
    return [
        _entry(ChannelPadParams, pad_pattern()),
        _entry(QnnConv2DParams, qnn_conv2d_pattern()),
        _entry(QnnDepthwiseConv2DParams, qnn_depthwise_conv2d_pattern()),
        _entry(QnnConv2DTransposeParams, qnn_conv2d_transpose_pattern()),
        _entry(FullyConnectedParams, qnn_fc_pattern()),
        _entry(MaxPool2DParams, qnn_maxpool2d_pattern()),
        _entry(AvgPool2DParams, qnn_avgpool2d_pattern()),
        _entry(PadParams, pad_pattern()),
        _entry(AddParams, qnn_add_pattern()),
        _entry(SubParams, qnn_subtract_pattern()),
        _entry(MulParams, qnn_mul_pattern()),
        _entry(MinParams, minimum_clip_requantize_pattern()),
        _entry(MinParams, minimum_pattern()),
        _entry(MaxParams, maximum_clip_requantize_pattern()),
        _entry(MaxParams, maximum_pattern()),
        _entry(ShlParams, shl_pattern()),
        _entry(ReshapeParams, reshape_pattern()),
        _entry(StridedSliceParams, strided_slice_pattern()),
        _entry(AbsParams, abs_pattern()),
        _entry(TanhParams, tanh_pattern()),
        _entry(MeanParams, mean_pattern()),
        _entry(SumParams, sum_pattern()),
        _entry(SoftMaxParams, softmax_pattern()),
        _entry(LeakyReLUParams, leaky_relu_pattern()),
        _entry(ConcatParams, concat_pattern()),
        _entry(SigmoidParams, sigmoid_pattern()),
        _entry(SplitParams, split_pattern()),
        _entry(RequantizeParams, requantize_pattern()),
        _entry(Resize2dParams, resize2d_pattern()),
        _entry(ExpandDimsParams, expand_dims_pattern()),
        _entry(SqueezeParams, squeeze_pattern()),
        _entry(HardSwishParams, hard_swish_pattern()),
    ]
# pylint: disable=unused-argument
@requires_vela
def partition_for_ethosu(
    mod: tvm.ir.IRModule,
    params: Optional[Dict[str, tvm.runtime.NDArray]] = None,
    mod_name: str = "default",
    **opts,
):
    """This helper function partition the relay graph as produced by the
    relay frontend for a given model into external functions
    to be presented to the codegen.

    Parameters
    ----------
    mod : tvm.ir.IRModule
        The IRModule that gets generated from a relay frontend
    params : Optional[Dict[str, tvm.runtime.NDArray]]
        Constant input parameters.
    mod_name: str, optional
        The module name

    Returns
    -------
    mod : IRModule
        The partitioned IRModule with external global functions
    """
    from tvm.relay.backend.contrib.ethosu import preprocess, codegen
    # Bind constant parameters into the main function before pattern matching.
    if params:
        mod["main"] = bind_params_by_name(mod["main"], params)
    pattern = relay.op.contrib.get_pattern_table("ethos-u")
    mod = relay.transform.InferType()(mod)
    # NOTE(review): replicate_pads presumably duplicates pad ops shared by
    # several consumers before composite matching — confirm in ethosu codegen.
    mod = codegen.replicate_pads(mod)
    mod = relay.transform.InferType()(mod)
    mod = relay.transform.MergeComposite(pattern)(mod)
    mod = relay.transform.AnnotateTarget("ethos-u")(mod)
    mod = relay.transform.MergeCompilerRegions()(mod)
    mod = relay.transform.InferType()(mod)
    mod = relay.transform.PartitionGraph(mod_name)(mod)
    mod = relay.transform.InferType()(mod)
    mod = preprocess.preprocess_ext_io()(mod)
    return mod
| 81,090 | 33.34604 | 111 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/cutlass.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Patterns supported CUTLASS."""
from functools import partial
from tvm import relay
from tvm.ir.transform import PassContext, Sequential
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.contrib.register import register_pattern_table # type: ignore
from ...dataflow_pattern import is_constant, is_op, wildcard
def make_gelu_pattern(bias_out, out_dtype="float16"):
    """Create the pattern for the gelu tail that follows *bias_out*.

    For float16 the erf is computed through cast/erf/cast; otherwise erf is
    applied directly.
    """
    scaled = is_op("multiply")(bias_out, is_constant() | wildcard())
    if out_dtype == "float16":
        erf_out = is_op("cast")(is_op("erf")(is_op("cast")(scaled)))
    else:
        erf_out = is_op("erf")(scaled)
    halved = is_op("multiply")(erf_out, is_constant() | wildcard())
    shifted = is_op("add")(halved, is_constant() | wildcard())
    return is_op("multiply")(shifted, bias_out)
def make_gemm_pattern(with_bias=True, with_act=None, out_dtype="float16"):
    """Create a pattern for dense op followed by activations."""
    dense = is_op("nn.dense")(wildcard(), wildcard())
    if with_bias:
        dense_out = (is_op("add") | is_op("nn.bias_add"))(dense, wildcard())
    else:
        dense_out = dense
    if with_act is None:
        return dense_out
    if isinstance(with_act, str) and with_act == "relu":
        return is_op("nn.relu")(dense_out)
    # Only "relu" and "gelu" activations are supported here.
    assert isinstance(with_act, str) and with_act == "gelu"
    return make_gelu_pattern(dense_out, out_dtype)
def make_batch_matmul_pattern():
    """Match a standalone nn.batch_matmul call with arbitrary operands."""
    return is_op("nn.batch_matmul")(wildcard(), wildcard())
def make_conv2d_pattern(with_bias=False, with_act=None):
    """Create a pattern for a conv2d op, optionally fused with bias and an activation.

    Supported activations: "relu", "sigmoid", "silu", "hardswish".
    """
    conv_out = is_op("nn.conv2d")(wildcard(), wildcard())
    if with_bias:
        conv_out = (is_op("add") | is_op("nn.bias_add"))(conv_out, wildcard())
    if with_act is None:
        return conv_out
    if with_act == "relu":
        return is_op("nn.relu")(conv_out)
    if with_act == "sigmoid":
        return is_op("sigmoid")(conv_out)
    if with_act == "silu":
        # conv_out is shared: both multiply operands must resolve to the same
        # conv2d(+bias) expression.
        return is_op("multiply")(conv_out, is_op("sigmoid")(conv_out))
    if with_act == "hardswish":
        clipped = is_op("clip")(is_op("add")(conv_out, is_constant()))
        return is_op("multiply")(conv_out, is_op("divide")(clipped, is_constant()))
    raise ValueError(f"Unknown activation {with_act}.")
def make_conv2d_transpose_pattern():
    """Match a standalone nn.conv2d_transpose call."""
    return is_op("nn.conv2d_transpose")(wildcard(), wildcard())
def make_conv2d_backward_weight_pattern():
    """Match a standalone nn.conv2d_backward_weight call."""
    return is_op("nn.conv2d_backward_weight")(wildcard(), wildcard())
def make_residual_block_pattern(tensor_op_out, binary_op="add", with_act="relu"):
    """Wrap *tensor_op_out* in a residual-block pattern.

    The residual input may appear on either side of the binary op; an optional
    trailing relu is matched when with_act == "relu".
    """
    skip = wildcard()
    combined = is_op(binary_op)(tensor_op_out, skip) | is_op(binary_op)(
        skip, tensor_op_out
    )
    if with_act == "relu":
        return is_op("nn.relu")(combined)
    return combined
def check_dtype(lhs, rhs):
    """Return True when the (lhs, rhs) dtype pair is supported by CUTLASS.

    Supported: matching float16/float32 pairs, or any int8/uint8 combination.
    """
    if lhs.dtype == rhs.dtype and lhs.dtype in ("float16", "float32"):
        return True
    return lhs.dtype in ("int8", "uint8") and rhs.dtype in ("int8", "uint8")
def get_root_call(call, root_op_name):
    """Walk the arg-0 chain of *call* until a call to *root_op_name* is found.

    Returns None when a non-Call node is reached before a match.
    """
    node = call
    while isinstance(node, relay.Call):
        if str(node.op.name) == root_op_name:
            return node
        node = node.args[0]
    return None
def check_gemm(call):
    """Check if the given dense workload can be offloaded to CUTLASS."""
    dense = get_root_call(call, "nn.dense")
    lhs_type = dense.args[0].checked_type
    rhs_type = dense.args[1].checked_type
    return check_dtype(lhs_type, rhs_type)
def check_batch_matmul(call):
    """Check if the given batch_matmul workload can be offloaded to CUTLASS.

    Only the (non-transposed A, transposed B) layout is accepted.
    """
    bmm = get_root_call(call, "nn.batch_matmul")
    lhs_type = bmm.args[0].checked_type
    rhs_type = bmm.args[1].checked_type
    if bmm.attrs.transpose_a or not bmm.attrs.transpose_b:
        return False
    return check_dtype(lhs_type, rhs_type)
def is_depthwise_conv2d(ic, oc, groups):
    """A conv2d is depthwise when input channels, output channels and groups all agree."""
    return ic == groups and oc == groups
def check_conv2d_common(op_name, expected_kernel_layout, call):
    """Check if the given conv2d workload can be offloaded to CUTLASS.

    Requires NHWC data, the expected kernel layout, a supported dtype pair,
    and a non-depthwise convolution.
    """
    conv2d = get_root_call(call, op_name)
    data = conv2d.args[0].checked_type
    weight = conv2d.args[1].checked_type
    if conv2d.attrs.data_layout != "NHWC":
        return False
    if conv2d.attrs.kernel_layout != expected_kernel_layout:
        return False
    if not check_dtype(data, weight):
        return False
    in_channels = data.shape[3]
    out_channels = weight.shape[0]
    return not is_depthwise_conv2d(in_channels, out_channels, conv2d.attrs.groups)
def check_conv2d(call):
    """Check a forward conv2d workload (OHWI kernel layout)."""
    return check_conv2d_common("nn.conv2d", "OHWI", call)
def check_conv2d_transpose(call):
    """Check a conv2d_transpose workload.

    conv2d_transpose is implemented as dgrad, which swaps the roles of C and K,
    hence the IHWO kernel layout.
    """
    return check_conv2d_common("nn.conv2d_transpose", "IHWO", call)
def check_conv2d_backward_weight(call):
    """Check a conv2d_backward_weight workload (NHWC kernel layout)."""
    return check_conv2d_common("nn.conv2d_backward_weight", "NHWC", call)
def check_conv2d_residual(call, binary_op):
    """Check if the given conv2d residual-block workload can be offloaded to CUTLASS."""
    root_conv2d = get_root_call(call, "nn.conv2d")
    if not check_conv2d(call):
        return False
    residual_binop = get_root_call(call, binary_op)
    lhs, rhs = residual_binop.args[0], residual_binop.args[1]
    # The residual input is pattern-matched as a wildcard, so it must not sit
    # between the residual binary op and the root conv2d of this pattern.
    # If the root conv2d is the parent of both operands, reject the pattern.
    if get_root_call(lhs, "nn.conv2d") == root_conv2d and (
        get_root_call(rhs, "nn.conv2d") == root_conv2d
    ):
        return False
    # Both operands must have identical shapes (no broadcasting).
    return all(a == b for (a, b) in zip(lhs.checked_type.shape, rhs.checked_type.shape))
@register_pattern_table("cutlass")
def pattern_table():
    """Returns list of triples describing the name, dataflow pattern and predicate for all
    the CUTLASS-supported operators."""
    # GEMM patterns. The fused-activation variants are listed before the plain
    # dense entry in dense_patterns below (ordering preserved).
    dense_pat = ("cutlass.dense", make_gemm_pattern(False, None), check_gemm)
    dense_bias_pat = ("cutlass.dense_bias", make_gemm_pattern(True, None), check_gemm)
    dense_bias_relu_pat = ("cutlass.dense_bias_relu", make_gemm_pattern(True, "relu"), check_gemm)
    dense_bias_gelu_fp16_pat = (
        "cutlass.dense_bias_gelu_fp16",
        make_gemm_pattern(True, "gelu"),
        check_gemm,
    )
    dense_bias_gelu_fp32_pat = (
        "cutlass.dense_bias_gelu_fp32",
        make_gemm_pattern(True, "gelu", out_dtype="float32"),
        check_gemm,
    )
    dense_patterns = [
        dense_bias_gelu_fp16_pat,
        dense_bias_gelu_fp32_pat,
        dense_bias_relu_pat,
        dense_bias_pat,
        dense_pat,
        ("cutlass.batch_matmul", make_batch_matmul_pattern(), check_batch_matmul),
    ]
    # Conv2d patterns: fused bias+activation forms first, plain conv2d last.
    conv2d_patterns = [
        (
            "cutlass.conv2d_bias_hardswish",
            make_conv2d_pattern(with_bias=True, with_act="hardswish"),
            check_conv2d,
        ),
        (
            "cutlass.conv2d_bias_silu",
            make_conv2d_pattern(with_bias=True, with_act="silu"),
            check_conv2d,
        ),
        (
            "cutlass.conv2d_bias_relu",
            make_conv2d_pattern(with_bias=True, with_act="relu"),
            check_conv2d,
        ),
        (
            "cutlass.conv2d_bias_sigmoid",
            make_conv2d_pattern(with_bias=True, with_act="sigmoid"),
            check_conv2d,
        ),
        ("cutlass.conv2d_bias", make_conv2d_pattern(with_bias=True), check_conv2d),
        ("cutlass.conv2d", make_conv2d_pattern(), check_conv2d),
    ]
    # For now, no fusion for grad kernels
    conv2d_grad_patterns = [
        ("cutlass.conv2d_transpose", make_conv2d_transpose_pattern(), check_conv2d_transpose),
        (
            "cutlass.conv2d_backward_weight",
            make_conv2d_backward_weight_pattern(),
            check_conv2d_backward_weight,
        ),
    ]
    # Build residual-block variants from every conv2d pattern except the
    # plain "cutlass.conv2d" (hence the [:-1] slice), for both add and multiply
    # residual ops, with and without a trailing relu.
    residual_block_patterns = []
    for with_act, postfix in [("relu", "_relu"), (None, "")]:
        for name, pat, _ in conv2d_patterns[:-1]:
            for bin_op in ["add", "multiply"]:
                residual_block_patterns.append(
                    (
                        name + "_residual_" + bin_op + postfix,
                        make_residual_block_pattern(pat, bin_op, with_act=with_act),
                        partial(check_conv2d_residual, binary_op=bin_op),
                    )
                )
    return residual_block_patterns + dense_patterns + conv2d_patterns + conv2d_grad_patterns
def partition_for_cutlass(mod, params=None):
    """Partition the input module into CUTLASS-supported subgraphs.

    Constant parameters (if given) are bound into the main function first, batch
    norms and constants are folded away, and the matched composites are split
    out into "cutlass"-annotated external functions.
    """
    if params is not None:
        mod["main"] = bind_params_by_name(mod["main"], params)
    simplify_passes = [
        transform.InferType(),
        transform.SimplifyInference(),
        transform.FoldConstant(),
        transform.FoldScaleAxis(),
    ]
    with PassContext(opt_level=3):
        mod = Sequential(simplify_passes)(mod)
    partition_passes = [
        transform.InferType(),
        transform.MergeComposite(relay.op.contrib.get_pattern_table("cutlass")),
        transform.AnnotateTarget(["cutlass"], include_non_call_ops=False),
        transform.PartitionGraph(bind_constants=False),
    ]
    return Sequential(partition_passes)(mod)
| 10,781 | 33.557692 | 98 | py |
tvm | tvm-main/python/tvm/relay/op/contrib/cudnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""cuDNN Relay integration."""
from typing import Callable, List, Tuple
import tvm
import tvm.ir
from tvm import relay
from tvm import te
from tvm.relay import transform
from tvm.contrib import cudnn
from ...dataflow_pattern import is_op, wildcard
from .te_target import lower_composite, relay_to_runtime
from .register import register_pattern_table
tvm._ffi.register_func("relay.ext.cudnn", relay_to_runtime(tvm.target.cuda()))
def partition_for_cudnn(mod: tvm.IRModule) -> tvm.IRModule:
    """Partition the graph to offload for cuDNN.

    Parameters
    ----------
    mod : tvm.IRModule
        The module to partition.

    Returns
    -------
    tvm.IRModule
        The partitioned module.
    """
    passes = [
        transform.InferType(),
        transform.MergeComposite(pattern_table()),
        transform.AnnotateTarget("cudnn"),
        transform.PartitionGraph(),
        transform.InferType(),
    ]
    return tvm.transform.Sequential(passes)(mod)
@register_pattern_table("cudnn")
def pattern_table() -> List[Tuple[str, relay.Pattern, Callable[[relay.Call], bool]]]:
    """Get the cuDNN pattern table."""
    def softmax_pattern() -> relay.Pattern:
        """Create pattern for softmax."""
        return is_op("nn.softmax")(wildcard())
    def log_softmax_pattern() -> relay.Pattern:
        """Create pattern for log_softmax."""
        return is_op("nn.log_softmax")(wildcard())
    def conv2d_pattern() -> relay.Pattern:
        """Create pattern for conv2d."""
        return is_op("nn.conv2d")(wildcard(), wildcard())
    def conv2d_bias_act_pattern() -> relay.Pattern:
        """Create pattern for fused conv2d+bias+activation (relu optional)."""
        conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
        bias = is_op("nn.bias_add")(conv2d, wildcard())
        return bias.optional(is_op("nn.relu"))
    def check_softmax(matched: relay.Call) -> bool:
        """Check if softmax is supported by cuDNN (float dtypes only)."""
        if matched.args[0].checked_type.dtype not in ["float64", "float32", "float16"]:
            return False
        return True
    def check_log_softmax(matched: relay.Call) -> bool:
        """Check if log_softmax is supported by cuDNN.

        Restricted to float dtypes, 2-D input and softmax over the last axis.
        """
        if matched.args[0].checked_type.dtype not in ["float64", "float32", "float16"]:
            return False
        if len(matched.args[0].checked_type.shape) != 2:
            return False
        if matched.attrs["axis"] not in (1, -1):
            return False
        return True
    def check_conv2d(matched: relay.Call) -> bool:
        # Float dtypes, NCHW/OIHW layouts only.
        if matched.args[0].checked_type.dtype not in ["float64", "float32", "float16"]:
            return False
        if matched.attrs["data_layout"] != "NCHW" or matched.attrs["kernel_layout"] != "OIHW":
            return False
        # Padding must be symmetric per spatial dimension (indices pair 0/2 and
        # 1/3 — presumably (top, left, bottom, right); confirm against relay conv2d attrs).
        padding = matched.attrs["padding"]
        if padding[0] != padding[2] or padding[1] != padding[3]:
            return False
        return True
    def check_conv2d_bias_act(matched: relay.Call) -> bool:
        # No additional constraints beyond the pattern itself.
        return True
    # Fused conv2d_bias_act is listed before plain conv2d.
    return [
        ("cudnn.softmax", softmax_pattern(), check_softmax),
        ("cudnn.log_softmax", log_softmax_pattern(), check_log_softmax),
        ("cudnn.conv2d_bias_act", conv2d_bias_act_pattern(), check_conv2d_bias_act),
        ("cudnn.conv2d", conv2d_pattern(), check_conv2d),
    ]
@lower_composite("cudnn.softmax")
def _lower_softmax(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
    """Lower a softmax composite to a cuDNN softmax call."""
    axis = op.attrs["axis"]
    return cudnn.softmax(inputs[0], axis=axis)
@lower_composite("cudnn.log_softmax")
def _lower_log_softmax(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
    """Lower a log_softmax composite to a cuDNN log_softmax call."""
    axis = op.attrs["axis"]
    return cudnn.log_softmax(inputs[0], axis=axis)
@lower_composite("cudnn.conv2d_bias_act")
def _lower_conv2d_bias_act(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
    """Lower a fused conv2d+bias+activation using cuDNN."""
    conv_dtype = op.checked_type.dtype
    # The composite either ends in nn.relu (conv2d -> bias_add -> relu) or in
    # the bias_add itself; locate the conv2d call accordingly.
    if op.op.name == "nn.relu":
        activation_mode = 1  # Relu
        conv2d = op.args[0].args[0]
    else:
        activation_mode = 5  # Identity
        conv2d = op.args[0]
    # Fixed configuration. NOTE(review): these integers presumably map to
    # cuDNN enum values (convolution mode, tensor format, algorithm choice) —
    # confirm against the tvm.contrib.cudnn packed-function implementation.
    conv_mode = 1
    tensor_format = 0
    algo = 1
    pad = conv2d.attrs["padding"]
    strides = conv2d.attrs["strides"]
    dilation = conv2d.attrs["dilation"]
    groups = conv2d.attrs["groups"]
    # Compute the output shape from the conv parameters and input shapes.
    oshape = cudnn.conv_output_shape(
        tensor_format,
        pad,
        strides,
        dilation,
        inputs[0].shape,
        inputs[1].shape,
        inputs[0].dtype,
        conv_dtype,
        groups,
    )
    return te.extern(
        oshape,
        inputs,
        lambda ins, outs: tvm.tir.call_packed(
            "tvm.contrib.cudnn.conv2d+bias+act.forward",
            conv_mode,
            tensor_format,
            algo,
            pad[0],
            pad[1],
            strides[0],
            strides[1],
            dilation[0],
            dilation[1],
            activation_mode,
            # NOTE(review): the meaning of this literal 0 is not visible here —
            # confirm against the packed-function signature.
            0,
            ins[0],
            ins[1],
            ins[2],
            outs[0],
            conv_dtype,
            groups,
        ),
        name="y",
    )
@lower_composite("cudnn.conv2d")
def _lower_conv2d(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
    """Lower a conv2d using cuDNN."""
    # conv_mode/tensor_format/algo are fixed here. NOTE(review): presumably
    # cuDNN enum values consistent with the NCHW/OIHW-only check in the
    # pattern table — confirm against tvm.contrib.cudnn.conv_forward.
    return cudnn.conv_forward(
        inputs[0],
        inputs[1],
        pad=op.attrs["padding"],
        stride=op.attrs["strides"],
        dilation=op.attrs["dilation"],
        conv_mode=1,
        tensor_format=0,
        algo=1,
        conv_dtype=op.checked_type.dtype,
        groups=op.attrs["groups"],
    )
| 6,461 | 29.338028 | 94 | py |
tvm | tvm-main/python/tvm/relay/op/annotation/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Annotation related operators."""
from __future__ import absolute_import as _abs
from .annotation import *
| 928 | 43.238095 | 62 | py |
tvm | tvm-main/python/tvm/relay/op/annotation/annotation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Annotation operations."""
from tvm import target
from tvm.runtime import ndarray as _nd
from tvm.runtime import Device as _Device
from . import _make
from .. import op as reg
def _make_virtual_device(device):
    """Coerce a Device, a device-name string, or a VirtualDevice into a VirtualDevice."""
    if isinstance(device, target.VirtualDevice):
        return device
    if isinstance(device, _Device):
        return target.VirtualDevice(device)
    if isinstance(device, str):
        return target.VirtualDevice(_nd.device(device))
    raise ValueError(f"expecting a Device or device name, but received a {type(device)}")
def on_device(body, device, constrain_result=False, constrain_body=True):
    """Annotates a body expression with device constraints. The constraint influences
    how the body is compiled, where the body is evaluated, and where the result of
    evaluation is stored.

    Note that the defaults for the constrain_body and constrain_result parameters should
    almost never need to be overridden by the user. These parameters are exposed here
    to help unit tests exercise the PlanDevices pass machinery.

    Parameters
    ----------
    body : tvm.relay.Expr
        The expression to be annotated.
    device : Union[:py:class:`Device`, str]
        The device to annotate with.
    constrain_result : bool
        If false (the default), the result of the on_device is not constrained to be on device.
    constrain_body : bool
        If true (the default), the body of the on_device is constrained to be on device.

    Returns
    -------
    result : tvm.relay.Expr
        The annotated expression.
    """
    virtual_device = _make_virtual_device(device)
    return _make.OnDevice(body, virtual_device, constrain_result, constrain_body)
def function_on_device(function, param_devices, result_device):
    """Annotates a Relay function with the device types on which its parameters and result should
    be stored.

    Parameters
    ----------
    function : tvm.relay.Function
        The function to be annotated.
    param_devices : Array[Union[:py:class:`Device`, str]]
        The devices for each parameter.
    result_device: Union[:py:class:`Device`, str]
        The device for the function result.

    Returns
    -------
    result : tvm.relay.Function
        The annotated function.
    """
    param_virtual_devices = [_make_virtual_device(d) for d in param_devices]
    result_virtual_device = _make_virtual_device(result_device)
    return _make.FunctionOnDevice(function, param_virtual_devices, result_virtual_device)
def stop_fusion(data):
    """Annotate an expression so that it is not fused with the expressions that follow it.

    Parameters
    ----------
    data : tvm.relay.Expr
        The expression to be annotated.

    Returns
    -------
    result : tvm.relay.Expr
        The annotated expression.
    """
    return _make.stop_fusion(data)
def checkpoint(data):
    """Mark an expression as a checkpoint for the checkpointing memory optimization.

    Parameters
    ----------
    data : tvm.relay.Expr
        The expression to be annotated.

    Returns
    -------
    result : tvm.relay.Expr
        The annotated expression.
    """
    annotated = _make.checkpoint(data)
    return annotated
# Register the generic injective schedule for the checkpoint annotation op.
reg.register_injective_schedule("annotation.checkpoint")
def compiler_begin(data, compiler):
    """Annotate an expression to indicate that it is the beginning of
    a region that will be handled by the given compiler.

    Parameters
    ----------
    data : tvm.relay.Expr
        The expression to be annotated.

    compiler : Str
        The compiler used to generate code of the annotated region.

    Returns
    -------
    result : tvm.relay.Expr
        The annotated expression.
    """
    return _make.compiler_begin(data, compiler)
def compiler_end(data, compiler):
    """Annotate an expression to indicate that it is the end of a region that
    is handled by the provided compiler.

    Parameters
    ----------
    data : tvm.relay.Expr
        The expression to be annotated.

    compiler : Str
        The compiler used to generate code of the annotated region.

    Returns
    -------
    result : tvm.relay.Expr
        The annotated expression.
    """
    annotated = _make.compiler_end(data, compiler)
    return annotated
| 4,899 | 28.341317 | 97 | py |
tvm | tvm-main/python/tvm/relay/op/annotation/_make.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
import tvm._ffi
# Populate this module with the FFI-registered "relay.op.annotation._make.*"
# constructor functions.
tvm._ffi._init_api("relay.op.annotation._make", __name__)
| 883 | 41.095238 | 62 | py |
tvm | tvm-main/python/tvm/relay/transform/infer_layout_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, missing-docstring, unused-import
"""
Relay infer correct layout pass.
"""
import tvm
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object("relay._transform.InferCorrectLayoutOutput")
class InferCorrectLayoutOutput(Object):
    """An output structure to hold results from FInferCorrectLayout calls."""
    def __init__(self, input_layouts, output_layouts, new_attrs):
        # Constructed through the FFI; the handle wraps the
        # (input_layouts, output_layouts, new_attrs) triple.
        self.__init_handle_by_constructor__(
            _ffi_api.InferCorrectLayoutOutput, input_layouts, output_layouts, new_attrs
        )
| 1,374 | 39.441176 | 87 | py |
tvm | tvm-main/python/tvm/relay/transform/suffixes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"Add suffix to the relay.Call's span fields"
from collections import defaultdict
import tvm
from ..expr_functor import ExprMutator
from .. import expr as _expr
SUFFIX_STRING = r"_PART_"
class _SuffixTagger(ExprMutator):
    """A pass to traverse the Relay graph to add suffixes to the calls' span fields.

    This makes the span a unique indicator of a Relay line, which can be used to
    obtain the mapping between the Relay that gets generated from a relay frontend
    and the Relay after partitioning.
    """

    def __init__(self):
        ExprMutator.__init__(self)
        # key: span source name, value: counter, indexed from 0
        self.lookup = defaultdict(int)
        self.suffix = SUFFIX_STRING
        # structural hashes of expressions whose spans have already been rewritten
        self.hashes = set()

    def _tag_suffix(self, span):
        """Return a copy of *span* whose source name carries a unique numeric
        suffix; non-Span inputs (e.g. None) are returned unchanged.

        The isinstance guard avoids errors once SequentialSpan is introduced in
        the future; see https://discuss.tvm.apache.org/
        t/pre-rfc-tvm-explorer-infrastructure/13457#pass-source-information-builder-6
        """
        if isinstance(span, tvm.relay.Span):
            ori_name = span.source_name.name
            new_name = ori_name + self.suffix + str(self.lookup[ori_name])
            self.lookup[ori_name] += 1
            return tvm.relay.Span(
                tvm.relay.SourceName(new_name),
                span.line,
                span.end_line,
                span.column,
                span.end_column,
            )
        return span

    def visit(self, expr):
        # Only rewrite nodes that actually carry a span field.
        if hasattr(expr, "span"):
            return super().visit(expr)
        return expr

    def visit_call(self, call):
        """Rewrite a call, tagging its span the first time the call is seen."""
        new_args = [self.visit(arg) for arg in call.args]
        new_op = self.visit(call.op)
        # Compute the structural hash once; the original computed it twice
        # (once for the membership test and once for the add).
        call_hash = tvm.ir.structural_hash(call)
        if call_hash not in self.hashes:
            self.hashes.add(call_hash)
            new_span = self._tag_suffix(call.span)
        else:
            # A structurally identical call was already tagged: keep the span.
            new_span = call.span
        return _expr.CallWithFields(
            call, new_op, new_args, call.attrs, call.type_args, None, new_span
        )
def tag_suffixes(mod):
    """Traverses the Relay graph to add suffixes to the calls' span fields.

    This makes the span a unique indicator of a Relay call, usable to obtain
    the mapping between the offloaded result and the frontend operators.

    Parameters
    ----------
    tvm.ir.IRModule
        The IRModule that gets generated from a relay frontend.

    Returns
    -------
    tvm.ir.IRModule
        The IRModule with call's span fields tagged with suffixes.
    """
    tagger = _SuffixTagger()
    for global_var, func in mod.functions.items():
        mod.update_func(global_var, tagger.visit(func))
    return mod
return mod
| 3,818 | 34.361111 | 90 | py |
tvm | tvm-main/python/tvm/relay/transform/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, missing-docstring, unused-import
"""
Relay pass transformation infrastructure.
"""
import functools
import inspect
import types
import warnings
import tvm.ir
from tvm import relay, te
from tvm.runtime import ndarray as _nd
from ..backend.utils import mangle_module_name
from . import _ffi_api
def build_config(opt_level=2, required_pass=None, disabled_pass=None, trace=None):
    """Configure the build behavior by setting config variables. This function
    will be deprecated in TVM v0.7. Instead, we should directly use
    tvm.transform.PassContext.

    Parameters
    ----------
    opt_level: int, optional
        Optimization level. The optimization pass name and level are as the
        following:

        .. code-block:: python

            OPT_PASS_LEVEL = {
                "SimplifyInference": 0,
                "OpFusion": 1,
                "FoldConstant": 2,
                "FoldScaleAxis": 3,
                "AlterOpLayout": 3,
                "CanonicalizeOps": 3,
                "CanonicalizeCast": 3,
                "EliminateCommonSubexpr": 3,
                "CombineParallelConv2D": 4,
                "CombineParallelDense": 4,
                "CombineParallelBatchMatmul": 4,
                "FastMath": 4
            }

    required_pass: set of str, optional
        Optimization passes that are required regardless of optimization level.

    disabled_pass: set of str, optional
        Optimization passes to be disabled during optimization.

    trace: Callable[[IRModule, PassInfo, bool], None]
        A tracing function for debugging or introspection.

    Returns
    -------
    pass_context: PassContext
        The pass context for optimizations.
    """
    # Emit a deprecation warning but stay functional: delegate directly to the
    # replacement API with the same arguments.
    warnings.warn(
        "relay.build_config will be deprecated. Please use \
tvm.transform.PassContext directly",
        DeprecationWarning,
    )
    return tvm.transform.PassContext(opt_level, required_pass, disabled_pass, trace)
@tvm._ffi.register_object("relay.FunctionPass")
class FunctionPass(tvm.ir.transform.Pass):
    """A pass that works on each tvm.relay.Function in a module.

    A function pass class should be created through the `function_pass`
    decorator rather than instantiated directly.
    """
def InferType():
"""Infer the type of an expr.
Returns
-------
ret : tvm.transform.Pass
The registered type inference pass.
"""
return _ffi_api.InferType()
def InferTypeLocal(expr):
"""Infer the type of a single expr, reusing type information to do so.
This populates the checked_type field in expr. We assume existing type information
in the graph is correct!
Parameters
----------
expr: relay.Expr
The expression we want to know the type of
Returns
-------
type: relay.Type
The type of the expression
"""
return _ffi_api.InferTypeLocal(expr)
def FoldScaleAxis():
"""Fold the scaling of axis into weights of conv2d/dense. This pass will
invoke both forward and backward scale folding.
Returns
-------
ret : tvm.transform.Pass
The registered pass to fold expressions.
Note
----
Internally, we will call backward_fold_scale_axis before using
forward_fold_scale_axis as backward folding targets the common conv->bn
pattern.
"""
return _ffi_api.FoldScaleAxis()
def BackwardFoldScaleAxis():
"""Backward fold axis scaling into weights of conv2d/dense.
Returns
-------
ret : tvm.transform.Pass
The registered pass to backward fold expressions.
Note
----
It is recommended to call backward_fold_scale_axis
before using forward_fold_scale_axis as backward folding targets the common
conv->bn pattern.
"""
return _ffi_api.BackwardFoldScaleAxis()
def RemoveUnusedFunctions(entry_functions=None):
    """Remove unused global relay functions in a relay module.

    Parameters
    ----------
    entry_functions: list[string]
        The set of entry functions to start from. Defaults to ``["main"]``.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass to remove unused functions.
    """
    entries = ["main"] if entry_functions is None else entry_functions
    return _ffi_api.RemoveUnusedFunctions(entries)
def ForwardFoldScaleAxis():
"""Fold the scaling of axis into weights of conv2d/dense.
Returns
-------
ret : tvm.transform.Pass
The registered pass to forward fold expressions.
Note
----
It is recommended to call backward_fold_scale_axis
before using forward_fold_scale_axis, as backward folding targets the
common conv->bn pattern.
"""
return _ffi_api.ForwardFoldScaleAxis()
def SimplifyInference():
"""Simplify the data-flow graph for inference phase. An simplified expression
which is semantically equal to the input expression will be returned.
Note that batch norms will only be simplified if their result is indexed at
tuple index 0.
Returns
-------
ret: tvm.transform.Pass
The registered pass to perform operator simplification.
"""
return _ffi_api.SimplifyInference()
def FastMath():
"""Converts the expensive non linear functions to their fast but approximate counterparts.
Returns
-------
ret: tvm.transform.Pass
The registered pass to perform fast math operations.
"""
return _ffi_api.FastMath()
def CanonicalizeOps():
"""Canonicalize special operators to basic operators.
This can simplify followed analysis, e.g. expanding bias_add to
expand_dims and broadcast_add.
Returns
-------
ret: tvm.transform.Pass
The registered pass performing the canonicalization.
"""
return _ffi_api.CanonicalizeOps()
def DeadCodeElimination(inline_once=False, ignore_impurity=False):
"""Remove expressions that do not have any users (dead code).
Parameters
----------
inline_once: Optional[Bool]
Whether to inline a binding that is referenced exactly once.
ignore_impurity: Optional[Bool]
Whether to ignore possible side-effects in let-bound expressions.
Returns
-------
ret: tvm.transform.Pass
The registered pass that eliminates the dead code in a Relay program.
"""
return _ffi_api.DeadCodeElimination(inline_once, ignore_impurity)
def LazyGradientInit():
"""Reduces memory usage of gradient tensors
Parameters
----------
Returns
-------
ret: tvm.transform.Pass
A pass which delays and/or reduces memory allocation,
by lazily allocating 0 or one filled tensors.
"""
return _ffi_api.LazyGradientInit()
def FoldConstantExpr(expr, mod, fold_qnn=False):
"""Fold the constant expressions in a Relay program.
Parameters
----------
expr: Expr
The expression to fold
mod: IRModule
The module the expr lives in (for global calls)
fold_qnn: bool
Whether to fold constants for QNN operations.
Returns
-------
new_expr: Expr
The expr after Constant Folding
"""
return _ffi_api.FoldConstantExpr(expr, mod, fold_qnn)
def FoldConstant(fold_qnn=False):
"""Fold the constant expressions in a Relay program.
Because of backward compatibility reason it skips QNN primitives from folding by default.
There are some transformation passes like FakeQuantizationToInteger, which requires to keep QNN
primitives for constant subgraphs. Uncontrolled constant folding of QNN primitives may break
applicability of FakeQuantizationToInteger. We suggest to use FoldConstant pass with none
default fold_qnn=True value only when all other QNN sensitive passes were already applied.
Parameters
----------
fold_qnn: bool
Whether to fold constants for QNN operations.
Returns
-------
ret : tvm.transform.Pass
The registered pass for constant folding.
"""
return _ffi_api.FoldConstant(fold_qnn)
def FuseOps(fuse_opt_level=-1):
"""Fuse operators in an expr to a larger operator according to some rules.
Parameters
----------
fuse_opt_level : int
The level of fuse optimization. -1 indicates that the level will be
inferred from pass context.
Returns
-------
ret : tvm.transform.Pass
The registered pass for operator fusion.
"""
return _ffi_api.FuseOps(fuse_opt_level)
def DefuseOps():
"""The inverse operation of FuseOps. It transforms a fused program returned by FuseOps into the
program before FuseOps. (i.e., x == DefuseOps(FuseOps(x)))
Returns
-------
ret : tvm.transform.Pass
The registered pass for operator defusion.
"""
return _ffi_api.DefuseOps()
def CombineParallelConv2D(min_num_branches=3):
"""Combine multiple conv2d operators into one.
Parameters
----------
min_num_branches : int
The minimum number of required parallel branches for performing this
optimization.
Returns
-------
ret: tvm.transform.Pass
The registered pass that combines parallel conv2d operators.
"""
return _ffi_api.CombineParallelConv2D(min_num_branches)
def CombineParallelDense(min_num_branches=3, to_batch=True):
"""Combine multiple dense operators into one. For example:
.. code-block
data
/ \
dense (2,2) dense (2,2)
| |
elemwise/bcast (2,2) elemwise/bcast (2,2)
Would become:
.. code-block
data
|
batch_matmul+elemwise/bcast (2,2,2)
or (if to_batch=False)
.. code-block
data
|
dense+elemwise/bcast (2,2+2)
Parameters
----------
min_num_branches : int
The minimum number of required parallel branches for performing this
optimization.
to_batch_matmul : bool
If True, combine parallel dense ops into batch_matmul op.
If False, combine parallel dense ops into dense op.
Returns
-------
ret: tvm.transform.Pass
The registered pass that combines parallel dense operators.
"""
return _ffi_api.CombineParallelDense(min_num_branches, to_batch)
def CombineParallelBatchMatmul(min_num_branches=3):
"""Combine multiple batch matmul operators into one. For example:
.. code-block
data (1, 2, 3)
/ \
batch_matmul(data, (1, 4, 3)) batch_matmul(data, (1, 5, 3))
| |
elemwise/bcast (1, 2, 4) elemwise/bcast (1, 2, 5)
Would become:
.. code-block
data (1, 2, 3)
|
batch_matmul(data, (1, 4+5, 3))
|
elemwise/bcast (1 ,2, 4+5)
Parameters
----------
min_num_branches : int
The minimum number of required parallel branches for performing this
optimization.
Returns
-------
ret: tvm.transform.Pass
The registered pass that combines parallel dense operators.
"""
return _ffi_api.CombineParallelBatchMatmul(min_num_branches)
def BatchingOps():
"""Batching parallel operators into one for Conv2D, Dense and BatchMatmul.
Returns
-------
ret: tvm.transform.Pass
The sequential pass which apply batching for different operator types.
"""
return tvm.transform.Sequential(
[CombineParallelConv2D(), CombineParallelDense(), CombineParallelBatchMatmul()]
)
def AlterOpLayout():
"""Alternate the layouts of operators or replace primitive operators with
other expressions.
This pass can be used for computing convolution in custom layouts or
other general weight pre-transformation.
Returns
-------
ret : tvm.transform.Pass
The registered pass that alters the layout of operators.
"""
return _ffi_api.AlterOpLayout()
class LayoutConfig(object):
    """Context-manager style customization for the ConvertLayout pass.

    Tracks which layers (by visitation order) should be skipped during layout
    conversion. While inside a ``with`` block, the active configuration is
    exposed through the class attribute ``current``.
    """

    # The configuration currently in effect (None outside any `with` block).
    current = None

    def __init__(self, skip_layers=None):
        self.skip_counter = 0
        if skip_layers is None:
            self.skip_layers = []
        else:
            self.skip_layers = skip_layers

    def check_skip(self):
        """Return True when the layer at the current counter is to be skipped.

        Each call advances the counter by one.
        """
        should_skip = self.skip_counter in self.skip_layers
        self.skip_counter += 1
        return should_skip

    def reset(self):
        """Clear both the counter and the skip list."""
        self.skip_counter = 0
        self.skip_layers = []

    def __enter__(self):
        # Save the previously active config so nested `with` blocks restore it.
        self._old_manager = LayoutConfig.current
        LayoutConfig.current = self
        return self

    def __exit__(self, ptype, value, trace):
        LayoutConfig.current = self._old_manager
def ConvertLayout(desired_layouts):
    """Given a dest layout, this pass transforms the expr such that most of the ops input data
    layout is changed to the dest layout. In ideal situation, there are only 2 layout transforms,
    one at the start and one at the end.

    This pass is not a part of relay.build and is expected to be called between framework-relay
    parser and relay.build call. This is very helpful for hardware backends that support/prefer
    only one type of data layout.

    RFC - https://discuss.tvm.apache.org/t/layout-conversion-pass/4009

    This pass uses most of the AlterOpLayout and InferCorrectLayout infrastructure. We can define
    new layouts for conv2d ops for now. Most of the other operators try to adapt to their input
    layout using the InferCorrectLayout infrastructure.

    Parameters
    ----------
    desired_layouts : map of op_name to list of layouts
        Specify a mapping of operator names to a list of layouts to convert to, in the order
        defined by the operator. An example for nn.conv2d could be:
        {"nn.conv2d": ["NHWC", "OHWI"]}, where the first item in the list specifies the data
        layout and the second specifies the kernel layout.

    Returns
    -------
    pass: FunctionPass
        The pass.
    """
    return _ffi_api.ConvertLayout(desired_layouts)
def Legalize(legalize_map_attr_name="FTVMLegalize"):
"""Legalizes an expression with another expression.
This pass can be used to replace an expr with another expr for target
dependent optimizations. For example, one expr, though semnatically
equivalent to the other, can have better performance on a target. This pass
can be used to legalize the expr in a target-dependent manner.
Parameters
----------
legalize_map_attr_name : str
The Op's attr name which corresponds to the legalize rule function.
Returns
-------
ret : tvm.transform.Pass
The registered pass that rewrites an expr.
"""
return _ffi_api.Legalize(legalize_map_attr_name)
def MergeComposite(pattern_table):
    """Merge multiple operators into a single composite relay function.

    Parameters
    ----------
    pattern_table : List[Tuple[str, tvm.relay.dataflow_pattern.DFPattern, Function]]
        A list of (pattern_name, pattern, check) tuples.
        The order of the patterns in the list will determine the order
        of priority in which they are matched.
        'check' is a function to check whether an extracted pattern matches.
        It can be implemented by pattern writer but if not specified it will
        always return True.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass that merges operators into a single composite
        relay function.

    Raises
    ------
    ValueError
        If an entry of `pattern_table` is not a 2- or 3-tuple.
    """
    pattern_names = []
    patterns = []
    checks = []
    for tup in pattern_table:
        if len(tup) == 2:
            pattern_name, pattern = tup
            # Default check: accept every extracted pattern.
            check = lambda extract: True
        elif len(tup) == 3:
            pattern_name, pattern, check = tup
        else:
            # Previously a malformed entry silently reused stale loop variables
            # (or raised NameError on the first iteration); fail loudly instead.
            raise ValueError(
                "pattern_table entries must be (name, pattern) or "
                "(name, pattern, check) tuples, got a tuple of length {}".format(len(tup))
            )
        pattern_names.append(pattern_name)
        patterns.append(pattern)
        checks.append(check)
    return _ffi_api.MergeComposite(pattern_names, patterns, *checks)
def MergeCompilerRegions():
"""Merge together compiler regions.
Returns
-------
ret : tvm.transform.Pass
The registered pass that merges compiler regions.
"""
return _ffi_api.MergeCompilerRegions()
def ToANormalForm():
"""Turn Graph Normal Form expression into A Normal Form Expression.
The scope of the root expression is the global scope.
The scope of any non root expression is the least common ancestor of all it's scope.
Values are ordered by post-DFS order in each scope.
Returns
-------
ret : Union[tvm.transform.Pass, tvm.relay.Expr]
The registered pass that transforms an expression into A Normal Form.
"""
return _ffi_api.ToANormalForm()
def ToANormalFormExpr(e):
"""ToANormalForm, but on expression level.
Parameters
----------
e : Expr
The graph expression.
Returns
-------
ret : Expr
The transformed expresion.
"""
return _ffi_api.ToANormalFormExpr(e)
def ToBasicBlockNormalForm():
"""Turn an expression to Basic Block Normal Form.
We define a block as a group of expressions implied by the scope structure.
Each graph node can only belong to a single block.
For any value that is being used in multiple blocks, it has to be referred
by a Var which is defined in a block, whose scope is the least common ancestor
of blocks this value is used.
Returns
-------
ret: tvm.transform.Pass
The registered pass that transforms an expression into Basic Block Normal Form.
"""
return _ffi_api.ToBasicBlockNormalForm()
def ToCPS(expr, mod=None):
"""
Turn expression into continuation passing style(CPS).
Every intermediate compute will be passed to a continuation.
Returns
-------
result: tvm.transform.Pass
The registered pass that transforms an expression into CPS.
"""
return _ffi_api.to_cps(expr, mod)
def EtaExpand(expand_constructor=False, expand_global_var=False):
"""Add abstraction over a constructor or global variable bound to a function
Parameters
----------
expand_constructor: bool
Whether to expand constructors.
expand_global_var: bool
Whether to expand global variables.
Returns
-------
ret: tvm.transform.Pass
The registered pass that eta expands an expression.
"""
return _ffi_api.EtaExpand(expand_constructor, expand_global_var)
def ToGraphNormalForm():
"""Turn a Relay program in A Normal Form into Graph Normal Form
Returns
-------
ret : tvm.transform.Pass
The registered pass that transforms an expression into Graph Normal Form.
"""
return _ffi_api.ToGraphNormalForm()
def EliminateCommonSubexpr(fskip=None):
"""Eliminate common subexpressions.
Parameters
----------
fskip: Callable
The callback function that decides whether an expression should be
skipped.
Returns
-------
ret : tvm.transform.Pass
The registered pass that eliminates common subexpressions.
"""
return _ffi_api.EliminateCommonSubexpr(fskip)
def PartialEvaluate():
"""Evaluate the static fragment of the code.
Note
----
This transformation could be either `Module -> Module` or `Expr -> Expr`.
It will directly transform the input expression to a new one if the target
expression is provided. Otherwise, it will rely on the pass manager to
carry out transformation.
Returns
-------
ret: tvm.transform.Pass
The registered pass that performs partial evaluation on an expression.
"""
return _ffi_api.PartialEvaluate()
def CanonicalizeCast():
"""
Canonicalize cast expressions to make operator fusion more efficient.
Returns
-------
ret : tvm.transform.Pass
The registered pass that canonicalizes cast expression.
"""
return _ffi_api.CanonicalizeCast()
def LambdaLift():
"""
Lift the closure to global function.
Returns
-------
ret : tvm.transform.Pass
The registered pass that lifts the lambda function.
"""
return _ffi_api.LambdaLift()
def PartitionGraph(mod_name="default", bind_constants=True):
"""Partition a Relay program into regions that can be executed on different
backends.
Parameters
----------
mod_name : string
Controls the prefix of the name of each partitioned subraph.
If `mod_name` is None, then `tvmgen_` prefix is used.
Otherwise, `tvmgen_mod_name_` prefix is used.
bind_constants: bool
Whether or not to bind constants in partitioned subgraphs. Note that the codegen needs
to maintain the bound constants; Otherwise the constants will be maintained by
the metadata module. So it is recommended for C-source based codegens to
set bind_constants=False to avoid embedding large constants in a C source file.
Returns
-------
ret: tvm.transform.Pass
The registered pass that partitions the Relay program.
"""
mod_name = mangle_module_name(mod_name)
return _ffi_api.PartitionGraph(mod_name, bind_constants)
def AnnotateTarget(targets, include_non_call_ops=True):
    """Annotate ops in an expression with a provided compiler/target and then
    use it for codegen.

    Parameters
    ----------
    targets : str or List[str]
        The list of target compilers used for codegen.
    include_non_call_ops : boolean
        If True then non-call ops also will be annotated with targets.
        If False then non-call ops will not be processed.

    Returns
    -------
    ret : tvm.transform.Pass
        The annotation pass that wraps ops with subgraph_start and
        subgraph_end.
    """
    # Accept a single target name as a convenience; normalize to a list.
    target_list = [targets] if isinstance(targets, str) else targets
    wrapped_targets = [tvm.runtime.container.String(t) for t in target_list]
    return _ffi_api.AnnotateTarget(wrapped_targets, include_non_call_ops)
def DynamicToStatic():
"""If possible, convert tvm.relay.dynamic* ops to static versions
Returns
-------
ret : tvm.transform.Pass
The registered pass for dynamic->static conversion.
"""
return _ffi_api.DynamicToStatic()
def Inline():
"""Perform inlining on the given Relay IR module. The global functions that
are marked as `inline` should be always inlined. A cost model will be
needed in the future to decide if it is profitable to inline the function.
Returns
-------
ret: tvm.transform.Pass
The registered pass that performs inlining for a Relay IR module.
"""
return _ffi_api.Inline()
def gradient(expr, mod=None, mode="higher_order"):
    """
    Transform the input function,
    returning a function that calculate the original result,
    paired with gradient of the input.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression, which is a Function or a GlobalVar.

    mod : Optional[tvm.IRModule]

    mode : Optional[String]
        The mode of the automatic differentiation algorithm.
        'first_order' only works on first order code, but will not produce
        reference nor closure.
        'higher_order' works on all code using reference and closure.

    Returns
    -------
    expr : tvm.relay.Expr
        The transformed expression.

    Raises
    ------
    ValueError
        If `mode` is neither 'first_order' nor 'higher_order'.
    """
    if mode == "first_order":
        # The original adjacent string literals concatenated to
        # "...please use theFirstOrderGradient..." (missing space).
        warnings.warn(
            "using transform.gradient for first-order AD is deprecated, please use the "
            "FirstOrderGradient module pass",
            DeprecationWarning,
        )
        if mod is not None:
            raise RuntimeError(
                "to run first-order AD on a module, please use the FirstOrderGradient module pass."
            )
        return FirstOrderGradient()(tvm.IRModule.from_expr(expr))["main"]
    if mode == "higher_order":
        return _ffi_api.gradient(expr, mod)
    # ValueError is more precise than a bare Exception and remains backward
    # compatible for callers that catch Exception.
    raise ValueError("unknown mode")
def FirstOrderGradient():
"""
Transforms all global functions in the module to return the original result, paired with the
gradients of the inputs. This pass transforms each global function independently and does not
support interprocedural AD. Additionally, this pass does not support any control-flow or
references, and should only be used on pure data-flow graphs.
Returns
-------
ret : tvm.transform.Pass
The registered FirstOrderGradient pass.
"""
return _ffi_api.FirstOrderGradient()
def Defunctionalization(func, mod):
"""
Performs defunctionalization on func,
transforming func from a higher-order program to a first-order program.
At each call site, the function is cloned and type parameters are substituted in.
Function arguments are encoded as datatypes
and additional apply functions are used for application.
Parameters
----------
func : tvm.relay.Function
The input function, which should not be polymorphic or be higher-order.
This is because all types must be known and we can't encode function arguments
to the program itself.
mod : tvm.IRModule
The IRModule containing function and type definitions,
which is also mutated during this pass.
Returns
-------
expr : tvm.relay.Function
The output function.
"""
return _ffi_api.Defunctionalization(func, mod)
def to_cps(func, mod=None):
    """Turn an expression into a continuation-passing-style (CPS) expression.

    Every intermediate computation is passed to a continuation.

    Parameters
    ----------
    func: tvm.relay.Function
        The input function.

    mod: Optional[tvm.IRModule]
        The global module; a fresh empty module is used when omitted.

    Returns
    -------
    result: tvm.relay.Function
        The output function.
    """
    if mod is None:
        mod = tvm.ir.IRModule()
    return _ffi_api.to_cps(func, mod)
def un_cps(func):
"""
Turn an cps function into a Function without the continuation argument.
Note that this will not give the exact same interface as before cps:
If the input/output is higher order, they will still be in cps form.
Parameters
----------
func: tvm.relay.Function
The input function
Returns
-------
result: tvm.relay.Function
The output function
"""
return _ffi_api.un_cps(func)
def _wrap_class_function_pass(pass_cls, pass_info):
    """Wrap a python class as a function pass.

    Returns a FunctionPass subclass whose instances construct `pass_cls` and
    delegate each function transformation to its `transform_function` method.
    """

    class PyFunctionPass(FunctionPass):
        """Internal wrapper class to create a class instance."""

        def __init__(self, *args, **kwargs):
            # Initialize handle first, in case pass_cls creation fails below.
            self.handle = None
            inst = pass_cls(*args, **kwargs)

            # it is important not to capture self to
            # avoid a cyclic dependency
            def _pass_func(func, mod, ctx):
                return inst.transform_function(func, mod, ctx)

            self.__init_handle_by_constructor__(_ffi_api.MakeFunctionPass, _pass_func, pass_info)
            self._inst = inst

        def __getattr__(self, name):
            # Fall back to the wrapped instance for attributes not found on
            # the FFI wrapper itself.
            return self._inst.__getattribute__(name)

    # Make the wrapper look like the user's class to introspection tools.
    functools.update_wrapper(PyFunctionPass.__init__, pass_cls.__init__)
    PyFunctionPass.__name__ = pass_cls.__name__
    PyFunctionPass.__doc__ = pass_cls.__doc__
    PyFunctionPass.__module__ = pass_cls.__module__
    return PyFunctionPass
def function_pass(pass_func=None, opt_level=None, name=None, required=None):
    """Decorate a function pass.

    This function returns a callback when pass_func
    is provided. Otherwise, it returns the created function pass using the
    given optimization function.

    Parameters
    ----------
    pass_func : Optional[Callable[(Function, Module, PassContext) -> Function]]
        The transformation function or class.

    opt_level : int
        The optimization level of this module pass.

    name : Optional[str]
        The name of the function pass. The name could be empty. In this case, the
        name of the optimization function will be used as the pass name.

    required : Optional[List[str]]
        The list of passes that the module pass is dependent on.

    Returns
    -------
    create_function_pass : Union[Callable, FunctionPass]
        A decorator will be returned if pass_func is not provided,
        otherwise return the decorated result.
        The returned decorator has two behaviors depending on the input:
        A new FunctionPass will be returned when we decorate a pass function.
        A new FunctionPass class will be returned when we decorate a class type.

    Examples
    --------
    The following code block decorates a function pass class.

    .. code-block:: python

        @relay.transform.function_pass(opt_level=1)
        class TestReplaceFunc:
            def __init__(self, new_func):
                self.new_func = new_func

            def transform_function(self, func, mod, ctx):
                # just for demo purposes
                # transform func to new_func
                return self.new_func

        x = relay.var("x", shape=(10, 20))
        f1 = relay.Function([x], x)
        f2 = relay.Function([x], relay.log(x))
        # fpass is now a special pass that replaces every
        # function to f1
        fpass = TestReplaceFunc(f1)
        # now every function in input_mod is replaced by f1
        res_mod = fpass(input_mod)

    The following code creates a function pass by decorating
    a user defined transform function.

    .. code-block:: python

        @relay.transform.function_pass(opt_level=2)
        def transform(func, mod, ctx):
            # my transformations here.
            return func

        function_pass = transform
        assert isinstance(function_pass, transform.FunctionPass)
        assert function_pass.info.opt_level == 2

        # Given a module m, the optimization could be invoked as the following:
        updated_mod = function_pass(m)
        # Now constant folding should have been applied to every function in
        # the provided module m. And the updated module will be returned.
    """
    if opt_level is None:
        raise ValueError("Please provide opt_level for the function pass.")

    required = required if required else []
    if not isinstance(required, (list, tuple)):
        raise TypeError("Required is expected to be the type of list/tuple.")

    def create_function_pass(pass_arg):
        """Internal function that creates a function pass"""
        fname = name if name else pass_arg.__name__
        info = tvm.transform.PassInfo(opt_level, fname, required)
        if inspect.isclass(pass_arg):
            return _wrap_class_function_pass(pass_arg, info)
        if not callable(pass_arg):
            # Fixed copy-paste from module_pass: this decorator builds a
            # *function* pass, so the error message should say so.
            raise TypeError("pass_func must be a callable for Function pass")
        return _ffi_api.MakeFunctionPass(pass_arg, info)

    if pass_func:
        return create_function_pass(pass_func)
    return create_function_pass
@function_pass(opt_level=1)
class ChangeBatch:
    """
    Change the batch size.

    Parameters
    ----------
    data: Dict[relay.Var, int]
        A dictionary of all the params to change.
        The keys are all params, and the values are which dimension hold the batch.

    batch_size: int
        The batch size to change to.

    Returns
    -------
    pass: FunctionPass
        The pass.
    """

    def __init__(self, data, batch_size=16):
        self.data = data
        self.batch_size = batch_size

    def transform_function(self, func, mod, ctx):
        # Rebuild the function with ret_type=None — presumably so the return
        # type is re-inferred after the batch dimension changes; confirm.
        func = relay.Function(func.params, func.body, None, func.type_params, func.attrs)
        change_batch = self

        class ChangeBatchMutator(tvm.relay.ExprMutator):
            # Rewrite only the vars listed in `data`, replacing the batch
            # dimension of each type annotation with the new batch size.
            def visit_var(self, var):
                if var in change_batch.data:
                    ty = var.type_annotation
                    new_shape = list(ty.shape)
                    new_shape[change_batch.data[var]] = change_batch.batch_size
                    return relay.Var(var.name_hint, relay.TensorType(new_shape, ty.dtype))
                return var

        return ChangeBatchMutator().visit(func)
def DenseToSparse(weight_name, weight_shape):
"""
Rewrite qualified ```nn.dense operation``` to ```nn.sparse_dense```
This pass is used in ```data_dep_optimization.bsr_dense```
Parameters of this pass is generated by ```analysis.sparse_dense.process_params```
Parameters
----------
weight_name: Array[String]
Names of weights which qualified sparse contrains
weight_shape: Array[Array[IntImm]]
Weights shape in BSR format.
Returns
-------
ret : tvm.transform.Pass
The registered DenseToSparse pass.
"""
return _ffi_api.DenseToSparse(weight_name, weight_shape)
def Conv2dToSparse(weight_name, weight_shape, layout, kernel_size):
"""
Rewrite qualified ```nn.conv2d operation``` to ```nn.sparse_conv2d```
Parameters
----------
weight_name: Array[String]
Names of weights which qualified sparse contrains
weight_shape: Array[Array[IntImm]]
Weights shape in BSR format.
layout : str
layout of data
Returns
-------
ret : tvm.transform.Pass
The registered DenseToSparse pass.
"""
return _ffi_api.Conv2dToSparse(weight_name, weight_shape, layout, kernel_size)
def Conv2dToSparse2(layout, kernel_size, blocksize, sparsity_threshold):
"""
Rewrite freezed ```nn.conv2d``` operation to ```nn.sparse_conv2d```
Parameters
----------
layout : str
layout of data
kernel_size : int
kernel size of conv2d
Returns
-------
ret : tvm.transform.Pass
The registered DenseToSparse pass.
"""
return _ffi_api.Conv2dToSparse2(layout, kernel_size, *blocksize, sparsity_threshold)
def SimplifyFCTranspose(target_weight_name):
"""
Rewrite ```y = nn.dense(x, transpose(w, [1, 0]))``` to ```y = nn.dense(x, wt)```
This pass is used in ```data_dep_optimization.simplify_fc_transpose```
Parameters
----------
weight_name: Array[String]
Names of weights which qualified ```y = nn.dense(x, transpose(w, [1, 0]))```
This parameter is generated by ```analysis.search_fc_transpose``` function
Returns
-------
ret : tvm.transform.Pass
The registered SimplifyFCTranspose pass.
"""
return _ffi_api.SimplifyFCTranspose(target_weight_name)
def SimplifyExpr():
"""
Simplify the Relay expression, including merging consecutive reshapes.
Returns
-------
ret : tvm.transform.Pass
The registered SimplifyExpr pass.
"""
return _ffi_api.SimplifyExpr()
def PlanDevices(config):
"""
Uses existing "on_device" and "device_copy" calls to infer the virtual device on which
every Relay sub-expression should run and the result stored. Captures the result of that
analysis using new "on_device" and "device_copy" calls. Sub-expressions which are
not otherwise constrained are assigned to the default primitive virtual device describe by
config. However data and computations which must be hosted on a CPU (such as shapes and
shape functions) use the host virtual device of the config.
Parameters
----------
config : tvm.CompilationConfig
The compilation configuration, specifying available targets and default devices.
Returns
-------
ret : tvm.transforms.Pass
The pass.
"""
return _ffi_api.PlanDevices(config)
def ManifestLifetimes():
"""
Manifest the lifetimes of variables after allocations have been manifested, by inserting kill
operations once variables become dead.
"""
return _ffi_api.ManifestLifetimes()
def FoldExplicitPadding():
"""
FoldExplicitPadding finds explict padding before an op that can support
implicit padding and fuses them.
Returns
-------
ret : tvm.transform.Pass
The registered ImplicitPadding pass.
"""
return _ffi_api.FoldExplicitPadding()
def AnnotateSpans():
"""
Annotate a program with span information by first generating its textual
representation and then parsing it back into a Relay AST annotated with
span information.
Returns
-------
ret : tvm.transform.Pass
The registered AnnotateSpans pass.
"""
return _ffi_api.AnnotateSpans()
def FakeQuantizationToInteger(hard_fail=False, use_qat=False, optional_qnn_ops=None):
    # pylint: disable=anomalous-backslash-in-string
    """Rewrite fake-quantized regions of the graph into integer arithmetic.

    Finds regions of the form

    .. code-block:: text

           x    w
           |    |
           dq   dq
            \\   /
             op1
              |
             op2
              |
              q

    where ``q == qnn.quantize`` and ``dq = qnn.dequantize``, and rewrites them
    into integer versions of ``op1`` and ``op2``.

    Rules for rewriting individual ops are in fake_quantization_to_integer.py

    Parameters
    ----------
    hard_fail : boolean
        How to deal with errors during graph rewriting.
        If true, raise an error.
        If false, skip rewriting the subgraph.

    use_qat : boolean
        To perform an additional QAT pass - convert enabled operations with dequantized inputs.
        Example: in the graph above op2 is not registered with the FakeQuantizationToInteger
        attribute, op1 operation can still be converted. Converted pattern below:

        .. code-block:: text

            x    w
            |    |
            \\   /
             op1
              |
              dq
              |
             op2
              |
              q

    optional_qnn_ops : List[str]
        Names of operators that are disabled by default but should additionally
        be converted, e.g. ``['nn.softmax']``.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered FakeQuantizationToInteger pass.
    """
    # Normalize the default without mutating a shared list.
    qnn_ops = [] if optional_qnn_ops is None else optional_qnn_ops
    return _ffi_api.FakeQuantizationToInteger(hard_fail, use_qat, qnn_ops)
def FlattenAtrousConv():
    # pylint: disable=anomalous-backslash-in-string
    """Flatten sequences of space_to_batch_nd-conv2d-batch_to_space_nd operations:

    .. code-block:: text

           x     w
           |     |
           s2b   |
            \\   /
            conv2d
              |
             b2s

    converting them into subgraphs with a single convolution whose "dilation"
    is modified and whose "padding" is recalculated accordingly.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered FlattenAtrousConv pass.
    """
    return _ffi_api.FlattenAtrousConv()
def ToMixedPrecision(mixed_precision_type="float16", missing_op_mode=1):
    """Automatic mixed precision rewriter.

    Rewrites an FP32 relay graph into a version where as many operations as
    possible run in the target mixed_precision_type.

    Parameters
    ----------
    mixed_precision_type: str
        The target datatype to transform operations in the graph to use.

    missing_op_mode: int
        Determines how to handle ops not registered with FTVMMixedPrecisionConversionType
        0: Does not allow any missing ops. Will throw errors when encountering any.
        1: Allow missing ops but emit warnings.
        2: Allow missing ops and silently ignore them.

    relay.ToMixedPrecision.keep_orig_output_dtype: boolean
        Defines if outputs should be retained in the original data type or converted
        to mixed_precision_type. By default this parameter is False and the
        transformation changes the output data types to mixed_precision_type.
        This parameter is not part of explicit arguments of the transformation, but should
        be passed through tvm.transform.PassContext.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass.
    """
    # Reject modes outside the documented [0, 2] range before reaching the FFI.
    if not 0 <= missing_op_mode <= 2:
        raise ValueError("Missing op mode is either 0, 1, or 2")
    return _ffi_api.ToMixedPrecision(mixed_precision_type, missing_op_mode)
def SplitArgs(max_function_args):
    """Split function with huge number of arguments to smaller pieces.

    Parameters
    ----------
    max_function_args : int
        Maximum number of arguments a function may take; presumably functions
        exceeding this are split — behavior implemented on the C++ side.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered SplitArgs pass.
    """
    return _ffi_api.SplitArgs(max_function_args)
def OutlineCompilerFunctionsWithExistingGlobalSymbols(compiler_filter=""):
    """Outline every literal function in a direct-call position which has a
    "Compiler" attribute.

    Each outlined function is bound to a unique global var derived from its
    existing "global_symbol" attribute; at most one function per global symbol
    is outlined. When compiler_filter is non-empty, only functions whose
    "Compiler" attribute matches it are outlined.

    This pass may be useful for external codegen using the "RelayToTIR" custom
    pass mechanism to prepare the IRModule before custom lowering.

    Parameters
    ----------
    compiler_filter : String
        If non-empty, the "Compiler" attribute to filter on.

    Returns
    -------
    ret : tvm.transform.Pass
        The pass.
    """
    return _ffi_api.OutlineCompilerFunctionsWithExistingGlobalSymbols(compiler_filter)
def MarkCompilerFunctionsAsExtern(compiler_filter=""):
    """Marks all global functions which have a "Compiler" attribute matching
    compiler_filter as 'extern'.

    The function's attributes are replaced with a single "Extern" attribute, and
    all calls to the function are switched to use the 'call_lowered' calling convention.

    If compiler_filter is non-empty only functions with that as their attribute value are
    marked.

    This pass may be useful for external codegen using the "RelayToTIR" custom pass mechanism to
    cleanup the IRModule after custom lowering.

    Parameters
    ----------
    compiler_filter : String
        If non-empty, the "Compiler" attribute to filter on.

    Returns
    -------
    ret : tvm.transform.Pass
        The pass.
    """
    return _ffi_api.MarkCompilerFunctionsAsExtern(compiler_filter)
def CapturePostDfsIndexInSpans():
    """Record, in each expression's span, its post-dfs index and dominator
    post-dfs index, formatted as "index:<post-dfs index>:<dominator post-dfs index>".

    This is useful for debugging since a) it helps identify pretty-printed
    sub-expressions within the overall model and b) the indexes are heavily used
    by Collage for its compact representation of sub-graphs.

    Note that Op and Constructor nodes are not changed even though they are
    assigned a post-dfs index.

    Returns
    -------
    ret : tvm.transform.Pass
        The pass.
    """
    return _ffi_api.CapturePostDfsIndexInSpans()
def InlineCompilerFunctionsBoundTo(global_vars):
    """Inlines all global functions bound to a global var in global_vars.

    Both the global "Compiler" attributed function, and any calls to "Composite" functions in its
    body are inlined.

    This pass may be useful for external codegen which needs to undo partitioning based on
    properties of the entire partition.

    Parameters
    ----------
    global_vars : Array[tvm.relay.GlobalVar]
        The global vars of all 'Compiler' functions to inline.

    Returns
    -------
    ret : tvm.transform.Pass
        The pass.
    """
    return _ffi_api.InlineCompilerFunctionsBoundTo(global_vars)
def CollagePartition(config, cost_estimator=None):
    """Partition the bodies of all functions according to the available targets
    so as to minimize model latency.
    See https://github.com/apache/tvm-rfcs/blob/main/rfcs/0062-collage.md.

    Parameters
    ----------
    config : CompilationConfig
        The available targets.
    cost_estimator : CostEstimator, optional
        The custom cost estimator to use for costing each candidate partition.

    Returns
    -------
    ret : tvm.transform.Pass
        The pass.
    """
    # Fall back to the default estimator when the caller does not supply one.
    estimator = relay.collage.CostEstimator() if cost_estimator is None else cost_estimator
    return _ffi_api.CollagePartition(config, estimator)
def DivToMul():
    """Rewrite division by a constant into multiplication by the constant's inverse.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered DivToMul pass.
    """
    return _ffi_api.DivToMul()
| 45,020 | 28.894422 | 100 | py |
tvm | tvm-main/python/tvm/relay/transform/recast.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=use-list-literal
"""Relay type recasting pass"""
import tvm
from tvm import relay
from tvm.ir import IRModule
from .transform import InferType
from ..analysis import count_layers
from ..expr_functor import ExprMutator, Call
class RecastMutator(ExprMutator):
    """Cast operations to the target type.

    Walks the expression and, for every call to one of ``valid_ops`` whose
    layer index is not in ``skip_layers``, casts the call's inputs to ``dtype``,
    rewrites any ``out_dtype`` attribute to ``out_dtype``, and casts the result
    back to the call's original output dtype so downstream ops are unaffected.
    """

    def __init__(self, dtype, out_dtype, valid_ops, valid_op_count, skip_layers):
        # dtype: dtype the inputs of matched ops are cast to.
        # out_dtype: value written into a matched op's ``out_dtype`` attribute.
        self.dtype = dtype
        self.out_dtype = out_dtype
        # Running count of matched ops on the path from the output to the
        # expression currently being visited (visiting proceeds output-first).
        self.depth_count = 0
        # Resolve op names to Op objects once, up front.
        self.valid_ops = [relay.op.get(op) for op in valid_ops]
        self.valid_op_count = valid_op_count
        self.skip_layers = skip_layers
        # Convert negative indices to positive ones.
        # NOTE(review): this rewrites the caller-supplied skip_layers list in
        # place rather than copying it.
        for i, layer in enumerate(skip_layers):
            if layer < 0:
                skip_layers[i] = self.valid_op_count + layer
        super().__init__()

    def visit_call(self, call):
        # Keep track of our current depth and layer count
        # so we can know whether to skip this layer or not.
        # Layers are numbered from the input side, while visitation starts at
        # the output, hence the reversal here.
        current_depth = self.depth_count
        current_layer = self.valid_op_count - current_depth - 1
        if call.op in self.valid_ops:
            self.depth_count += 1
        # Visit current call operation
        new_fn = self.visit(call.op)
        # Visit current arguments
        args = []
        for arg in call.args:
            args.append(self.visit(arg))
        # Restore the depth so sibling branches see the same layer numbering.
        self.depth_count = current_depth
        # Downcast this op if its the correct type and not skipped.
        if call.op in self.valid_ops and current_layer not in self.skip_layers:
            # Recast inputs to specified type.
            if call.op == relay.op.get("concatenate"):
                # concatenate takes a single Tuple argument; cast each field.
                if len(call.args) != 1 or not isinstance(call.args[0], relay.expr.Tuple):
                    return Call(new_fn, args, call.attrs)
                tuple_args = [self.visit(arg) for arg in call.args[0].fields]
                new_args = list()
                for arg in tuple_args:
                    new_args.append(relay.cast(arg, dtype=self.dtype))
                new_args = [relay.expr.Tuple(new_args)]
            else:
                # NOTE(review): args were already visited above; presumably
                # ExprMutator memoization makes this second pass cheap — confirm.
                args = [self.visit(arg) for arg in call.args]
                new_args = list()
                for arg in args:
                    new_args.append(relay.cast(arg, dtype=self.dtype))
            # If out_dtype is in the attributes, we need to update it.
            orig_dtype = None
            if call.attrs is not None and "out_dtype" in call.attrs.keys():
                # Attrs nodes are immutable: rebuild the attribute dict with the
                # new out_dtype and re-create the node of the same attrs type.
                new_attr_dict = {}
                for attr in call.attrs.keys():
                    attr_value = call.attrs[attr]
                    if isinstance(attr_value, tvm.ir.container.Array):
                        attr_value = tuple(attr_value)
                    new_attr_dict[str(attr)] = attr_value
                new_attr_dict["out_dtype"] = self.out_dtype
                # The attrs type name is recovered from its string form,
                # e.g. "Conv2DAttrs(...)" -> "Conv2DAttrs".
                attr_type = str(call.attrs).split("(")[0]
                new_attrs = tvm.ir.make_node(attr_type, **new_attr_dict)
                if call.attrs["out_dtype"] != "":
                    orig_dtype = call.attrs["out_dtype"]
            else:
                new_attrs = call.attrs
            if orig_dtype is None:
                # Perform type inference to determine the original type.
                new_mod = IRModule.from_expr(call)
                new_mod = InferType()(new_mod)
                checked_arg = new_mod["main"].body
                orig_dtype = checked_arg.checked_type.dtype
            # Recast the output for compatibility with other graph operations.
            return relay.cast(Call(new_fn, new_args, new_attrs), orig_dtype)
        # Otherwise return the unchanged call.
        return Call(new_fn, args, call.attrs)
def recast(expr, dtype, out_dtype, ops=None, skip_layers=None):
    """Convert the types of operations in a graph to a new value.

    Note that this is primarily useful for testing performance of individual
    operations at the new datatype. In a real setting, this pass will
    almost certainly do a poor job converting from one datatype to another
    as it just applies hard casting. For example, when recasting from float
    to integer, many small values will simply be set to 0. Although this will
    allow autotuning and benchmarking to produce proper timings at the new
    data type, the output of the model will of course be heavily impacted.

    Parameters
    ---------
    expr: tvm.relay.Expr, tvm.relay.Function, or tvm.ir.IRModule
        The original function that will have its type changed.
    dtype: str
        The target type to cast to.
    out_dtype: str
        The output type to cast to.
    ops: List[str]
        A list of operations that should have their type changed,
        others will be left as is.
    skip_layers: List[int]
        A list of integers indicating operations that should
        not have their type changed, counted starting with the
        first valid operation encountered. Negative indices are
        allowed and indicate starting at the last layer.

    Returns
    -------
    output_expr : tvm.relay.Expr, tvm.relay.Function, or tvm.ir.IRModule
        The graph after recasting to the specified datatype.
    """
    # Fill in defaults; explicit None checks so caller-provided empty
    # containers are respected.
    if ops is None:
        ops = ["nn.conv2d"]
    if skip_layers is None:
        skip_layers = []
    # A module input is unwrapped to its main function and re-wrapped on return.
    wrap_in_module = isinstance(expr, tvm.ir.IRModule)
    if wrap_in_module:
        expr = expr["main"]
    # Count the targeted layers so negative skip indices can be resolved.
    depth = count_layers(expr, ops)
    mutator = RecastMutator(dtype, out_dtype, ops, depth, skip_layers)
    result = mutator.visit(expr)
    return tvm.IRModule.from_expr(result) if wrap_in_module else result
| 6,468 | 41.84106 | 89 | py |
tvm | tvm-main/python/tvm/relay/transform/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for Relay transformation passes."""
import tvm._ffi

# Populate this module's namespace with the transformation passes registered
# on the C++ side under the "relay._transform" prefix.
tvm._ffi._init_api("relay._transform", __name__)
| 899 | 41.857143 | 62 | py |
tvm | tvm-main/python/tvm/relay/transform/mixed_precision.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long,unused-argument
"""Default behavior for ops in mixed_precision pass. Import this file to use."""
from typing import List
from tvm.relay.op import register_mixed_precision_conversion
# MIXED_PRECISION_ALWAYS ops should always be done in lower precision due to the speed and memory
# savings. MIXED_PRECISION_FOLLOW ops can be done in lower precision but don't have speedups to
# justify a cast. MIXED_PRECISION_NEVER colored ops should not be done in lower precision due to
# numerical reasons.
# Conversion categories returned by the handlers below (first element of the
# returned list): ALWAYS ops are always cast to lower precision, FOLLOW ops may
# be but don't justify an explicit cast, NEVER ops stay in original precision.
MIXED_PRECISION_ALWAYS = 0
MIXED_PRECISION_FOLLOW = 1
MIXED_PRECISION_NEVER = 2
# Default lists inspired from TF's classifications:
# github.com/tensorflow/tensorflow/blob/v2.5.0/tensorflow/core/grappler/optimizers/auto_mixed_precision_lists.h
# They have a bias toward Nvidia Tensor Cores so modify lists per your hardware choice.
# Convolution / matmul style ops: always run in lower precision for the speed
# and memory savings (see the MIXED_PRECISION_ALWAYS comment above).
DEFAULT_ALWAYS_LIST = [
    "nn.conv1d",
    "nn.conv2d",
    "nn.conv3d",
    "nn.conv1d_transpose",
    "nn.conv2d_transpose",
    "nn.conv3d_transpose",
    "nn.dense",
    "nn.batch_matmul",
]
# Ops that can run in lower precision but don't have speedups to justify an
# explicit cast (see the MIXED_PRECISION_FOLLOW comment above).
DEFAULT_FOLLOW_LIST = [
    # These ops add new data or change shape
    "nn.pad",
    "nn.batch_flatten",
    "concatenate",
    "zeros",
    "split",
    "squeeze",
    "transpose",
    "expand_dims",
    "reshape",
    "dyn.reshape",
    "broadcast_to_like",
    "dyn.broadcast_to",
    "strided_slice",
    "dyn.strided_slice",
    "take",
    "argwhere",
    "where",
    "tile",
    "dyn.tile",
    "scatter",
    "scatter_elements",
    "scatter_nd",
    "full",
    "dyn.full",
    "nn.depth_to_space",
    # Comparison
    "less",
    "greater",
    "less_equal",
    "greater_equal",
    # By definition copy and cast will depend on inputs for output.
    "copy",
    "cast",
    "cast_like",
    # Simple arithmetic
    "add",
    "subtract",
    "multiply",
    "divide",
    "nn.bias_add",
    "nn.batch_norm",
    "sqrt",
    "shape_of",
    # Simple activations
    "max",
    "min",
    "maximum",
    "minimum",
    "argmax",
    "argmin",
    "nn.relu",
    "nn.leaky_relu",
    "nn.prelu",
    "nn.dropout",
    # Complicated activations which saturate in a narrow range
    "sigmoid",
    "tanh",
    "fast_tanh",  # Some coefficients outside of representable range, but probably ok
    "fast_exp",
    "fast_erf",
    "clip",  # Usually safe, may result in oddity if clip greater than fp16 range
    # Pooling operations
    "nn.max_pool1d",
    "nn.max_pool2d",
    "nn.max_pool3d",
    "nn.avg_pool1d",
    "nn.avg_pool2d",
    "nn.avg_pool3d",
    # "nn.global_max_pool1d", # does not exist yet
    "nn.global_max_pool2d",
    # "nn.global_max_pool3d", # does not exist yet
    "nn.adaptive_max_pool1d",
    "nn.adaptive_max_pool2d",
    "nn.adaptive_max_pool3d",
    "image.resize2d",
]
# Ops that must stay in the original precision for numerical reasons
# (see the MIXED_PRECISION_NEVER comment above).
DEFAULT_NEVER_LIST = [
    # In general if |f(x)| >> |x| for expected inputs then put the op here.
    "exp",
    "power",
    "nn.cross_entropy",
    "nn.cross_entropy_with_logits",
    "nn.softmax",
    "nn.l2_normalize",
    # Error function doesn't seem to be able to be lowered into fp16 version in llvm.
    # Move to follow list when it does.
    "erf",
    # Do not allow arange arguments (begin/end) to be fp16. "end" can be a big fp32 number
    # not representable in fp16.
    "arange",
    # Ops that could involve a large summation are not allowed in fp16.
    "nn.global_avg_pool2d",
    "nn.adaptive_avg_pool1d",
    "nn.adaptive_avg_pool2d",
    "nn.adaptive_avg_pool3d",
    "sum",
    "mean",
    "variance",
    "nn.layer_norm",
]
# Returns a decorator which registers for every given op, the function under FTVMMixedPrecisionConversionType
def register_func_to_op_list(list_ops: List):
    """Return a decorator registering its function as the
    FTVMMixedPrecisionConversionType handler for every op name in ``list_ops``.

    Parameters
    ----------
    list_ops : List
        Names of the ops the decorated function should be registered for.
    """

    def decorator(func):
        for op_name in list_ops:
            register_mixed_precision_conversion(op_name, func=func)
        # Return the function so the decorated name stays bound to it.
        # Previously the decorator implicitly returned None, which rebound the
        # module-level name (e.g. generic_always_op) to None after decoration.
        return func

    return decorator
def get_generic_out_dtypes(call_node: "relay.Call", mixed_precision_type: str) -> List[str]:
    """A function which returns output dtypes in a way which works for most ops.

    Parameters
    ---------
    call_node: relay.Call
        The call node containing the op.
    mixed_precision_type: str
        The target type to run the operation in.

    Returns
    -------
    output_dtypes : [str, str]
        A list of two strings. The first represents the datatype used for accumulation
        in the operation. The second represents the actual output datatype.
    """
    # Ops exposing an ``out_dtype`` attr support accumulating at a different
    # dtype than their output — there is currently no better way to detect this.
    # Discussion on improving the situation:
    # https://discuss.tvm.apache.org/t/rfc-relay-fp32-fp16-model-support/9994/4?u=andrewzhaoluo
    # TODO (AndrewZhaoLuo): evaluate consistent support for mixed_type
    # accumulators, i.e. ["float32", mixed_precision_type] when
    # hasattr(call_node.attrs, "out_dtype") holds. Until then both branches
    # return the same pair, so no branch is needed.
    return [mixed_precision_type, mixed_precision_type]
# Functions for FTVMMixedPrecisionConversionType which
# Take in CallNodes and a DType and returns a conversion type,
# an accumulation dtype, and an output_dtype.
@register_func_to_op_list(list_ops=DEFAULT_ALWAYS_LIST)
def generic_always_op(call_node: "relay.Call", mixed_precision_type: str) -> List:
    """Conversion handler for ops that should always run in mixed precision."""
    out_dtypes = get_generic_out_dtypes(call_node, mixed_precision_type)
    return [MIXED_PRECISION_ALWAYS] + out_dtypes
@register_func_to_op_list(list_ops=DEFAULT_FOLLOW_LIST)
def generic_follow_op(call_node: "relay.Call", mixed_precision_type: str) -> List:
    """Conversion handler for ops that may follow their inputs' precision."""
    out_dtypes = get_generic_out_dtypes(call_node, mixed_precision_type)
    return [MIXED_PRECISION_FOLLOW] + out_dtypes
@register_func_to_op_list(list_ops=DEFAULT_NEVER_LIST)
def generic_never_op(call_node: "relay.Call", mixed_precision_type: str) -> List:
    """Conversion handler for ops that must never run in mixed precision."""
    out_dtypes = get_generic_out_dtypes(call_node, mixed_precision_type)
    return [MIXED_PRECISION_NEVER] + out_dtypes
| 6,766 | 32.666667 | 111 | py |
tvm | tvm-main/python/tvm/relay/transform/flexible_shape.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Relay functions for wrapping a module with flexible shape dispatch."""
import tvm
from tvm import relay
def override_shape(tensor_type, axis, dim):
    """Return a copy of ``tensor_type`` with dimension ``axis`` replaced by ``dim``.

    Accepts either a single TensorType or a TupleType, in which case every
    member tensor gets the same dimension override.
    """
    # Normalize to a list of tensor types so tuples and single tensors
    # share one code path.
    if isinstance(tensor_type, relay.TupleType):
        member_types = tensor_type.fields
    else:
        member_types = [tensor_type]

    updated = []
    for member in member_types:
        shape = list(member.shape)
        shape[axis] = dim
        updated.append(relay.TensorType(shape, member.dtype))

    # A single tensor comes back as a plain TensorType, not a 1-tuple.
    if len(updated) == 1:
        return updated[0]
    return relay.TupleType(tvm.runtime.convert(updated))
def specialize_body(mod, function, axis, dim, input_indices, affects_output=True):
    """
    Create a subgraph to handle specific input shapes

    This function takes in a module and one of it's functions and creates a
    similar function with a specific input shape. It then attaches the new function
    to the module. Calling this function multiple times results in a module that
    contains several similar functions each specialized to a specific input shape.
    This allows a dispatch handler to be built on top of the module to deal with
    flexible shapes.

    There are a few modes to this function. When the specialized function has multiple
    flexible inputs, the index of those inputs must be provided to the input_indices argument.
    In this case, the axis of the flexible dimension for each of those inputs must be the same.

    By default, this function assumes that the output shape is dependent on the input
    shape (as is the case in dynamic batching) and will also specialize the output type
    accordingly. If this is not true, the affects_output argument must be set to False.

    Parameters
    ----------
    mod: IRModule
        The module that contains specialized functions and the dispatcher.
    function: Function
        The original non-specialized function that will be transformed.
    axis: int
        Which axis the flexible shape is on.
    dim: int
        The shape to specialize the new subgraph for along the axis dim.
    input_indices: List[int]
        Which inputs should be dispatched dynamically, provided by index. All inputs
        must share the same dynamic axis.
    affects_output: Optional[bool]
        Whether the change in input shape has a corresponding effect on the output shape.
        Batching for example effects both the input and output whereas changing sequence
        length in an NLP model typically does not.

    Returns
    -------
    gvar : GlobalVar
        The new variable for the specialized subgraph.
    spec_types : List[TensorType]
        A list of the new specialized types for each input in the graph.
    """
    # Rebind every flexible input to a variable with the specialized shape.
    specialized_params = list(function.params)
    bindings = {}
    specialized_vars = []
    for index in input_indices:
        param = function.params[index]
        fixed_ty = override_shape(param.type_annotation, axis, dim)
        fixed_var = relay.Var(param.name_hint, type_annotation=fixed_ty)
        specialized_params[index] = fixed_var
        bindings[param] = fixed_var
        specialized_vars.append(fixed_var)

    # Substitute the specialized variables into the body.
    specialized_body = relay.expr.bind(function.body, bindings)

    # Only change the output shape if the input shape affects it.
    if affects_output:
        ret_ty = override_shape(function.ret_type, axis, dim)
    else:
        ret_ty = function.ret_type

    # Register the specialized function in the module under a unique name.
    gvar = relay.GlobalVar("main_" + str(dim))
    mod[gvar] = relay.Function(
        specialized_params, specialized_body, ret_ty, function.type_params, function.attrs
    )
    return gvar, [v.type_annotation for v in specialized_vars]
def flexible_dispatch(
    mod, buckets, axis=0, auto_pad=False, pad_value=0, input_indices=None, affects_output=True
):
    """
    Enable inference of multiple shaped inputs in one module.

    This transformation adds a handler around a module that
    checks input shapes and dispatches to a subgraph specialized
    to handle the specific shapes of that input. If no exactly matching
    subgraph is available, the input will be run using full dynamism.
    For best performance, specify all the sizes the module will
    be likely to see using the buckets argument.

    By default, this function will dispatch shapes that exactly match one
    of the buckets to a corresponding subgraph. All non-matching shapes
    use the same fully dynamic fallback. This can be detrimental to performance
    for those non-matching shapes. Setting auto_pad to True causes this
    function to round-up the shape of non-matching inputs to the closest
    bucket. This allows them to use the tuned kernels of bucket shapes
    which can improve performance.

    Functions that have multiple inputs sharing a dynamic axis, which
    is common for batch size or sequence length dynamism, are supported
    through the input_indices argument.

    Many types of dynamism such as batching affect both the input and output
    shape, however this is not always the case. If the output shape
    is independent of the input, the affects_output argument of this
    function must be set to False.

    Parameters
    ----------
    mod: IRModule
        The module whose "main" function will be wrapped with the dispatcher.
    buckets: list[int]
        The sizes of the input dimension that should be explicitly handled.
        Each value in buckets will have a corresponding subgraph constructed to
        handle it.
    axis: int
        The dimension of the input that should be made flexible. This will
        most often be used for the batch dimension.
    auto_pad: Optional[bool]
        If True, then padding will be inserted to values that don't match one of
        the provided buckets.
    pad_value: Optional[float]
        When auto_pad is true, padding will be done with this value.
    input_indices: Optional[List[int]]
        Which inputs should be dispatched dynamically, provided by index. All inputs
        must share the same dynamic axis.
    affects_output: Optional[bool]
        Whether the change in input shape has a corresponding effect on the output shape.
        Batching for example effects both the input and output whereas changing sequence
        length in an NLP model typically does not.

    Returns
    -------
    mod : IRModule
        The new module wrapped with a flexible shape dispatch handler.
    """
    main_fn = mod["main"]

    # Default to single input if not specified.
    if input_indices is None:
        input_indices = [0]

    # Extract all flexible input params and create a dynamic variable for each.
    # NOTE: access the just-appended param positionally. The previous code
    # indexed ``data[i]`` with the *parameter* index, which raises IndexError
    # (or reads the wrong entry) for any input_indices that is not a leading
    # prefix such as [0] or [0, 1].
    data = []
    dyn_data = []
    for i in input_indices:
        param = main_fn.params[i]
        data.append(param)
        dyn_shape = override_shape(param.type_annotation, axis, relay.Any())
        dyn_data.append(relay.Var(param.name_hint, type_annotation=dyn_shape))

    # Extract the dynamic shape value from one of the inputs.
    rt_sh = relay.op.shape_of(dyn_data[0])
    flex_value = relay.op.take(rt_sh, relay.const(axis))

    # Accumulates (condition, call) pairs, later folded into an If chain.
    if_exprs = []

    for i, bucket in enumerate(buckets):
        input_data = dyn_data
        check_dim = flex_value

        # Apply automatic padding if specified.
        if auto_pad:
            input_data = []
            # Construct padding expression for inputs.
            for j, inp in enumerate(dyn_data):
                pad_width = relay.const(bucket) - flex_value
                rank = len(data[j].type_annotation.shape)
                pads = relay.zeros([rank, 2], "int32")
                pads = relay.scatter_nd(pads, relay.const([axis, 1]), pad_width)
                padded_value = relay.nn.pad(inp, pads, pad_value)

                # Determine if this is the proper bucket to pad to. Do this by checking if the
                # input shape is between this bucket and the previous.
                if i == 0:
                    padded_value = relay.If(
                        relay.op.less_equal(flex_value, relay.const(bucket)), padded_value, inp
                    )
                else:
                    padded_value = relay.If(
                        relay.op.logical_and(
                            relay.op.less_equal(flex_value, relay.const(bucket)),
                            relay.op.greater(flex_value, relay.const(buckets[i - 1])),
                        ),
                        padded_value,
                        inp,
                    )
                # Update input value and test dimension to reflect possible padding.
                input_data.append(padded_value)
            # Grab the new possibly padded shape for checking bucket size.
            check_dim = relay.op.take(relay.op.shape_of(input_data[0]), relay.const(axis))

        # Create a specialized subgraph for the current bucket.
        spec_call, spec_ty = specialize_body(
            mod, main_fn, axis, bucket, input_indices=input_indices, affects_output=affects_output
        )

        # Apply hard casting to shape to create statically typed graphs.
        spec_data = []
        for j, inp in enumerate(input_data):
            spec_data.append(relay.op.reshape(inp, spec_ty[j].shape))

        # Create a dispatch statement for the current specialized graph.
        call_args = list(main_fn.params)
        for j, inp in enumerate(input_indices):
            call_args[inp] = spec_data[j]
        new_call = spec_call(*call_args)

        # Remove meaningless padded outputs if applicable.
        if auto_pad and affects_output:
            new_call = relay.take(
                new_call,
                relay.arange(start=relay.const(0), stop=flex_value, dtype="int32"),
                axis=axis,
            )

        # Add this new case to the dispatch handler.
        if_exprs.append((relay.op.equal(check_dim, relay.const(bucket)), new_call))

    # Create a subgraph to handle all other shapes.
    default_dyn_call, _ = specialize_body(
        mod, main_fn, axis, relay.Any(), input_indices=input_indices, affects_output=affects_output
    )
    call_args = list(main_fn.params)
    for j, inp in enumerate(input_indices):
        call_args[inp] = dyn_data[j]
    new_body = default_dyn_call(*call_args)

    # Create an If chain to dispatch shapes to the appropriate specialized subgraph.
    for cond, true_branch in if_exprs:
        new_body = relay.If(cond, true_branch, new_body)

    # Assign new parameters to the function.
    new_params = list(main_fn.params)
    for j, inp in enumerate(input_indices):
        new_params[inp] = dyn_data[j]

    # Update the output shape to be dynamic if needed.
    if affects_output:
        dyn_ret_type = override_shape(main_fn.ret_type, axis, relay.Any())
    else:
        dyn_ret_type = main_fn.ret_type

    # Assign the handler as the new entrypoint in the module.
    new_main = relay.Function(
        new_params, new_body, dyn_ret_type, main_fn.type_params, main_fn.attrs
    )
    mod["main"] = new_main

    # Do type inference to make sure everything worked.
    mod = relay.transform.InferType()(mod)
    return mod
class FlexibleShapeDispatch(object):
"""Enable inference of multiple shaped inputs in one module.
This transformation adds a handler around a module that
checks input shapes and dispatches to a subgraph specialized
to handle the specific shapes of that input. If no exactly matching
subgraph is available, the input will be run using full dynamism.
For best performance, specify all the sizes the module will
be likely to see using the buckets argument.
By default, this pass will dispatch shapes that exactly match one
of the buckets to a corresponding subgraph. All non-matching shapes
use the same fully dynamic fallback. This can be detrimental to performance
for those non-matching shapes. Setting auto_pad to True causes this
pass to round-up the shape of non-matching inputs to the closest
bucket. This allows them to use the tuned kernels of bucket shapes
which can improve performance.
Models that have multiple inputs sharing a dynamic axis, which
is common for batch size or sequence length dynamism, are supported
through the input_indices argument.
Many types of dynamism such as batching affect both the input and output
shape, however this is not always the case. If the output shape
is independent of the input, the affects_output argument of this
pass must be set to False.
Parameters
----------
buckets: list[int]
The sizes of the input dimension that should be explicitly handled.
Each value in buckets will have a corresponding subgraph constructed to
handle it.
axis: int
The dimension of the input that should be made flexible. This will
most often be used for the batch dimension.
auto_pad: Optional[bool]
If True, then padding will be inserted to values that don't match one of
the provided buckets.
pad_value: Optional[float]
When auto_pad is true, padding will be done with this value.
input_indices: Optional[List[int]]
Which inputs should be dispatched dynamically, provided by index. All inputs
must share the same dynamic axis.
affects_output: Optional[bool]
Whether the change in input shape has a corresponding effect on the output shape.
Batching for example effects both the input and output whereas changing sequence
length in an NLP model typically does not.
Returns
-------
ret : FlexibleShapeDispatch
A pass that can be applied to a module to add flexible shape handling.
"""
    def __init__(
        self,
        buckets,
        axis=0,
        auto_pad=False,
        pad_value=0,
        input_indices=None,
        affects_output=True,
    ):
        """Store the dispatch configuration; see the class docstring for the
        meaning of each argument. No rewriting happens until __call__."""
        self.axis = axis
        self.buckets = buckets
        self.auto_pad = auto_pad
        self.pad_value = pad_value
        self.input_indices = input_indices
        self.affects_output = affects_output
        super(FlexibleShapeDispatch, self).__init__()
    def __call__(self, mod):
        """Apply flexible-shape dispatch to *mod* and return the rewritten module."""
        # Shape information is required for this pass.
        mod = relay.transform.InferType()(mod)
        return flexible_dispatch(
            mod,
            self.buckets,
            self.axis,
            self.auto_pad,
            self.pad_value,
            self.input_indices,
            self.affects_output,
        )
| 15,463 | 40.794595 | 99 | py |
tvm | tvm-main/python/tvm/relay/transform/memory_plan.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return,invalid-name,len-as-condition,too-many-nested-blocks
"""
A pass for manifesting explicit memory allocations.
"""
from typing import Optional, Dict, List, Tuple
from collections import defaultdict
import attr
from ..expr_functor import ExprMutator
from .. import op, expr
from ..function import Function
from ... import register_func, ir, cpu
from ..._ffi.runtime_ctypes import Device
from ... import IRModule
from .. import transform
from . import function_pass
def is_primitive(call):
    """Return True iff *call* is a call whose op carries the attribute
    Primitive == 1 (i.e. a fused/primitive function call)."""
    if not hasattr(call, "op"):
        return False
    if not hasattr(call.op, "attrs"):
        return False
    if not hasattr(call.op.attrs, "Primitive"):
        return False
    return int(call.op.attrs.Primitive) == 1
@attr.s(auto_attribs=True)
class Region:
    """
    Represents a control-free allocation region.
    The below pass groups sets of allocations into regions,
    then replaces the region with a single allocation.
    """
    # Variable that will be bound to the single backing storage allocation.
    var: expr.Var
    # Running total size of the region, as a Relay expression.
    size: expr.Expr
    # Common alignment of all allocations in the region; None until first grow().
    alignment: Optional[expr.Expr]
    # Common dtype of all allocations in the region; None until first grow().
    dtype: Optional[str]
    # Device the region lives on; None until first grow().
    device: Device
    # Maps each original storage var to (offset variable, offset expression).
    offsets: Dict[expr.Var, Tuple[expr.Expr, expr.Expr]]
    @staticmethod
    def empty(region_no):
        """Create a fresh zero-sized region named after *region_no*."""
        zero = expr.const(0, dtype="int64")
        assert len(zero.data.shape) == 0
        region_var = expr.var(f"region{region_no}")
        return Region(region_var, zero, None, None, None, {})
    def grow(
        self,
        old_storage: expr.Var,
        size: expr.Expr,
        alignment: expr.Expr,
        dev: Device,
        dtype: str,
    ) -> None:
        """Grow the region by a given allocation as well as track the old storage
        for later rewriting the program to use the allocated region.
        """
        if self.dtype:
            assert self.dtype == dtype, "must have matching dtypes in a region"
        else:
            self.dtype = dtype
        if self.alignment:
            assert ir.structural_equal(
                self.alignment, alignment
            ), "must have matching alignments in a region"
        else:
            self.alignment = alignment
        if self.device:
            assert (
                self.device.device_type == dev.device_type
                and self.device.device_id == dev.device_id
            ), "must have matching device"
        else:
            assert dev
            self.device = dev
        # Round the requested size up to the next multiple of the alignment.
        new_size = (
            (size + self.alignment - expr.const(1, "int64")) / self.alignment * self.alignment
        )
        # Record the offset at which we allocate the storage.
        offset_var: expr.RelayExpr = expr.var(f"offset{len(self.offsets)}")
        self.offsets[old_storage] = (offset_var, self.size)
        self.size = self.size + new_size
    def offset_for(self, alloc: expr.Expr) -> expr.Expr:
        """Return the offset variable for *alloc*, or None if not in this region."""
        return self.offsets.get(alloc, [None])[0]
    def to_expr(self, body: expr.Expr) -> expr.Expr:
        """
        Generate the prelude code for a region, wrapping the body in it.
        The prelude contains the single allocation for a region, and
        all offset computations.
        """
        if self.device is None:
            self.device = cpu(0)
        # Generate bindings for each and every size computation
        # we must do this to maintain ANF.
        bindings: List[Tuple[expr.Expr, expr.Expr]] = []
        # First compute the total size.
        total_size = expr.var(f"total_size{hash(body)}")
        bindings.append((total_size, self.size))
        # Allocate the entire region with a single call.
        alloc = op.memory.alloc_storage(total_size, self.alignment, self.device, self.dtype)
        bindings.append((self.var, alloc))
        # Generate variables which contain all of the offset math.
        # Ensure we constant evaluate away all the math here.
        #
        # In theory we can support dynamic offsets but this
        # requires another round of memory planning and
        # potentially colaescing.
        for alloc in self.offsets:
            (var, offset) = self.offsets[alloc]
            bindings.append((var, offset))
        body = mk_let(bindings, body)
        return body
def iterative_let(let, each_binding, kont):
    """Iteratively unroll a chain of nested Lets.

    Applies *each_binding* to every (var, value) pair in order, then passes
    the collected results plus the innermost body to the continuation *kont*.
    """
    collected = []
    current = let
    while isinstance(current, expr.Let):
        collected.append(each_binding(current.var, current.value))
        current = current.body
    return kont(collected, current)
def mk_let(bindings, body):
    """Wrap *body* in nested Lets, one per (var, value) pair.

    The last binding becomes the innermost Let.
    """
    result = body
    for binder, bound_value in reversed(bindings):
        # Guard against threading through empty/None nodes.
        assert binder
        assert bound_value
        assert result
        result = expr.Let(binder, bound_value, result)
    return result
def const_eval(mod, exp):
    """Constant-fold *exp* (reusing *mod*'s type definitions) and return the
    folded "main" function."""
    mod = IRModule.from_expr(exp, type_defs=mod.type_definitions)
    mod = transform.FoldConstant()(mod)
    return mod["main"]
class StorageCoalesce(ExprMutator):
    """
    A pass for coalescing allocations into region/arena allocations.
    After this pass each allocation comes from the same backing storage,
    but will never overlap even in time, i.e. the allocations are just
    packed into a contiguous block of memory.
    A secondary part of memory planning will perform liveness analysis to
    overlap these in time, i.e when an early tensor dies we will attempt
    to reuse its slot.
    """
    def __init__(self):
        super().__init__()
        # Stack of scopes; each scope maps a dtype to its Region.
        self.regions = []
    def enter_scope(self) -> None:
        """Push a new scope whose per-dtype regions are created lazily."""
        region_no = len(self.regions)
        self.regions.append(defaultdict(lambda: Region.empty(region_no)))
    def exit_scope(self, body: expr.Expr) -> expr.Expr:
        """When leaving a scope build a region allocation for the scope."""
        dtype_region = self.regions.pop()
        for _, region in reversed(list(dtype_region.items())):
            # Only emit a prelude for regions that actually own allocations.
            if len(region.offsets) != 0:
                body = region.to_expr(body)
        return body
    def current_region(self, dtype) -> Region:
        """Return (creating if necessary) the innermost region for *dtype*."""
        current_scope = self.regions[-1]
        return current_scope[dtype]
    def new_region_and_offset(self, old_storage):
        """Find the region that owns *old_storage* and its offset within it."""
        for dtype_region in reversed(self.regions):
            for dtype in dtype_region:
                region = dtype_region[dtype]
                offset = region.offset_for(old_storage)
                if offset:
                    return region, offset
        raise Exception("could not find offset in any valid region")
    def visit_function(self, fn):
        """Transform the function body to use region allocation scheme."""
        func = fn
        if getattr(func.attrs, "Primitive", 0) == 1:
            # Primitive (fused) functions contain no allocations to plan.
            return super().visit_function(func)
        else:
            self.enter_scope()
            body = self.visit(func.body)
            body = self.exit_scope(body)
            return Function(
                func.params,
                body,
                func.ret_type,
                func.type_params,
                func.attrs,
            )
    def visit_if(self, ite):
        """Each branch of an if gets its own allocation scope."""
        self.enter_scope()
        true_branch = self.visit(ite.true_branch)
        true_branch = self.exit_scope(true_branch)
        self.enter_scope()
        false_branch = self.visit(ite.false_branch)
        false_branch = self.exit_scope(false_branch)
        return expr.If(ite.cond, true_branch, false_branch)
    def mk_let(self, dynamic_regions):
        """Let bind the dynamic regions"""
        def _mk_let(bindings, body):
            for var, value in reversed(bindings):
                assert var
                assert value is not None
                assert body
                body = expr.Let(var, value, body)
                # Close the scope opened for a dynamically sized region as
                # soon as its binding is emitted.
                if var in dynamic_regions:
                    body = self.exit_scope(body)
            return body
        return _mk_let
    def visit_let(self, let):
        """Rewrite alloc_storage/alloc_tensor bindings in a let chain."""
        dynamic_regions = []
        def _each_binding(lhs, rhs):
            if isinstance(rhs, expr.Call) and rhs.op == op.op.get("memory.alloc_storage"):
                return self.process_alloc_storage(dynamic_regions, lhs, rhs)
            elif isinstance(rhs, expr.Call) and rhs.op == op.op.get("memory.alloc_tensor"):
                return self.process_alloc_tensor(lhs, rhs)
            else:
                return lhs, rhs
        result = iterative_let(let, _each_binding, self.mk_let(dynamic_regions))
        assert result
        return result
    def process_alloc_storage(self, dynamic_regions, lhs, call):
        """Process alloc_storage: fold the allocation into the current region."""
        size, alignment = call.args
        dtype = call.attrs.dtype
        dev = Device(call.attrs.device_type, call.attrs.device_id)
        if not isinstance(size, expr.Constant):
            # Dynamically sized storage gets its own scope, closed by
            # mk_let above once the binding is emitted.
            self.enter_scope()
            dynamic_regions.append(lhs)
        else:
            # A new scope is created when entering a new region with different
            # device device.
            region = self.current_region(dtype)
            if region.device and region.device.device_type != dev.device_type:
                self.enter_scope()
                dynamic_regions.append(lhs)
        # Re-fetch the region after any scope changes. Previously the re-fetch
        # only happened inside the else-branch, so the dynamic-size path
        # reached region.grow() with `region` unbound (NameError).
        region = self.current_region(dtype)
        region.grow(lhs, size, alignment, dev, dtype)
        return lhs, region.var
    def process_alloc_tensor(self, lhs, call):
        """Process alloc tensor. Region and offset are computed"""
        storage, old_offset, shape = call.args
        region, offset = self.new_region_and_offset(storage)
        assert old_offset.data.numpy().item() == 0, "no offsets should yet be allocated"
        return (
            lhs,
            expr.Call(call.op, [region.var, offset, shape], call.attrs),
        )
class LiftConst(ExprMutator):
    """An internal pass to lift constants to the top level of function."""
    def __init__(self):
        # Counter used to generate unique names for lifted constants.
        self.i = 0
        # Pending (var, constant) bindings for the function being visited.
        self.constants = []
        self.top_level = True
        super().__init__()
    def visit_constant(self, const):
        """Replace a constant with a fresh variable and queue the binding."""
        var = expr.var(f"const{self.i}")
        self.i += 1
        self.constants.append((var, const))
        return var
    def visit_function(self, fn):
        """Bind all constants found in *fn*'s body at its top via Lets."""
        if int(getattr(fn.attrs, "Primitive", 0)) == 1:
            # Primitive (fused) functions are left untouched.
            return fn
        # Save/restore the enclosing function's pending constants so nested
        # functions get their own binding scope.
        outer_constant = self.constants
        self.constants = []
        # Populates self.constants.
        body = self.visit(fn.body)
        body = mk_let(self.constants, body)
        self.constants = outer_constant
        return Function(fn.params, body, fn.ret_type, fn.type_params, fn.attrs)
    def visit_let(self, let):
        """Rebuild a let chain, visiting each binding and the final body."""
        bindings = []
        while isinstance(let, expr.Let):
            new_var = self.visit(let.var)
            new_val = self.visit(let.value)
            bindings.append((new_var, new_val))
            let = let.body
        new_body = self.visit(let)
        return mk_let(bindings, new_body)
@function_pass(opt_level=0)
class MemoryPlan:
    """An explicit pass wrapper around StorageCoalesce."""
    def transform_function(self, func, mod, _):
        """Coalesce *func*'s tensor allocations into region allocations."""
        mod.import_from_std("core.rly")
        sc = StorageCoalesce()
        func = sc.visit(func)
        return func
# Expose the pass to the C++ side by name.
register_func("relay.transform.MemoryPlan", MemoryPlan)
@function_pass(opt_level=0)
class LiftConstants:
    """An explicit pass wrapper around LiftConst."""
    def transform_function(self, func, mod, _):
        """Lift all constants in *func* to let bindings at the function top."""
        mod.import_from_std("core.rly")
        func = LiftConst().visit(func)
        return func
# Expose the pass to the C++ side by name.
register_func("relay.transform.LiftConstants", LiftConstants)
| 12,036 | 30.928382 | 94 | py |
tvm | tvm-main/python/tvm/relay/transform/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin, invalid-name
"""The Relay IR namespace containing transformations."""
# transformation passes
from .transform import *
from .recast import recast
from . import fake_quantization_to_integer, mixed_precision
from .flexible_shape import FlexibleShapeDispatch
| 1,095 | 44.666667 | 66 | py |
tvm | tvm-main/python/tvm/relay/transform/fake_quantization_to_integer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Relay functions for rewriting fake quantized ops."""
import numpy as np
import tvm
from tvm import relay
from tvm.ir import TensorAffineType, TupleAffineType
# import to register canonicalization funcs for fq2i
# pylint: disable=unused-import
from tvm.relay.qnn.op import canonicalizations
from tvm.tir import bijective_layout
from ..op import (
register_fake_quantization_to_integer,
register_optional_fake_quantization_to_integer,
)
def fold_constant(expr):
    """Constant-fold *expr* in a fresh module and return the folded expression."""
    return relay.transform.FoldConstantExpr(expr, tvm.IRModule())
def get_zeros(scale):
    """Return a folded int32 zero tensor shaped like *scale* (for zero points)."""
    return fold_constant(relay.op.cast(relay.op.zeros_like(scale), "int32"))
def infer_shape(expr):
    """Run type inference on *expr* and return its checked shape."""
    return relay.transform.InferType()(tvm.IRModule.from_expr(expr))["main"].body.checked_type.shape
def approx_equal(x, y):
    """Return True if *x* and *y* are equal after constant folding.

    If both fold to constants they are compared numerically with a
    tolerance; otherwise they are compared structurally.
    """
    x = fold_constant(x)
    y = fold_constant(y)
    if isinstance(x, relay.Constant) and isinstance(y, relay.Constant):
        # .numpy() for consistency with the rest of this file; .asnumpy()
        # is the deprecated spelling of the same accessor.
        equal = np.allclose(x.data.numpy(), y.data.numpy())
    else:
        equal = tvm.ir.structural_equal(x, y)
    return equal
@register_fake_quantization_to_integer("qnn.dequantize")
def dequantize(expr, type_map):
    """Remove a dequantize op, forwarding its quantized input together with
    the dequantize node's own affine type."""
    quantized_input = expr.args[0]
    affine = type_map[expr]
    return [quantized_input, affine]
@register_fake_quantization_to_integer("qnn.quantize")
def quantize(expr, type_map):
    """Turn a quantize op into requantize or remove it"""
    out = expr.args[0]
    t = type_map[out]
    in_scale = fold_constant(t.scale)
    in_zero_point = fold_constant(t.zero_point)
    # Only insert a requantize when the incoming affine parameters differ
    # from the ones this quantize op requests; otherwise it is a no-op.
    if not (
        approx_equal(in_scale, expr.args[1])
        and approx_equal(in_zero_point, expr.args[2])
        and tvm.ir.structural_equal(t.dtype, expr.attrs.out_dtype)
    ):
        out = relay.qnn.op.requantize(
            out,
            in_scale,
            in_zero_point,
            expr.args[1],
            expr.args[2],
            out_dtype=expr.attrs.out_dtype,
            axis=t.axis,
        )
    return [
        out,
        TensorAffineType(expr.args[1], expr.args[2], expr.attrs.out_dtype, expr.attrs.axis),
    ]
def register_unary_identity(op_name):
    """Register *op_name* as affine-type-preserving: the op runs directly on
    the quantized values and passes its input's affine type through."""
    def identity(expr, type_map):
        assert len(expr.args) == 1
        arg = expr.args[0]
        t = type_map[arg]
        return [expr, t]
    return register_fake_quantization_to_integer(op_name, identity)
# These ops neither rescale nor mix values across different scales, so they
# are safe to run unchanged on the quantized representation.
register_unary_identity("reshape")
register_unary_identity("squeeze")
register_unary_identity("strided_slice")
register_unary_identity("transpose")
register_unary_identity("expand_dims")
register_unary_identity("nn.max_pool2d")
register_unary_identity("nn.batch_flatten")
register_unary_identity("nn.depth_to_space")
register_unary_identity("max")
register_unary_identity("min")
register_unary_identity("image.resize2d")
@register_fake_quantization_to_integer("nn.avg_pool2d")
def avgpool2d(expr, type_map):
    """Rewrite an avgpool op to qnn.avg_pool2d using the input and output
    affine types observed by the pass."""
    attrs = {**expr.attrs}
    arg = expr.args[0]
    t = type_map[arg]
    out_t = type_map[expr]
    out = relay.qnn.op.avg_pool2d(
        arg,
        t.scale,
        t.zero_point,
        out_t.scale,
        out_t.zero_point,
        attrs["pool_size"],
        attrs["strides"],
        attrs["padding"],
        attrs["dilation"],
        attrs["ceil_mode"],
        attrs["count_include_pad"],
        attrs["layout"],
    )
    return [out, TensorAffineType(out_t.scale, out_t.zero_point, out_t.dtype, out_t.axis)]
@register_fake_quantization_to_integer("nn.adaptive_avg_pool1d")
def adaptive_avgpool1d(expr, type_map):
    """Rewrite an adaptive avgpool op"""
    arg = expr.args[0]
    t = type_map[arg]
    out_t = type_map[expr]
    # Bring the input onto the output affine parameters, widening to int32
    # so the averaging accumulates without overflow.
    if not (
        approx_equal(t.scale, out_t.scale)
        and approx_equal(t.zero_point, out_t.zero_point)
        and tvm.ir.structural_equal(t.dtype, out_t.dtype)
    ):
        arg = relay.qnn.op.requantize(
            arg,
            t.scale,
            t.zero_point,
            out_t.scale,
            out_t.zero_point,
            out_dtype="int32",
            axis=t.axis,
        )
    else:
        arg = relay.op.cast(arg, "int32")
    output_size = expr.attrs.output_size
    out = relay.op.nn.adaptive_avg_pool1d(arg, output_size)
    return [out, TensorAffineType(out_t.scale, out_t.zero_point, "int32", out_t.axis)]
@register_fake_quantization_to_integer("nn.global_avg_pool2d")
def global_avgpool2d(expr, type_map):
    """Rewrite a global_avgpool op.

    The input is requantized (or cast) to int32 so the averaging accumulates
    without overflow, then the float-graph pooling op is reused.
    """
    arg = expr.args[0]
    t = type_map[arg]
    # (The original duplicated this lookup; one assignment suffices.)
    out_t = type_map[expr]
    if not (
        approx_equal(t.scale, out_t.scale)
        and approx_equal(t.zero_point, out_t.zero_point)
        and tvm.ir.structural_equal(t.dtype, out_t.dtype)
    ):
        arg = relay.qnn.op.requantize(
            arg,
            t.scale,
            t.zero_point,
            out_t.scale,
            out_t.zero_point,
            out_dtype="int32",
            axis=t.axis,
        )
    else:
        arg = relay.op.cast(arg, "int32")
    out = relay.op.nn.global_avg_pool2d(arg)
    return [out, TensorAffineType(out_t.scale, out_t.zero_point, "int32", out_t.axis)]
@register_fake_quantization_to_integer("broadcast_to")
def broadcast_to(expr, type_map):
    """Rewrite a broadcast_to op; broadcasting replicates values and so
    leaves the input's affine type unchanged."""
    data = expr.args[0]
    affine = type_map[data]
    out = relay.op.broadcast_to(data, expr.attrs.shape)
    return [out, affine]
@register_fake_quantization_to_integer("nn.bias_add")
def bias_add(expr, type_map):
    """Rewrite a bias_add op so the bias shares the data's affine type."""
    x, b = expr.args
    x_t = type_map[x]
    if b in type_map:
        # Ensure bias matches the previous op
        b_t = type_map[b]
        in_scale = fold_constant(x_t.scale)
        in_zero_point = fold_constant(x_t.zero_point)
        if not (
            approx_equal(x_t.scale, b_t.scale)
            and approx_equal(x_t.zero_point, b_t.zero_point)
            and tvm.ir.structural_equal(x_t.dtype, b_t.dtype)
        ):
            b = relay.qnn.op.requantize(
                b, b_t.scale, b_t.zero_point, in_scale, in_zero_point, out_dtype=x_t.dtype, axis=0
            )
    else:
        # If the bias is a constant, we need to quantize it
        assert isinstance(b, relay.expr.Constant)
        assert b.checked_type.dtype in ["float32", "float64", "float16", "bfloat16"]
        b = relay.qnn.op.quantize(b, x_t.scale, x_t.zero_point, axis=0, out_dtype=x_t.dtype)
    out = relay.op.nn.bias_add(x, b, **expr.attrs)
    return [out, x_t]
@register_fake_quantization_to_integer("nn.conv2d")
def conv2d(expr, type_map):
    """Rewrite a conv2d op"""
    attrs = {**expr.attrs}
    attrs.pop("out_dtype")
    x, weight = expr.args
    x_t = type_map[x]
    w_t = type_map[weight]
    # Output scale is the product of input and weight scales; zero point is 0.
    conv_scale = fold_constant(x_t.scale * w_t.scale)
    conv_zp = get_zeros(conv_scale)
    out = relay.qnn.op.conv2d(
        x, weight, x_t.zero_point, w_t.zero_point, x_t.scale, w_t.scale, **attrs
    )
    # The per-channel axis is wherever "C" lands in the output layout.
    out_layout = attrs["out_layout"] if attrs["out_layout"] != "" else attrs["data_layout"]
    out_axis = bijective_layout(out_layout, "NCHW").backward_index(list(range(4)))[1]
    return [out, TensorAffineType(conv_scale, conv_zp, out.attrs.out_dtype, out_axis.value)]
@register_fake_quantization_to_integer("nn.conv2d_transpose")
def conv2d_transpose(expr, type_map):
    """Rewrite a conv2d_transpose op"""
    attrs = {**expr.attrs}
    attrs.pop("out_dtype")
    x, weight = expr.args
    x_t = type_map[x]
    w_t = type_map[weight]
    # Output scale is the product of input and weight scales; zero point is 0.
    conv_scale = fold_constant(x_t.scale * w_t.scale)
    conv_zp = get_zeros(conv_scale)
    out = relay.qnn.op.conv2d_transpose(
        x, weight, x_t.zero_point, w_t.zero_point, x_t.scale, w_t.scale, **attrs
    )
    # The per-channel axis is wherever "C" lands in the output layout.
    out_layout = attrs["out_layout"] if attrs["out_layout"] != "" else attrs["data_layout"]
    out_axis = bijective_layout(out_layout, "NCHW").backward_index(list(range(4)))[1]
    return [out, TensorAffineType(conv_scale, conv_zp, out.attrs.out_dtype, out_axis.value)]
@register_fake_quantization_to_integer("nn.dense")
def dense(expr, type_map):
    """Rewrite a dense op"""
    attrs = {**expr.attrs}
    attrs.pop("out_dtype")
    x, weight = expr.args
    x_t = type_map[x]
    w_t = type_map[weight]
    # Output scale is the product of input and weight scales; zero point is 0.
    # Axis 1 is the output-units dimension of dense's (batch, units) result.
    dense_scale = fold_constant(x_t.scale * w_t.scale)
    dense_zp = get_zeros(dense_scale)
    out = relay.qnn.op.dense(
        x, weight, x_t.zero_point, w_t.zero_point, x_t.scale, w_t.scale, **attrs
    )
    return [out, TensorAffineType(dense_scale, dense_zp, out.attrs.out_dtype, 1)]
@register_fake_quantization_to_integer("nn.batch_matmul")
def batch_matmul(expr, type_map):
    """Rewrite a batch_matmul op"""
    x, y = expr.args
    x_t = type_map[x]
    y_t = type_map[y]
    # Output scale is the product of the operand scales; zero point is 0.
    matmul_scale = fold_constant(x_t.scale * y_t.scale)
    matmul_zp = relay.const(0)
    out = relay.qnn.op.batch_matmul(x, y, x_t.zero_point, y_t.zero_point, x_t.scale, y_t.scale)
    return [out, TensorAffineType(matmul_scale, matmul_zp, out.attrs.out_dtype, x_t.axis)]
@register_fake_quantization_to_integer("concatenate")
def concat(expr, type_map):
    """Rewrite a concat op"""
    # Gather the per-branch affine parameters of the input tuple; qnn's
    # concatenate requantizes every branch onto the output parameters.
    scales = []
    zps = []
    tuple_type = type_map[expr.args[0]]
    for t in tuple_type.types:
        scales.append(t.scale)
        zps.append(t.zero_point)
    out_type = type_map[expr]
    out = relay.qnn.op.concatenate(
        expr.args[0],
        relay.Tuple(scales),
        relay.Tuple(zps),
        out_type.scale,
        out_type.zero_point,
        **expr.attrs,
    )
    return [out, out_type]
@register_fake_quantization_to_integer("topk")
def topk(expr, type_map):
    """Rewrite a topk op: selected values keep the input's affine type."""
    data = expr.args[0]
    affine = type_map[data]
    # Only the "values" return type is supported; indices carry no
    # quantization information.
    attrs = {**expr.attrs}
    assert attrs.get("ret_type") == "values"
    return [expr, affine]
@register_fake_quantization_to_integer("split")
def split(expr, type_map):
    """Rewrite a split op"""
    arg = expr.args[0]
    t = type_map[arg]
    attrs = {**expr.attrs}
    # indices_or_sections is either a section count (IntImm) or a list of
    # split points; either way every output keeps the input's affine type.
    if isinstance(attrs["indices_or_sections"], tvm.tir.IntImm):
        num_split = attrs["indices_or_sections"].value
        attrs["indices_or_sections"] = num_split
    else:
        num_split = len(attrs["indices_or_sections"]) + 1
    return [expr, TupleAffineType([t] * num_split)]
@register_fake_quantization_to_integer("clip")
def clip(expr, type_map):
    """Rewrite a clip op"""
    arg = expr.args[0]
    t = type_map[arg]
    amin = expr.attrs.a_min
    amax = expr.attrs.a_max
    scale = fold_constant(t.scale)
    z_p = fold_constant(t.zero_point)
    # Fast path: scalar scale/zero point let us fold the bounds into
    # integer clip limits directly.
    if (
        isinstance(scale, relay.expr.Constant)
        and scale.data.numpy().size == 1
        and isinstance(z_p, relay.expr.Constant)
        and z_p.data.numpy().size == 1
    ):
        scale = scale.data.numpy().item()
        z_p = z_p.data.numpy().item()
        new_min = int(amin / scale + z_p)
        new_max = int(amax / scale + z_p)
        out = relay.op.clip(arg, new_min, new_max)
    else:
        # Per-channel case: quantize the bounds with the input's affine
        # parameters and clamp via elementwise min/max instead.
        if not isinstance(amin, relay.expr.Constant):
            amin = relay.op.const(amin)
        if not isinstance(amax, relay.expr.Constant):
            amax = relay.op.const(amax)
        scale_shape = infer_shape(scale)
        if len(scale_shape) > 0 and scale_shape[0] > 1:
            b_shape = [1] * len(infer_shape(arg))
            b_shape[t.axis] = -1
            amin = relay.op.reshape(relay.op.broadcast_to(amin, scale_shape), b_shape)
            amax = relay.op.reshape(relay.op.broadcast_to(amax, scale_shape), b_shape)
        amin = relay.qnn.op.quantize(amin, scale, z_p, t.axis, t.dtype)
        amax = relay.qnn.op.quantize(amax, scale, z_p, t.axis, t.dtype)
        out = relay.op.minimum(relay.op.maximum(arg, fold_constant(amin)), fold_constant(amax))
    return [out, t]
@register_fake_quantization_to_integer("nn.relu")
def relu(expr, type_map):
    """Rewrite a relu op: in quantized space relu is max(x, zero_point)."""
    arg = expr.args[0]
    t = type_map[arg]
    scale_shape = infer_shape(t.scale)
    z_p = t.zero_point
    assert len(scale_shape) <= 1
    # Per-channel zero points must be reshaped so they broadcast along the
    # quantization axis.
    if len(scale_shape) == 1 and scale_shape[0] > 1:
        b_shape = [1] * len(infer_shape(arg))
        b_shape[t.axis] = -1
        z_p = relay.op.reshape(relay.op.broadcast_to(z_p, scale_shape), b_shape)
    zero = relay.op.cast(z_p, t.dtype)
    return [relay.op.maximum(arg, fold_constant(zero)), t]
@register_fake_quantization_to_integer("nn.leaky_relu")
def leaky_relu(expr, type_map):
    """Rewrite a leaky relu op"""
    arg = expr.args[0]
    x_t = type_map[arg]
    out_t = type_map[expr]
    alpha = expr.attrs.alpha
    # NOTE(review): this passes `expr` (the original fp leaky_relu call)
    # rather than `arg` into qnn.leaky_relu — confirm this is intended.
    output = relay.qnn.op.leaky_relu(
        expr, alpha, x_t.scale, x_t.zero_point, out_t.scale, out_t.zero_point
    )
    return [output, x_t]
@register_fake_quantization_to_integer("nn.pad")
def pad(expr, type_map):
    """Rewite an nn.pad op"""
    arg = expr.args[0]
    t = type_map[arg]
    pad_value = expr.args[1]
    # TF2ONNX will sometimes implement the pad_value as a constant without a quantize
    # To support that, the pass lets branches that terminate in a constant through
    if pad_value in type_map:
        # if the pad value is calcuated from a dequantize op, it should be in the type map
        # and we need to make sure it's affine type matches the arg
        pad_t = type_map[pad_value]
        if not tvm.ir.structural_equal(t, pad_t):
            pad_value = relay.qnn.op.requantize(
                pad_value,
                pad_t.scale,
                pad_t.zero_point,
                t.scale,
                t.zero_point,
                out_dtype=t.dtype,
                axis=pad_t.axis,
            )
    else:
        # If the pad-value is a constant, we need to quantize it
        assert isinstance(pad_value, relay.expr.Constant)
        assert pad_value.checked_type.dtype in ["float32", "float64", "float16", "bfloat16"]
        # NOTE(review): no out_dtype is passed here, so quantize uses its
        # default output dtype — confirm this always matches t.dtype.
        pad_value = relay.qnn.op.quantize(pad_value, t.scale, t.zero_point)
    out = relay.op.nn.pad(arg, pad_value=pad_value, **expr.attrs)
    return [out, t]
@register_fake_quantization_to_integer("mean")
def mean(expr, type_map):
    """Rewrite a mean op, accumulating in int32 and casting back to the
    input's quantized dtype."""
    data = expr.args[0]
    affine = type_map[data]
    # Widen before averaging so the sum cannot overflow the narrow dtype.
    widened = relay.op.cast(data, "int32")
    averaged = relay.op.mean(widened, **expr.attrs)
    out = relay.op.cast(averaged, affine.dtype)
    return [out, affine]
def get_binary_types(expr, type_map):
    """Get Affine types of a binary op's inputs and unify them"""
    # Support the case where one input is quantized and the other is a constant float
    left = expr.args[0]
    right = expr.args[1]
    left_t = None
    right_t = None
    if left in type_map:
        left_t = type_map[left]
    if right in type_map:
        right_t = type_map[right]
    out_t = type_map[expr]
    if left_t is None and right_t is None:
        raise TypeError("neither input is quantized!")
    # Quantize a constant operand using the other operand's affine type.
    if left_t is None:
        assert isinstance(left, relay.expr.Constant)
        left = relay.qnn.op.quantize(
            left, right_t.scale, right_t.zero_point, out_dtype=right_t.dtype
        )
        left_t = right_t
    if right_t is None:
        assert isinstance(right, relay.expr.Constant)
        right = relay.qnn.op.quantize(
            right, left_t.scale, left_t.zero_point, out_dtype=left_t.dtype
        )
        right_t = left_t
    # Handle the case of mismatched inputs
    if not left_t.dtype == out_t.dtype:
        out_t = left_t
    return left, right, left_t, right_t, out_t
def register_binary_qnn(op_name, op):
    """Register a Binary Op that converts to QNN"""
    def binary(expr, type_map):
        """Rewrite a binary op to its qnn equivalent using unified affine types."""
        left, right, left_t, right_t, out_t = get_binary_types(expr, type_map)
        if (
            op_name == "add"
            and approx_equal(left_t.scale, right_t.scale)
            and approx_equal(left_t.zero_point, right_t.zero_point)
            and tvm.ir.structural_equal(left_t.dtype, right_t.dtype)
            and left_t.dtype == "int32"
            and approx_equal(left_t.scale, out_t.scale)
            and approx_equal(left_t.zero_point, out_t.zero_point)
            and np.all(out_t.zero_point.data.numpy() == 0)
        ):
            # If this add op comes after conv2d or dense, out_t.scale and out_t.zero_point
            # can be a vector, which is not supported by QNN binary operators.
            # In particular, the pattern of an `add` op following `dense`, where the addition is
            # really a bias addtion, can come up often. We identify that pattern and convert it to
            # `qnn.dense` -> `add`.
            # To avoid overflow, we do this conversion only when the input data type is 32 bit (bias
            # addition is typically done in 32 bit).
            return [left + right, left_t]
        assert len(out_t.scale.data.shape) == 0, (
            f"The output scale needs to be a scalar, but got a tensor of shape "
            f"{out_t.scale.data.shape}"
        )
        assert len(out_t.zero_point.data.shape) == 0, (
            f"The output zero point needs to be a scalar, but got a tensor of shape "
            f"{out_t.zero_point.data.shape}"
        )
        out = op(
            left,
            right,
            left_t.scale,
            left_t.zero_point,
            right_t.scale,
            right_t.zero_point,
            out_t.scale,
            out_t.zero_point,
            left_t.axis,
            right_t.axis,
        )
        return [out, out_t]
    return register_fake_quantization_to_integer(op_name, binary)
# Use lambdas here to avoid a circular import problem
# pylint: disable=unnecessary-lambda
register_binary_qnn("add", lambda *args: relay.qnn.op.add(*args))
register_binary_qnn("multiply", lambda *args: relay.qnn.op.mul(*args))
register_binary_qnn("subtract", lambda *args: relay.qnn.op.subtract(*args))
def register_binary_identity(op_name, op):
    """Register a binary op that works directly on int8"""
    def binary(expr, type_map):
        """Requantize both operands onto the output affine type, then apply *op*."""
        left, right, left_t, right_t, out_t = get_binary_types(expr, type_map)
        if left_t != out_t:
            left = relay.qnn.op.requantize(
                left,
                left_t.scale,
                left_t.zero_point,
                out_t.scale,
                out_t.zero_point,
                out_dtype=out_t.dtype,
                axis=left_t.axis,
            )
        if right_t != out_t:
            right = relay.qnn.op.requantize(
                right,
                right_t.scale,
                right_t.zero_point,
                out_t.scale,
                out_t.zero_point,
                out_dtype=out_t.dtype,
                axis=right_t.axis,
            )
        out = op(left, right)
        return [out, out_t]
    return register_fake_quantization_to_integer(op_name, binary)
# min/max select values, so once operands share an affine type they can run
# directly on the quantized representation.
register_binary_identity("minimum", relay.op.minimum)
register_binary_identity("maximum", relay.op.maximum)
def register_unary_qnn(op_name, op):
    """Register a unary op that converts to its qnn lookup-table equivalent."""
    def unary(expr, type_map):
        arg = expr.args[0]
        x_t = type_map[arg]
        out_t = type_map[expr]
        out = op(arg, x_t.scale, x_t.zero_point, out_t.scale, out_t.zero_point)
        return [out, out_t]
    return register_fake_quantization_to_integer(op_name, unary)
register_unary_qnn("sqrt", relay.qnn.op.sqrt)
register_unary_qnn("rsqrt", relay.qnn.op.rsqrt)
register_unary_qnn("exp", relay.qnn.op.exp)
register_unary_qnn("erf", relay.qnn.op.erf)
register_unary_qnn("sigmoid", relay.qnn.op.sigmoid)
register_unary_qnn("hardswish", relay.qnn.op.hardswish)
register_unary_qnn("tanh", relay.qnn.op.tanh)
register_unary_qnn("abs", relay.qnn.op.abs)
register_unary_qnn("log", relay.qnn.op.log)
@register_fake_quantization_to_integer("take")
def take(expr, type_map):
    """Rewrite a take op: gathering elements preserves the affine type."""
    data, indices = expr.args[0], expr.args[1]
    affine = type_map[data]
    out = relay.op.take(data, indices, **expr.attrs)
    return [out, affine]
@register_optional_fake_quantization_to_integer("nn.softmax")
def softmax(expr, type_map):
    """Rewrite a softmax op to qnn.softmax (optional: only when enabled)."""
    arg = expr.args[0]
    arg_t = type_map[arg]
    out_t = type_map[expr]
    out = relay.qnn.op.softmax(
        arg, arg_t.scale, arg_t.zero_point, out_t.scale, out_t.zero_point, **expr.attrs
    )
    return [out, out_t]
| 20,881 | 31.988942 | 100 | py |
tvm | tvm-main/python/tvm/relay/quantize/_calibrate.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Find scales for quantization on the dataset."""
from __future__ import absolute_import
import logging
import multiprocessing as mp
import numpy as np
import tvm
import tvm.driver
from tvm.ir import IRModule
from . import _quantize
from . import quantize
from .. import op as _op
from .. import expr as _expr
from .. import analysis as _analysis
from .. import build_module as _build_module
from ...contrib import graph_executor
from .kl_divergence import _find_scale_by_kl
def _get_profile_runtime(mod):
    """Build a graph-executor runtime whose outputs are the stats-collector
    tensors of *mod*'s main function."""
    func = mod["main"]
    # Rewrites simulated_quantize ops to identity and exposes their inputs
    # as the graph's outputs.
    func = _quantize.CreateStatsCollector(func)
    # Use the active target if one is set, otherwise fall back to llvm/CPU.
    if tvm.target.Target.current():
        target = tvm.target.Target.current()
        dev = tvm.device(target.kind.name)
    else:
        target = "llvm"
        dev = tvm.device(target)
    with tvm.transform.PassContext(opt_level=3):
        lib = _build_module.build(func, target=target)
    runtime = graph_executor.GraphModule(lib["default"](dev))
    return runtime
def collect_stats(mod, dataset, chunk_by=-1):
    """Given an annotated graph, create a profile graph to collect profile data from the
    calibration dataset. This pass collects simulated_quantize op input into a tuple.
    Simulated_quantize ops are rewritten to identity mode. The tuple is the output of the profile
    graph.
    Parameters
    ----------
    mod: Module
        The simulation graph after annotation.
    dataset: Iterable[NDArray]
        The calibration dataset.
    chunk_by: optional, int
        The size of chunk to be returned in one iteration. It is meant to be
        used for reducing memory usage. If not specified, return samples for
        all layers in one chunk.
    Returns
    -------
    ret: Iterable[list of ndarray]
        List of output data of each layer, chunked by the chunk_by parameter
    """
    logging.info("collecting statistics for calibration...")
    runtime = _get_profile_runtime(mod)
    num_outputs = runtime.get_num_outputs()
    chunk_by = num_outputs if chunk_by == -1 else chunk_by
    for i in range(0, num_outputs, chunk_by):
        # One sample bucket per output in this chunk. (The comprehension's
        # `i` only shadows the chunk index inside the comprehension scope;
        # the range argument uses the outer `i`.)
        outputs = [[] for i in range(min(chunk_by, num_outputs - i))]
        for batch in dataset:
            runtime.set_input(**batch)
            runtime.run()
            for j in range(i, min(i + chunk_by, num_outputs)):
                outputs[j - i].append(runtime.get_output(j).numpy())
        # Concatenate each layer's samples across batches and flatten.
        yield [np.concatenate(output).reshape(-1) for output in outputs]
def _kl_scale(mod, dataset):
    """Build a per-call scale function whose values come from KL-divergence calibration.

    Each invocation of the returned function yields the scale for the next
    simulated_quantize op in visit order.
    """
    cfg = quantize.current_qconfig()
    scales = []
    for samples in collect_stats(mod, dataset, cfg.calibrate_chunk_by):
        logging.info("finding threshold with kl for calibration...")
        # KL search is CPU-heavy; fan the per-layer searches out to a pool.
        with mp.Pool() as pool:
            scales.extend(pool.map(_find_scale_by_kl, samples))

    def func(_):
        """Return the next precomputed scale on each call."""
        value = scales[func.scale_idx]
        func.scale_idx += 1
        return value

    func.scale_idx = 0
    return func
def _find_scale_by_percentile(arr, percentile=0.99999):
assert isinstance(arr, np.ndarray)
x = np.abs(arr)
max_k = int(x.size * percentile)
return np.partition(x, max_k)[max_k]
def _percentile_scale(mod, dataset):
    """Build a per-call scale function calibrated with the percentile method.

    Each invocation of the returned function yields the scale for the next
    simulated_quantize op in visit order.
    """
    cfg = quantize.current_qconfig()
    scales = []
    for samples in collect_stats(mod, dataset, cfg.calibrate_chunk_by):
        logging.info("finding threshold with percentile for calibration...")
        with mp.Pool() as pool:
            scales.extend(pool.map(_find_scale_by_percentile, samples))

    def func(_):
        """Return the next precomputed scale on each call."""
        value = scales[func.scale_idx]
        func.scale_idx += 1
        return value

    func.scale_idx = 0
    return func
def _set_params(mod, input_scale_func, weight_scale_func):
    """Materialize dom_scale/clip_min/clip_max constants for every simulated_quantize op.

    ``input_scale_func`` / ``weight_scale_func`` are called once per annotated
    op to obtain its scale; the resulting constants are bound into ``main``.
    """
    quantize_op = _op.get("relay.op.annotation.simulated_quantize")
    cfg = quantize.current_qconfig()
    const_params = {}

    def visit_func(expr):
        """Record the constants for one simulated_quantize call."""
        if not (isinstance(expr, _expr.Call) and expr.op == quantize_op):
            return
        _, ndom_scale, nclip_min, nclip_max = expr.args
        kind = expr.attrs.kind
        # One bit is reserved for the sign when attrs.sign is set.
        valid_bit = cfg.get_nbit_by_kind(kind) - expr.attrs.sign
        if kind == quantize.QAnnotateKind.WEIGHT:
            assert isinstance(expr.args[0], _expr.Constant)
            scale = weight_scale_func(expr)
        else:
            scale = input_scale_func(expr)
        valid_range = 2**valid_bit
        const_params[ndom_scale] = _expr.const(scale / valid_range, "float32")
        const_params[nclip_min] = _expr.const(-(valid_range - 1), "float32")
        const_params[nclip_max] = _expr.const(valid_range - 1, "float32")

    main_func = mod["main"]
    _analysis.post_order_visit(main_func, visit_func)
    main_func = _expr.bind(main_func, const_params)
    # Preserve every non-main function of the original module.
    func_dict = {gvar: fn for gvar, fn in mod.functions.items() if gvar.name_hint != "main"}
    return IRModule.from_expr(main_func, func_dict)
# weight scale functions
def _power2_scale(sq_call):  # pylint: disable=unused-argument
    """Calculate the weight scale as the nearest power of two >= max(|weight|).

    Parameters
    ----------
    sq_call : Call
        A simulated_quantize call whose first argument is the constant weight.

    Returns
    -------
    float
        A power-of-two scale, or 1.0 when the weight tensor is all zeros.
    """
    var = sq_call.args[0]
    assert isinstance(var, _expr.Constant)
    val = np.amax(np.abs(var.data.numpy()))
    # np.math was an accidental alias of the stdlib math module and was removed
    # in NumPy 2.0; use numpy's own ceil/log2 instead.
    return 2 ** int(np.ceil(np.log2(val))) if val > 0 else 1.0
def _max_scale(sq_call):
    """Calculate the weight scale as the largest absolute weight value."""
    weight = sq_call.args[0]
    assert isinstance(weight, _expr.Constant)
    return np.abs(weight.data.numpy()).max()
# input scale functions
def _global_scale(sq_call):  # pylint: disable=unused-argument
    """Return the globally configured calibration scale, ignoring the call."""
    return quantize.current_qconfig().global_scale
def calibrate(dataset=None):
    """Create the calibration pass that fills in quantization constants.

    The returned module pass computes dom_scale, clip_min and clip_max for
    every `simulated_quantize` operator, according to the calibrate_mode and
    weight_scale settings of the current qconfig.

    Parameters
    ----------
    dataset: Optional[Iterable[NDArray]]
        The calibration dataset (required by the data-driven modes).

    Returns
    -------
    ret: Function
        The module pass function.
    """

    def wrapped_func(mod, _):
        """make transform.module pass happy"""
        cfg = quantize.current_qconfig()
        if cfg.calibrate_mode == "global_scale":
            input_scale_func = _global_scale
        elif cfg.calibrate_mode == "kl_divergence":
            input_scale_func = _kl_scale(mod, dataset)
        elif cfg.calibrate_mode == "percentile":
            input_scale_func = _percentile_scale(mod, dataset)
        else:
            raise ValueError(f"Unknown calibrate mode {cfg.calibrate_mode}")
        weight_scale_funcs = {"max": _max_scale, "power2": _power2_scale}
        if cfg.weight_scale not in weight_scale_funcs:
            raise ValueError(f"Unknown weight scale mode {cfg.weight_scale}")
        return _set_params(mod, input_scale_func, weight_scale_funcs[cfg.weight_scale])

    return wrapped_func
| 7,850 | 31.849372 | 97 | py |
tvm | tvm-main/python/tvm/relay/quantize/kl_divergence.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Find optimal scale for quantization by minimizing KL-divergence"""
import ctypes
import numpy as np
from . import _quantize
def _find_scale_by_kl(arr, quantized_dtype="int8", num_bins=8001, num_quantized_bins=255):
    """Given a tensor, find the optimal threshold for quantizing it.
    The reference distribution is `q`, and the candidate distribution is `p`.
    `q` is a truncated version of the original distribution.
    Ref:
    http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
    """
    assert isinstance(arr, np.ndarray)
    min_val = np.min(arr)
    max_val = np.max(arr)
    # Symmetric histogram around zero, half-width = largest absolute sample.
    thres = max(abs(min_val), abs(max_val))
    if min_val >= 0 and quantized_dtype in ["uint8"]:
        # We need to move negative bins to positive bins to fit uint8 range.
        num_quantized_bins = num_quantized_bins * 2 + 1
    def get_pointer(arr, ctypes_type):
        # Raw-pointer view over the array buffer for the C++ FFI call below.
        # NOTE(review): after the cast to c_void_p it is not obvious the source
        # array is kept alive; the astype() temporary passed below must survive
        # until FindScaleByKLMinimization returns -- confirm lifetime handling.
        ptr = arr.ctypes.data_as(ctypes.POINTER(ctypes_type))
        return ctypes.cast(ptr, ctypes.c_void_p)
    hist, hist_edges = np.histogram(arr, bins=num_bins, range=(-thres, thres))
    hist_ptr = get_pointer(hist.astype(np.int32), ctypes.c_int)
    # NOTE(review): np.histogram returns float64 edges, but they are passed as a
    # c_float pointer; presumably the C++ side reinterprets the buffer -- verify
    # against FindScaleByKLMinimization's expected element type.
    hist_edges_ptr = get_pointer(hist_edges, ctypes.c_float)
    return _quantize.FindScaleByKLMinimization(
        hist_ptr, hist_edges_ptr, num_bins, num_quantized_bins
    )
| 2,129 | 39.188679 | 98 | py |
tvm | tvm-main/python/tvm/relay/quantize/_partition_conversions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, not-context-manager
"""Utilities for partitioning input quantization and output dequantization expressions."""
import tvm
from tvm import relay
from tvm.relay.expr_functor import ExprMutator, ExprVisitor
# operators that are allowed in prefix/suffix partitions, because they are used
# to quantize/dequantize
ALLOWED_CONVERSION_OPS = ["add", "multiply", "right_shift", "clip", "round", "cast"]
def partition_conversions(mod, quantized_dtypes, ensure_fully_integral):
    """Partition mod into input quantization, core quantized inference, and output dequantization.

    The result additionally contains a `main` that fuses the three partitions
    together, so its behaviour matches the unpartitioned module.

    The split happens in two steps: `partition_prefix` peels off the input
    quantization expressions, then `partition_suffix` peels off the output
    dequantization from what remains, leaving the core quantized function in
    the middle.

    Parameters
    ----------
    mod : tvm.IRModule
        Quantized module to partition
    quantized_dtypes : Set[str]
        Set of data types allowed in quantized operators
    ensure_fully_integral : bool
        Whether to raise an exception if there are unquantized operators in the result

    Returns
    -------
    fused_mod : tvm.IRModule
        Module containing the input quantization (`quantize_inputs`), core
        quantized inference (`quantized_main`), output dequantization
        (`dequantize_outputs`), and full quantized inference functions
    """
    assert len(mod.functions) == 1
    prefix_mod, rest_mod = partition_prefix(mod, quantized_dtypes)
    core_mod, suffix_mod = partition_suffix(rest_mod, quantized_dtypes)
    if ensure_fully_integral:
        # Prefix/suffix may only convert; the core must be fully quantized.
        assert has_only_conversion_ops(prefix_mod["main"])
        assert relay.analysis.all_dtypes(core_mod["main"]).issubset(quantized_dtypes)
        assert has_only_conversion_ops(suffix_mod["main"])
    return fuse_partitions(prefix_mod, core_mod, suffix_mod)
def fuse_partitions(pre_mod, mid_mod, post_mod):
    """Combine prefix, middle, and suffix modules into a single module.

    The three partitions are kept as separate global functions
    (`quantize_inputs`, `quantized_main`, `dequantize_outputs`) and a new
    `main` chains them so that its behaviour is equivalent to `main` of an
    *unpartitioned* module.

    Parameters
    ----------
    pre_mod : tvm.IRModule
        Module containing an input quantization function
    mid_mod : tvm.IRModule
        Module containing core of a quantized inference function
    post_mod : tvm.IRModule
        Module containing an output dequantization function

    Returns
    -------
    fused_mod : tvm.IRModule
        Module containing the input quantization, core quantized inference,
        output dequantization, and full quantized inference functions
    """
    pre_func = pre_mod["main"]
    fused_mod = tvm.IRModule(
        functions={
            relay.GlobalVar("quantize_inputs"): pre_func,
            relay.GlobalVar("quantized_main"): mid_mod["main"],
            relay.GlobalVar("dequantize_outputs"): post_mod["main"],
        }
    )
    sb = relay.ScopeBuilder()
    main_params = [relay.Var(param.name_hint) for param in pre_func.params]
    quantized_inputs = sb.let(
        "quantized_inputs",
        relay.Call(fused_mod.get_global_var("quantize_inputs"), main_params),
    )
    # The prefix returns a tuple; unpack it into the core function's arguments.
    num_fields = len(pre_func.ret_type.fields)
    quantized_outputs = sb.let(
        "quantized_outputs",
        relay.Call(
            fused_mod.get_global_var("quantized_main"),
            [relay.TupleGetItem(quantized_inputs, i) for i in range(num_fields)],
        ),
    )
    dequantized_outputs = sb.let(
        "dequantized_outputs",
        relay.Call(fused_mod.get_global_var("dequantize_outputs"), [quantized_outputs]),
    )
    sb.ret(dequantized_outputs)
    fused_mod["main"] = relay.Function(main_params, sb.get())
    return relay.transform.InferType()(fused_mod)
class PrefixCutter(ExprMutator):
    """A mutator for extracting input quantization expressions from a function
    The result of `visit` is the core function, and the input quantization
    expressions are stored in the `prefix_sb` scope builder.
    """
    def __init__(self, params, quantized_dtypes):
        ExprMutator.__init__(self)
        # Original function parameters; used to detect when a visited argument
        # subtree reaches back to a function input.
        self.params = set(params)
        self.quantized_dtypes = quantized_dtypes
        # Parameters seen while visiting the current argument subtree; cleared
        # after each argument is processed.
        self.subtree_params = set()
        self.new_func_params = []
        # Accumulates the let-bindings forming the input-quantization prefix.
        self.prefix_sb = relay.ScopeBuilder()
        # Maps each new mid-function parameter to its prefix-side binding.
        self.prefix_binding_map = {}
    def visit_var(self, var):
        if var in self.params:
            self.subtree_params.add(var)
        return var
    def visit_call(self, call):
        # TODO(weberlo) use graph pattern matching?
        if not hasattr(call.op, "name") or call.op.name not in ALLOWED_CONVERSION_OPS:
            # First non-conversion op on this path: everything visited beneath
            # each argument belongs to the prefix partition.
            new_args = []
            for arg in call.args:
                new_arg = self.visit(arg)
                if len(self.subtree_params) == 0:
                    # Argument does not touch a function input; keep as-is.
                    new_args.append(new_arg)
                else:
                    # NOTE(review): assumes each argument subtree touches at
                    # most one function parameter -- confirm for conversion
                    # chains with multiple inputs.
                    assert len(self.subtree_params) == 1
                    param = next(iter(self.subtree_params))
                    pre_param = self.prefix_sb.let(param.name_hint, new_arg)
                    self.subtree_params.clear()
                    mid_param = relay.Var(param.name_hint, arg.checked_type)
                    self.prefix_binding_map[mid_param] = pre_param
                    # return new parameter, then we can use
                    # relay.analysis.free_vars at the end of the pass to generate
                    # new `mid_func` type signature
                    new_args.append(mid_param)
            return relay.Call(call.op, new_args, call.attrs)
        return super().visit_call(call)
def partition_prefix(mod, quantized_dtypes):
    """Extract input quantization expressions from `mod['main']`.

    Parameters
    ----------
    mod : tvm.IRModule
        Module containing a quantized inference function
    quantized_dtypes : Set[str]
        Set of data types allowed in quantized operators

    Returns
    -------
    pre_mod : tvm.IRModule
        Module containing the input quantization function
    mid_mod : tvm.IRModule
        Module containing a function with everything except for input quantization
    """
    assert len(mod.functions) == 1
    main_func = mod["main"]
    cutter = PrefixCutter(main_func.params, quantized_dtypes)
    mid_body = cutter.visit(main_func.body)
    assert not main_func.type_params, "unimplemented"
    assert main_func.attrs is None, "unimplemented"
    mid_func = relay.Function(relay.analysis.free_vars(mid_body), mid_body)
    mid_mod = relay.transform.InferType()(tvm.IRModule.from_expr(mid_func))
    sb = cutter.prefix_sb
    # Thread every mid-function input through the prefix's return tuple, even
    # the ones that did not require any quantization.
    outputs = []
    for param in mid_func.params:
        if param in cutter.prefix_binding_map:
            # This input was converted; reuse the binding collected by the
            # prefix cutter.
            outputs.append(cutter.prefix_binding_map[param])
        else:
            # No conversion detected; pass the argument through untouched.
            outputs.append(relay.Var(param.name_hint, param.checked_type))
    sb.ret(relay.Tuple(outputs))
    pre_body = sb.get()
    pre_func = relay.Function(relay.analysis.free_vars(pre_body), pre_body)
    pre_mod = relay.transform.InferType()(tvm.IRModule.from_expr(pre_func))
    return pre_mod, mid_mod
class SuffixCutter(ExprMutator):
    """A mutator for extracting output dequantization expressions from a function
    The result of `visit` is a function containing the output dequantization
    expressions, and the middle of the function is stored in `mid_body`.
    """
    def __init__(self, quantized_dtypes):
        ExprMutator.__init__(self)
        # Set by visit() to the first subexpression whose dtype is quantized;
        # remains None when no quantization boundary exists in the function.
        self.mid_body = None
        self.quantized_dtypes = quantized_dtypes
    def visit(self, expr):
        # The first expression (walking from the output downward) that already
        # has a quantized dtype marks the boundary: capture it as the middle
        # body and splice a fresh input variable into the suffix function.
        if hasattr(expr, "checked_type") and expr.checked_type.dtype in self.quantized_dtypes:
            self.mid_body = expr
            return relay.Var("input", expr.checked_type)
        return super().visit(expr)
def partition_suffix(mod, quantized_dtypes):
    """Extract output dequantization expressions from `mod['main']`.

    Parameters
    ----------
    mod : tvm.IRModule
        Module containing a quantized inference function
    quantized_dtypes : Set[str]
        Set of data types allowed in quantized operators

    Returns
    -------
    mid_mod : tvm.IRModule
        Module containing everything except for output dequantization
    post_mod : tvm.IRModule
        Module containing the output dequantization function
    """
    assert len(mod.functions) == 1
    main_func = mod["main"]
    cutter = SuffixCutter(quantized_dtypes)
    post_body = cutter.visit(main_func.body)
    assert not main_func.type_params, "unimplemented"
    assert main_func.attrs is None, "unimplemented"
    post_func = relay.Function(relay.analysis.free_vars(post_body), post_body, main_func.ret_type)
    post_mod = relay.transform.InferType()(tvm.IRModule.from_expr(post_func))
    if cutter.mid_body is None:
        # No quantization boundary was found, so the suffix holds the whole
        # function. Promote it to the middle partition and replace the suffix
        # with an identity function.
        mid_mod = post_mod
        identity_var = relay.Var("input", mid_mod["main"].ret_type)
        post_func = relay.Function([identity_var], identity_var)
        post_mod = relay.transform.InferType()(tvm.IRModule.from_expr(post_func))
    else:
        mid_func = relay.Function(main_func.params, cutter.mid_body)
        mid_mod = relay.transform.InferType()(tvm.IRModule.from_expr(mid_func))
    return mid_mod, post_mod
class ConversionOpChecker(ExprVisitor):
    """A visitor that verifies every call in a function is a conversion op."""

    def __init__(self):
        ExprVisitor.__init__(self)
        # Flipped to False as soon as a non-conversion call is seen.
        self.valid = True

    def visit_call(self, call):
        op_name = getattr(call.op, "name", None)
        if op_name not in ALLOWED_CONVERSION_OPS:
            self.valid = False
        super().visit_call(call)
def has_only_conversion_ops(func):
    """Return true iff the given function contains only quantization/dequantization ops.

    Parameters
    ----------
    func : relay.Function
        Function being checked

    Returns
    -------
    valid : bool
        Whether the function contains only conversion ops
    """
    visitor = ConversionOpChecker()
    visitor.visit(func)
    return visitor.valid
| 13,371 | 37.985423 | 100 | py |
tvm | tvm-main/python/tvm/relay/quantize/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin
"""Automatic quantization utilities."""
from __future__ import absolute_import as _abs
from .quantize import *
from ._partition import register_partition_function
from ._annotate import register_annotate_function
| 1,052 | 42.875 | 62 | py |
tvm | tvm-main/python/tvm/relay/quantize/quantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, not-context-manager
"""Automatic quantization toolkit."""
import tvm.ir
import tvm
from tvm.runtime import Object
from . import _quantize
from ._calibrate import calibrate
from ._partition_conversions import partition_conversions
from .. import expr as _expr
from .. import transform as _transform
class QAnnotateKind(object):
    """Denote the kind of annotation field, corresponding
    to different nbit configure."""

    # IDENTITY marks pass-through simulated_quantize nodes (no nbit setting).
    IDENTITY = 0
    # The remaining kinds select the matching nbit_*/dtype_* field of QConfig
    # via kind2str() (e.g. INPUT -> nbit_input / dtype_input).
    INPUT = 1
    WEIGHT = 2
    ACTIVATION = 3
def kind2str(kind):
    """Map a `QAnnotateKind` value to its lowercase field name."""
    names = {
        QAnnotateKind.IDENTITY: "identity",
        QAnnotateKind.INPUT: "input",
        QAnnotateKind.WEIGHT: "weight",
        QAnnotateKind.ACTIVATION: "activation",
    }
    assert kind in names
    return names[kind]
def _forward_op(ref_call, args):
    """Rebuild ``ref_call`` with the same op/attrs/types but new arguments."""
    return _expr.Call(
        ref_call.op, args, ref_call.attrs, ref_call.type_args, ref_call.span
    )
@tvm._ffi.register_object("relay.quantize.QConfig")
class QConfig(Object):
    """Configure the quantization behavior by setting config variables.

    Note
    ----
    This object is backed by node system in C++, with arguments that can be
    exchanged between python and C++.
    Do not construct directly, use qconfig instead.
    The fields that are backed by the C++ node are immutable once an instance
    is constructed. See _node_defaults for the fields.
    """

    # Default values for every C++-backed field. This dict also doubles as the
    # set of attribute names that may never be reassigned after construction
    # (enforced in __setattr__ below).
    _node_defaults = {
        "nbit_input": 8,
        "nbit_weight": 8,
        "nbit_activation": 32,
        "dtype_input": "int8",
        "dtype_weight": "int8",
        "dtype_activation": "int32",
        "calibrate_mode": "global_scale",
        "global_scale": 8.0,
        "weight_scale": "power2",
        "skip_dense_layer": True,
        "skip_conv_layers": [0],
        "do_simulation": False,
        "round_for_shift": True,
        "debug_enabled_ops": None,
        "rounding": "UPWARD",
        "calibrate_chunk_by": -1,
        "partition_conversions": "disabled",
    }

    # pylint: disable=no-member
    def __init__(self, handle):
        """Initialize the function with handle

        Parameters
        ----------
        handle : SymbolHandle
            the handle to the underlying C++ Symbol
        """
        super(QConfig, self).__init__(handle)
        self.handle = handle

    def guard(self, ref_call):
        """Return true if op is enabled, otherwise return false"""
        if self.debug_enabled_ops is None:
            # No debug filter configured: all ops are enabled.
            return True
        enabled_names = [x.value for x in self.debug_enabled_ops]
        return ref_call.op.name in enabled_names

    def get_nbit_by_kind(self, kind):
        """Number of bits configured for the given QAnnotateKind."""
        return getattr(self, "nbit_" + kind2str(kind))

    def get_dtype_by_kind(self, kind):
        """Data type configured for the given QAnnotateKind."""
        return getattr(self, "dtype_" + kind2str(kind))

    def __enter__(self):
        # pylint: disable=protected-access
        _quantize._EnterQConfigScope(self)
        return self

    def __exit__(self, ptype, value, trace):
        _quantize._ExitQConfigScope()

    def __setattr__(self, name, value):
        # C++-backed fields are immutable from Python once constructed.
        if name in QConfig._node_defaults:
            raise AttributeError(f"'{type(self)}' object cannot set attribute '{name}'")
        return super(QConfig, self).__setattr__(name, value)
def current_qconfig():
    """Return the QConfig at the top of the active configuration scope."""
    return _quantize._GetCurrentQConfig()
def qconfig(**kwargs):
    """Configure the quantization behavior by setting config variables.

    Parameters
    ---------
    nbit_dict: dict of QAnnotateKind -> int
        Number of bit for every kind of annotate field.
    calibrate_mode: str
        The calibration mode. 'global_scale' or 'kl_divergence'.
        global_scale: use global scale
        kl_divergence: find scales by kl divergence on the dataset.
    global_scale: float
        The global scale for calibration.
    weight_scale: str
        The way to calculate scales for weights (annotated with QAnnotateKind.WEIGHT).
        power2: Find the maximum of the absolute value of the tensor, and then round up to power
        of two.
        max: Find the maximum of the absolute value of the tensor
    skip_dense_layer: boolean
        Whether to skip all nn.dense layer type. By default are skipped.
    skip_conv_layers: list
        Specifying which layers to be skipped. Provide a list of indices
        that indicate which conv2d layers to leave untouched. Start from 0.
    do_simulation: boolean
        Whether to do simulation with float operation only.
    round_for_shift: boolean
        Whether to add bias for rounding during shift.
    debug_enabled_ops: None or list of str
        Partially quantize specified operators for debugging. The default value
        is None, which means will try to call all operartors' annotate rewrite
        function.
    rounding: "UPWARD" or "TONEAREST"
        Rounding direction for fixed point multiplications.
    partition_conversions: 'disabled', 'enabled', or 'fully_integral'
        If set to 'enabled' or 'fully_integral', partitions a quantized
        result into a module containing
        a prefix function (consisting of input conversion into the quantized data space),
        a middle function (consisting of the core quantized network),
        a suffix function (consisting of output dequantization),
        and a main function (that calls the prefix, middle, and suffix functions in succession).
        If set to 'fully_integral' and there are unquantized operators in the result,
        an exception is raised.
        The default value is 'disabled'.

    Returns
    -------
    config: QConfig
        The quantization configuration
    """
    # Start from the defaults and overlay any recognized keyword arguments;
    # unknown keys are silently ignored (matching the C++ field set).
    node_args = dict(QConfig._node_defaults)
    for key in node_args:
        if key in kwargs:
            node_args[key] = kwargs[key]
    return tvm.ir.make_node("relay.quantize.QConfig", **node_args)
class QuantizeContext(object):
    """An internal global context used during annotation; holds state such as
    the running conv2d layer counter and the cache of annotated nodes."""

    Current = None

    def __init__(self):
        self.qnode_map = dict()
        self._conv2d_counter = 0
        self._stop_quantize = False

    def check_to_skip(self, ref_call):
        """Check the index of conv2d layer to decide whether to
        skip the current operator."""
        if self._stop_quantize:
            return True
        skip_list = current_qconfig().skip_conv_layers
        # Conv layers are only counted (and possibly skipped) when a skip
        # list is configured.
        if skip_list is not None and ref_call.op.name == "nn.conv2d":
            layer_index = self._conv2d_counter
            self._conv2d_counter += 1
            if layer_index in [int(x) for x in skip_list]:
                return True
        return False

    def stop_quantize(self):
        self._stop_quantize = True

    def reset(self):
        self._conv2d_counter = 0
        self._stop_quantize = False

    def __enter__(self):
        self.reset()
        return self

    def __exit__(self, ptype, value, traceback):
        pass
def quantize_context():
    """Return the process-wide QuantizeContext, creating it lazily."""
    ctx = QuantizeContext.Current
    if ctx is None:
        ctx = QuantizeContext.Current = QuantizeContext()
    return ctx
def partition():
    """Partition graph into small low-precision sections by `cast_hint` and
    `stop_fusion`.

    Returns
    -------
    ret: tvm.transform.Pass
        The registered partition pass.
    """
    return _quantize.QuantizePartition()
def annotate():
    """Rewrite a float32 graph into one that simulates the error introduced
    by the current quantization scheme.

    Returns
    -------
    ret: tvm.transform.Pass
        The registered pass for quantization annotation.
    """
    return _quantize.QuantizeAnnotate()
def realize():
    """Transform the simulated quantized graph (which still computes in
    float32) into a real low-bit integer graph, replacing
    `simulated_quantize` with fine-grained add/multiply/shift operators
    wherever possible for better performance.

    Returns
    -------
    ret: tvm.transform.Pass
        The registered pass for quantization realization.
    """
    return _quantize.QuantizeRealize()
def _bind_params(func, params):
    """Bind constant parameter values to the matching function arguments."""
    # Map argument names to their vars. A name that appears more than once is
    # mapped to None so binding it later raises instead of picking arbitrarily.
    name_dict = {}
    for arg in func.params:
        name = arg.name_hint
        name_dict[name] = None if name in name_dict else arg
    bindings = {}
    for name, value in params.items():
        if name not in name_dict:
            # Parameter has no matching argument; skip it silently.
            continue
        arg = name_dict[name]
        if arg is None:
            raise ValueError(f"Multiple args in the function have name {name}")
        bindings[arg] = _expr.const(value)
    return _expr.bind(func, bindings)
def prerequisite_optimize(mod, params=None):
    """Prerequisite optimization passes for quantization. Perform
    "SimplifyInference", "FoldScaleAxis", "FoldConstant", and
    "CanonicalizeOps" optimization before quantization."""
    if params:
        mod["main"] = _bind_params(mod["main"], params)
    pipeline = [
        _transform.SimplifyInference(),
        _transform.FoldConstant(),
        _transform.FoldScaleAxis(),
        _transform.CanonicalizeOps(),
        _transform.FoldConstant(),
    ]
    return tvm.transform.Sequential(pipeline)(mod)
def quantize(mod, params=None, dataset=None):
    """Quantize a module.

    After the prerequisite simplification passes, runs "partition",
    "annotate", "calibrate" and (unless simulation is requested) "realize"
    followed by constant folding. If partition_conversions is enabled in the
    current qconfig, the result is additionally split into
    quantize/core/dequantize partitions.

    Parameters
    ---------
    mod: Module
        The original module.
    params : dict of str to NDArray
        Input parameters to the graph that do not change
        during inference time. Used for constant folding.
    dataset: list of dict of Var -> NDArray
        The calibration dataset.

    Returns
    -------
    ret: Function
        The graph after quantization
    """
    mod = prerequisite_optimize(mod, params)
    calibrate_pass = tvm.transform.module_pass(
        calibrate(dataset), opt_level=1, name="QuantizeCalibrate"
    )
    passes = [partition(), annotate(), calibrate_pass, tvm.relay.transform.InferType()]
    if not current_qconfig().do_simulation:
        passes += [realize(), _transform.FoldConstant()]
    required = ["QuantizeAnnotate", "QuantizeCalibrate", "QuantizeRealize"]
    with tvm.transform.PassContext(opt_level=3, required_pass=required):
        with quantize_context():
            mod = tvm.transform.Sequential(passes)(mod)
    q_cfg = current_qconfig()
    assert q_cfg.partition_conversions in ["disabled", "enabled", "fully_integral"]
    if q_cfg.partition_conversions == "disabled":
        return mod
    quantized_dtypes = {q_cfg.dtype_input, q_cfg.dtype_weight, q_cfg.dtype_activation}
    ensure_fully_integral = q_cfg.partition_conversions == "fully_integral"
    return partition_conversions(mod, quantized_dtypes, ensure_fully_integral)
| 12,297 | 31.363158 | 99 | py |
tvm | tvm-main/python/tvm/relay/quantize/_annotate.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument,inconsistent-return-statements
"""Internal module for registering attribute for annotation."""
import warnings
from tvm import topi
import tvm._ffi
from tvm.relay.op import op as _reg
from .. import expr as _expr
from .. import analysis as _analysis
from .. import op as _op
from . import _quantize
from .quantize import QAnnotateKind, current_qconfig, quantize_context
from .quantize import _forward_op
@_op.register_compute("relay.op.annotation.simulated_quantize")
def simulated_quantize_compute(attrs, inputs, out_type):
    """Compute definition for simulated_quantize: scale, clip, round, rescale."""
    assert len(inputs) == 4
    assert attrs.sign
    assert attrs.rounding == "round"
    data, scale, clip_min, clip_max = inputs
    if attrs.kind == QAnnotateKind.IDENTITY:
        # Identity annotations pass the tensor through unchanged.
        return [topi.identity(data)]
    # Map into the quantized domain, clamp to the representable range, then
    # round to emulate integer storage before mapping back to float.
    scaled = topi.divide(data, scale)
    clipped = topi.maximum(topi.minimum(scaled, clip_max), clip_min)
    rounded = topi.round(clipped)
    return [topi.multiply(rounded, scale)]
# Register schedule/pattern metadata for simulated_quantize so it is treated
# like any other injective, element-wise operator during fusion/scheduling.
_reg.register_injective_schedule("relay.op.annotation.simulated_quantize")
_reg.register_pattern("relay.op.annotation.simulated_quantize", _reg.OpPattern.ELEMWISE)
# cast_hint gets an injective schedule as well.
_reg.register_injective_schedule("annotation.cast_hint")
@tvm._ffi.register_object("relay.QAnnotateExpr")
class QAnnotateExpr(_expr.TempExpr):
    """A special kind of Expr for Annotating.
    Parameters
    ---------
    expr: Expr
        the original relay ir expr.
    kind: QAnnotateKind
        the kind of annotation field.
    """
    def __init__(self, expr, kind):
        # Construction is delegated to the C++ side via the FFI constructor.
        self.__init_handle_by_constructor__(_quantize.make_annotate_expr, expr, kind)
def _get_expr_kind(anno):
    """Split ``anno`` into an (expr, kind) pair; plain exprs get kind None."""
    if isinstance(anno, QAnnotateExpr):
        return anno.expr, anno.kind
    return anno, None
def register_annotate_function(op_name, frewrite=None, level=10):
    """register a rewrite function for operator, used by annotation.

    Parameters
    ---------
    op_name: str
        The name of operation
    frewrite : function, optional
        The function to be registered.
    level : int, optional
        The priority level
    """

    def default_rewrite(ref_call, new_args, ctx):
        # Strip QAnnotateExpr wrappers and re-emit the original call unchanged.
        plain_args = [_get_expr_kind(arg)[0] for arg in new_args]
        return _forward_op(ref_call, plain_args)

    def _register(func):
        """internal register function"""

        def frewrite_with_guard(ref_call, new_args, ctx):
            # Fall back to the identity rewrite when the op is disabled by
            # the current qconfig's debug filter.
            if current_qconfig().guard(ref_call):
                return func(ref_call, new_args, ctx)
            return default_rewrite(ref_call, new_args, ctx)

        return tvm.ir.register_op_attr(op_name, "FQAnnotateRewrite", frewrite_with_guard, level)

    # Usable both as a decorator and as a direct call.
    if frewrite is None:
        return _register
    return _register(frewrite)
def attach_simulated_quantize(data, kind, sign=True, rounding="round"):
    """Attach a simulated quantize operation after input data expr.

    Parameters
    ----------
    data: Expr
        the original data expr.

    kind: QAnnotateKind
        the kind of annotation field.

    sign: bool, optional
        whether the simulated quantization is signed.

    rounding: str, optional
        the rounding mode to simulate, e.g. "round".

    Returns
    -------
    Expr
        ``data`` wrapped in a simulated_quantize call. An existing identical
        annotation (same kind/sign/rounding) is reused rather than stacked,
        and results are memoized per quantize context so each key maps to a
        single node.
    """
    quantize_op = _op.get("relay.op.annotation.simulated_quantize")
    if isinstance(data, _expr.Call) and data.op == quantize_op:
        # Avoid annotating twice with identical parameters.
        if data.attrs.kind == kind and data.attrs.sign == sign and data.attrs.rounding == rounding:
            return data

    qctx = quantize_context()
    key = (data, kind, sign, rounding)
    if key in qctx.qnode_map:
        return qctx.qnode_map[key]

    # Scale and clip range are left as free variables here; they are bound
    # later during calibration.
    dom_scale = _expr.var("dom_scale")
    clip_min = _expr.var("clip_min")
    clip_max = _expr.var("clip_max")
    qnode = _quantize.simulated_quantize(data, dom_scale, clip_min, clip_max, kind, sign, rounding)
    qctx.qnode_map[key] = qnode
    return qnode
# Expose the annotation helper to the C++ side of the quantization pass.
tvm._ffi.register_func("relay.quantize.attach_simulated_quantize", attach_simulated_quantize)
@register_annotate_function("nn.contrib_conv2d_NCHWc")
def conv2d_nchwc_rewrite(ref_call, new_args, ctx):
    """Warn that NCHWc-layout conv2d cannot be annotated.

    Implicitly returns None so the call is left untouched by the
    annotation pass.
    """
    warnings.warn(
        "NCHWc layout Conv2D detected, please use a lower "
        "optimization level before applying the quantization "
        "pass as quantization will have no effect here..."
    )
@register_annotate_function("nn.conv2d")
def conv2d_rewrite(ref_call, new_args, ctx):
    """Annotate a conv2d call: the data operand is quantized into the INPUT
    field, the kernel into the WEIGHT field, and the output is tagged
    ACTIVATION."""
    if quantize_context().check_to_skip(ref_call):
        return None

    data, data_kind = _get_expr_kind(new_args[0])
    weight, weight_kind = _get_expr_kind(new_args[1])

    if data_kind is None or data_kind == QAnnotateKind.ACTIVATION:
        data = attach_simulated_quantize(data, QAnnotateKind.INPUT)

    # The kernel must not carry a previous annotation.
    assert weight_kind is None
    weight = attach_simulated_quantize(weight, QAnnotateKind.WEIGHT)

    new_call = _forward_op(ref_call, [data, weight])
    return QAnnotateExpr(new_call, QAnnotateKind.ACTIVATION)
@register_annotate_function("nn.conv1d")
def conv1d_rewrite(ref_call, new_args, ctx):
    """Annotate a conv1d call.

    Quantizes the data operand into the INPUT field and the kernel into the
    WEIGHT field; the result carries an ACTIVATION annotation.
    """
    if quantize_context().check_to_skip(ref_call):
        return None

    lhs, lhs_kind = _get_expr_kind(new_args[0])
    rhs, rhs_kind = _get_expr_kind(new_args[1])

    if lhs_kind in (None, QAnnotateKind.ACTIVATION):
        lhs = attach_simulated_quantize(lhs, QAnnotateKind.INPUT)
    assert rhs_kind is None  # the kernel is never pre-annotated
    rhs = attach_simulated_quantize(rhs, QAnnotateKind.WEIGHT)

    return QAnnotateExpr(_forward_op(ref_call, [lhs, rhs]), QAnnotateKind.ACTIVATION)
@register_annotate_function("nn.dense")
def dense_rewrite(ref_call, new_args, ctx):
    """Annotate a dense (fully connected) call.

    The data operand is quantized into the INPUT field and the weight
    operand into the WEIGHT field; the result is tagged ACTIVATION. Skipped
    entirely when the config disables dense-layer quantization.
    """
    if current_qconfig().skip_dense_layer:
        return None
    if quantize_context().check_to_skip(ref_call):
        return None

    data, data_kind = _get_expr_kind(new_args[0])
    weight, weight_kind = _get_expr_kind(new_args[1])

    if data_kind is None or data_kind == QAnnotateKind.ACTIVATION:
        data = attach_simulated_quantize(data, QAnnotateKind.INPUT)
    assert weight_kind is None
    weight = attach_simulated_quantize(weight, QAnnotateKind.WEIGHT)

    forwarded = _forward_op(ref_call, [data, weight])
    return QAnnotateExpr(forwarded, QAnnotateKind.ACTIVATION)
@register_annotate_function("multiply")
def multiply_rewrite(ref_call, new_args, ctx):
    """Rewrite function for multiply.

    Exactly one operand may already be annotated. The un-annotated operand
    is quantized into the WEIGHT field if constant, otherwise into INPUT;
    an ACTIVATION-annotated operand is first re-quantized to INPUT. The
    result is tagged ACTIVATION.

    Raises
    ------
    ValueError
        If both operands already carry annotations, a combination this
        rule does not support.
    """
    if quantize_context().check_to_skip(ref_call):
        return None

    lhs_expr, lhs_kind = _get_expr_kind(new_args[0])
    rhs_expr, rhs_kind = _get_expr_kind(new_args[1])

    if lhs_kind is None and rhs_kind is None:
        # Neither side is inside a quantized region; nothing to do.
        return None

    if lhs_kind in [QAnnotateKind.ACTIVATION, QAnnotateKind.INPUT] and rhs_kind is None:
        # quantize lhs to INPUT field
        if lhs_kind == QAnnotateKind.ACTIVATION:
            lhs_expr = attach_simulated_quantize(lhs_expr, QAnnotateKind.INPUT)
        if _analysis.check_constant(rhs_expr):
            rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.WEIGHT)
        else:
            rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.INPUT)
        expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
        return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)

    if rhs_kind in [QAnnotateKind.ACTIVATION, QAnnotateKind.INPUT] and lhs_kind is None:
        # quantize rhs to INPUT field
        if rhs_kind == QAnnotateKind.ACTIVATION:
            rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.INPUT)
        if _analysis.check_constant(lhs_expr):
            lhs_expr = attach_simulated_quantize(lhs_expr, QAnnotateKind.WEIGHT)
        else:
            lhs_expr = attach_simulated_quantize(lhs_expr, QAnnotateKind.INPUT)
        expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
        return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)

    # Give the caller something actionable instead of a bare ValueError.
    raise ValueError(
        f"Unsupported annotation kinds for multiply: lhs={lhs_kind}, rhs={rhs_kind}"
    )
@register_annotate_function("add")
def add_rewrite(ref_call, new_args, ctx):
    """Rewrite function for add.

    Chooses annotation behavior from the kinds of the two operands:
    un-annotated operands are quantized into INPUT (or WEIGHT when
    constant, e.g. a batch_norm bias); annotated operands are combined
    and the result kind is INPUT only when both sides are INPUT,
    otherwise ACTIVATION.

    Raises
    ------
    ValueError
        If the combination of operand annotation kinds is not covered by
        the rules above (e.g. a WEIGHT-annotated operand).
    """
    if quantize_context().check_to_skip(ref_call):
        return None

    lhs_expr, lhs_kind = _get_expr_kind(new_args[0])
    rhs_expr, rhs_kind = _get_expr_kind(new_args[1])

    if lhs_kind is None and rhs_kind is None:
        # trivial case
        return None

    if lhs_kind is None and rhs_kind is not None:
        # quantize lhs to INPUT field if it is normal expression
        assert rhs_kind in [QAnnotateKind.INPUT, QAnnotateKind.ACTIVATION]
        lhs_expr = attach_simulated_quantize(lhs_expr, QAnnotateKind.INPUT)
        expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
        return QAnnotateExpr(expr, QAnnotateKind.INPUT)

    if lhs_kind is not None and rhs_kind is None:
        if _analysis.check_constant(rhs_expr):
            # - introduced by batch_norm: add(out, const)
            rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.WEIGHT)
        else:
            rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.INPUT)
        expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
        return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)

    if lhs_kind is not None and rhs_kind is not None:
        if lhs_kind == QAnnotateKind.INPUT and rhs_kind == QAnnotateKind.INPUT:
            expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
            return QAnnotateExpr(expr, QAnnotateKind.INPUT)
        if lhs_kind == QAnnotateKind.ACTIVATION and rhs_kind == QAnnotateKind.ACTIVATION:
            # Re-quantize one side so both operands share the INPUT domain.
            rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.INPUT)
            expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
            return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)
        if (lhs_kind == QAnnotateKind.ACTIVATION and rhs_kind == QAnnotateKind.INPUT) or (
            lhs_kind == QAnnotateKind.INPUT and rhs_kind == QAnnotateKind.ACTIVATION
        ):
            expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
            return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)

    # Give the caller something actionable instead of a bare ValueError.
    raise ValueError(f"Unsupported annotation kinds for add: lhs={lhs_kind}, rhs={rhs_kind}")
def identity_rewrite(ref_call, new_args, ctx):
    """Forward annotation-preserving ops, keeping the input's kind."""
    if quantize_context().check_to_skip(ref_call):
        return None

    inner, kind = _get_expr_kind(new_args[0])
    if kind is None:
        # Not inside a quantized region; leave the call untouched.
        return None
    return QAnnotateExpr(_forward_op(ref_call, [inner]), kind)


register_annotate_function("reshape", identity_rewrite)
register_annotate_function("clip", identity_rewrite)
register_annotate_function("nn.relu", identity_rewrite)
register_annotate_function("strided_slice", identity_rewrite)
register_annotate_function("nn.avg_pool2d", identity_rewrite)
register_annotate_function("nn.batch_flatten", identity_rewrite)
register_annotate_function("transpose", identity_rewrite)
register_annotate_function("annotation.stop_fusion", identity_rewrite)
def pool2d_rewrite(ref_call, new_args, ctx):
    """Annotate max_pool2d: force its input into the INPUT field."""
    if quantize_context().check_to_skip(ref_call):
        return None

    inner, kind = _get_expr_kind(new_args[0])
    if kind is None:
        return None
    if kind == QAnnotateKind.ACTIVATION:
        # Re-quantize activations down to the input domain first.
        inner = attach_simulated_quantize(inner, QAnnotateKind.INPUT)

    return QAnnotateExpr(_forward_op(ref_call, [inner]), QAnnotateKind.INPUT)


register_annotate_function("nn.max_pool2d", pool2d_rewrite)
def pool1d_rewrite(ref_call, new_args, ctx):
    """Annotate max_pool1d: force its input into the INPUT field."""
    if quantize_context().check_to_skip(ref_call):
        return None

    inner, kind = _get_expr_kind(new_args[0])
    if kind is None:
        return None
    if kind == QAnnotateKind.ACTIVATION:
        # Re-quantize activations down to the input domain first.
        inner = attach_simulated_quantize(inner, QAnnotateKind.INPUT)

    return QAnnotateExpr(_forward_op(ref_call, [inner]), QAnnotateKind.INPUT)


register_annotate_function("nn.max_pool1d", pool1d_rewrite)
@register_annotate_function("annotation.cast_hint")
def cast_hint_rewrite(ref_call, new_args, ctx):
    """Rewrite function to force cast"""
    expr, x_kind = _get_expr_kind(new_args[0])
    # NOTE: when skipping we return the unwrapped inner expr (dropping any
    # annotation wrapper), whereas an un-annotated input returns new_args[0]
    # unchanged — the asymmetry is intentional.
    if quantize_context().check_to_skip(ref_call):
        return expr
    if x_kind is None:
        return new_args[0]
    if x_kind == QAnnotateKind.ACTIVATION:
        # Re-quantize activations into the INPUT field before the cast.
        expr = attach_simulated_quantize(expr, QAnnotateKind.INPUT)
    expr = _forward_op(ref_call, [expr])
    return QAnnotateExpr(expr, QAnnotateKind.INPUT)
@register_annotate_function("concatenate")
def concatenate_rewrite(ref_call, new_args, ctx):
    """Rewrite function for concatenate.

    If no input carries an annotation the call is left untouched.
    Otherwise every un-annotated input is quantized into the ACTIVATION
    field so all tuple members agree, and the result is tagged ACTIVATION.
    """
    if quantize_context().check_to_skip(ref_call):
        return None

    input_tuple = new_args[0]
    expr_list = [_get_expr_kind(x)[0] for x in input_tuple]
    kind_list = [_get_expr_kind(x)[1] for x in input_tuple]

    # make sure the inputs of concatenate are all normal
    # expression or annotate expression
    if all(k is None for k in kind_list):
        return None
    for i, k in enumerate(kind_list):
        if k is None:
            expr_list[i] = attach_simulated_quantize(expr_list[i], QAnnotateKind.ACTIVATION)
    expr = _forward_op(ref_call, [_expr.Tuple(expr_list)])
    return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)
@register_annotate_function("nn.global_avg_pool2d")
def global_avg_pool2d_rewrite(ref_call, new_args, ctx):
    """Realize the annotated input and stop quantizing past this op."""
    if quantize_context().check_to_skip(ref_call):
        return None

    _, kind = _get_expr_kind(new_args[0])
    if kind is None:
        return None

    ret = _forward_op(ref_call, [new_args[0].realize()])
    # Everything downstream of global_avg_pool2d stays in float.
    quantize_context().stop_quantize()
    return ret
@register_annotate_function("nn.batch_matmul")
def batch_matmul_rewrite(ref_call, new_args, ctx):
    """Annotate batch_matmul: constant operands are quantized into the
    WEIGHT field, the rest into INPUT; the result is tagged ACTIVATION."""
    if quantize_context().check_to_skip(ref_call):
        return None

    def _annotate_operand(arg):
        # Quantize only operands that are unannotated or still activations.
        operand, kind = _get_expr_kind(arg)
        if kind is None or kind == QAnnotateKind.ACTIVATION:
            if _analysis.check_constant(operand):
                operand = attach_simulated_quantize(operand, QAnnotateKind.WEIGHT)
            else:
                operand = attach_simulated_quantize(operand, QAnnotateKind.INPUT)
        return operand

    lhs = _annotate_operand(new_args[0])
    rhs = _annotate_operand(new_args[1])
    return QAnnotateExpr(_forward_op(ref_call, [lhs, rhs]), QAnnotateKind.ACTIVATION)
| 15,951 | 34.847191 | 99 | py |
tvm | tvm-main/python/tvm/relay/quantize/_partition.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument,inconsistent-return-statements
"""Internal module for registering attribute for annotation."""
import tvm
from .. import expr as _expr
from .. import analysis as _analysis
from . import _quantize
from .quantize import _forward_op
def register_partition_function(op_name, frewrite=None, level=10):
    """Register a partition rewrite function for an operator.

    Usable either directly (``frewrite`` given) or as a decorator
    (``frewrite`` None, in which case ``register_op_attr`` returns the
    decorator). The function is stored as the op's ``FQPartitionRewrite``
    attribute at the given priority level.
    """
    return tvm.ir.register_op_attr(op_name, "FQPartitionRewrite", frewrite, level)
@tvm._ffi.register_object("relay.QPartitionExpr")
class QPartitionExpr(_expr.TempExpr):
    """Temporary expression marking a sub-graph selected for quantization."""

    def __init__(self, expr):
        # Construct the underlying C++ node through the FFI.
        self.__init_handle_by_constructor__(_quantize.make_partition_expr, expr)
def partition_expr_check(expr):
    """Return ``(is_partition, inner_expr)`` for a possibly wrapped expr."""
    if not isinstance(expr, QPartitionExpr):
        return False, expr
    return True, expr.expr
@register_partition_function("nn.conv2d")
def conv2d_partition_function(ref_call, new_args, ctx):
    """Partition rewrite for conv2d: realize annotated data and keep the
    whole call inside the quantized partition."""
    data_in_partition, data = partition_expr_check(new_args[0])
    kernel_in_partition, kernel = partition_expr_check(new_args[1])
    # The kernel is a constant and must never be a partition expression.
    assert not kernel_in_partition
    if data_in_partition:
        data = new_args[0].realize()
    return QPartitionExpr(_forward_op(ref_call, [data, kernel]))
def identity_partition_function(ref_call, new_args, ctx):
    """Keep identity-like ops inside the partition of their input."""
    in_partition, inner = partition_expr_check(new_args[0])
    if not in_partition:
        return None
    return QPartitionExpr(_forward_op(ref_call, [inner]))


register_partition_function("clip", identity_partition_function)
register_partition_function("nn.relu", identity_partition_function)
register_partition_function("nn.max_pool2d", identity_partition_function)
def add_partition_generic(ref_call, new_args, ctx):
    """Rewrite function for ewise add for partition for generic devices.

    Decides whether the add stays inside a quantized partition based on
    which operands are already QPartitionExpr; the examples below come
    from ResNet/MobileNetV2 residual connections.
    """
    lhs_cond, lhs = partition_expr_check(new_args[0])
    rhs_cond, rhs = partition_expr_check(new_args[1])
    if lhs_cond and rhs_cond:
        # - introduced by ResNet, for the first residual connection
        # ...
        # %0 = nn.conv2d(%data, %meta[relay.Constant])
        # %1 = add(%0, %meta[relay.Constant])
        # %2 = nn.relu(%1)
        # %3 = nn.max_pool2d(%2)
        # ...
        # %9 = nn.conv2d(%8, %meta[relay.Constant])
        # %10 = add(%9, %meta[relay.Constant])
        # %11 = add(%3, %10) <- need to insert annotations for %3, %10
        # ...
        lhs = new_args[0].realize()
        rhs = new_args[1].realize()
        return QPartitionExpr(_forward_op(ref_call, [lhs, rhs]))
    if not lhs_cond and rhs_cond:
        # - introduced by residual connection in ResNet
        # ...
        # %13 = nn.conv2d(%12, %meta[relay.Constant])
        # %14 = add(%13, %meta[relay.Constant])
        # %15 = annotation.cast_hint(%14, 'int8')
        # %16 = annotation.stop_fusion(%15)
        # %17 = add(%5, %16)
        # %18 = nn.relu(%17)
        # ...
        # %24 = nn.conv2d(%23, %meta[relay.Constant])
        # %25 = add(%24, %meta[relay.Constant])
        # %26 = add(%18, %25) <- need to insert annotations for %25
        # ...
        rhs = new_args[1].realize()
        return _forward_op(ref_call, [lhs, rhs])
    if lhs_cond and not rhs_cond:
        if _analysis.check_constant(rhs):
            # - introduced by batch_norm: add(out, bias)
            return QPartitionExpr(_forward_op(ref_call, [lhs, rhs]))
        # - introduced by residual connection in MobileNetV2
        # ...
        # %81 = add(%80, meta[relay.Constant])
        # %82 = annotation.cast_hint(%81, 'int8')
        # %83 = annotation.stop_fusion(%82)
        # %84 = add(%79, %83)
        # ...
        # %95 = nn.conv2d(%94, %meta[relay.Constant])
        # %96 = add(%95, %meta[relay.Constant])
        # %97 = add(%96, %84) <- need to insert annotations for %96
        # ...
        lhs = new_args[0].realize()
        return _forward_op(ref_call, [lhs, rhs])
    if not lhs_cond and not rhs_cond:
        # trivial case
        return None
    raise ValueError
def mul_partition_generic(ref_call, new_args, ctx):
    """Rewrite function for ewise mul for partition for generic devices"""
    lhs_cond, lhs = partition_expr_check(new_args[0])
    rhs_cond, rhs = partition_expr_check(new_args[1])

    if lhs_cond:
        # introduced by bn: multiply(out, scale)
        # NOTE(review): if rhs is also a partition expr it is forwarded
        # unrealized here — presumably that case cannot occur; confirm.
        lhs = new_args[0].realize()
        return QPartitionExpr(_forward_op(ref_call, [lhs, rhs]))

    if rhs_cond:
        # introduced by efficientnet
        rhs = new_args[1].realize()
        return QPartitionExpr(_forward_op(ref_call, [lhs, rhs]))

    if not lhs_cond and not rhs_cond:
        # trivial case
        return None

    # Unreachable: the three branches above cover every combination.
    raise ValueError
# TODO(ziheng) enhance `register_partition_function` to dispatch
# for target automatically
@register_partition_function("add")
def add_partition_function(ref_call, new_args, ctx):
    """Rewrite function for ewise add for partition"""
    target = tvm.target.Target.current()
    if target and "cuda" in target.keys:
        # TODO(wuwei/ziheng) cuda specific rules
        # Currently identical to the generic path; kept as a placeholder
        # for future cuda-specific partition rules.
        return add_partition_generic(ref_call, new_args, ctx)
    return add_partition_generic(ref_call, new_args, ctx)
@register_partition_function("multiply")
def multiply_partition_function(ref_call, new_args, ctx):
    """Rewrite function for ewise multiply for partition"""
    # No target-specific rules yet; always use the generic path.
    return mul_partition_generic(ref_call, new_args, ctx)
# add cast after the relu op to make it run on vta
@register_partition_function("nn.global_avg_pool2d")
def global_avg_pool2d_partition_function(ref_call, new_args, ctx):
    """Realize the input (wrapping it first when necessary) and forward."""
    in_partition, _ = partition_expr_check(new_args[0])
    if in_partition:
        realized = new_args[0].realize()
    else:
        realized = QPartitionExpr(new_args[0]).realize()
    return _forward_op(ref_call, [realized])
| 6,579 | 36.386364 | 82 | py |
tvm | tvm-main/python/tvm/relay/quantize/_quantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Internal module for quantization."""
import tvm._ffi
tvm._ffi._init_api("relay._quantize", __name__)
| 924 | 41.045455 | 62 | py |
tvm | tvm-main/python/tvm/driver/build_module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""The build utils in python."""
from typing import Union, Optional, List, Mapping
import warnings
import tvm.tir
from tvm import te
from tvm.runtime import Module
from tvm.runtime import ndarray
from tvm.ir import container
from tvm.tir import PrimFunc
from tvm.ir.module import IRModule
from tvm.te import tensor
from tvm.target import Target
from tvm.tir.buffer import Buffer
from tvm.tir.expr import Var
from tvm.driver import _ffi_api as _driver_ffi
from . import _ffi_api as ffi
def get_binds(args, compact=False, binds=None):
    """Internal helper computing buffer binds and the symbolic arg list.

    Parameters
    ----------
    args : list of Buffer or Tensor or Var
        The argument lists to the function.

    compact : bool
        Whether the statement is already bound to a compact buffer.

    binds : dict of :any:`Tensor` to :any:`Buffer`, optional
        Existing tensor-to-buffer layout requirements; a new compact
        buffer is created for every tensor not already present.

    Returns
    -------
    binds : dict
        The bind specification.

    arg_list : list
        The list of symbolic buffers of arguments.
    """
    bind_spec, symbolic_args = ffi.get_binds(args, compact, binds)
    return bind_spec, symbolic_args
def schedule_to_module(
    sch: te.Schedule,
    args: Optional[List[Union[Buffer, tensor.Tensor, Var]]] = None,
    name: str = "main",
    binds: Optional[Mapping[tensor.Tensor, Buffer]] = None,
) -> IRModule:
    """According to the given schedule, form a function.

    This is a low-level function intended for testing purposes, and
    does not apply any optimization passes. In general, `tvm.lower`
    and `tvm.build` should be used instead.

    Parameters
    ----------
    sch : tvm.te.schedule.Schedule
        The given scheduler to form the raw body

    args : list of Buffer or Tensor or Var
        The argument lists to the function.

    name : str
        The name of result function, default name is "main"

    binds : dict of :any:`Tensor` to :any:`Buffer`, optional
        The binds information

    Returns
    -------
    IRModule
        The body formed according to the given schedule
    """
    # Thin wrapper: the conversion itself lives on the C++ side.
    return ffi.schedule_to_module(sch, args, name, binds)
def lower(
    inp: Union[te.Schedule, PrimFunc, IRModule],
    args: Optional[List[Union[Buffer, tensor.Tensor, Var]]] = None,
    name: str = "main",
    binds: Optional[Mapping[tensor.Tensor, Buffer]] = None,
    simple_mode: bool = False,
) -> IRModule:
    """Lowering step before build into target.

    Parameters
    ----------
    inp : Union[tvm.te.schedule.Schedule, tvm.tir.PrimFunc, IRModule]
        The TE schedule or TensorIR PrimFunc/IRModule to be built.

    args : Optional[List[Union[tvm.tir.Buffer, tensor.Tensor, Var]]]
        The argument lists to the function for a TE schedule;
        leave as None when lowering TensorIR.

    name : str
        The name of the result function.

    binds : Optional[Mapping[tensor.Tensor, tvm.tir.Buffer]]
        Maps Tensors to Buffers carrying the data-layout requirement of
        the function. A new compact buffer is created for every tensor
        not present in the mapping.

    simple_mode : bool
        Whether only output simple and compact statement; skips
        LoopPartition, api wrapper generation and Unrolling.

    Returns
    -------
    m : IRModule
        The result IRModule

    Raises
    ------
    ValueError
        If ``inp`` is not one of the supported input types.
    """
    # Dispatch on the concrete input type; each case has its own FFI entry.
    if isinstance(inp, te.Schedule):
        return ffi.lower_schedule(inp, args, name, binds, simple_mode)
    if isinstance(inp, PrimFunc):
        return ffi.lower_primfunc(inp, name, simple_mode)
    if isinstance(inp, IRModule):
        return ffi.lower_module(inp, simple_mode)
    raise ValueError(
        f"Expected input to be an IRModule, PrimFunc or te.Schedule, but got {type(inp)}"
    )
def build(
    inputs: Union[te.Schedule, PrimFunc, IRModule, Mapping[str, IRModule]],
    args: Optional[List[Union[Buffer, tensor.Tensor, Var]]] = None,
    target: Optional[Union[str, Target]] = None,
    target_host: Optional[Union[str, Target]] = None,
    runtime: Optional[
        "tvm.relay.backend.Runtime"
    ] = None,  # Type is annotated this way to avoid cyclic dependency
    name: Optional[str] = "default_function",
    binds: Optional[Mapping[tensor.Tensor, Buffer]] = None,
):
    """Build a function with arguments as signature. Code will be generated
    for devices coupled with target information.

    Parameters
    ----------
    inputs : Union[tvm.te.schedule.Schedule, tvm.tir.PrimFunc, IRModule, Mapping[str, IRModule]]
        The input to be built

    args : Optional[List[Union[tvm.tir.Buffer, tensor.Tensor, Var]]]
        The argument lists to the function.

    target : Optional[Union[str, Target]]
        The target and option of the compilation.

    target_host : Optional[Union[str, Target]]
        Host compilation target, if target is device.
        When TVM compiles device specific program such as CUDA,
        we also need host(CPU) side code to interact with the driver
        setup the dimensions and parameters correctly.
        target_host is used to specify the host side codegen target.
        By default, llvm is used if it is enabled,
        otherwise a stackvm interpreter is used.

    runtime : Optional[Runtime]
        Runtime to generate artifacts for

    name : Optional[str]
        The name of result function.

    binds : Optional[Mapping[tensor.Tensor, tvm.tir.Buffer]]
        Dictionary that maps the binding of symbolic buffer to Tensor.
        By default, a new buffer is created for each tensor in the argument.

    Returns
    -------
    ret : tvm.module
        A module that combines both host and device code.

    Note
    ----
    See the note on :any:`tvm.target` on target string format.
    """
    # Normalize every accepted input form into a single IRModule, or keep the
    # user-provided target -> IRModule mapping as-is.
    if isinstance(inputs, te.Schedule):
        if args is None:
            raise ValueError("args must be given for build from schedule")
        input_mod = lower(inputs, args, name=name, binds=binds)
    elif isinstance(inputs, (list, tuple, container.Array)):
        merged_mod = tvm.IRModule({})
        for x in inputs:
            merged_mod.update(lower(x))
        input_mod = merged_mod
    elif isinstance(inputs, PrimFunc):
        input_mod = lower(inputs, name=name)
    elif isinstance(inputs, tvm.IRModule):
        input_mod = lower(inputs)
    elif not isinstance(inputs, (dict, container.Map)):
        raise ValueError(
            f"Inputs must be te.Schedule, IRModule, PrimFunc, "
            f"or dict of target to IRModule, "
            f"but got {type(inputs)}."
        )

    if not isinstance(inputs, (dict, container.Map)):
        target = Target.current() if target is None else target
        target = target if target else "llvm"
        target_input_mod = {target: input_mod}
    else:
        target_input_mod = inputs

    # Because modules can be created from a variety of sources, we annotate them
    # with the relevant attributes here to ensure they propagate
    annotated_mods = {}
    for tar, mod in target_input_mod.items():
        if not isinstance(tar, (str, Target)):
            raise ValueError("The key of inputs must be str or Target when inputs is dict.")
        if not isinstance(mod, tvm.IRModule):
            # BUG FIX: the original adjacent string literals concatenated to
            # "IRModule,or dict" — missing space restored.
            raise ValueError("inputs must be Schedule, IRModule, or dict of str to IRModule.")
        annotated_mods[tar] = mod.with_attr("runtime", runtime)

    # TODO(mbs): Both CompilationConfig and TIRToRuntime implement the same host target
    # defaulting logic, but there's currently no way to get back the decided host.
    if target_host is not None:
        warnings.warn(
            "target_host parameter is going to be deprecated. "
            "Please pass in tvm.target.Target(target, host=target_host) instead."
        )

    annotated_mods, target_host = Target.canon_target_map_and_host(annotated_mods, target_host)
    if not target_host:
        # Default the host to the first CPU-capable target; else llvm/stackvm.
        for tar, mod in annotated_mods.items():
            device_type = ndarray.device(tar.kind.name, 0).device_type
            if device_type == ndarray.cpu(0).device_type:
                target_host = tar
                break
        if not target_host:
            target_host = "llvm" if tvm.runtime.enabled("llvm") else "stackvm"
    annotated_mods, target_host = Target.canon_target_map_and_host(annotated_mods, target_host)

    rt_mod_host = _driver_ffi.tir_to_runtime(annotated_mods, target_host)

    annotated_mods, target_host = Target.canon_target_map_and_host(annotated_mods, target_host)

    if not isinstance(target_host, Target):
        target_host = Target(target_host)

    if str(runtime) == "crt" and runtime["system-lib"]:
        # NOTE(review): if the host kind is neither "c" nor "llvm" here,
        # `to_return` is never assigned and the final line raises
        # UnboundLocalError — presumably crt implies c/llvm; confirm.
        if target_host.kind.name == "c":
            create_csource_crt_metadata_module = tvm._ffi.get_global_func(
                "runtime.CreateCSourceCrtMetadataModule"
            )
            to_return = create_csource_crt_metadata_module([rt_mod_host], target_host, runtime)
        elif target_host.kind.name == "llvm":
            create_llvm_crt_metadata_module = tvm._ffi.get_global_func(
                "runtime.CreateLLVMCrtMetadataModule"
            )
            to_return = create_llvm_crt_metadata_module([rt_mod_host], target_host, runtime)
    else:
        to_return = rt_mod_host

    return OperatorModule.from_module(to_return, ir_module_by_target=annotated_mods, name=name)
class OperatorModule(Module):
    """Wraps the Module returned by tvm.build() and captures additional outputs of that function."""

    @classmethod
    def from_module(cls, mod, **kwargs):
        """Adopt ``mod``'s underlying handle into a new OperatorModule.

        NOTE(areusch): It is generally unsafe to continue using `mod` from this point forward.
        If an exception occurs in cls.__init__, handle will be deleted. For this reason,
        set mod.handle to None.
        """
        handle = mod.handle
        mod.handle = None
        return cls(handle, **kwargs)

    def __init__(self, handle, ir_module_by_target=None, name=None):
        super(OperatorModule, self).__init__(handle)
        # Mapping of Target -> annotated IRModule used to produce this module.
        self.ir_module_by_target = ir_module_by_target
        # Name of the entry function requested at build time.
        self.name = name
| 12,048 | 36.535826 | 100 | py |
tvm | tvm-main/python/tvm/driver/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.driver"""
import tvm._ffi
tvm._ffi._init_api("driver", __name__)
| 871 | 40.52381 | 62 | py |
tvm | tvm-main/python/tvm/driver/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace for driver APIs"""
from .build_module import lower, build
| 856 | 44.105263 | 62 | py |
tvm | tvm-main/python/tvm/driver/tvmc/shape_parser.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMC Shape Parsing
"""
import argparse
import re
from tvm import relay
def parse_shape_string(inputs_string):
    """Convert an input-shape specification string into a shape dictionary.

    Parameters
    ----------
    inputs_string: str
        A string of the form "input_name:[dim1,dim2,...,dimn] input_name2:[dim1,dim2]" that
        indicates the desired shape for specific model inputs. Colons, forward slashes and dots
        within input_names are supported. Spaces are supported inside of dimension arrays.

    Returns
    -------
    shape_dict: dict
        A dictionary mapping input names to their shape for use in relay frontend converters.

    Raises
    ------
    argparse.ArgumentTypeError
        If no "name:[dims]" mapping can be found in the string.
    """
    # Pattern that extracts each separate "name:[dims]" mapping, allowing:
    # * spaces inside the dimension arrays
    # * forward slashes inside names (but not at the beginning or end)
    # * colons and dots inside names
    pattern = r"(?:\w+\/)?[:\w.]+\:\s*\[\-?\d+(?:\,\s*\-?\d+)*\]"
    matches = re.findall(pattern, inputs_string)
    if not matches:
        raise argparse.ArgumentTypeError(
            "--input-shapes argument must be of the form "
            '"input_name:[dim1,dim2,...,dimn] input_name2:[dim1,dim2]"'
        )
    shape_dict = {}
    for match in matches:
        # Whitespace is only permitted inside the dimension list, so it can
        # simply be stripped before splitting.
        compact = match.replace(" ", "")
        # Names may themselves contain colons, so split on the last one only.
        name, _, dims_text = compact.rpartition(":")
        # Negative dimensions denote dynamic axes and become relay.Any().
        shape_dict[name] = [
            int(dim) if int(dim) > 0 else relay.Any()
            for dim in dims_text.strip("][").split(",")
        ]
    return shape_dict
| 2,590 | 37.102941 | 100 | py |
tvm | tvm-main/python/tvm/driver/tvmc/pass_config.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMC PassContext Interface
"""
import importlib
import tvm
from tvm.driver.tvmc import TVMCException
def load_function(full_name):
    """Resolve a callable by name, preferring TVM's global function registry.

    Parameters
    ----------
    full_name: str
        The name of a PackedFunc or a string of the form "path.to.module.func"
        that indicates the module that can be imported.
        The TVM global function registry is consulted first; only if the name
        is not registered there is it resolved via "importlib.import_module".

    Returns
    -------
    func: function or PackedFunc
        The loaded function.
    """
    registry_func = tvm.get_global_func(full_name, allow_missing=True)
    if registry_func is not None:
        return registry_func
    # Fall back to a Python import: "path.to.module.func" -> module + attribute.
    module_name, func_name = full_name.rsplit(".", 1)
    module = importlib.import_module(module_name)
    if not hasattr(module, func_name):
        raise TVMCException(f"No function '{func_name}' found in module '{module_name}'.")
    return getattr(module, func_name)
def get_pass_config_value(name, value, config_type):
    """Get a PassContext configuration value, based on its config data type.

    Parameters
    ----------
    name: str
        config identifier name.
    value: str
        value assigned to the config, provided via command line.
    config_type: str
        data type defined to the config, as string.

    Returns
    -------
    parsed_value: bool, int, str or list
        a representation of the input value, converted to the type
        specified by config_type. None when config_type is not one of
        the recognized kinds (callers validate that beforehand).

    Raises
    ------
    TVMCException
        When the value cannot be converted to the requested type.
    """
    parsed_value = None
    if config_type == "IntImm":
        # "Bool" configurations in the PassContext are recognized as
        # IntImm, so deal with this case here
        mapping_values = {
            "false": False,
            "true": True,
        }
        try:
            # int() also accepts negative literals such as "-1", which the
            # previous str.isdigit() check incorrectly rejected.
            parsed_value = int(value)
        except ValueError:
            # if not an int, accept only values on the mapping table, case insensitive
            parsed_value = mapping_values.get(value.lower(), None)
        if parsed_value is None:
            raise TVMCException(f"Invalid value '{value}' for configuration '{name}'.")
    elif config_type == "runtime.String":
        parsed_value = value
    elif config_type == "Array":
        if name == "tir.add_lower_pass":
            pass_list = value.split(",")
            # Entries alternate between an opt level and a pass name, so the
            # list length must be even.
            if len(pass_list) % 2 != 0:
                raise TVMCException(
                    f"The configuration of '{name}' must be of the form "
                    "'tir.add_lower_pass=opt_level1,pass1,opt_evel2,pass2'"
                )
            parsed_value = []
            for i in range(0, len(pass_list), 2):
                level, pass_func = pass_list[i].strip(), pass_list[i + 1].strip()
                try:
                    level = int(level)
                except ValueError as err:
                    raise TVMCException(
                        f"Only integer is allow for configuration '{name}'."
                    ) from err
                # TODO (@leeexyz) We should parse configurations of each tir Pass.
                # For now, we only use the defaults. Currently, There are four config nodes:
                # `tir.transform.LoopPartitionConfig`
                # `tir.transform.UnrollLoopConfig`
                # `tir.transform.HoistIfThenElseConfig`
                # `tir.transform.InjectDoubleBufferConfig`
                # loading pass func and calling it to get the Pass
                pass_func = load_function(pass_func)()
                parsed_value.append((level, pass_func))
        else:
            raise TVMCException(f"Unsupported configuration '{name}' for '{config_type}' type.")
    else:
        # not raise here cause we alreay checked before calling this function
        pass
    return parsed_value
def parse_configs(input_configs):
    """Turn a list of "name=value" CLI strings into PassContext config entries.

    Parameters
    ----------
    input_configs: list of str
        list of configurations provided via command line.

    Returns
    -------
    pass_context_configs: dict
        a dict containing key-value configs to be used in the PassContext.
    """
    if not input_configs:
        return {}
    all_configs = tvm.ir.transform.PassContext.list_configs()
    supported_config_types = ("IntImm", "runtime.String", "Array")
    supported_configs = [
        key for key, meta in all_configs.items() if meta["type"] in supported_config_types
    ]
    pass_context_configs = {}
    for config in input_configs:
        if not config:
            raise TVMCException(
                f"Invalid format for configuration '{config}', use <config>=<value>"
            )
        # Each config is expected to be provided as "name=value".
        try:
            name, value = config.split("=")
        except ValueError:
            raise TVMCException(
                f"Invalid format for configuration '{config}', use <config>=<value>"
            )
        name = name.strip()
        value = value.strip()
        if name not in all_configs:
            raise TVMCException(
                f"Configuration '{name}' is not defined in TVM. "
                f"These are the existing configurations: {', '.join(all_configs)}"
            )
        if name not in supported_configs:
            raise TVMCException(
                f"Configuration '{name}' uses a data type not supported by TVMC. "
                f"The following configurations are supported: {', '.join(supported_configs)}"
            )
        config_type = all_configs[name]["type"]
        parsed_value = get_pass_config_value(name, value, config_type)
        if config_type == "Array" and name in pass_context_configs:
            # Array-typed options accumulate when the flag is repeated.
            pass_context_configs[name].extend(parsed_value)
        else:
            pass_context_configs[name] = parsed_value
    return pass_context_configs
| 6,850 | 34.133333 | 98 | py |
tvm | tvm-main/python/tvm/driver/tvmc/target.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This file contains functions for processing target inputs for the TVMC CLI
"""
import os
import logging
import json
import re
import tvm
from tvm.driver import tvmc
from tvm.driver.tvmc import TVMCException
from tvm.driver.tvmc.composite_target import get_codegen_by_target, get_codegen_names
from tvm.ir.attrs import make_node, _ffi_api as attrs_api
from tvm.ir.transform import PassContext
from tvm.target import Target, TargetKind
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
# We can't tell the type inside an Array but all current options are strings so
# it can default to that. Bool is used alongside Integer but aren't distinguished
# between as both are represented by IntImm
INTERNAL_TO_NATIVE_TYPE = {"runtime.String": str, "IntImm": int, "Array": str}
# Suffix appended to the auto-generated "--target-<kind>-<option>" help text
# for each internal type (see _generate_target_kind_args).
INTERNAL_TO_HELP = {"runtime.String": " string", "IntImm": "", "Array": " options"}
def _valid_target_kinds():
    """Yield the TVM target kinds that are not shadowed by a TVMC codegen name."""
    codegen_names = tvmc.composite_target.get_codegen_names()
    return (kind for kind in Target.list_kinds() if kind not in codegen_names)
def _generate_target_kind_args(parser, kind_name):
    """Add one "--target-<kind>-<option>" CLI argument per supported option
    of the given TVM target kind."""
    target_group = parser.add_argument_group(f"target {kind_name}")
    for opt_name, opt_type in TargetKind.options_from_name(kind_name).items():
        # Only option types with a known native Python equivalent get a flag.
        if opt_type not in INTERNAL_TO_NATIVE_TYPE:
            continue
        target_group.add_argument(
            f"--target-{kind_name}-{opt_name}",
            type=INTERNAL_TO_NATIVE_TYPE[opt_type],
            help=f"target {kind_name} {opt_name}{INTERNAL_TO_HELP[opt_type]}",
        )
def _generate_codegen_args(parser, codegen_name):
    """Add CLI arguments for the options of one composite-target codegen.

    The options come from the codegen's PassContext config node: one
    "--target-<codegen>-<option>" flag is added per attrs field whose type
    maps to a native Python type.
    """
    codegen = get_codegen_by_target(codegen_name)
    pass_configs = PassContext.list_configs()
    # Codegens without a registered config key expose no tunable options.
    if codegen["config_key"] is not None and codegen["config_key"] in pass_configs:
        target_group = parser.add_argument_group(f"target {codegen_name}")
        # Instantiate the attrs node so its field metadata can be enumerated.
        attrs = make_node(pass_configs[codegen["config_key"]]["type"])
        fields = attrs_api.AttrsListFieldInfo(attrs)
        for field in fields:
            for tvm_type, python_type in INTERNAL_TO_NATIVE_TYPE.items():
                if field.type_info.startswith(tvm_type):
                    target_option = field.name
                    target_group.add_argument(
                        f"--target-{codegen_name}-{target_option}",
                        type=python_type,
                        help=field.description,
                    )
def generate_target_args(parser):
    """Register the --target argument, plus one argument per option of every
    valid TVM target kind and every composite-target codegen."""
    parser.add_argument(
        "--target",
        required=False,
        help="compilation target as plain string, inline JSON or path to a JSON file",
    )
    for kind in _valid_target_kinds():
        _generate_target_kind_args(parser, kind)
    for codegen in get_codegen_names():
        _generate_codegen_args(parser, codegen)
def _reconstruct_target_kind_args(args, kind_name):
    """Collect the parsed "--target-<kind>-<option>" values for one target kind."""
    collected = {}
    # argparse stores "--a-b" style flags as "a_b" namespace attributes.
    kind_attr = kind_name.replace("-", "_")
    for option, option_type in TargetKind.options_from_name(kind_name).items():
        if option_type not in INTERNAL_TO_NATIVE_TYPE:
            continue
        value = getattr(args, f"target_{kind_attr}_{option.replace('-', '_')}")
        if value is not None:
            collected[option] = value
    return collected
def _reconstruct_codegen_args(args, codegen_name):
    """Collect the parsed "--target-<codegen>-<option>" values for one codegen.

    Mirrors _generate_codegen_args: only attrs fields whose type maps to a
    native Python type are considered.
    """
    codegen = get_codegen_by_target(codegen_name)
    pass_configs = PassContext.list_configs()
    codegen_options = {}
    # Codegens without a registered config key expose no tunable options.
    if codegen["config_key"] is not None and codegen["config_key"] in pass_configs:
        attrs = make_node(pass_configs[codegen["config_key"]]["type"])
        fields = attrs_api.AttrsListFieldInfo(attrs)
        for field in fields:
            for tvm_type in INTERNAL_TO_NATIVE_TYPE:
                if field.type_info.startswith(tvm_type):
                    target_option = field.name
                    # argparse stores "--a-b" style flags as "a_b" attributes.
                    var_name = (
                        f"target_{codegen_name.replace('-', '_')}_{target_option.replace('-', '_')}"
                    )
                    option_value = getattr(args, var_name)
                    if option_value is not None:
                        codegen_options[target_option] = option_value
    return codegen_options
def reconstruct_target_args(args):
    """Rebuild the per-target option dictionaries from parsed CLI arguments."""
    options_by_target = {}
    # Target kinds first, then composite codegens, matching generate_target_args.
    sources = (
        (_valid_target_kinds(), _reconstruct_target_kind_args),
        (get_codegen_names(), _reconstruct_codegen_args),
    )
    for names, rebuild in sources:
        for name in names:
            opts = rebuild(args, name)
            if opts:
                options_by_target[name] = opts
    return options_by_target
def validate_targets(parse_targets, additional_target_options=None):
    """Validate the combination of targets parsed from the CLI.

    Raises TVMCException for duplicate targets, a non-TVM final target, more
    than two TVM targets, or options passed for a target that was not listed.
    """
    names = [entry["name"] for entry in parse_targets]
    if len(set(names)) != len(names):
        raise TVMCException("Duplicate target definitions are not allowed")
    tvm_target_kinds = tvm.target.Target.list_kinds()
    if names[-1] not in tvm_target_kinds:
        tvm_target_names = ", ".join(tvm_target_kinds)
        raise TVMCException(
            f"The last target needs to be a TVM target. Choices: {tvm_target_names}"
        )
    tvm_targets = [name for name in names if name in _valid_target_kinds()]
    if len(tvm_targets) > 2:
        verbose_tvm_targets = ", ".join(tvm_targets)
        raise TVMCException(
            "Only two of the following targets can be used at a time. "
            f"Found: {verbose_tvm_targets}."
        )
    if additional_target_options is not None:
        for target_name, options in additional_target_options.items():
            # Options for a target that was never specified are an error.
            if all(entry["name"] != target_name for entry in parse_targets):
                first_option = next(iter(options))
                raise TVMCException(
                    f"Passed --target-{target_name}-{first_option}"
                    f" but did not specify {target_name} target"
                )
def tokenize_target(target):
    """Split a target specification string into a list of tokens.

    Handles corner cases not covered by the built-in 'shlex' module, such
    as "+" appearing inside option values.

    Example
    -------
    For the input `foo -op1=v1 -op2="v ,2", bar -op3=v-4` we
    should obtain:

        ["foo", "-op1=v1", "-op2="v ,2"", ",", "bar", "-op3=v-4"]

    Parameters
    ----------
    target : str
        Target options sent via CLI arguments

    Returns
    -------
    list of str
        a list of parsed tokens extracted from the target string
    """
    # The tokenizer regex has five parts, matching:
    # 1. target and option names e.g. llvm, -mattr=, -mcpu=
    # 2. option values, all together, without quotes e.g. -mattr=+foo,+opt
    # 3. option values, when single quotes are used e.g. -mattr='+foo, +opt'
    # 4. option values, when double quotes are used e.g. -mattr="+foo ,+opt"
    # 5. commas that separate different targets e.g. "my-target, llvm"
    tokenizer = re.compile(
        r"(\-{0,2}[\w\-]+\=?"
        r"(?:[\w\+\-\.]+(?:,[\w\+\-\.])*"
        r"|[\'][\w\+\-,\s\.]+[\']"
        r"|[\"][\w\+\-,\s\.]+[\"])*"
        r"|,)"
    )
    return tokenizer.findall(target)
def parse_target(target):
    """
    Parse a plain string of targets provided via a command-line
    argument.

    To send more than one codegen, a comma-separated list
    is expected. Options start with -<option_name>=<value>.

    We use python standard library 'shlex' to parse the argument in
    a POSIX compatible way, so that if options are defined as
    strings with spaces or commas, for example, this is considered
    and parsed accordingly.

    Example
    -------
    For the input `--target="foo -op1=v1 -op2="v ,2", bar -op3=v-4"` we
    should obtain:

        [
          {
            name: "foo",
            opts: {"op1":"v1", "op2":"v ,2"},
            raw: 'foo -op1=v1 -op2="v ,2"'
          },
          {
            name: "bar",
            opts: {"op3":"v-4"},
            raw: 'bar -op3=v-4'
          }
        ]

    Parameters
    ----------
    target : str
        Target options sent via CLI arguments

    Returns
    -------
    codegens : list of dict
        This list preserves the order in which codegens were
        provided via command line. Each Dict contains three keys:
        'name', containing the name of the codegen; 'opts' containing
        a key-value for all options passed via CLI; 'raw',
        containing the plain string for this codegen
    """
    codegen_names = tvmc.composite_target.get_codegen_names()
    codegens = []
    tvm_target_kinds = tvm.target.Target.list_kinds()
    parsed_tokens = tokenize_target(target)
    # Group tokens into one token-list per comma-separated codegen definition.
    split_codegens = []
    current_codegen = []
    split_codegens.append(current_codegen)
    for token in parsed_tokens:
        # every time there is a comma separating
        # two codegen definitions, prepare for
        # a new codegen
        if token == ",":
            current_codegen = []
            split_codegens.append(current_codegen)
        else:
            # collect a new token for the current
            # codegen being parsed
            current_codegen.append(token)
    # at this point we have a list of lists,
    # each item on the first list is a codegen definition
    # in the comma-separated values
    for codegen_def in split_codegens:
        # the first is expected to be the name
        name = codegen_def[0]
        # a codegen name registered with TVMC takes precedence over a TVM
        # target kind of the same name
        is_tvm_target = name in tvm_target_kinds and name not in codegen_names
        raw_target = " ".join(codegen_def)
        all_opts = codegen_def[1:] if len(codegen_def) > 1 else []
        opts = {}
        for opt in all_opts:
            try:
                # deal with -- prefixed flags
                if opt.startswith("--"):
                    opt_name = opt[2:]
                    opt_value = True
                else:
                    opt = opt[1:] if opt.startswith("-") else opt
                    opt_name, opt_value = opt.split("=", maxsplit=1)
                    # remove quotes from the value: quotes are only parsed if they match,
                    # so it is safe to assume that if the string starts with quote, it ends
                    # with quote.
                    opt_value = opt_value[1:-1] if opt_value[0] in ('"', "'") else opt_value
            except ValueError:
                raise ValueError(f"Error when parsing '{opt}'")
            opts[opt_name] = opt_value
        codegens.append(
            {"name": name, "opts": opts, "raw": raw_target, "is_tvm_target": is_tvm_target}
        )
    return codegens
def is_inline_json(target):
    """Return True when *target* parses as a JSON document, False otherwise."""
    try:
        json.loads(target)
    except json.decoder.JSONDecodeError:
        return False
    return True
def _combine_target_options(target, additional_target_options=None):
if additional_target_options is None:
return target
if target["name"] in additional_target_options:
target["opts"].update(additional_target_options[target["name"]])
return target
def _recombobulate_target(target):
name = target["name"]
opts = " ".join([f"-{key}={value}" for key, value in target["opts"].items()])
return f"{name} {opts}"
def target_from_cli(target, additional_target_options=None):
    """
    Create a tvm.target.Target instance from a
    command line interface (CLI) string.

    Parameters
    ----------
    target : str
        compilation target as plain string,
        inline JSON or path to a JSON file

    additional_target_options: Optional[Dict[str, Dict[str,str]]]
        dictionary of additional target options to be
        combined with parsed targets

    Returns
    -------
    tvm.target.Target
        an instance of target device information

    extra_targets : list of dict
        This list preserves the order in which extra targets were
        provided via command line. Each Dict contains three keys:
        'name', containing the name of the codegen; 'opts' containing
        a key-value for all options passed via CLI; 'raw',
        containing the plain string for this codegen
    """
    extra_targets = []
    # The --target value may be a file path, inline JSON, or plain text;
    # files are read so that all three cases reduce to a string to parse.
    if os.path.isfile(target):
        with open(target) as target_file:
            logger.debug("target input is a path: %s", target)
            target = "".join(target_file.readlines())
    elif is_inline_json(target):
        logger.debug("target input is inline JSON: %s", target)
    else:
        logger.debug("target input is plain text: %s", target)
    try:
        parsed_targets = parse_target(target)
    except ValueError as error:
        raise TVMCException(f"Error parsing target string '{target}'.\nThe error was: {error}")
    validate_targets(parsed_targets, additional_target_options)
    tvm_targets = [
        _combine_target_options(t, additional_target_options)
        for t in parsed_targets
        if t["is_tvm_target"]
    ]
    # Validated target strings have 1 or 2 tvm targets, otherwise
    # `validate_targets` above will fail.
    if len(tvm_targets) == 1:
        target = _recombobulate_target(tvm_targets[0])
        target_host = None
    else:
        assert len(tvm_targets) == 2
        # The second TVM target, when present, acts as the host target.
        target = _recombobulate_target(tvm_targets[0])
        target_host = _recombobulate_target(tvm_targets[1])
    # Non-TVM targets (composite codegens) are returned separately.
    extra_targets = [
        _combine_target_options(t, additional_target_options)
        for t in parsed_targets
        if not t["is_tvm_target"]
    ]
    return tvm.target.Target(target, host=target_host), extra_targets
| 14,781 | 35.053659 | 100 | py |
tvm | tvm-main/python/tvm/driver/tvmc/main.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=use-a-generator
"""
TVMC - TVM driver command-line interface
"""
import argparse
import logging
import sys
import tvm
from tvm.driver.tvmc import TVMCException, TVMCImportError
from tvm.driver.tvmc.config_options import (
read_and_convert_json_into_dict,
convert_config_json_to_cli,
)
# Subparser factory functions collected via @register_parser; _main calls
# each one to attach its subcommand to the top-level argument parser.
REGISTERED_PARSER = []


def register_parser(make_subparser):
    """
    Utility function to register a subparser for tvmc.

    Functions decorated with `tvm.driver.tvmc.main.register_parser` will be invoked
    with a parameter containing the subparser instance they need to add itself to,
    as a parser.

    Example
    -------

        @register_parser
        def _example_parser(main_subparser):
            subparser = main_subparser.add_parser('example', help='...')
            ...

    """
    REGISTERED_PARSER.append(make_subparser)
    # Return the function unchanged so the decorator is transparent.
    return make_subparser
def _main(argv):
    """TVM command line interface.

    Returns a process exit code: 0 on success (including --version), 1 when
    no valid subcommand is given, 4 on a TVMCException, 5 on a missing
    optional dependency (TVMCImportError); otherwise the subcommand's own
    return value.
    """
    parser = argparse.ArgumentParser(
        prog="tvmc",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="TVM compiler driver",
        epilog=__doc__,
        # Help action will be added later, after all subparsers are created,
        # so it doesn't interfere with the creation of the dynamic subparsers.
        add_help=False,
    )
    # --config is parsed first (with parse_known_args) so that values from the
    # JSON config file can be handed to the dynamically registered subparsers.
    parser.add_argument("--config", default="default", help="configuration json file")
    config_arg, argv = parser.parse_known_args(argv)
    json_param_dict = read_and_convert_json_into_dict(config_arg)
    json_config_values = convert_config_json_to_cli(json_param_dict)
    parser.add_argument("-v", "--verbose", action="count", default=0, help="increase verbosity")
    parser.add_argument("--version", action="store_true", help="print the version and exit")
    subparser = parser.add_subparsers(title="commands")
    for make_subparser in REGISTERED_PARSER:
        make_subparser(subparser, parser, json_config_values)
    # Finally, add help for the main parser.
    parser.add_argument("-h", "--help", action="help", help="show this help message and exit.")
    args = parser.parse_args(argv)
    # Each -v lowers the TVMC log threshold by 10, capped at 3 (DEBUG).
    args.verbose = min(args.verbose, 3)
    # See the meaning of the logging levels at
    # https://docs.python.org/3/library/logging.html#logging-levels
    logging.basicConfig(stream=sys.stdout)
    logging.getLogger("TVMC").setLevel(40 - args.verbose * 10)
    if args.version:
        sys.stdout.write("%s\n" % tvm.__version__)
        return 0
    if not hasattr(args, "func"):
        # In case no valid subcommand is provided, show usage and exit
        parser.print_help(sys.stderr)
        return 1
    try:
        return args.func(args)
    except TVMCImportError as err:
        sys.stderr.write(
            f'Package "{err}" is not installed. ' f'Hint: "pip install tlcpack[tvmc]".'
        )
        return 5
    except TVMCException as err:
        sys.stderr.write("Error: %s\n" % err)
        return 4
def main():
    """Console-script entry point: run _main on argv and exit with its code."""
    sys.exit(_main(sys.argv[1:]))


if __name__ == "__main__":
    main()
| 3,853 | 30.333333 | 96 | py |
tvm | tvm-main/python/tvm/driver/tvmc/frontends.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Provides support to parse models from different frameworks into Relay networks.
Frontend classes do lazy-loading of modules on purpose, to reduce time spent on
loading the tool.
"""
import logging
import os
import sys
import re
import importlib
from abc import ABC
from abc import abstractmethod
from typing import Optional, List, Dict
from pathlib import Path
import numpy as np
from tvm import relay
from tvm import parser
from tvm.driver.tvmc import TVMCException, TVMCImportError
from tvm.driver.tvmc.model import TVMCModel
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
class Frontend(ABC):
    """Abstract class for command line driver frontend.

    Provide a unified way to import models (as files), and deal
    with any required preprocessing to create a TVM module from it."""

    @staticmethod
    @abstractmethod
    def name():
        """Frontend name: the identifier used to select this frontend."""

    @staticmethod
    @abstractmethod
    def suffixes():
        """File suffixes (extensions) used by this frontend, without the dot."""

    @abstractmethod
    def load(self, path, shape_dict=None, **kwargs):
        """Load a model from a given path.

        Parameters
        ----------
        path: str
            Path to a file
        shape_dict: dict, optional
            Mapping from input names to their shapes.
        kwargs: dict
            Extra options forwarded to the underlying relay importer.

        Returns
        -------
        mod : tvm.IRModule
            The produced relay module.
        params : dict
            The parameters (weights) for the relay module.
        """
def lazy_import(pkg_name, from_pkg_name=None, hide_stderr=False):
    """Lazy import a frontend package or subpackage.

    Parameters
    ----------
    pkg_name : str
        Name (or relative name) of the module to import.
    from_pkg_name : str, optional
        Anchor package used to resolve a relative ``pkg_name``.
    hide_stderr : bool, optional
        When True, suppress anything written to stderr while the package
        is being imported.

    Returns
    -------
    module
        The imported module.

    Raises
    ------
    TVMCImportError
        When the package cannot be imported.
    """
    # Save and redirect stderr up-front so the `finally` below can always
    # restore it. The previous implementation referenced an undefined
    # `stderr` name in `finally` (NameError when hide_stderr=True) and
    # never actually silenced stderr.
    if hide_stderr:
        stderr = sys.stderr
        sys.stderr = open(os.devnull, "w")
    try:
        return importlib.import_module(pkg_name, package=from_pkg_name)
    except ImportError as error:
        raise TVMCImportError(pkg_name) from error
    finally:
        if hide_stderr:
            sys.stderr.close()  # close the devnull handle to avoid a leak
            sys.stderr = stderr
class KerasFrontend(Frontend):
    """Keras frontend for TVMC"""

    @staticmethod
    def name():
        return "keras"

    @staticmethod
    def suffixes():
        return ["h5"]

    def load(self, path, shape_dict=None, **kwargs):
        """Load a Keras .h5 model and convert it to a Relay module.

        Input shapes are inferred from the model's input layers (unknown
        dimensions default to 1) and can be overridden via shape_dict.
        """
        # pylint: disable=C0103
        tf = lazy_import("tensorflow")
        keras = lazy_import("keras", from_pkg_name="tensorflow")
        # tvm build currently imports keras directly instead of tensorflow.keras
        try:
            model = keras.models.load_model(path)
        except ValueError as err:
            raise TVMCException(str(err))
        # There are two flavours of keras model, sequential and
        # functional, TVM expects a functional model, so convert
        # if required:
        if self.is_sequential_p(model):
            model = self.sequential_to_functional(model)
        in_shapes = []
        for layer in model._input_layers:
            # Eager vs graph mode expose dimensions differently
            # (plain ints vs Dimension objects with a .value).
            if tf.executing_eagerly():
                in_shapes.append(tuple(dim if dim is not None else 1 for dim in layer.input.shape))
            else:
                in_shapes.append(
                    tuple(dim.value if dim.value is not None else 1 for dim in layer.input.shape)
                )
        # Random sample inputs are only used to derive concrete shapes.
        inputs = [np.random.uniform(size=shape, low=-1.0, high=1.0) for shape in in_shapes]
        input_shapes = {name: x.shape for (name, x) in zip(model.input_names, inputs)}
        # User-provided shapes take precedence over the inferred ones.
        if shape_dict is not None:
            input_shapes.update(shape_dict)
        kwargs.setdefault("layout", "NHWC")
        return relay.frontend.from_keras(model, input_shapes, **kwargs)

    def is_sequential_p(self, model):
        """Return True when *model* is a keras Sequential model."""
        keras = lazy_import("keras", from_pkg_name="tensorflow")
        return isinstance(model, keras.models.Sequential)

    def sequential_to_functional(self, model):
        """Rebuild a Sequential model as an equivalent functional Model."""
        keras = lazy_import("keras", from_pkg_name="tensorflow")
        assert self.is_sequential_p(model)
        input_layer = keras.layers.Input(batch_shape=model.layers[0].input_shape)
        prev_layer = input_layer
        # Chain every layer of the sequential model onto the new input.
        for layer in model.layers:
            prev_layer = layer(prev_layer)
        model = keras.models.Model([input_layer], [prev_layer])
        return model
class OnnxFrontend(Frontend):
    """Importer for ONNX protobuf models (.onnx files)."""

    @staticmethod
    def name():
        return "onnx"

    @staticmethod
    def suffixes():
        return ["onnx"]

    def load(self, path, shape_dict=None, **kwargs):
        """Deserialize the ONNX model at *path* and convert it to Relay."""
        onnx = lazy_import("onnx")
        # pylint: disable=E1101
        onnx_model = onnx.load(path)
        return relay.frontend.from_onnx(onnx_model, shape=shape_dict, **kwargs)
class TensorflowFrontend(Frontend):
    """Importer for frozen TensorFlow GraphDef models (.pb files)."""

    @staticmethod
    def name():
        return "pb"

    @staticmethod
    def suffixes():
        return ["pb"]

    def load(self, path, shape_dict=None, **kwargs):
        """Read a serialized GraphDef and convert it to Relay."""
        tf = lazy_import("tensorflow")
        tf_testing = lazy_import("tvm.relay.testing.tf")
        with tf.io.gfile.GFile(path, "rb") as graph_file:
            serialized = graph_file.read()
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(serialized)
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        logger.debug("parse TensorFlow model and convert into Relay computation graph")
        return relay.frontend.from_tensorflow(graph_def, shape=shape_dict, **kwargs)
class TFLiteFrontend(Frontend):
    """TFLite frontend for TVMC"""

    @staticmethod
    def name():
        return "tflite"

    @staticmethod
    def suffixes():
        return ["tflite"]

    def load(self, path, shape_dict=None, **kwargs):
        """Read a flatbuffer .tflite file and convert it to Relay.

        Only TFLite schema version 3 models are accepted.
        """
        model = lazy_import("tflite.Model")
        with open(path, "rb") as tf_graph:
            content = tf_graph.read()
        # tflite.Model.Model is tflite.Model in 1.14 and 2.1.0
        try:
            tflite_model = model.Model.GetRootAsModel(content, 0)
        except AttributeError:
            tflite_model = model.GetRootAsModel(content, 0)
        # Reading the version doubles as a sanity check that the file really
        # is a TFLite flatbuffer.
        try:
            version = tflite_model.Version()
            logger.debug("tflite version %s", version)
        except Exception:
            raise TVMCException("input file not tflite")
        if version != 3:
            raise TVMCException("input file not tflite version 3")
        logger.debug("parse TFLite model and convert into Relay computation graph")
        mod, params = relay.frontend.from_tflite(tflite_model, shape_dict=shape_dict, **kwargs)
        return mod, params
class PyTorchFrontend(Frontend):
    """Importer for TorchScript modules saved with PyTorch."""

    @staticmethod
    def name():
        return "pytorch"

    @staticmethod
    def suffixes():
        # Torch Script is a zip file, but can be named pth
        return ["pth", "zip"]

    def load(self, path, shape_dict=None, **kwargs):
        """Load a TorchScript module and convert it to Relay.

        TorchScript files carry no input-shape metadata, so shape_dict
        (--input-shapes) is mandatory.
        """
        torch = lazy_import("torch")
        if shape_dict is None:
            raise TVMCException(f"--input-shapes must be specified for {self.name()}")
        script_module = torch.jit.load(path)
        script_module.eval()  # switch to inference mode
        # The PyTorch frontend expects shapes as a list of (name, shape) pairs.
        shape_list = list(shape_dict.items())
        logger.debug("parse Torch model and convert into Relay computation graph")
        return relay.frontend.from_pytorch(
            script_module, shape_list, keep_quantized_weight=True, **kwargs
        )
class PaddleFrontend(Frontend):
    """PaddlePaddle frontend for TVMC"""

    @staticmethod
    def name():
        return "paddle"

    @staticmethod
    def suffixes():
        return ["pdmodel"]

    def load(self, path, shape_dict=None, **kwargs):
        """Load a PaddlePaddle inference model and convert it to Relay.

        The weights file is expected next to the model file, with the same
        prefix and a ".pdiparams" suffix.
        """
        # pylint: disable=C0415
        import paddle

        paddle.enable_static()
        paddle.disable_signal_handler()
        if not os.path.exists(path):
            raise TVMCException("File {} is not exist.".format(path))
        if not path.endswith(".pdmodel"):
            raise TVMCException("Path of model file should be endwith suffixes '.pdmodel'.")
        # Drop the ".pdmodel" extension to build the companion weights path.
        prefix = "".join(path.strip().split(".")[:-1])
        params_file_path = prefix + ".pdiparams"
        if not os.path.exists(params_file_path):
            raise TVMCException("File {} is not exist.".format(params_file_path))
        # pylint: disable=E1101
        exe = paddle.static.Executor(paddle.CPUPlace())
        prog, _, _ = paddle.static.load_inference_model(prefix, exe)
        return relay.frontend.from_paddle(prog, shape_dict=shape_dict, **kwargs)
class RelayFrontend(Frontend):
    """Relay frontend for TVMC"""

    @staticmethod
    def name():
        return "relay"

    @staticmethod
    def suffixes():
        return ["relay"]

    def load(self, path, shape_dict=None, **kwargs):
        """Parse a Relay text-format file into a module.

        Random data is generated for every model parameter that is not
        named in shape_dict, so that those parameters behave as constants.
        """
        with open(path, "r", encoding="utf-8") as relay_text:
            text = relay_text.read()
        if shape_dict is None:
            logger.warning(
                "Specify --input-shapes to ensure that model inputs "
                "will not be considered as constants."
            )

        def _validate_text(text):
            """Check the provided file contents.
            The relay.txt artifact contained in the MLF is missing the version header and
            the metadata which is required to use meta[relay.Constant]."""
            if re.compile(r".*\#\[version\.*").match(text) is None:
                raise TVMCException(
                    "The relay model does not include the required version information."
                )
            # Any meta[...] reference requires a #[metadata] section to resolve.
            if re.compile(r".*meta\[.+\].*", re.DOTALL).match(text):
                if "#[metadata]" not in text:
                    raise TVMCException(
                        "The relay model does not include the required #[metadata] section. "
                        "Use ir_mod.astext(show_meta_data=True) to export compatible code."
                    )

        _validate_text(text)
        ir_mod = parser.fromtext(text)
        # Names listed in shape_dict are real model inputs and must not be
        # replaced with generated parameter data below.
        if shape_dict:
            input_names = shape_dict.keys()
        else:
            input_names = []

        def _gen_params(ir_mod, skip_names=None):
            """Generate random data for every main() parameter not in skip_names."""
            main_func = ir_mod["main"]
            shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
            type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
            params = {}
            for name, shape in shape_dict.items():
                if skip_names and name in skip_names:
                    continue
                # Integer dtypes get uniform ints in [0, 128); everything else
                # gets uniform floats in [-1, 1) cast to the declared dtype.
                if "int" in type_dict[name]:
                    data = np.random.randint(128, size=shape, dtype=type_dict[name])
                else:
                    data = np.random.uniform(-1, 1, size=shape).astype(type_dict[name])
                params[name] = data
            return params

        params = _gen_params(ir_mod, skip_names=input_names)
        return ir_mod, params
# Registry of every frontend TVMC can load models with. Order matters:
# guess_frontend below returns the first class whose suffixes match.
ALL_FRONTENDS = [
    KerasFrontend,
    OnnxFrontend,
    TensorflowFrontend,
    TFLiteFrontend,
    PyTorchFrontend,
    PaddleFrontend,
    RelayFrontend,
]
def get_frontend_names():
    """Return the names of all supported frontends

    Returns
    -------
    list : list of str
        A list of frontend names as strings
    """
    names = []
    for frontend_cls in ALL_FRONTENDS:
        names.append(frontend_cls.name())
    return names
def get_frontend_by_name(name: str):
    """
    Look up and instantiate the frontend registered under ``name``.

    Parameters
    ----------
    name : str
        the name of a given frontend

    Returns
    -------
    frontend : tvm.driver.tvmc.Frontend
        An instance of the frontend that matches with
        the file extension provided in `path`.

    Raises
    ------
    TVMCException
        If no frontend is registered under that name.
    """
    matches = (cls for cls in ALL_FRONTENDS if cls.name() == name)
    frontend_cls = next(matches, None)
    if frontend_cls is None:
        raise TVMCException(
            "unrecognized frontend '{0}'. Choose from: {1}".format(name, get_frontend_names())
        )
    return frontend_cls()
def guess_frontend(path: str):
    """
    Infer which framework produced a model file, based on its file extension.

    Parameters
    ----------
    path : str
        The path to the model file.

    Returns
    -------
    frontend : tvm.driver.tvmc.Frontend
        An instance of the frontend that matches with
        the file extension provided in `path`.

    Raises
    ------
    TVMCException
        If no registered frontend claims the file's extension.
    """
    extension = Path(path).suffix.lower()
    # Drop the leading dot that pathlib keeps on non-empty suffixes.
    extension = extension[1:] if extension.startswith(".") else extension

    for frontend_cls in ALL_FRONTENDS:
        if extension in frontend_cls.suffixes():
            return frontend_cls()

    raise TVMCException("failed to infer the model format. Please specify --model-format")
def load_model(
    path: str,
    model_format: Optional[str] = None,
    shape_dict: Optional[Dict[str, List[int]]] = None,
    **kwargs,
):
    """Load a model from a supported framework and convert it
    into an equivalent relay representation.

    Parameters
    ----------
    path : str
        The path to the model file.
    model_format : str, optional
        The underlying framework used to create the model.
        If not specified, this will be inferred from the file type.
    shape_dict : dict, optional
        Mapping from input names to their shapes.

    Returns
    -------
    tvmc_model : TVMCModel
        The produced model package.
    """
    # Prefer an explicitly requested frontend; otherwise infer from the suffix.
    frontend = (
        get_frontend_by_name(model_format)
        if model_format is not None
        else guess_frontend(path)
    )
    mod, params = frontend.load(path, shape_dict, **kwargs)
    return TVMCModel(mod, params)
| 14,237 | 28.724426 | 99 | py |
tvm | tvm-main/python/tvm/driver/tvmc/__main__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMC - TVM driver command-line interface
"""
from tvm.driver import tvmc
# Dispatch to the TVMC CLI entry point when run as `python -m tvm.driver.tvmc`.
if __name__ == "__main__":
    tvmc.main.main()
| 912 | 35.52 | 62 | py |
tvm | tvm-main/python/tvm/driver/tvmc/tracker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language
"""
TVMC Remote Tracker
"""
import logging
from urllib.parse import urlparse
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
def tracker_host_port_from_cli(rpc_tracker_str):
    """Extract hostname and (optional) port from strings
    like "1.2.3.4:9090" or "4.3.2.1".

    Used as a helper function to cover --rpc-tracker
    command line argument, in different subcommands.

    Parameters
    ----------
    rpc_tracker_str : str
        hostname (or IP address) and port of the RPC tracker,
        in the format 'hostname[:port]'.

    Returns
    -------
    rpc_hostname : str or None
        hostname or IP address, extracted from input.
    rpc_port : int or None
        port number extracted from input (9090 default).
    """
    # Guard clause: no tracker string means no hostname/port at all.
    if not rpc_tracker_str:
        return None, None

    # Prefix with "//" so urlparse treats the string as a network location.
    parsed = urlparse("//%s" % rpc_tracker_str)
    hostname = parsed.hostname
    port = parsed.port or 9090

    log = logging.getLogger("TVMC")
    log.info("RPC tracker hostname: %s", hostname)
    log.info("RPC tracker port: %s", port)

    return hostname, port
| 1,840 | 30.741379 | 62 | py |
tvm | tvm-main/python/tvm/driver/tvmc/config_options.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
manipulate json config file to work with TVMC
"""
import os
import json
from tvm._ffi import libinfo
from tvm.driver.tvmc import TVMCException
CONFIGS_JSON_DIR = None
class ConfigsJsonNotFoundError(TVMCException):
    """Raised when the JSON configs dirtree cannot be found in any candidate path."""
def get_configs_json_dir() -> str:
    """Find the 'configs' directory, containing the JSON files used to configure tvmc
    with persistent argument settings.

    The result is cached in the module-level CONFIGS_JSON_DIR global, so the
    directory search runs at most once per process.

    Returns
    -------
    str :
        The path to the 'configs' directory

    Raises
    ------
    ConfigsJsonNotFoundError
        If no candidate location contains a 'configs' directory.
    """
    global CONFIGS_JSON_DIR
    if CONFIGS_JSON_DIR is None:
        # If a non-default location for the build directory is used, e.g. set via TVM_LIBRARY_PATH
        # we need to provide the user a way to overwrite CONFIGS_JSON_DIR as well.
        if os.environ.get("TVM_CONFIGS_JSON_DIR", None):
            user_config_dir = os.environ["TVM_CONFIGS_JSON_DIR"]
            if os.path.isdir(user_config_dir):
                CONFIGS_JSON_DIR = user_config_dir
                return CONFIGS_JSON_DIR

        candidate_paths = []
        candidate_paths.extend(libinfo.find_lib_path())
        # When running from source, the configs directory will be located one directory above the
        # native libraries, so covering that case.
        candidate_paths.extend(
            [os.path.abspath(os.path.join(lib_path, "..")) for lib_path in libinfo.find_lib_path()]
        )
        for path in candidate_paths:
            configs_path = os.path.join(os.path.dirname(path), "configs")
            if os.path.isdir(configs_path):
                CONFIGS_JSON_DIR = configs_path
                break
        # for/else: no break above means no candidate contained 'configs'.
        else:
            raise ConfigsJsonNotFoundError()

    return CONFIGS_JSON_DIR
def find_json_file(name, path):
    """search for json file given file name a path

    Parameters
    ----------
    name: string
        the file name need to be searched
    path: string
        path to search at

    Returns
    -------
    string
        the full path to that file, or an empty string when not found
    """
    # Walk the tree and return on the first directory containing the file.
    for directory, _subdirs, filenames in os.walk(path):
        if name in filenames:
            return os.path.join(directory, name)
    return ""
def read_and_convert_json_into_dict(config_args):
    """Read json configuration file and return a dictionary with all parameters

    Parameters
    ----------
    args: argparse.Namespace
        Arguments from command line parser holding the json file path.

    Returns
    -------
    dictionary
        dictionary with all the json arguments keys and values

    Raises
    ------
    TVMCException
        If the configuration file cannot be found.
    """
    # Pre-initialize so the error message never hits an unbound local when the
    # direct-path branch is taken (the original raised NameError in that case).
    config_dir = None
    try:
        if ".json" not in config_args.config:
            config_args.config = config_args.config.strip() + ".json"

        if os.path.isfile(config_args.config):
            json_config_file = config_args.config
        else:
            config_dir = get_configs_json_dir()
            json_config_file = find_json_file(config_args.config, config_dir)
        # Use a context manager so the file handle is always closed
        # (the original leaked it via json.load(open(...))).
        with open(json_config_file, "rb") as json_file:
            return json.load(json_file)
    except FileNotFoundError as err:
        raise TVMCException(
            f"File {config_args.config} does not exist at {config_dir} or is wrong format."
        ) from err
def parse_target_from_json(one_target, command_line_list):
    """parse the targets out of the json file struct

    Parameters
    ----------
    one_target: dict
        dictionary with all target's details
    command_line_list: list
        list to update with target parameters

    Returns
    -------
    The target kind taken from the first entry of ``one_target``
    (the JSON format is expected to list "kind" first).
    """
    # Flatten the dict: the "kind" entry contributes its bare value, every
    # other entry contributes a (key, value) pair. As in the original, the
    # first entry is treated as the kind and only the first extra option is
    # turned into a CLI flag.
    entries = []
    for key, value in one_target.items():
        entries.append(value if key == "kind" else (key, value))
    target_kind = entries[0]
    extra_options = entries[1:]

    internal_dict = {}
    if extra_options:
        option_name, option_value = extra_options[0]
        internal_dict[f"target_{target_kind}_{option_name}"] = option_value
    command_line_list.append(internal_dict)

    return target_kind
def convert_config_json_to_cli(json_params):
    """convert all configuration keys & values from dictionary to cli format

    Parameters
    ----------
    json_params: dictionary
        dictionary with all configuration keys & values.

    Returns
    -------
    list
        list of single-entry dictionaries, one per CLI option
    """
    cli_entries = []
    for section, section_value in json_params.items():
        if section == "targets":
            # Each target contributes its own option dict plus one combined
            # "target" entry listing every kind.
            kinds = [
                parse_target_from_json(target_entry, cli_entries)
                for target_entry in section_value
            ]
            cli_entries.append({"target": ", ".join(map(str, kinds))})

        elif section in ("executor", "runtime"):
            # The "kind" entry (expected first) names the section and sets the
            # prefix used by the remaining options.
            for option, value in section_value.items():
                if option == "kind":
                    kind_prefix = f"{value}_"
                    flag_name = section
                else:
                    flag_name = f"{section}_{kind_prefix}{option}"
                cli_entries.append({flag_name.replace("-", "_"): value})

        elif isinstance(section_value, dict):
            # Generic mappings become a list of "key=value" strings.
            flag_name = section.replace("-", "_")
            cli_entries.append(
                {flag_name: [f"{key}={value}" for key, value in section_value.items()]}
            )

        else:
            cli_entries.append({section.replace("-", "_"): section_value})

    return cli_entries
| 6,464 | 30.536585 | 99 | py |
tvm | tvm-main/python/tvm/driver/tvmc/autotuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""
Provides support to auto-tuning networks using AutoTVM.
"""
import os.path
import logging
import time
from copy import deepcopy
from typing import Any, Optional, Dict, List, Union
from urllib.parse import urlparse
import tvm
from tvm import autotvm, auto_scheduler
from tvm.auto_scheduler.search_task import HardwareParams
from tvm.autotvm.tuner import GATuner
from tvm.autotvm.tuner import GridSearchTuner
from tvm.autotvm.tuner import RandomTuner
from tvm.autotvm.tuner import XGBTuner
from tvm.target import Target
from . import TVMCException, composite_target, frontends
from .main import register_parser
from .model import TVMCModel
from .target import target_from_cli, generate_target_args, reconstruct_target_args
from .shape_parser import parse_shape_string
from .transform import generate_transform_args, parse_graph_transform_args, apply_graph_transforms
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
@register_parser
def add_tune_parser(subparsers, _, json_params):
    """Include parser for 'tune' subcommand.

    Registers all tuning options (shared, AutoScheduler-specific and
    AutoTVM-specific), then applies defaults coming from the JSON config.
    """
    parser = subparsers.add_parser("tune", help="auto-tune a model")
    parser.set_defaults(func=drive_tune)
    parser.add_argument(
        "--early-stopping",
        type=int,
        help="minimum number of trials before early stopping",
    )

    # There is some extra processing required to define the actual default value
    # for --min-repeat-ms. This is done in `tune_model`.
    parser.add_argument(
        "--min-repeat-ms",
        default=None,
        type=int,
        help="minimum time to run each trial, in milliseconds. "
        "Defaults to 0 on x86 and 1000 on all other targets",
    )
    parser.add_argument(
        "--model-format",
        choices=frontends.get_frontend_names(),
        help="specify input model format",
    )
    parser.add_argument(
        "--number",
        default=10,
        type=int,
        help="number of runs a single repeat is made of. "
        "The final number of tuning executions is: "
        "(1 + number * repeat)",
    )
    parser.add_argument(
        "-o",
        "--output",
        required=True,
        help="output file to store the tuning records for the tuning process",
    )
    parser.add_argument("-v", "--verbose", action="count", default=0, help="increase verbosity.")
    parser.add_argument(
        "--parallel",
        default=4,
        type=int,
        help="the maximum number of parallel devices to use when tuning",
    )
    parser.add_argument(
        "--repeat",
        type=int,
        default=1,
        help="how many times to repeat each measurement",
    )
    parser.add_argument(
        "--rpc-key",
        help="the RPC tracker key of the target device. "
        "Required when --rpc-tracker is provided.",
    )
    parser.add_argument(
        "--rpc-tracker",
        help="hostname (required) and port (optional, defaults to 9090) of the RPC tracker, "
        "e.g. '192.168.0.100:9999'",
    )
    generate_target_args(parser)
    parser.add_argument(
        "--target-host",
        help="the host compilation target.",
    )
    parser.add_argument("--timeout", type=int, default=10, help="compilation timeout, in seconds")
    parser.add_argument(
        "--trials",
        type=int,
        default=1000,
        help="the maximum number of tuning trials to perform",
    )
    parser.add_argument(
        "--tuning-records",
        metavar="PATH",
        help="path to an auto-tuning log file by AutoTVM.",
    )
    generate_transform_args(parser)
    parser.add_argument(
        "--enable-autoscheduler",
        help="enable tuning the graph through the AutoScheduler tuner",
        action="store_true",
    )
    parser.add_argument(
        "--tasks",
        default="all",
        help="which tasks should be tuned, i.e. 0 0,2 3-5 all list",
    )

    # Options below only take effect together with --enable-autoscheduler.
    auto_scheduler_group = parser.add_argument_group(
        "AutoScheduler options",
        "AutoScheduler options, used when --enable-autoscheduler is provided",
    )
    auto_scheduler_group.add_argument(
        "--cache-line-bytes",
        type=int,
        help="the size of cache line in bytes. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--num-cores",
        type=int,
        help="the number of device cores. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--vector-unit-bytes",
        type=int,
        help="the width of vector units in bytes. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--max-shared-memory-per-block",
        type=int,
        help="the max shared memory per block in bytes. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--max-local-memory-per-block",
        type=int,
        help="the max local memory per block in bytes. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--max-threads-per-block",
        type=int,
        help="the max number of threads per block. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--max-vthread-extent",
        type=int,
        help="the max vthread extent. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--warp-size",
        type=int,
        help="the thread numbers of a warp. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--include-simple-tasks",
        help="whether to extract simple tasks that do not include complicated ops",
        action="store_true",
    )
    auto_scheduler_group.add_argument(
        "--log-estimated-latency",
        help="whether to log the estimated latency to the file after tuning a task",
        action="store_true",
    )
    # Options below only take effect when the AutoScheduler is NOT enabled.
    autotvm_group = parser.add_argument_group(
        "AutoTVM options",
        "AutoTVM options, used when the AutoScheduler is not enabled",
    )
    autotvm_group.add_argument(
        "--tuner",
        choices=[
            "ga",
            "gridsearch",
            "random",
            "xgb",
            "xgb_knob",
            "xgb_itervar",
            "xgb_curve",
            "xgb_rank",
            "xgb_rank_knob",
            "xgb_rank_itervar",
            "xgb_rank_curve",
            "xgb_rank_binary",
            "xgb_rank_binary_knob",
            "xgb_rank_binary_itervar",
            "xgb_rank_binary_curve",
        ],
        default="xgb",
        help="type of tuner to use when tuning with autotvm.",
    )
    # TODO (@leandron) This is a path to a physical file, but
    # can be improved in future to add integration with a modelzoo
    # or URL, for example.
    parser.add_argument("FILE", help="path to the input model file")
    parser.add_argument(
        "--input-shapes",
        help="specify non-generic shapes for model to run, format is "
        '"input_name:[dim1,dim2,...,dimn] input_name2:[dim1,dim2]"',
        type=parse_shape_string,
    )
    # Apply persistent defaults loaded from the JSON configuration file(s).
    for one_entry in json_params:
        parser.set_defaults(**one_entry)
def drive_tune(args):
    """Invoke auto-tuning with command line arguments

    Parameters
    ----------
    args: argparse.Namespace
        Arguments from command line parser.

    Raises
    ------
    TVMCException
        If the input file is missing, or an RPC tracker is given
        without an RPC key.
    """
    if not os.path.isfile(args.FILE):
        raise TVMCException(
            f"Input file '{args.FILE}' doesn't exist, is a broken symbolic link, or a directory."
        )
    tvmc_model = frontends.load_model(args.FILE, args.model_format, shape_dict=args.input_shapes)
    # Specify hardware parameters, although they'll only be used if autoscheduling.
    hardware_params = auto_scheduler.HardwareParams(
        num_cores=args.num_cores,
        vector_unit_bytes=args.vector_unit_bytes,
        cache_line_bytes=args.cache_line_bytes,
        max_shared_memory_per_block=args.max_shared_memory_per_block,
        max_local_memory_per_block=args.max_local_memory_per_block,
        max_threads_per_block=args.max_threads_per_block,
        max_vthread_extent=args.max_vthread_extent,
        warp_size=args.warp_size,
        target=args.target,
        target_host=args.target_host,
    )
    # Resolve the optional "host[:port]" tracker string into hostname/port.
    if args.rpc_tracker:
        parsed_url = urlparse("//%s" % args.rpc_tracker)
        rpc_hostname = parsed_url.hostname
        rpc_port = parsed_url.port or 9090
        logger.info("RPC tracker hostname: %s", rpc_hostname)
        logger.info("RPC tracker port: %s", rpc_port)
        if not args.rpc_key:
            raise TVMCException("need to provide an RPC tracker key (--rpc-key) for remote tuning")
    else:
        rpc_hostname = None
        rpc_port = None
    transform_args = parse_graph_transform_args(args)
    tune_model(
        tvmc_model,
        args.target,
        tuning_records=args.output,
        prior_records=args.tuning_records,
        enable_autoscheduler=args.enable_autoscheduler,
        rpc_key=args.rpc_key,
        hostname=rpc_hostname,
        port=rpc_port,
        trials=args.trials,
        target_host=args.target_host,
        tuner=args.tuner,
        min_repeat_ms=args.min_repeat_ms,
        early_stopping=args.early_stopping,
        timeout=args.timeout,
        repeat=args.repeat,
        number=args.number,
        parallel=args.parallel,
        hardware_params=hardware_params,
        include_simple_tasks=args.include_simple_tasks,
        log_estimated_latency=args.log_estimated_latency,
        additional_target_options=reconstruct_target_args(args),
        tasks_filter=args.tasks,
        **transform_args,
    )
def filter_tasks(
    tasks: Union[List[auto_scheduler.SearchTask], List[autotvm.task.Task]],
    expr: str,
):
    """Utility to filter a list of tasks (AutoTVM or AutoScheduler) based on
    a user-supplied string expression.

    The expression is a comma-separated mix of single indices ("3"),
    inclusive ranges ("2-5", "-4", "2-"), "all", and "list"/"help"
    (which request that the task list be printed).

    Parameters
    ----------
    tasks: list
        A list of extracted AutoTVM or AutoScheduler tasks.
    expr: str
        User-supplied expression to be used for filtering.

    Returns
    -------
    The (possibly filtered) task list, plus a flag telling the caller
    whether a listing was requested.
    """
    assert isinstance(expr, str), "Expected filter expression of string type"
    assert len(expr) > 0, "Got empty filter expression"

    num_tasks = len(tasks)
    list_requested = False
    filter_requested = False
    chosen = []

    # Groups of keywords are comma-separated.
    for token in expr.split(","):
        if token in ("list", "help"):
            list_requested = True
        elif token == "all":
            # Note: "all" replaces the selection but does not, by itself,
            # turn filtering on — matching the historical behavior.
            chosen = list(range(num_tasks))
        else:
            filter_requested = True
            if "-" in token:
                assert token.count("-") == 1, "Malformed range expression"
                assert len(token) > 1, "Missing lhs or rhs for range expression"
                lhs_text, rhs_text = token.split("-")[:2]
                lhs = int(lhs_text) if lhs_text else 0
                rhs = int(rhs_text) if rhs_text else num_tasks - 1
                assert 0 <= lhs < num_tasks, "Left-hand side expression out of range"
                assert 0 <= rhs < num_tasks, "Right-hand side expression out of range"
                chosen.extend(range(lhs, rhs + 1))
            else:
                assert isinstance(token, str)
                index = int(token)
                assert 0 <= index < num_tasks, "Task index out of range"
                chosen.append(index)

    if filter_requested:
        # De-duplicate indices, then keep tasks in their original order.
        keep = set(chosen)
        tasks = [task for position, task in enumerate(tasks) if position in keep]

    return tasks, list_requested
def gen_task_list(
    tasks: Union[List[auto_scheduler.SearchTask], List[autotvm.task.Task]],
    enable_autoscheduler: bool,
):
    """Utility for printing a list of tasks (AutoTVM or AutoScheduler)
    to the terminal.

    Parameters
    ----------
    tasks: list
        A list of extracted AutoTVM or AutoScheduler tasks.
    enable_autoscheduler: bool
        Wether the tasks are extracted with AutoScheduler or AutoTVM.

    Returns
    -------
    A multi-line string with one numbered entry per task.
    """

    def _shorten(text, length):
        # Truncate long descriptions, marking the cut with an ellipsis.
        return text if len(text) < length else text[: length - 3] + "..."

    entries = []
    for index, task in enumerate(tasks):
        if enable_autoscheduler:
            description = task.desc if len(task.desc) != 0 else "Unnamed"
            entries.append(" {}. {}".format(index, _shorten(description, 100)))
        else:
            space = task.config_space
            entries.append(
                " {}. {} (len={})".format(
                    index,
                    _shorten(str(task), 100),
                    "?" if space is None else len(space),
                )
            )
    return "Available Tasks for tuning:\n" + "\n".join(entries)
def tune_model(
    tvmc_model: TVMCModel,
    target: str,
    tuning_records: Optional[str] = None,
    prior_records: Optional[str] = None,
    enable_autoscheduler: bool = False,
    rpc_key: Optional[str] = None,
    hostname: Optional[str] = None,
    port: Optional[Union[int, str]] = 9090,
    trials: int = 10000,
    target_host: Optional[str] = None,
    tuner: str = "xgb",
    min_repeat_ms: Optional[int] = None,
    early_stopping: Optional[int] = None,
    timeout: int = 10,
    repeat: int = 1,
    number: int = 10,
    parallel: int = 4,
    hardware_params: Optional[HardwareParams] = None,
    include_simple_tasks: bool = False,
    log_estimated_latency: bool = False,
    additional_target_options: Optional[Dict[str, Dict[str, Any]]] = None,
    tasks_filter: str = "all",
    desired_layout: Optional[str] = None,
    desired_layout_ops: Optional[List[str]] = None,
    mixed_precision: bool = False,
    mixed_precision_ops: Optional[List[str]] = None,
    mixed_precision_calculation_type: Optional[str] = None,
    mixed_precision_acc_type: Optional[str] = None,
):
    """Use tuning to automatically optimize the functions in a model.

    Parameters
    ----------
    tvmc_model : TVMCModel
        The model to be optimized.
    target : str
        Compilation target as plain string, inline JSON or path to a JSON file.
    tuning_records: str, optional
        The path to a file that tuning results will be saved to. If not specified,
        a temporary file will be used.
    prior_records: str, optional
        A path to previous tuning results that will be used to hot-start the tuning
        cost model if provided.
    enable_autoscheduler : bool, optional
        When true, use autoscheduling rather than autotvm. This should produce
        faster kernels for compatible model-target pairs.
    rpc_key : str, optional
        The RPC tracker key of the target device. Required when rpc_tracker is provided.
    hostname : str, optional
        The IP address of an RPC tracker, used when benchmarking remotely.
    port : int or str, optional
        The port of the RPC tracker to connect to. Defaults to 9090.
    trials : int, optional
        The number of schedules to try out for the entire model. Note that the default
        value is chosen as a decent average for most models, but larger models may need
        more trials to reach a good result while smaller models will converge with fewer
        trials.
    tuner : str, optional
        The type of tuner to use when tuning with autotvm. Can be one of
        "ga", "gridsearch", "random", "xgb", "xgb_knob", "xgb_itervar", "xgb_curve",
        "xgb_rank", "xgb_rank_knob", "xgb_rank_itervar", "xgb_rank_binary", "xgb_rank_binary_knob",
        "xgb_rank_binary_itervar" and "xgb_rank_binary_curve".
    min_repeat_ms : int, optional
        Minimum time to run each trial. Defaults to 0 on x86 and 1000 on other targets.
    early_stopping : int, optional
        When specified, stop tuning after this number of trials if results aren't improving.
    timeout : int, optional,
        If a kernel trial lasts longer than this duration in seconds, it will be
        considered a failure.
    repeat : int, optional
        How many times each measurement should be repeated.
    number : int, optional
        The number of runs a single repeat is made of.
    parallel : int, optional
        The maximum number of parallel devices to use when tuning.
    hardware_params : auto_scheduler.HardwareParams, optional
        When using the autoscheduler, this object defines the configuration of the target hardware.
    include_simple_tasks : bool, optional
        Whether to extract simple operations or only computationally intensive ones when using
        the autoscheduler.
    log_estimated_latency : bool, optional
        If using the autoscheduler, write the estimated latency at each step of tuning to file.
    additional_target_options: Optional[Dict[str, Dict[str, Any]]]
        Additional target options in a dictionary to combine with initial Target arguments
    tasks_filter : str, optional
        Filter which tasks should be tuned or output a list of the extracted tasks.
        Examples: 0 0,2 3-5 all list
    desired_layout: str, optional
        Can be one of "NCHW" or "NHWC". When specified, compatible operations in the graph
        will have their layout set to this format. Tasks will then be tuned using this
        specified layout.
    desired_layout_ops: list[str], optional
        The list of operators to be transformed with desired layout.
    mixed_precision: bool
        To enable mixed precision transformation.
    mixed_precision_ops: list[str], optional
        The list of operators to be converted to mixed precision.
    mixed_precision_calculation_type: str
        The calculation dtype to be used while mixed precision.
    mixed_precision_acc_type: str
        The accumulation data type to be used while mixed precision.

    Returns
    -------
    tuning_records : str
        The path to the produced tuning log file.
    """
    # Collect the graph-transform kwargs (layout / mixed precision) from this
    # call's own arguments.
    transform_args = parse_graph_transform_args(locals())
    target, extra_targets = target_from_cli(target, additional_target_options)
    target, target_host = Target.canon_target_and_host(target, target_host)
    # TODO(jwfromm) Remove this deepcopy once AlterOpLayout bug that mutates source
    # model is fixed. For now, creating a clone avoids the issue.
    mod = deepcopy(tvmc_model.mod)
    params = tvmc_model.params
    with tvm.transform.PassContext(opt_level=3):
        if tuning_records is None:
            tuning_records = tvmc_model.default_tuning_records_path()
        # Partition the module for any extra (composite) codegen targets first.
        for codegen_from_cli in extra_targets:
            codegen = composite_target.get_codegen_by_target(codegen_from_cli["name"])
            partition_function = codegen["pass_pipeline"]
            mod = partition_function(mod, params, **codegen_from_cli["opts"])
        # min_repeat_ms should be:
        # a. the value provided by the user, if any, or
        # b. 0ms in case target is "cpu"; otherwise 1000ms
        if min_repeat_ms is None:
            min_repeat_ms = 0 if target.keys[0] == "cpu" else 1000
            logger.info("Default --min-repeat-ms for this target is %s", min_repeat_ms)
        # Build the measurement runner: remote RPC when a key is given,
        # otherwise a local runner / RPC measure context.
        if rpc_key:
            if hostname is None or port is None:
                raise TVMCException(
                    "You must provide a hostname and port to connect to a remote RPC device."
                )
            if isinstance(port, str):
                port = int(port)
            logger.info("Tuning will be performed on device %s at %s:%d.", rpc_key, hostname, port)
            runner_ctor = auto_scheduler.RPCRunner if enable_autoscheduler else autotvm.RPCRunner
            runner = runner_ctor(
                key=rpc_key,
                host=hostname,
                port=port,
                number=number,
                repeat=repeat,
                n_parallel=parallel,
                timeout=timeout,
                min_repeat_ms=min_repeat_ms,
            )
        else:
            logger.info("Starting localhost tuning.")
            runner_ctor = (
                auto_scheduler.LocalRPCMeasureContext
                if enable_autoscheduler
                else autotvm.LocalRunner
            )
            local_server = runner_ctor(
                number=number,
                repeat=repeat,
                timeout=timeout,
                min_repeat_ms=min_repeat_ms,
            )
            # For autoscheduling on some devices, we need to maintain a
            # LocalRPCMeasureContext object.
            if enable_autoscheduler:
                runner = local_server.runner
            else:
                runner = local_server
        if enable_autoscheduler:
            tasks, weights = autoscheduler_get_tuning_tasks(
                mod=mod,
                params=params,
                target=target,
                transform_args=transform_args,
                hardware_params=hardware_params,
                include_simple_tasks=include_simple_tasks,
            )
        else:
            tasks = autotvm_get_tuning_tasks(
                mod=mod,
                params=params,
                target=target,
                transform_args=transform_args,
            )
        # Filter extracted tasks by provided user expression
        if tasks_filter:
            tasks, do_list = filter_tasks(tasks, tasks_filter)
            if do_list:
                # "list"/"help" only prints the tasks; no tuning happens.
                print(gen_task_list(tasks, enable_autoscheduler))
                return None
        if len(tasks) == 0:
            logger.info("No tasks have been selected for tuning.")
            return None
        else:
            logger.info("Selected %s tasks for tuning.", len(tasks))
        if enable_autoscheduler:
            # Create the autoscheduler tuning options
            tuning_options = auto_scheduler.TuningOptions(
                num_measure_trials=trials,
                measure_callbacks=[auto_scheduler.RecordToFile(tuning_records)],
                runner=runner,
                early_stopping=early_stopping,
            )
            logger.info("Autoscheduling with configuration: %s", tuning_options)
            # Schedule the tasks (i.e., produce a schedule for each task)
            schedule_tasks(tasks, weights, tuning_options, prior_records, log_estimated_latency)
        else:
            # In autotvm, trials is specified per task. We can convert the per-model input
            # provided to per-task trials by dividing by the number of tasks.
            trials = int(max(1, trials / max(len(tasks), 1)))
            logger.info("Autotuning with %d trials per task.", trials)
            tuning_options = {
                "tuner": tuner,
                "trials": trials,
                "early_stopping": early_stopping,
                "measure_option": autotvm.measure_option(
                    builder=autotvm.LocalBuilder(build_func="default"), runner=runner
                ),
                "tuning_records": prior_records,
            }
            logger.info("Autotuning with configuration: %s", tuning_options)
            tune_tasks(tasks, tuning_records, **tuning_options)
    return tuning_records
def autotvm_get_tuning_tasks(
    mod: tvm.IRModule,
    params: Dict[str, tvm.nd.NDArray],
    target: str,
    target_host: Optional[str] = None,
    transform_args: Optional[Dict[str, Any]] = None,
):
    """Get the autotvm tuning tasks for a given relay module.

    Parameters
    ----------
    mod : tvm.IRModule
        The relay module from which to extract tuning tasks.
    params : dict
        The params for the relay module.
    target : tvm.target.Target
        The compilation target.
    target_host : str, optional
        The compilation target for the host.
    transform_args: dict, optional
        Graph transformation arguments that are applied to the relay module.

    Returns
    -------
    tasks : list of autotvm.Tasks
        list of tasks to be tuned
    """
    # Normalize the target/host pair into canonical Target objects.
    target, target_host = Target.canon_target_and_host(target, target_host)
    # Apply layout / mixed-precision transforms before task extraction so the
    # tuned tasks match what will actually be compiled.
    mod = apply_graph_transforms(mod, transform_args)
    tasks = autotvm.task.extract_from_program(
        mod["main"],
        target=target,
        params=params,
    )
    return tasks
def autoscheduler_get_tuning_tasks(
    mod: tvm.IRModule,
    params: Dict[str, tvm.nd.NDArray],
    target: str,
    target_host: Optional[str] = None,
    transform_args: Optional[Dict[str, Any]] = None,
    hardware_params: Optional[HardwareParams] = None,
    include_simple_tasks: bool = False,
):
    """Get the autoscheduler tuning tasks for a given relay module.

    Parameters
    ----------
    mod : tvm.IRModule
        The relay module from which to extract tuning tasks.
    params : dict
        The params for the relay module.
    target : tvm.target.Target
        The compilation target.
    target_host : str, optional
        The compilation target for the host.
    transform_args: dict, optional
        Graph transformation arguments that are applied to the relay module.
    hardware_params : Optional[HardwareParams]
        Hardware parameters used for the search tasks
    include_simple_tasks : bool, optional
        Whether to extract simple (non-compute-intensive) tasks as well.

    Returns
    -------
    tasks : list of autotvm.Tasks
        list of tasks to be tuned
    weights : List[int]
        the weight (i.e. the number of appearance) of extracted tasks
    """
    # Canonicalize the targets, then transform the graph before extraction so
    # the extracted tasks match the graph that will be compiled.
    target, target_host = Target.canon_target_and_host(target, target_host)
    transformed = apply_graph_transforms(mod, transform_args)
    # extract_tasks returns a (tasks, weights) pair which we forward directly.
    extracted = auto_scheduler.extract_tasks(
        transformed["main"],
        params,
        target=target,
        hardware_params=hardware_params,
        include_simple_tasks=include_simple_tasks,
    )
    return extracted
def schedule_tasks(
    tasks: List[auto_scheduler.SearchTask],
    task_weights: List[float],
    tuning_options: auto_scheduler.TuningOptions,
    prior_records: Optional[str] = None,
    log_estimated_latency: bool = False,
):
    """Generate the schedules for the different tasks (i.e., subgraphs) contained in the module.
    Store the schedules in a json file that will be used later by the compiler.

    Parameters
    ----------
    tasks : list
        A list of auto_scheduler.SearchTask to tune.
    task_weights : list
        The weight (i.e. the number of appearance) of extracted tasks
    tuning_options: auto_scheduler.TuningOptions
        The options of tuning
    prior_records : str, optional
        The json file used to preload the autoscheduler
    log_estimated_latency : bool, optional
        If true, writes the estimated runtime of the model during each step of tuning to file.
    """
    # Always print the task table; optionally also log the estimated latency
    # per tuning step to a TSV file.
    callbacks = [auto_scheduler.task_scheduler.PrintTableInfo()]
    if log_estimated_latency:
        callbacks.append(auto_scheduler.task_scheduler.LogEstimatedLatency(("total_latency.tsv")))

    # Build the task scheduler (optionally warm-started from prior records)
    # and run the tuning loop.
    tuner = auto_scheduler.TaskScheduler(
        tasks, task_weights, load_log_file=prior_records, callbacks=callbacks
    )
    tuner.tune(tuning_options)
# Constructor arguments (loss_type, feature_type) for each XGBoost-based tuner
# name accepted on the command line. A feature_type of None means "use the
# XGBTuner default", so the kwarg is omitted entirely in that case.
_XGB_TUNER_VARIANTS = {
    "xgb": ("reg", None),
    "xgb_knob": ("reg", "knob"),
    "xgb_itervar": ("reg", "itervar"),
    "xgb_curve": ("reg", "curve"),
    "xgb_rank": ("rank", None),
    "xgb_rank_knob": ("rank", "knob"),
    "xgb_rank_itervar": ("rank", "itervar"),
    "xgb_rank_curve": ("rank", "curve"),
    "xgb_rank_binary": ("rank-binary", None),
    "xgb_rank_binary_knob": ("rank-binary", "knob"),
    "xgb_rank_binary_itervar": ("rank-binary", "itervar"),
    "xgb_rank_binary_curve": ("rank-binary", "curve"),
}


def _create_tuner(tuner, tsk):
    """Instantiate the tuner named `tuner` for task `tsk`.

    Raises
    ------
    TVMCException
        If `tuner` is not a recognized tuner name.
    """
    if tuner in _XGB_TUNER_VARIANTS:
        loss_type, feature_type = _XGB_TUNER_VARIANTS[tuner]
        if feature_type is None:
            # Omit feature_type so XGBTuner's own default applies.
            return XGBTuner(tsk, loss_type=loss_type)
        return XGBTuner(tsk, loss_type=loss_type, feature_type=feature_type)
    if tuner == "ga":
        return GATuner(tsk, pop_size=50)
    if tuner == "random":
        return RandomTuner(tsk)
    if tuner == "gridsearch":
        return GridSearchTuner(tsk)
    raise TVMCException("invalid tuner: %s " % tuner)


def tune_tasks(
    tasks: List[autotvm.task.Task],
    log_file: str,
    measure_option: autotvm.measure_option,
    tuner: str,
    trials: int,
    early_stopping: Optional[int] = None,
    tuning_records: Optional[str] = None,
):
    """Tune a list of tasks and output the history to a log file.

    Parameters
    ----------
    tasks : list
        A list of autotvm.Tasks to tune.
    log_file : str
        A file to output the tuning history, in JSON.
    measure_option : autotvm.measure_option
        Options to build and run a tuning task.
    tuner : str
        Which tuner to use.
    trials : int
        The maximum number of tuning trials to perform.
    early_stopping : int, optional
        The minimum number of tuning trials to perform.
        This will be equal to 'trials' if not specified.
    tuning_records: str, optional
        Path to the file produced by the tuning, to be used during
        tuning.
    """
    if not tasks:
        logger.warning("there were no tasks found to be tuned")
        return

    if not early_stopping:
        early_stopping = trials

    for i, tsk in enumerate(tasks):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))

        # Create a tuner (dispatch on the tuner name; raises on unknown names).
        tuner_obj = _create_tuner(tuner, tsk)

        # If transfer learning is being used, load the existing results
        if tuning_records and os.path.exists(tuning_records):
            logger.info("loading tuning records from %s", tuning_records)
            start_time = time.time()
            tuner_obj.load_history(autotvm.record.load_from_file(tuning_records))
            # Bug fix: previously called logging.info, which bypassed the
            # module's "TVMC" logger configuration used everywhere else.
            logger.info("loaded history in %.2f sec(s)", time.time() - start_time)

        # Never run more trials than the task's config space can offer.
        n_trial = min(trials, len(tsk.config_space))
        tuner_obj.tune(
            n_trial=n_trial,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(n_trial, prefix=prefix),
                autotvm.callback.log_to_file(log_file),
            ],
        )
| 31,628 | 35.565318 | 99 | py |
tvm | tvm-main/python/tvm/driver/tvmc/arguments.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMC Argument Parsing
"""
import argparse
from tvm.driver.tvmc import TVMCException
class TVMCSuppressedArgumentParser(argparse.ArgumentParser):
    """An ArgumentParser variant that never prints usage and never exits.

    TVMC builds some of its parsers dynamically from an existing (parent)
    parser. This helper clones the parent without a help option and with the
    usage banner suppressed, and converts argparse's exit-on-error behaviour
    (e.g. "error: invalid choice") into a TVMCException so the dynamic parser
    construction is not interrupted prematurely.
    """

    def __init__(self, parent, **kwargs):
        # Clone the parent's arguments. 'add_help=False' does not remove
        # '-h'/'--help' options already present in 'parent', but the main TVMC
        # parser is itself created with add_help=False (help is attached
        # later), so the cloned root parser ends up without help options —
        # which is all that is needed here. Subparsers of the main parser will
        # eventually have help enabled, which is enough for TVMC's use.
        super().__init__(parents=[parent], add_help=False, usage=argparse.SUPPRESS, **kwargs)

    def exit(self, status=0, message=None):
        # argparse calls exit() on parse errors such as "invalid choice";
        # raise instead of terminating the process. Note this does not catch
        # every parse failure (e.g. missing required arguments go through a
        # different path), but it covers what TVMC needs.
        raise TVMCException()
| 2,586 | 47.811321 | 100 | py |
tvm | tvm-main/python/tvm/driver/tvmc/fmtopt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Utils to format help text for project options.
"""
from textwrap import TextWrapper
# Maximum column length for accommodating option name and its choices.
# Help text is placed after it in a new line.
MAX_OPTNAME_CHOICES_TEXT_COL_LEN = 80

# Maximum column length for accommodating help text.
# 0 turns off formatting for the help text.
MAX_HELP_TEXT_COL_LEN = 0

# Justification of help text placed below option name + choices text.
HELP_TEXT_JUST = 2


def format_option(option_text, help_text, default_text, required=True):
    """Format option name, choices, and default text into a single help text.

    Parameters
    ----------
    options_text: str
        String containing the option name and option's choices formatted as:
        optname={opt0, opt1, ...}
    help_text: str
        Help text string.
    default_text: str
        Default text string.
    required: bool
        Flag that controls if a "(required)" text mark needs to be added to the
        final help text to inform if the option is a required one.

    Returns
    -------
    help_text_just: str
        Single justified help text formatted as:
            optname={opt0, opt1, ... }
              HELP_TEXT. "(required)" | "Defaults to 'DEFAULT'."
    """
    optname, choices_text = option_text.split("=", 1)
    prefix_len = len(optname)

    # Wrap the choices so that "optname=" plus the first wrapped line fits the
    # column budget; continuation lines are right-justified one column past
    # "optname=" so they align under the opening brace.
    wrapped = TextWrapper(width=MAX_OPTNAME_CHOICES_TEXT_COL_LEN - prefix_len).wrap(choices_text)
    justified = [optname + "=" + wrapped[0]]
    justified.extend(line.rjust(prefix_len + 1 + len(line)) for line in wrapped[1:])
    choices_chunk = "\n".join(justified)

    # The help text continues the generated line, so lower-case its first char.
    body = help_text[0].lower() + help_text[1:]
    if MAX_HELP_TEXT_COL_LEN > 0:
        help_lines = TextWrapper(width=MAX_HELP_TEXT_COL_LEN).wrap(body)
    else:
        # Formatting disabled: keep the help text on a single line.
        help_lines = [body]
    help_chunk = "\n".join(line.rjust(HELP_TEXT_JUST + len(line)) for line in help_lines)

    # An option might be required for one method but optional for another one.
    # If the option is required for one method there is no default for it when
    # used in that method, hence suppress the default text in that case.
    if default_text and not required:
        help_chunk += " " + default_text
    if required:
        help_chunk += " (required)"

    return choices_chunk + "\n" + help_chunk
| 4,074 | 33.82906 | 98 | py |
tvm | tvm-main/python/tvm/driver/tvmc/workspace_pools.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Functions for processing dynamic workspace pool TVMC args
"""
import logging
import re
from tvm.driver.tvmc import TVMCException
from tvm.target import Target
from tvm.ir.memory_pools import PoolInfoProperties, WorkspaceMemoryPools, WorkspacePoolInfo
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
def generate_workspace_pools_args(parser):
    """Generates arguments for each Workspace Pools's options"""

    def _add(flag, help_text, **kwargs):
        # Register one pool flag; values are parsed later by the
        # workspace_pools_recombobulate helpers.
        parser.add_argument(flag, help=help_text, **kwargs)

    _add(
        "--workspace-pools",
        "The name of the memory pool\n"
        "        Example usage: --workspace-pools=flash",
    )
    _add(
        "--workspace-pools-targets",
        "The name of the targets specified for the memory pool\n"
        "        Example usage: --workspace-pools-targets=flash:llvm",
        action="append",
    )
    _add(
        "--workspace-pools-size-hint-bytes",
        "The expected size hint to be used by the allocator.\n"
        "        Example usage: --workspace-pools-size-hint-bytes=flash:8",
        nargs="?",
        action="append",
    )
    _add(
        "--workspace-pools-clock-frequency-hz",
        "The clock frequency that the memory pool runs at in Hz.\n"
        "        Example usage: --workspace-pools-clock-frequency-hz=flash:70000000",
        nargs="?",
        action="append",
    )
    _add(
        "--workspace-pools-read-bandwidth-bytes-per-cycle",
        "The read bandwidth of the memory pool in bytes/cycle.\n"
        "        Example usage: --workspace-pools-read-bandwidth-bytes-per-cycle=flash:4",
        nargs="?",
        action="append",
    )
    _add(
        "--workspace-pools-write-bandwidth-bytes-per-cycle",
        "The write bandwidth of the memory pool in bytes/cycle.\n"
        "        Example usage: --workspace-pools-write-bandwidth-bytes-per-cycle=flash:8",
        nargs="?",
        action="append",
    )
    _add(
        "--workspace-pools-read-latency-cycles",
        "The read latency of the memory pool in cycles.\n"
        "        Example usage: --workspace-pools-read-latency-cycles=flash:4",
        nargs="?",
        action="append",
    )
    _add(
        "--workspace-pools-write-latency-cycles",
        "The write latency of the memory pool in cycles.\n"
        "        Example usage: --workspace-pools-write-latency-cycles=flash:8",
        nargs="?",
        action="append",
    )
    _add(
        "--workspace-pools-target-burst-bytes",
        "The burst length of the memory pool in bytes per target.\n"
        "        Example usage: --workspace-pools-target-burst-bytes=flash:accel:1",
        action="append",
    )
def _parse_target_burst(attr_str, pool_name):
    """Convert the per-target burst-byte strings for *pool_name* into ints."""
    if pool_name not in attr_str:
        # No burst bytes configured for this pool.
        return {}
    per_target = attr_str[pool_name]
    return {tgt: int(value) for tgt, value in per_target.items()}
def _parse_target_string(attr_str, targets, pool_name):
    """Resolve the comma-separated target names in *attr_str* against *targets*.

    Raises TVMCException when no target string was given for the pool or when
    none of the requested names match a defined target.
    """
    if attr_str is None:
        raise TVMCException(f'No target specified for Workspace Pool "{pool_name}"')
    # NOTE: the nested-list shape is kept on purpose — the error message below
    # interpolates this exact value verbatim.
    target_name = [re.split(",", attr_str)]
    matched_targets = []
    for candidate in targets:
        if any(candidate.kind.name in requested for requested in target_name[0]):
            matched_targets.append(candidate)
    if not matched_targets:
        raise TVMCException(f'Workspace Pool "{pool_name}" using undefined Target "{target_name}"')
    return matched_targets
def _split_pools_to_pool_names(attr_str):
    """Split the --workspace-pools value into a list of pool names."""
    if not attr_str:
        # Covers both None (flag absent) and the empty string.
        return []
    return re.split(",", attr_str)
def _parse_target_attributes_of_pool_name(attr_str, targets):
    """Parse "pool:target:value" CLI entries into {pool: {target: value}}.

    Raises TVMCException when an entry references a target name that is not in
    *targets*.
    """
    if not targets or attr_str is None:
        return {}
    target_attributes = {}
    for pool_values in attr_str:
        pool_name, target_name, target_value = re.split(":", pool_values)
        # Create the pool's sub-dict on first sight of the pool name.
        pool_entry = target_attributes.setdefault(pool_name, {})
        matched = [candidate for candidate in targets if candidate.kind.name == target_name]
        if not matched:
            raise TVMCException(
                "The workspace pool target specification "
                "needs to contain a subset of the same TVM "
                "targets as when specifying targets to use."
            )
        pool_entry[matched[0]] = target_value
    return target_attributes
def _parse_attribute_of_pool_name(attr_str):
    """Parse "pool:value" CLI entries into a {pool: value} dict."""
    if not attr_str:
        return {}
    parsed = {}
    for entry in attr_str:
        # Split only on the first ':' so values may themselves contain colons.
        pool_name, value = entry.split(":", maxsplit=1)
        parsed[pool_name] = value
    return parsed
def workspace_pools_recombobulate(parsed, targets, extra_target):
    """Reconstructs the Workspace Pools args and returns a WorkspaceMemoryPool object

    Parameters
    ----------
    parsed : argparse.Namespace
        Parsed command line arguments carrying the --workspace-pools* values.
    targets : list of tvm.target.Target
        Targets already selected for compilation; assumed non-empty when any
        --workspace-pools flag is present (targets[0] is dereferenced below).
    extra_target : list of dict
        Additional targets from the CLI; each entry's "raw" key holds the raw
        target string.

    Returns
    -------
    WorkspaceMemoryPools or None
        The reconstructed pools, or None when --workspace-pools was not given.
    """
    # Per-pool attributes of the form "--workspace-pools-<attr>=pool:value".
    WORKSPACE_POOL_PARAMS = [
        "workspace_pools_size_hint_bytes",
        "workspace_pools_targets",
        "workspace_pools_clock_frequency_hz",
        "workspace_pools_read_bandwidth_bytes_per_cycle",
        "workspace_pools_write_bandwidth_bytes_per_cycle",
        "workspace_pools_read_latency_cycles",
        "workspace_pools_write_latency_cycles",
    ]
    # Attributes additionally keyed by target: "pool:target:value".
    WORKSPACE_POOL_TARGET_PARAMS = [
        "workspace_pools_target_burst_bytes",
    ]
    workspace_pools = _split_pools_to_pool_names(parsed.workspace_pools)
    if not workspace_pools:
        # No --workspace-pools flag given: nothing to reconstruct.
        return None
    # {attr_name: {pool_name: value}} for the plain per-pool attributes.
    parse_attribute_to_pool_name = {
        workspace_pool_param: _parse_attribute_of_pool_name(getattr(parsed, workspace_pool_param))
        for workspace_pool_param in WORKSPACE_POOL_PARAMS
    }
    # {attr_name: {pool_name: {target: value}}} for the per-target attributes.
    parse_target_burst_bytes_to_pool = {
        workspace_pool_param: _parse_target_attributes_of_pool_name(
            getattr(parsed, workspace_pool_param), targets
        )
        for workspace_pool_param in WORKSPACE_POOL_TARGET_PARAMS
    }
    # Load extra targets from CLI
    additional_targets = []
    for t in extra_target:
        additional_targets.append(Target(t["raw"], host=targets[0].host or targets[0]))
    target = targets + additional_targets
    if targets[0].host:
        # Make the host target matchable by pool target strings as well.
        target.append(targets[0].host)
    return WorkspaceMemoryPools(
        [
            WorkspacePoolInfo(
                pool_name,
                targets=_parse_target_string(
                    parse_attribute_to_pool_name["workspace_pools_targets"].get(pool_name),
                    target,
                    pool_name,
                ),
                pool_info_properties=PoolInfoProperties(
                    # NOTE(review): -1 appears to act as "attribute not given"
                    # for sizes/frequencies/bandwidths, while the latencies
                    # default to 0 — confirm against PoolInfoProperties.
                    size_hint_bytes=int(
                        parse_attribute_to_pool_name["workspace_pools_size_hint_bytes"].get(
                            pool_name, -1
                        )
                    ),
                    clock_frequency_hz=int(
                        parse_attribute_to_pool_name["workspace_pools_clock_frequency_hz"].get(
                            pool_name, -1
                        )
                    ),
                    read_bandwidth_bytes_per_cycle=int(
                        parse_attribute_to_pool_name[
                            "workspace_pools_read_bandwidth_bytes_per_cycle"
                        ].get(pool_name, -1)
                    ),
                    write_bandwidth_bytes_per_cycle=int(
                        parse_attribute_to_pool_name[
                            "workspace_pools_write_bandwidth_bytes_per_cycle"
                        ].get(pool_name, -1)
                    ),
                    read_latency_cycles=int(
                        parse_attribute_to_pool_name["workspace_pools_read_latency_cycles"].get(
                            pool_name, 0
                        )
                    ),
                    write_latency_cycles=int(
                        parse_attribute_to_pool_name["workspace_pools_write_latency_cycles"].get(
                            pool_name, 0
                        )
                    ),
                    target_burst_bytes=_parse_target_burst(
                        parse_target_burst_bytes_to_pool["workspace_pools_target_burst_bytes"],
                        pool_name,
                    ),
                ),
            )
            for pool_name in workspace_pools
        ]
    )
| 8,938 | 36.558824 | 99 | py |
tvm | tvm-main/python/tvm/driver/tvmc/registry.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This file contains functions for processing registry based inputs for the TVMC CLI
"""
from tvm.driver.tvmc import TVMCException
# We can't tell the type inside an Array but all current options are strings so
# it can default to that. Bool is used alongside Integer but aren't distinguished
# between as both are represented by IntImm
INTERNAL_TO_NATIVE_TYPE = {"runtime.String": str, "IntImm": int, "Array": str}
INTERNAL_TO_HELP = {"runtime.String": " string", "IntImm": "", "Array": " options"}


def _generate_registry_option_args(parser, registry, name):
    """Add one CLI argument per supported option of `name` in `registry`.

    Options whose internal type is not in INTERNAL_TO_NATIVE_TYPE are skipped.
    """
    target_group = parser.add_argument_group(f"{registry.flag_registry_name} {name}")
    for option_name, option_type in registry.list_registered_options(name).items():
        if option_type in INTERNAL_TO_NATIVE_TYPE:
            target_group.add_argument(
                f"--{registry.flag_registry_name}-{name}-{option_name}",
                type=INTERNAL_TO_NATIVE_TYPE[option_type],
                # Bug fix: the second half of this help message was a plain
                # (non-f) string, so "{name}", "{option_name}" and
                # "{INTERNAL_TO_HELP[option_type]}" appeared literally in the
                # --help output instead of being interpolated.
                help=f"{registry.flag_registry_name.title()} "
                f"{name} {option_name}{INTERNAL_TO_HELP[option_type]}",
            )


def generate_registry_args(parser, registry, default=None):
    """Walks through the given registry and generates arguments for each of the available options"""
    parser.add_argument(
        f"--{registry.flag_registry_name}",
        help=f"{registry.flag_registry_name.title()} to compile the model with",
        required=False,
        default=default,
    )
    names = registry.list_registered()
    for name in names:
        _generate_registry_option_args(parser, registry, name)
def _reconstruct_registry_options(args, registry, name):
    """Collect the parsed CLI values of every supported option of `name`.

    Only options with a known internal type and a non-None parsed value are
    returned.
    """
    collected = {}
    for option, option_type in registry.list_registered_options(name).items():
        if option_type not in INTERNAL_TO_NATIVE_TYPE:
            continue
        # CLI flags use dashes; argparse stores them with underscores.
        attr_name = f"{registry.flag_registry_name}_{name}_{option.replace('-', '_')}"
        value = getattr(args, attr_name)
        if value is not None:
            collected[option] = value
    return collected


def reconstruct_registry_entity(args, registry):
    """Reconstructs an entity from arguments generated from a registry"""
    possible_names = registry.list_registered()
    selected_name = getattr(args, registry.flag_registry_name)
    if selected_name is None:
        # The flag was not passed at all.
        return None
    if selected_name not in possible_names:
        raise TVMCException(
            f'{registry.flag_registry_name.title()} "{selected_name}" is not defined'
        )

    reconstructed = {}
    for candidate in possible_names:
        reconstructed[candidate] = _reconstruct_registry_options(args, registry, candidate)

    # Reject option flags that belong to an entity other than the selected one.
    for candidate in possible_names:
        if candidate == selected_name or not reconstructed[candidate]:
            continue
        first_option = next(iter(reconstructed[candidate]))
        raise TVMCException(
            f"Passed --{registry.flag_registry_name}-{candidate}-{first_option} "
            f"but did not specify {candidate} executor"
        )

    return registry(selected_name, reconstructed[selected_name])
| 3,863 | 40.548387 | 100 | py |
tvm | tvm-main/python/tvm/driver/tvmc/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language
# pylint: disable=unused-argument
"""
TVMC Graph Transforms
"""
from tvm import relay, transform
from tvm.driver.tvmc import TVMCException
def generate_mixed_precision_rule(acc_dtype):
    """Build a mixed-precision conversion rule accumulating in *acc_dtype*.

    The returned callable matches the FTVMMixedPrecisionConversionType
    signature used by relay's ToMixedPrecision pass.
    """

    def _mixed_precision_rule(call_node: "relay.Call", mixed_precision_type: str):
        # Always convert this op; accumulate in acc_dtype and produce the
        # requested mixed-precision output type.
        conversion_kind = relay.transform.mixed_precision.MIXED_PRECISION_ALWAYS
        return [conversion_kind, acc_dtype, mixed_precision_type]

    return _mixed_precision_rule
class MixedPrecision(object):
    """Context manager that temporarily overrides the mixed-precision
    conversion attribute of a set of relay operators (RAII pattern)."""

    def __init__(self, ops, acc_type):
        """Record what to override so it can be restored on exit.

        Parameters
        ----------
        ops : list
            list of operators
        acc_type: str
            Output or accumulation precision to be used.
        """
        # older_attr caches each op's previous attribute so __exit__ can
        # restore it.
        self.older_attr = {}
        self.ops = ops
        self.acc_type = acc_type
        self.attr_key = "FTVMMixedPrecisionConversionType"

    def __enter__(self):
        for op_name in self.ops:
            relay_op = relay.op.get(op_name)
            # Save the current rule (may be None), then install ours.
            self.older_attr[op_name] = relay_op.get_attr(self.attr_key)
            relay_op.reset_attr(self.attr_key)
            relay_op.set_attr(self.attr_key, generate_mixed_precision_rule(self.acc_type))
        return self

    def __exit__(self, ptype, value, trace):
        for op_name in self.ops:
            relay_op = relay.op.get(op_name)
            relay_op.reset_attr(self.attr_key)
            previous = self.older_attr[op_name]
            if previous:
                relay_op.set_attr(self.attr_key, previous)
def convert_to_mixed_precision(mod, ops=None, calculation_type="float16", acc_type="float16"):
    """Converts the operator datatypes

    Parameters
    ----------
    mod : tvm.IRModule
        The relay module to convert.
    ops : list
        List of operators to be precision converted.
    calculation_type: str
        Input precision to be used.
    acc_type: str
        Output or accumulation precision to be used.

    Returns
    -------
    mod : tvm.IRModule
        The converted module.

    Raises
    ------
    TVMCException
        If the mixed-precision conversion pass fails.
    """
    if ops is None:
        ops = ["nn.conv2d", "nn.dense"]

    # Temporarily install the accumulation-type rule on the selected ops while
    # the conversion passes run; MixedPrecision restores the old rules on exit.
    with MixedPrecision(ops, acc_type):
        seq = transform.Sequential(
            [relay.transform.InferType(), relay.transform.ToMixedPrecision(calculation_type)]
        )
        with transform.PassContext(
            config={"relay.ToMixedPrecision.keep_orig_output_dtype": True}, opt_level=3
        ):
            try:
                return seq(mod)
            except Exception as err:
                # Chain the original exception so the root cause is preserved
                # in the traceback (was previously dropped).
                raise TVMCException(
                    "Error converting mixed precision : {0}".format(str(err))
                ) from err
def convert_graph_layout(mod, desired_layouts, ops=None):
    """Alter the layout of the input graph.

    Parameters
    ----------
    mod : tvm.IRModule
        The relay module to convert.
    desired_layouts : list[str]
        The layouts to convert to.
        Expects either a single element or one str per operator.
        Can be only data layouts or combination of both, e.g. NHWC:HWIO
    ops : list
        List of operators to be layout converted.

    Returns
    -------
    mod : tvm.IRModule
        The converted module.

    Raises
    ------
    TVMCException
        If the layout count does not match the operator count, or the layout
        conversion passes fail.
    """
    if ops is None:
        ops = ["nn.conv2d", "nn.conv2d_transpose", "qnn.conv2d"]

    if not isinstance(desired_layouts, list):
        # For backwards compatibility
        assert isinstance(desired_layouts, str)
        desired_layouts = [desired_layouts]

    if len(desired_layouts) != len(ops):
        if len(desired_layouts) != 1:
            raise TVMCException(
                "Expected 1 or {} layouts but got {}".format(len(ops), len(desired_layouts))
            )
        # A single layout is broadcast to every operator.
        desired_layouts = desired_layouts * len(ops)

    def layout_helper(layout):
        # "DATA:KERNEL" sets both layouts; a plain "DATA" keeps the default
        # kernel layout.
        if ":" in layout:
            data_layout, kernel_layout = layout.split(":", 1)
        else:
            data_layout = layout
            kernel_layout = "default"
        return [data_layout, kernel_layout]

    desired_layouts = {op: layout_helper(desired_layouts[i]) for i, op in enumerate(ops)}

    # Convert the layout of the graph where possible.
    seq = transform.Sequential(
        [
            relay.transform.RemoveUnusedFunctions(),
            relay.transform.ConvertLayout(desired_layouts),
            relay.transform.FoldConstant(),
        ]
    )

    try:
        return seq(mod)
    except Exception as err:
        # Chain the original exception so the root cause is preserved in the
        # traceback (was previously dropped).
        raise TVMCException("Error converting layouts: {}".format(str(err))) from err
def apply_graph_transforms(mod, args):
    """Apply the requested layout / precision transforms to *mod*.

    Parameters
    ----------
    mod : tvm.IRModule
        The relay module to convert.
    args : dict
        The transform arguments.

    Returns
    -------
    mod : tvm.IRModule
        The converted module.
    """
    if not args:
        # No transform arguments at all: return the module unchanged.
        return mod

    # Layout conversion (AlterLayout) runs first when requested.
    desired_layout = args.get("desired_layout", None)
    if desired_layout:
        mod = convert_graph_layout(mod, desired_layout, args.get("desired_layout_ops", None))

    # Then the optional mixed-precision conversion (ToMixedPrecision).
    if args.get("mixed_precision", False):
        mod = convert_to_mixed_precision(
            mod,
            args.get("mixed_precision_ops"),
            args.get("mixed_precision_calculation_type"),
            args.get("mixed_precision_acc_type"),
        )

    return mod
def parse_graph_transform_args(args):
    """Extract the graph-transform options from parsed arguments.

    Parameters
    ----------
    args: argparse.Namespace or dict
        Arguments.

    Returns
    -------
    transform_args : dict
        Graph transform arguments; absent keys are mapped to None.
    """
    # Accept either a namespace or a plain dict.
    if not isinstance(args, dict):
        args = vars(args)

    transform_keys = (
        "desired_layout",
        "desired_layout_ops",
        "mixed_precision",
        "mixed_precision_ops",
        "mixed_precision_calculation_type",
        "mixed_precision_acc_type",
    )
    return {key: args.get(key, None) for key in transform_keys}
def generate_transform_args(parser):
    """Add graph transform related args"""
    # Layout conversion (AlterLayout) flags.
    parser.add_argument(
        "--desired-layout",
        nargs="+",
        help=(
            "Change the data/kernel layout of the graph. (i.e. NCHW or NHWC:HWIO)"
            "This option can be provided multiple times to specify per-operator layouts, "
            "e.g. '--desired-layout NHWC:HWIO' (Apply same layout for every operator)."
            "e.g. '--desired-layout-ops nn.conv2d nn.avg_pool2d --desired-layout NCHW NHWC'."
        ),
    )
    parser.add_argument(
        "--desired-layout-ops",
        nargs="+",
        default=["nn.conv2d", "nn.conv2d_transpose", "qnn.conv2d"],
        help="List of operators to be layout converted.",
    )

    # Mixed precision (ToMixedPrecision) flags.
    parser.add_argument(
        "--mixed-precision",
        action="store_true",
        help="Enable mixed precision conversion",
    )
    parser.add_argument(
        "--mixed-precision-ops",
        nargs="+",
        default=["nn.conv2d", "nn.dense"],
        help="List of operators to be converted to mixed precision",
    )
    parser.add_argument(
        "--mixed-precision-calculation-type",
        default="float16",
        choices=["float16", "float32"],
        help="Calculation precision type",
    )
    parser.add_argument(
        "--mixed-precision-acc-type",
        default="float16",
        choices=["float16", "float32"],
        help="Accumulator precision type",
    )
| 8,154 | 28.981618 | 94 | py |
tvm | tvm-main/python/tvm/driver/tvmc/pass_list.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language
"""
TVMC Pass List Management
"""
import argparse
import tvm
from tvm._ffi import registry
def parse_pass_list_str(input_string):
    """Parse an input string for existing passes

    Parameters
    ----------
    input_string: str
        Possibly comma-separated string with the names of passes

    Returns
    -------
    list: a list of existing passes.

    Raises
    ------
    argparse.ArgumentTypeError
        If any named pass is not registered within tvm.
    """
    _prefix = "relay._transform."
    pass_list = input_string.split(",")

    # Collect every non-empty name that has no matching global pass function.
    missing_list = []
    for entry in pass_list:
        pass_name = entry.strip()
        if pass_name and tvm.get_global_func(_prefix + pass_name, True) is None:
            missing_list.append(pass_name)

    if missing_list:
        available_list = sorted(
            n[len(_prefix) :] for n in registry.list_global_func_names() if n.startswith(_prefix)
        )
        raise argparse.ArgumentTypeError(
            "Following passes are not registered within tvm: {}. Available: {}.".format(
                ", ".join(missing_list), ", ".join(available_list)
            )
        )
    return pass_list
| 1,748 | 30.8 | 97 | py |
tvm | tvm-main/python/tvm/driver/tvmc/micro.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=consider-using-from-import
"""
Provides support for micro targets (microTVM).
"""
import argparse
import os
from pathlib import Path
import shutil
import sys
from . import TVMCException
from .main import register_parser
from .arguments import TVMCSuppressedArgumentParser
from .project import (
get_project_options,
get_and_check_options,
get_project_dir,
)
try:
import tvm.micro.project as project
from tvm.micro import get_microtvm_template_projects
from tvm.micro.build import MicroTVMTemplateProjectNotFoundError
from tvm.micro.project_api.server import ServerError
from tvm.micro.project_api.client import ProjectAPIServerNotFoundError
SUPPORT_MICRO = True
except (ImportError, NameError):
SUPPORT_MICRO = False
@register_parser
def add_micro_parser(subparsers, main_parser, json_params):
    """Includes parser for 'micro' context and associated subcommands:
    create-project (create), build, and flash.

    The parser is built in two phases: first the static subcommand tree is
    created, then a throw-away parse of the current command line determines
    which platform was selected so that platform-specific project options
    (discovered via the Project API) can be added to the selected parser.
    """
    if SUPPORT_MICRO is False:
        # Don't create 'tvmc micro' parser.
        return
    # Probe available default platform templates.
    templates = {}
    for p in ("zephyr", "arduino"):
        try:
            templates[p] = get_microtvm_template_projects(p)
        except MicroTVMTemplateProjectNotFoundError:
            # Platform template not installed; silently skip it.
            pass
    micro = subparsers.add_parser("micro", help="select micro context.")
    micro.set_defaults(func=drive_micro)
    micro_parser = micro.add_subparsers(title="subcommands")
    # Selecting a subcommand under 'micro' is mandatory
    micro_parser.required = True
    micro_parser.dest = "subcommand"
    # 'create_project' subcommand
    create_project_parser = micro_parser.add_parser(
        "create-project",
        aliases=["create"],
        help="create a project template of a given type or given a template dir.",
    )
    create_project_parser.set_defaults(subcommand_handler=create_project_handler)
    create_project_parser.add_argument(
        "project_dir",
        help="project dir where the new project based on the template dir will be created.",
    )
    create_project_parser.add_argument("MLF", help="Model Library Format (MLF) .tar archive.")
    create_project_parser.add_argument(
        "-f",
        "--force",
        action="store_true",
        help="force project creating even if the specified project directory already exists.",
    )
    # 'build' subcommand
    build_parser = micro_parser.add_parser(
        "build",
        help="build a project dir, generally creating an image to be flashed, e.g. zephyr.elf.",
    )
    build_parser.set_defaults(subcommand_handler=build_handler)
    build_parser.add_argument("project_dir", help="project dir to build.")
    build_parser.add_argument("-f", "--force", action="store_true", help="Force rebuild.")
    # 'flash' subcommand
    flash_parser = micro_parser.add_parser(
        "flash", help="flash the built image on a given micro target."
    )
    flash_parser.set_defaults(subcommand_handler=flash_handler)
    flash_parser.add_argument("project_dir", help="project dir where the built image is.")
    # For each platform add arguments detected automatically using Project API info query.
    # Create subparsers for the platforms under 'create-project', 'build', and 'flash' subcommands.
    help_msg = (
        "you must select a platform from the list. You can pass '-h' for a selected "
        "platform to list its options."
    )
    create_project_platforms_parser = create_project_parser.add_subparsers(
        title="platforms", help=help_msg, dest="platform"
    )
    build_platforms_parser = build_parser.add_subparsers(
        title="platforms", help=help_msg, dest="platform"
    )
    flash_platforms_parser = flash_parser.add_subparsers(
        title="platforms", help=help_msg, dest="platform"
    )
    subcmds = {
        # API method name     Parser associated to method      Handler func to call after parsing
        "generate_project": [create_project_platforms_parser, create_project_handler],
        "build": [build_platforms_parser, build_handler],
        "flash": [flash_platforms_parser, flash_handler],
    }
    # Helper to add a platform parser to a subcmd parser.
    def _add_parser(parser, platform):
        platform_name = platform[0].upper() + platform[1:] + " platform"
        platform_parser = parser.add_parser(
            platform, add_help=False, help=f"select {platform_name}."
        )
        platform_parser.set_defaults(platform=platform)
        return platform_parser
    parser_by_subcmd = {}
    for subcmd, subcmd_parser_handler in subcmds.items():
        subcmd_parser = subcmd_parser_handler[0]
        subcmd_parser.required = True  # Selecting a platform or template is mandatory
        parser_by_platform = {}
        for platform in templates:
            new_parser = _add_parser(subcmd_parser, platform)
            parser_by_platform[platform] = new_parser
        # Besides adding the parsers for each default platform (like Zephyr and Arduino), add a
        # parser for 'template' to deal with adhoc projects/platforms.
        new_parser = subcmd_parser.add_parser(
            "template", add_help=False, help="select an adhoc template."
        )
        new_parser.add_argument(
            "--template-dir", required=True, help="Project API template directory."
        )
        new_parser.set_defaults(platform="template")
        parser_by_platform["template"] = new_parser
        parser_by_subcmd[subcmd] = parser_by_platform
    # Second phase: do a suppressed, throw-away parse of sys.argv just to learn
    # which subcommand/platform the user selected, so only that parser needs to
    # be augmented with Project API options.
    disposable_parser = TVMCSuppressedArgumentParser(main_parser)
    try:
        known_args, _ = disposable_parser.parse_known_args()
    except TVMCException:
        return
    try:
        subcmd = known_args.subcommand
        platform = known_args.platform
    except AttributeError:
        # No subcommand or platform, hence no need to augment the parser for micro targets.
        return
    # Augment parser with project options.
    if platform == "template":
        # adhoc template
        template_dir = str(Path(known_args.template_dir).resolve())
    else:
        # default template
        template_dir = templates[platform]
    try:
        template = project.TemplateProject.from_directory(template_dir)
    except ProjectAPIServerNotFoundError:
        sys.exit(f"Error: Project API server not found in {template_dir}!")
    # Query the Project API server for the options this platform supports.
    template_info = template.info()
    options_by_method = get_project_options(template_info)
    # TODO(gromero): refactor to remove this map.
    subcmd_to_method = {
        "create-project": "generate_project",
        "create": "generate_project",
        "build": "build",
        "flash": "flash",
    }
    method = subcmd_to_method[subcmd]
    parser_by_subcmd_n_platform = parser_by_subcmd[method][platform]
    _, handler = subcmds[method]
    parser_by_subcmd_n_platform.formatter_class = (
        # Set raw help text so help_text format works
        argparse.RawTextHelpFormatter
    )
    parser_by_subcmd_n_platform.set_defaults(
        subcommand_handler=handler,
        valid_options=options_by_method[method],
        template_dir=template_dir,
    )
    # --project-option is mandatory iff at least one platform option is required.
    required = any([opt["required"] for opt in options_by_method[method]])
    nargs = "+" if required else "*"
    help_text_by_option = [opt["help_text"] for opt in options_by_method[method]]
    help_text = "\n\n".join(help_text_by_option) + "\n\n"
    parser_by_subcmd_n_platform.add_argument(
        "--project-option", required=required, metavar="OPTION=VALUE", nargs=nargs, help=help_text
    )
    # add_help=False above lets us install a custom -h that also lists
    # the platform-specific options added here.
    parser_by_subcmd_n_platform.add_argument(
        "-h",
        "--help",
        "--list-options",
        action="help",
        help="show this help message which includes platform-specific options and exit.",
    )
    for one_entry in json_params:
        micro.set_defaults(**one_entry)
def drive_micro(args):
    """Dispatch to the handler selected while parsing the 'micro' subcommand.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed arguments; 'subcommand_handler' was installed via set_defaults.
    """
    handler = args.subcommand_handler
    handler(args)
def create_project_handler(args):
    """Create a new project directory from a template dir and a MLF archive.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed arguments; must provide 'project_dir', 'template_dir', 'MLF',
        'force', 'project_option' and 'valid_options'.
    """
    project_dir = get_project_dir(args.project_dir)
    if os.path.exists(project_dir):
        if not args.force:
            raise TVMCException(
                "The specified project dir already exists. "
                "To force overwriting it use '-f' or '--force'."
            )
        # --force given: wipe the previous project before regenerating it.
        shutil.rmtree(project_dir)
    template_dir = str(Path(args.template_dir).resolve())
    if not os.path.exists(template_dir):
        raise TVMCException(f"Template directory {template_dir} does not exist!")
    mlf_path = str(Path(args.MLF).resolve())
    if not os.path.exists(mlf_path):
        raise TVMCException(f"MLF file {mlf_path} does not exist!")
    # Validate --project-option values against the platform's declared options.
    options = get_and_check_options(args.project_option, args.valid_options)
    try:
        project.generate_project_from_mlf(template_dir, project_dir, mlf_path, options)
    except ServerError as error:
        print("The following error occurred on the Project API server side: \n", error)
        sys.exit(1)
def build_handler(args):
    """Builds a firmware image given a project dir.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed arguments; must provide 'project_dir', 'force',
        'project_option' and 'valid_options'.

    Raises
    ------
    TVMCException
        If the project dir does not exist, or a build already exists and
        '--force' was not given.
    """
    project_dir = get_project_dir(args.project_dir)
    if not os.path.exists(project_dir):
        raise TVMCException(f"{project_dir} doesn't exist.")
    # os.path.join is portable, unlike the previous '/'-string concatenation.
    build_dir = os.path.join(project_dir, "build")
    if os.path.exists(build_dir):
        if args.force:
            shutil.rmtree(build_dir)
        else:
            raise TVMCException(
                f"There is already a build in {project_dir}. "
                "To force rebuild it use '-f' or '--force'."
            )
    # Validate --project-option values against the platform's declared options.
    options = get_and_check_options(args.project_option, args.valid_options)
    try:
        prj = project.GeneratedProject.from_directory(project_dir, options=options)
        prj.build()
    except ServerError as error:
        # Surface Project API server-side failures as a non-zero exit.
        print("The following error occurred on the Project API server side: ", error)
        sys.exit(1)
def flash_handler(args):
    """Flashes a firmware image to a target device given a project dir.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed arguments; must provide 'project_dir', 'project_option'
        and 'valid_options'.

    Raises
    ------
    TVMCException
        If the project was never built (no 'build' sub-directory).
    """
    project_dir = get_project_dir(args.project_dir)
    # os.path.join is portable, unlike the previous '/'-string concatenation.
    build_dir = os.path.join(project_dir, "build")
    if not os.path.exists(build_dir):
        raise TVMCException(f"Could not find a build in {project_dir}")
    # Validate --project-option values against the platform's declared options.
    options = get_and_check_options(args.project_option, args.valid_options)
    try:
        prj = project.GeneratedProject.from_directory(project_dir, options=options)
        prj.flash()
    except ServerError as error:
        # Surface Project API server-side failures as a non-zero exit.
        print("The following error occurred on the Project API server side: ", error)
        sys.exit(1)
| 11,318 | 34.81962 | 99 | py |
tvm | tvm-main/python/tvm/driver/tvmc/project.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMC Project Generation Functions
"""
import os
import pathlib
from collections import defaultdict
from typing import Union
from . import TVMCException
from .fmtopt import format_option
def get_project_options(project_info):
    """Get all project options as returned by Project API 'server_info_query'
    and return them in a dict indexed by the API method they belong to.
    Parameters
    ----------
    project_info: dict of list
        a dict of lists as returned by Project API 'server_info_query' among
        which there is a list called 'project_options' containing all the
        project options available for a given project/platform.
    Returns
    -------
    options_by_method: dict of list
        a dict indexed by the API method names (e.g. "generate_project",
        "build", "flash", or "open_transport") of lists containing all the
        options (plus associated metadata and formatted help text) that belong
        to a method.
        The metadata associated to the options include the field 'choices' and
        'required' which are convenient for parsers.
        The formatted help text field 'help_text' is a string that contains the
        name of the option, the choices for the option, and the option's default
        value.
    """
    options = project_info["project_options"]
    options_by_method = defaultdict(list)
    for opt in options:
        # Get list of methods associated with an option based on the
        # existence of a 'required' or 'optional' lists. API specification
        # guarantees at least one of these lists will exist. If a list does
        # not exist it's returned as None by the API.
        metadata = ["required", "optional"]
        option_methods = [(opt[md], bool(md == "required")) for md in metadata if opt[md]]
        for methods, is_opt_required in option_methods:
            for method in methods:
                name = opt["name"]
                # Only for boolean options set 'choices' accordingly to the
                # option type. API returns 'choices' associated to them
                # as None but 'choices' can be deduced from 'type' in this case.
                if opt["type"] == "bool":
                    opt["choices"] = ["true", "false"]
                if opt["choices"]:
                    choices = "{" + ", ".join(opt["choices"]) + "}"
                else:
                    # No enumerable choices: show the option name as an
                    # upper-case placeholder, e.g. "NAME=VALUE".
                    choices = opt["name"].upper()
                option_choices_text = f"{name}={choices}"
                # Lower-case the first letter so the help reads naturally after
                # the "name=choices" prefix produced above.
                help_text = opt["help"][0].lower() + opt["help"][1:]
                if opt["default"]:
                    default_text = f"Defaults to '{opt['default']}'."
                else:
                    default_text = None
                formatted_help_text = format_option(
                    option_choices_text, help_text, default_text, is_opt_required
                )
                option = {
                    "name": opt["name"],
                    "choices": opt["choices"],
                    "help_text": formatted_help_text,
                    "required": is_opt_required,
                }
                options_by_method[method].append(option)
    return options_by_method
def get_options(options):
    """Get option and option value from the list options returned by the parser.

    Parameters
    ----------
    options: list of str
        list of strings of the form "option=value" as returned by the parser.

    Returns
    -------
    opts: dict
        dict indexed by option names and associated values.

    Raises
    ------
    TVMCException
        If an entry does not contain a '=' separator.
    """
    opts = {}
    for option in options:
        if "=" not in option:
            raise TVMCException(f"Invalid option format: {option}. Please use OPTION=VALUE.")
        # Split on the first '=' only, so option values themselves may contain
        # '=' (e.g. extra compiler flags like "-DFOO=1" passed to a platform).
        # The previous unbounded split() raised ValueError on such values.
        key, value = option.split("=", 1)
        opts[key] = value
    return opts
def check_options(options, valid_options):
    """Validate given options (required or optional) against the valid set.

    Parameters
    ----------
    options: dict
        dict indexed by option name of options and options values to be checked.
    valid_options: list of dict
        list of all valid options and choices for a platform.

    Returns
    -------
    None. Raise TVMCException if check fails, i.e. if an option is not in the list of valid options.
    """
    # Every required option must have been provided.
    required = [valid["name"] for valid in valid_options if valid["required"]]
    for name in required:
        if name not in options:
            raise TVMCException(
                f"Option '{name}' is required but was not specified. Use --list-options "
                "to see all required options."
            )
    # Whatever remains must match one of the declared optional options.
    optional = [valid["name"] for valid in valid_options if not valid["required"]]
    for name in set(options) - set(required):
        if name not in optional:
            raise TVMCException(
                f"Option '{name}' is invalid. Use --list-options to see all available options."
            )
def check_options_choices(options, valid_options):
    """Ensure each option value is among the option's choices, when choices exist.

    Parameters
    ----------
    options: dict
        dict indexed by option name of options and options values to be checked.
    valid_options: list of dict
        list of all valid options and choices for a platform.

    Returns
    -------
    None. Raise TVMCException if check fails, i.e. if an option value is not valid.
    """
    # Map each option name to its declared choices; options whose choices are
    # None accept any value and are simply left out of this map.
    choices_by_name = {}
    for valid in valid_options:
        if valid["choices"] is not None:
            choices_by_name[valid["name"]] = valid["choices"]
    for name, value in options.items():
        if name in choices_by_name and value not in choices_by_name[name]:
            raise TVMCException(
                f"Choice '{value}' for option '{name}' is invalid. "
                "Use --list-options to see all available choices for that option."
            )
def get_and_check_options(passed_options, valid_options):
    """Parse command-line options and fully validate them.

    Parameters
    ----------
    passed_options: list of str
        list of strings in the "key=value" form as captured by argparse.
    valid_options: list
        list with all options available for a given API method / project as returned by
        get_project_options().

    Returns
    -------
    opts: dict
        dict indexed by option names and associated values.
        Or None if passed_options is None.
    """
    if passed_options is None:
        # Nothing was given on the command line, so there is nothing to check.
        return None
    # Turn the ["k=v", ...] list into a {k: v} dict.
    parsed = get_options(passed_options)
    # Reject unknown options and missing required ones.
    check_options(parsed, valid_options)
    # Reject values outside an option's declared choices.
    check_options_choices(parsed, valid_options)
    return parsed
def get_project_dir(project_dir: Union[pathlib.Path, str]) -> str:
    """Return the project directory as an absolute path string.

    Parameters
    ----------
    project_dir: pathlib.Path or str
        Absolute or relative (to the current working directory) project path.

    Returns
    -------
    str
        Absolute project directory path. Always a str — the previous version
        returned the pathlib.Path object unchanged for absolute Path inputs,
        despite the annotated return type, which broke callers that
        concatenate the result with strings.
    """
    path = str(project_dir)
    if not os.path.isabs(path):
        path = os.path.abspath(path)
    return path
| 8,132 | 33.75641 | 100 | py |
tvm | tvm-main/python/tvm/driver/tvmc/model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=consider-using-with,broad-exception-raised,consider-using-from-import
"""
This file contains the definition of a set of classes that wrap the outputs
of TVMC functions to create a simpler and more intuitive API.
There is one class for each required stage of a TVM workflow.
The TVMCModel represents the result of importing a model into TVM, it
contains the precompiled graph definition and parameters that define
what the model does.
Compiling a TVMCModel produces a TVMCPackage, which contains the generated
artifacts that allow the model to be run on the target hardware.
Running a TVMCPackage produces a TVMCResult, which contains the outputs of
the model and the measured runtime.
Examples
--------
The following code shows a full lifecycle for a model using tvmc, first the
model is imported from an exterior framework, in this case onnx, then it
is tuned to find the best schedules on CPU, then compiled into a TVMCPackage,
and finally run.
.. code-block:: python
tvmc_model = tvmc.load("my_model.onnx")
tuning_records = tvmc.tune(tvmc_model, target="llvm")
tvmc_package = tvmc.compile(tvmc_model, target="llvm", tuning_records=tuning_records)
result = tvmc.run(tvmc_package, device="cpu")
print(result)
"""
import os
import tarfile
import json
from typing import Optional, Union, Dict, Callable, TextIO
from pathlib import Path
import numpy as np
import tvm
import tvm.contrib.cc
from tvm import relay
from tvm.contrib import utils
from tvm.driver.tvmc import TVMCException
from tvm.relay.backend.executor_factory import GraphExecutorFactoryModule
from tvm.runtime.module import BenchmarkResult
from tvm.runtime.vm import Executable
try:
from tvm.micro import export_model_library_format
except ImportError:
export_model_library_format = None
class TVMCModel(object):
    """Initialize a TVMC model from a relay model definition or a saved file.
    Parameters
    ----------
    mod : tvm.IRModule, optional
        The relay module corresponding to this model.
    params : dict, optional
        A parameter dictionary for the model.
    model_path: str, optional
        An alternative way to load a TVMCModel, the path to a previously
        saved model.
    """
    def __init__(
        self,
        mod: Optional[tvm.IRModule] = None,
        params: Optional[Dict[str, tvm.nd.NDArray]] = None,
        model_path: Optional[str] = None,
    ):
        if (mod is None or params is None) and (model_path is None):
            raise TVMCException(
                "Either mod and params must be provided "
                "or a path to a previously saved TVMCModel"
            )
        # Scratch directory backing the default tuning-record/package paths;
        # it lives (and is cleaned up) with this TVMCModel instance.
        self._tmp_dir = utils.tempdir()
        if model_path is not None:
            self.load(model_path)
        else:
            self.mod = mod
            # Normalize a falsy params argument to an empty dict so self.params
            # is always usable as a mapping.
            self.params = params if params else {}
    def save(self, model_path: str):
        """Save the TVMCModel to disk.
        Note that this saves the graph representation,
        the parameters, and the tuning records if applicable. It will not save any
        compiled artifacts.
        Parameters
        ----------
        model_path : str
            A full path to save this TVMCModel to including the output file name.
            The file will be saved as a tar file so using a ".tar" extension is advised.
        """
        temp = self._tmp_dir
        # Save relay graph
        relay_name = "model.json"
        relay_path = temp.relpath(relay_name)
        with open(relay_path, "w") as relay_file:
            relay_file.write(tvm.ir.save_json(self.mod))
        # Save params
        params_name = "model.params"
        params_path = temp.relpath(params_name)
        with open(params_path, "wb") as params_file:
            params_file.write(relay.save_param_dict(self.params))
        # Create a tar file.
        with tarfile.open(model_path, "w") as tar:
            tar.add(relay_path, relay_name)
            tar.add(params_path, params_name)
            # If default tuning records exist, save them as well.
            if os.path.exists(self.default_tuning_records_path()):
                tar.add(self.default_tuning_records_path(), "tuning_records")
            # Also save the compiled package if it can be found.
            if os.path.exists(self.default_package_path()):
                tar.add(self.default_package_path(), "model_package.tar")
    def load(self, model_path: str):
        """Load a TVMCModel from disk.
        Parameters
        ----------
        model_path : str
            A path to load the TVMCModel from.
        """
        temp = self._tmp_dir
        t = tarfile.open(model_path)
        # NOTE(review): extractall() performs no member sanitization here, so a
        # crafted archive could write outside the temp dir (tar path traversal).
        # Consider extractall(..., filter="data") where available — confirm
        # minimum supported Python version before changing.
        t.extractall(temp.relpath("."))
        # Load relay IR.
        relay_path = temp.relpath("model.json")
        with open(relay_path, "r") as relay_file:
            self.mod = tvm.ir.load_json(relay_file.read())
        # Load parameter dictionary.
        params_path = temp.relpath("model.params")
        with open(params_path, "rb") as params_file:
            self.params = relay.load_param_dict(params_file.read())
    def default_tuning_records_path(self):
        """Get a full path for storing tuning records in this model's temporary directory
        Note that when this path is used, the tuning records will be saved and loaded
        when calling `save` and `load`.
        Returns
        -------
        records_path: str
            A path to the default location for tuning records.
        """
        return self._tmp_dir.relpath("tuning_records")
    def default_package_path(self):
        """Get a full path for storing a compiled package in this model's temporary directory
        Note that when this path is used, the package will be saved and loaded
        when calling `save` and `load`.
        Returns
        -------
        package_path: str
            A path to the default location for the compiled package.
        """
        return self._tmp_dir.relpath("model_package.tar")
    def export_vm_format(
        self,
        vm_exec: Executable,
        package_path: Optional[str] = None,
        lib_format: str = "so",
    ):
        """Save this TVMCModel compiled via vm to file.
        Parameters
        ----------
        vm_exec : vm.Executable
            The VM Executable containing the compiled artifacts needed to run this model.
        package_path : str, None
            Where the model should be saved. Note that it will be packaged as a .tar file.
            If not provided, the package will be saved to a generically named file in tmp.
        lib_format : str
            How to export the modules function library. Must be one of "so" or "tar".
        Returns
        -------
        package_path : str
            The path that the package was saved to.
        """
        lib_name = "lib." + lib_format
        temp = self._tmp_dir
        if package_path is None:
            package_path = self.default_package_path()
        path_lib = temp.relpath(lib_name)
        vm_exec.mod.export_library(path_lib)
        self.lib_path = path_lib
        # Package up all the temp files into a tar file.
        with tarfile.open(package_path, "w") as tar:
            tar.add(path_lib, lib_name)
        return package_path
    def export_classic_format(
        self,
        executor_factory: GraphExecutorFactoryModule,
        package_path: Optional[str] = None,
        cross: Optional[Union[str, Callable]] = None,
        cross_options: Optional[str] = None,
        lib_format: str = "so",
    ):
        """Save this TVMCModel to file.
        Parameters
        ----------
        executor_factory : GraphExecutorFactoryModule
            The factory containing the compiled artifacts needed to run this model.
        package_path : str, None
            Where the model should be saved. Note that it will be packaged as a .tar file.
            If not provided, the package will be saved to a generically named file in tmp.
        cross : str or callable object, optional
            Function that performs the actual compilation.
        cross_options : str, optional
            Command line options to be passed to the cross compiler.
        lib_format : str
            How to export the modules function library. Must be one of "so" or "tar".
        Returns
        -------
        package_path : str
            The path that the package was saved to.
        """
        lib_name = "mod." + lib_format
        graph_name = "mod.json"
        param_name = "mod.params"
        temp = self._tmp_dir
        if package_path is None:
            package_path = self.default_package_path()
        path_lib = temp.relpath(lib_name)
        if not cross:
            executor_factory.get_lib().export_library(path_lib)
        else:
            # Cross-compile the exported library, optionally forwarding extra
            # space-separated compiler flags.
            if not cross_options:
                executor_factory.get_lib().export_library(
                    path_lib, tvm.contrib.cc.cross_compiler(cross)
                )
            else:
                executor_factory.get_lib().export_library(
                    path_lib, tvm.contrib.cc.cross_compiler(cross, options=cross_options.split(" "))
                )
        self.lib_path = path_lib
        with open(temp.relpath(graph_name), "w") as graph_file:
            graph_file.write(executor_factory.get_graph_json())
        with open(temp.relpath(param_name), "wb") as params_file:
            params_file.write(relay.save_param_dict(executor_factory.get_params()))
        # Package up all the temp files into a tar file.
        with tarfile.open(package_path, "w") as tar:
            tar.add(path_lib, lib_name)
            tar.add(temp.relpath(graph_name), graph_name)
            tar.add(temp.relpath(param_name), param_name)
        return package_path
    def export_package(
        self,
        executor_factory: Union[GraphExecutorFactoryModule, Executable],
        package_path: Optional[str] = None,
        cross: Optional[Union[str, Callable]] = None,
        cross_options: Optional[str] = None,
        output_format: str = "so",
    ):
        """Save this TVMCModel to file.
        Parameters
        ----------
        executor_factory : GraphExecutorFactoryModule
            The factory containing the compiled artifacts needed to run this model.
        package_path : str, None
            Where the model should be saved. Note that it will be packaged as a .tar file.
            If not provided, the package will be saved to a generically named file in tmp.
        cross : str or callable object, optional
            Function that performs the actual compilation.
        cross_options : str, optional
            Command line options to be passed to the cross compiler.
        output_format : str
            How to save the modules function library. Must be one of "so" and "tar" to save
            using the classic format or "mlf" to save using the Model Library Format.
        Returns
        -------
        package_path : str
            The path that the package was saved to.
        """
        if output_format not in ["so", "tar", "mlf"]:
            raise TVMCException("Only 'so', 'tar', and 'mlf' output formats are supported.")
        if output_format == "mlf" and cross:
            raise TVMCException("Specifying the MLF output and a cross compiler is not supported.")
        # Dispatch on artifact type first: a VM Executable always takes the VM
        # export path, regardless of the classic output_format value.
        if isinstance(executor_factory, Executable):
            package_path = self.export_vm_format(executor_factory, package_path, output_format)
        elif output_format in ["so", "tar"]:
            package_path = self.export_classic_format(
                executor_factory, package_path, cross, cross_options, output_format
            )
        elif output_format == "mlf":
            if export_model_library_format:
                package_path = export_model_library_format(executor_factory, package_path)
            else:
                raise Exception("micro tvm is not enabled. Set USE_MICRO to ON in config.cmake")
        return package_path
    def summary(self, file: TextIO = None):
        """Print the IR corresponding to this model.
        Arguments
        ---------
        file: Writable, optional
            If specified, the summary will be written to this file.
        """
        print(self.mod, file=file)
class TVMCPackage(object):
    """Load a saved TVMCPackage from disk.
    Parameters
    ----------
    package_path : str
        The path to the saved TVMCPackage that will be loaded.
    project_dir : Path, str
        If given and loading a MLF file, the path to the project directory that contains the file.
    """
    def __init__(
        self,
        package_path: str,
        project_dir: Optional[Union[Path, str]] = None,
    ):
        self._tmp_dir = utils.tempdir()
        self.package_path = package_path
        self.import_package(self.package_path)
        # project_dir only makes sense for Model Library Format packages.
        if project_dir and self.type != "mlf":
            raise TVMCException("Setting 'project_dir' is only allowed when importing a MLF.!")
        self.project_dir = project_dir
    def import_package(self, package_path: str):
        """Load a TVMCPackage from a previously exported TVMCModel.

        Detects the package layout (Model Library Format, classic graph
        package, or VM package) from the files present inside the archive and
        sets self.type, self.executor_type, self.lib_name/lib_path,
        self.graph and self.params accordingly.

        Parameters
        ----------
        package_path : str
            The path to the saved TVMCPackage.
        """
        temp = self._tmp_dir
        t = tarfile.open(package_path)
        # NOTE(review): extractall() performs no member sanitization here, so a
        # crafted archive could write outside the temp dir (tar path traversal).
        # Consider extractall(..., filter="data") where available.
        t.extractall(temp.relpath("."))
        if os.path.exists(temp.relpath("metadata.json")):
            # Model Library Format (MLF)
            self.lib_name = None
            self.lib_path = None
            with open(temp.relpath("metadata.json")) as metadata_json:
                metadata = json.load(metadata_json)
            all_module_names = []
            for name in metadata["modules"].keys():
                all_module_names.append(name)
            assert len(all_module_names) == 1, "Multiple modules in MLF is not supported."
            module_name = all_module_names[0]
            module_metdata = metadata["modules"][module_name]
            # Only graph-executor MLF packages carry a graph config file.
            has_graph_executor = "graph" in module_metdata["executors"]
            graph = (
                temp.relpath(f"executor-config/graph/{module_name}.graph")
                if has_graph_executor
                else None
            )
            params = temp.relpath(f"parameters/{module_name}.params")
            self.type = "mlf"
            # Set executor type
            if len(metadata["modules"][module_name]["executors"]) > 1:
                executor_types_msg = ",".join(metadata["modules"][module_name]["executors"])
                raise TVMCException(
                    f"Found multiple executors with these types: {executor_types_msg}. "
                    "Currently, only one executor type (aot or graph) is supported."
                )
            self.executor_type = metadata["modules"][module_name]["executors"][0]
        else:
            # Classic format
            classic_lib_name_so = "mod.so"
            classic_lib_name_tar = "mod.tar"
            # VM format
            vm_lib_name_so = "lib.so"
            vm_lib_name_tar = "lib.tar"
            # Probe each known library filename to tell classic from VM packages.
            if os.path.exists(temp.relpath(classic_lib_name_so)):
                self.lib_name = classic_lib_name_so
                self.type = "classic"
            elif os.path.exists(temp.relpath(classic_lib_name_tar)):
                self.lib_name = classic_lib_name_tar
                self.type = "classic"
            elif os.path.exists(temp.relpath(vm_lib_name_so)):
                self.lib_name = vm_lib_name_so
                self.type = "vm"
            elif os.path.exists(temp.relpath(vm_lib_name_tar)):
                self.lib_name = vm_lib_name_tar
                self.type = "vm"
            else:
                raise TVMCException("Couldn't find exported library in the package.")
            self.lib_path = temp.relpath(self.lib_name)
            graph, params = None, None
            self.executor_type = "vm"
            if self.type == "classic":
                # Classic packages also ship the graph JSON and the params blob.
                graph = temp.relpath("mod.json")
                params = temp.relpath("mod.params")
                self.executor_type = "graph"
        if params is not None:
            with open(params, "rb") as param_file:
                self.params = bytearray(param_file.read())
        else:
            self.params = None
        if graph is not None:
            with open(graph) as graph_file:
                self.graph = graph_file.read()
        else:
            self.graph = None
class TVMCResult(object):
    """A class that stores the results of tvmc.run and provides helper utilities."""
    def __init__(self, outputs: Dict[str, np.ndarray], times: BenchmarkResult):
        """Wrap the artifacts produced by a tvmc.run invocation.

        Parameters
        ----------
        outputs : dict
            Outputs dictionary mapping the name of the output to its numpy value.
        times : BenchmarkResult
            The execution times measured by the time evaluator in seconds to produce outputs.
        """
        self.outputs = outputs
        self.times = times
    def format_times(self):
        """Render the measured execution-time statistics as a small table.

        The rendered text looks like:
        .. code-block::
                Execution time summary:
                mean (ms)   median (ms) max (ms)    min (ms)    std (ms)
                0.14310    0.14310    0.16161    0.12933    0.01004

        Returns
        -------
        str
            A formatted string containing the statistics.
        """
        # BenchmarkResult knows how to pretty-print itself.
        return str(self.times)
    def get_output(self, name: str):
        """Look up a single model output by its name.

        Parameters
        ----------
        name : str
            The name of the output to return

        Returns
        -------
        output : np.ndarray
            The output corresponding to name.
        """
        return self.outputs[name]
    def save(self, output_path: str):
        """Persist every output to disk as one .npz archive.

        Parameters
        ----------
        output_path : str
            The path to save the numpy results to.
        """
        np.savez(output_path, **self.outputs)
    def __str__(self):
        pieces = [self.format_times(), f"Output Names:\n {list(self.outputs.keys())}"]
        return "\n".join(pieces)
| 19,305 | 35.564394 | 100 | py |
tvm | tvm-main/python/tvm/driver/tvmc/compiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""
Provides support to compile networks both AOT and JIT.
"""
import logging
import os.path
import re
import itertools
from copy import deepcopy
from typing import Any, Optional, Dict, List, Union, Callable, Sequence
from pathlib import Path
from collections import defaultdict
import tvm
from tvm import autotvm, auto_scheduler
from tvm import relay
from tvm.driver.tvmc.registry import generate_registry_args, reconstruct_registry_entity
from tvm.ir.instrument import PassInstrument
from tvm.ir.memory_pools import WorkspaceMemoryPools
from tvm.target import Target
from tvm.relay.backend import Executor, Runtime
from tvm.relay.analysis.operations_distribution import analyze_operations_distribution
from tvm.relay.transform.suffixes import tag_suffixes
from . import composite_target, frontends, TVMCException
from .model import TVMCModel, TVMCPackage
from .main import register_parser
from .target import target_from_cli, generate_target_args, reconstruct_target_args
from .pass_config import parse_configs
from .pass_list import parse_pass_list_str
from .transform import generate_transform_args, parse_graph_transform_args, apply_graph_transforms
from .shape_parser import parse_shape_string
from .workspace_pools import generate_workspace_pools_args, workspace_pools_recombobulate
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
@register_parser
def add_compile_parser(subparsers, _, json_params):
    """Include parser for 'compile' subcommand.

    Parameters
    ----------
    subparsers
        The subparser collection (from ``argparse``) on which the
        "compile" command is registered.
    _
        The main parser; unused by this registration hook.
    json_params : list of dict
        Pre-loaded default option values, applied to the parser via
        ``set_defaults`` at the end so CLI flags still override them.
    """
    parser = subparsers.add_parser("compile", help="compile a model.")
    # drive_compile is invoked when the user selects this subcommand.
    parser.set_defaults(func=drive_compile)
    parser.add_argument(
        "--cross-compiler",
        default="",
        help="the cross compiler to generate target libraries, e.g. 'aarch64-linux-gnu-gcc'.",
    )
    parser.add_argument(
        "--cross-compiler-options",
        default="",
        help="the cross compiler options to generate target libraries, e.g. '-mfpu=neon-vfpv4'.",
    )
    # Adds --desired-layout / mixed-precision style graph-transform flags.
    generate_transform_args(parser)
    parser.add_argument(
        "--dump-code",
        metavar="FORMAT",
        default="",
        help="comma separated list of formats to export the input model, e.g. 'asm,ll,tir,relay'.",
    )
    parser.add_argument(
        "--dump-offloads",
        default="",
        help="output a mapping of which operations of the initial Relay "
        "will be transferred to which backend, indicating the composite "
        "that includes those operations, "
        "e.g. '--dump-offloads -' to dump to the console, "
        "e.g. '--dump-offloads <path_to_file>' to dump to the file. "
        "If not presented, no output is done. ",
    )
    parser.add_argument(
        "--model-format",
        choices=frontends.get_frontend_names(),
        help="specify input model format.",
    )
    parser.add_argument(
        "-o",
        "--output",
        default="module.tar",
        help="output the compiled module to a specified archive. Defaults to 'module.tar'.",
    )
    parser.add_argument(
        "-f",
        "--output-format",
        choices=["so", "mlf"],
        default="so",
        help="output format. Use 'so' for shared object or 'mlf' for Model Library Format "
        "(only for microTVM targets). Defaults to 'so'.",
    )
    parser.add_argument(
        "--pass-config",
        action="append",
        metavar=("name=value"),
        help="configurations to be used at compile time. This option can be provided multiple "
        "times, each one to set one configuration value, "
        "e.g. '--pass-config relay.backend.use_auto_scheduler=0', "
        "e.g. '--pass-config tir.add_lower_pass=opt_level1,pass1,opt_level2,pass2'.",
    )
    # Adds the --target family of options.
    generate_target_args(parser)
    parser.add_argument(
        "--tuning-records",
        metavar="PATH",
        default="",
        help="path to an auto-tuning log file by AutoTVM. If not presented, "
        "the fallback/tophub configs will be used.",
    )
    # Executor defaults to "graph", runtime defaults to "cpp".
    generate_registry_args(parser, Executor, "graph")
    generate_registry_args(parser, Runtime, "cpp")
    parser.add_argument("-v", "--verbose", action="count", default=0, help="increase verbosity.")
    # TODO (@leandron) This is a path to a physical file, but
    # can be improved in future to add integration with a modelzoo
    # or URL, for example.
    parser.add_argument("FILE", help="path to the input model file.")
    parser.add_argument(
        "-O",
        "--opt-level",
        default=3,
        type=int,
        choices=range(0, 4),
        metavar="[0-3]",
        help="specify which optimization level to use. Defaults to '3'.",
    )
    parser.add_argument(
        "--input-shapes",
        help="specify non-generic shapes for model to run, format is "
        '"input_name:[dim1,dim2,...,dimn] input_name2:[dim1,dim2]".',
        type=parse_shape_string,
        default=None,
    )
    parser.add_argument(
        "--disabled-pass",
        help="disable specific passes, comma-separated list of pass names.",
        type=parse_pass_list_str,
        default="",
    )
    parser.add_argument(
        "--module-name",
        default="default",
        help="The output module name. Defaults to 'default'.",
    )
    # Apply any pre-configured defaults last; explicit CLI flags still win.
    for one_entry in json_params:
        parser.set_defaults(**one_entry)
    # Adds the workspace memory pool options.
    generate_workspace_pools_args(parser)
def drive_compile(args):
    """Invoke tvmc.compiler module with command line arguments

    Parameters
    ----------
    args: argparse.Namespace
        Arguments from command line parser.

    Returns
    -------
    int
        Zero if successfully completed
    """
    # Fail early when the input model path does not point at a real file.
    if not os.path.isfile(args.FILE):
        raise TVMCException(
            f"Input file '{args.FILE}' doesn't exist, is a broken symbolic link, or a directory."
        )

    tvmc_model = frontends.load_model(args.FILE, args.model_format, args.input_shapes)

    # "--dump-code" arrives as a comma-separated string; normalise to a list.
    dump_code = [fmt.strip() for fmt in args.dump_code.split(",")] if args.dump_code else None
    dump_offloads = args.dump_offloads or ""

    extra_target_options = reconstruct_target_args(args)
    pools_target, codegen_targets = target_from_cli(args.target, extra_target_options)
    graph_transform_args = parse_graph_transform_args(args)

    compile_model(
        tvmc_model,
        args.target,
        opt_level=args.opt_level,
        executor=reconstruct_registry_entity(args, Executor),
        runtime=reconstruct_registry_entity(args, Runtime),
        tuning_records=args.tuning_records,
        package_path=args.output,
        cross=args.cross_compiler,
        cross_options=args.cross_compiler_options,
        output_format=args.output_format,
        dump_code=dump_code,
        dump_offloads=dump_offloads,
        target_host=None,
        disabled_pass=args.disabled_pass,
        pass_context_configs=args.pass_config,
        mod_name=args.module_name,
        additional_target_options=extra_target_options,
        workspace_pools=(
            workspace_pools_recombobulate(args, [pools_target], codegen_targets)
        ),
        **graph_transform_args,
    )

    return 0
def compile_model(
    tvmc_model: TVMCModel,
    target: str,
    opt_level: int = 3,
    executor: Optional[Executor] = Executor("graph"),
    runtime: Optional[Runtime] = Runtime("cpp"),
    tuning_records: Optional[str] = None,
    package_path: Optional[str] = None,
    cross: Optional[Union[str, Callable]] = None,
    cross_options: Optional[str] = None,
    output_format: str = "so",
    dump_code: Optional[List[str]] = None,
    dump_offloads: str = "",
    target_host: Optional[str] = None,
    disabled_pass: Optional[str] = None,
    pass_context_configs: Optional[List[str]] = None,
    additional_target_options: Optional[Dict[str, Dict[str, Any]]] = None,
    use_vm: bool = False,
    mod_name: Optional[str] = "default",
    workspace_pools: Optional[WorkspaceMemoryPools] = None,
    instruments: Optional[Sequence[PassInstrument]] = None,
    desired_layout: Optional[str] = None,
    desired_layout_ops: Optional[List[str]] = None,
    mixed_precision: bool = False,
    mixed_precision_ops: Optional[List[str]] = None,
    mixed_precision_calculation_type: Optional[str] = None,
    mixed_precision_acc_type: Optional[str] = None,
):
    """Compile a model from a supported framework into a TVM module.

    This function takes a union of the arguments of both frontends.load_model
    and compiler.compile_relay. The resulting TVM module can be executed using
    the graph executor.

    Parameters
    ----------
    tvmc_model : TVMCModel
        The model object that should be compiled.
    target : str
        The target for which to compile. Can be a plain string or
        a path.
    opt_level : int
        The option that controls various sorts of optimizations.
    executor : Executor, optional
        The executor configuration used when building; defaults to the
        graph executor.
    runtime : Runtime, optional
        The runtime configuration used when building; defaults to "cpp".
    tuning_records : str
        A path to tuning records produced using tvmc.tune. When provided,
        compilation will use more optimized kernels leading to better results.
    package_path : str, optional
        The path to export the compiled model to. If not provided it will
        be saved in a temporary directory.
    cross : str or callable object, optional
        Function that performs the actual compilation
    cross_options : str, optional
        Command line options to be passed to the cross compiler.
    output_format : str
        What format to use when saving the function library. Must be one of "so" or "tar".
        When compiling for a remote device without a cross compiler, "tar" will likely work better.
    dump_code : list[str], optional
        Dump the generated code for the specified source types, on
        the requested target. Choose from: ["asm", "ll", "tir", "relay"].
    dump_offloads : str
        Dump the information about the partition of input model's layers by external codegen.
        Can be '' to not dump at all, '-' to dump to the console
        or '<path_to_file>' to dump to the specified file.
    target_host : str, optional
        The target of the host machine if host-side code
        needs to be generated.
    disabled_pass: str, optional
        Comma-separated list of passes which needs to be disabled
        during compilation
    pass_context_configs: list[str], optional
        List of strings containing a set of configurations to be passed to the
        PassContext.
    additional_target_options: Optional[Dict[str, Dict[str, Any]]]
        Additional target options in a dictionary to combine with initial Target arguments
    use_vm: bool
        Whether to use the VM to compile the model as opposed to the graph executor
    mod_name: str, optional
        The module name
    workspace_pools: WorkspaceMemoryPools, optional
        Specification of WorkspacePoolInfo objects to be used as workspace memory in the
        compilation.
    instruments: Optional[Sequence[PassInstrument]]
        The list of pass instrument implementations.
    desired_layout: str, optional
        Can be one of "NCHW" or "NHWC". When specified, compatible operations in the graph
        will have their layout set to this format. Tasks will then be tuned using this
        specified layout.
    desired_layout_ops: list[str], optional
        The list of operators to be transformed with desired layout.
    mixed_precision: bool
        To enable mixed precision transformation. Disabled by default.
    mixed_precision_ops: list[str], optional
        The list of operators to be converted to mixed precision.
        Set to ["nn.conv2d", "nn.dense"] by default
    mixed_precision_calculation_type: str
        The calculation dtype to be used while mixed precision. Set to "float16" by default.
    mixed_precision_acc_type: str
        The accumulation data type to be used while mixed precision. Set to "float16" by default.

    Returns
    -------
    compiled_model : TVMCPackage
        The compiled TVMCModel ready to be run.
    """
    mod, params = tvmc_model.mod, tvmc_model.params
    # Normalise dump_code to a list of format names.
    if dump_code is None:
        dump_code = []
    if not isinstance(dump_code, list):
        dump_code = [dump_code]
    dumps = {}
    config = parse_configs(pass_context_configs)
    if "tir" in dump_code:
        # Register a lowering pass that records TIR functions into `dumps`.
        config, dumps = add_tir_to_dumps(config, dumps)
    initial_relay = None
    if dump_offloads != "":
        # add suffixes to the span field for calls in Relay
        mod = tag_suffixes(mod)
        # remember initial Relay
        initial_relay = deepcopy(mod)
    tvm_target, extra_targets = target_from_cli(target, additional_target_options)
    tvm_target, target_host = Target.canon_target_and_host(tvm_target, target_host)
    # Collect the partitioning pipeline of every external codegen requested
    # on the command line; their config keys feed into the PassContext config.
    partition_functions = []
    partition_opts = []
    for codegen_from_cli in extra_targets:
        codegen = composite_target.get_codegen_by_target(codegen_from_cli["name"])
        partition_functions.append(codegen["pass_pipeline"])
        partition_opts.append(codegen_from_cli["opts"])
        if codegen["config_key"] is not None:
            config[codegen["config_key"]] = codegen_from_cli["opts"]
    with tvm.transform.PassContext(
        opt_level=opt_level,
        config=config,
        disabled_pass=disabled_pass,
        instruments=instruments,
    ):
        # parse_graph_transform_args extracts the desired_layout*/mixed_precision*
        # keyword arguments of this function out of locals().
        transform_args = parse_graph_transform_args(locals())
        mod = apply_graph_transforms(mod, transform_args)
        for partition_function, opts in zip(partition_functions, partition_opts):
            mod = partition_function(mod, params, mod_name=mod_name, **opts)
        if initial_relay:
            # dump which operations are offloaded to which backend
            dump_operation_offloads(mod, initial_relay, dump_offloads)
        if tuning_records and os.path.exists(tuning_records):
            logger.debug("tuning records file provided: %s", tuning_records)
            # The records may be in auto_scheduler or AutoTVM format; probe
            # with auto_scheduler first and fall back to AutoTVM on failure.
            use_autoscheduler = True
            try:
                auto_scheduler.load_records(tuning_records)
            except tvm._ffi.base.TVMError:
                use_autoscheduler = False
            if use_autoscheduler:
                with auto_scheduler.ApplyHistoryBest(tuning_records):
                    config["relay.backend.use_auto_scheduler"] = True
                    logger.debug("building relay graph with autoscheduler")
                    graph_module = build(
                        mod,
                        tvm_target=tvm_target,
                        executor=executor,
                        runtime=runtime,
                        params=params,
                        use_vm=use_vm,
                        mod_name=mod_name,
                        workspace_pools=workspace_pools,
                    )
            else:
                with autotvm.apply_history_best(tuning_records):
                    logger.debug("building relay graph with tuning records")
                    graph_module = build(
                        mod,
                        tvm_target=tvm_target,
                        executor=executor,
                        runtime=runtime,
                        params=params,
                        use_vm=use_vm,
                        mod_name=mod_name,
                        workspace_pools=workspace_pools,
                    )
        else:
            logger.debug("building relay graph (no tuning records provided)")
            graph_module = build(
                mod,
                tvm_target=tvm_target,
                executor=executor,
                runtime=runtime,
                params=params,
                use_vm=use_vm,
                mod_name=mod_name,
                workspace_pools=workspace_pools,
            )
        # Generate output dump files with sources
        for source_type in dump_code:
            if source_type == "relay":
                dumps[source_type] = str(mod)
            elif source_type == "tir":
                # The TIR pass registered above appended one string per function.
                dumps[source_type] = "\n".join(dumps[source_type])
            else:
                lib = graph_module.lib if use_vm else graph_module.get_lib()
                # TODO lib.get_source call have inconsistent behavior for unsupported
                # formats (@leandron).
                dumps[source_type] = lib.get_source(source_type)
    # Create a new tvmc model package object from the graph definition.
    package_path = tvmc_model.export_package(
        graph_module, package_path, cross, cross_options, output_format
    )
    # Write dumps to file.
    if dumps:
        save_dumps(package_path, dumps)
    return TVMCPackage(package_path)
def build(
    mod: tvm.IRModule,
    tvm_target: str,
    executor: Executor,
    runtime: Runtime,
    params: Dict[str, tvm.nd.NDArray],
    use_vm: bool,
    mod_name: str,
    workspace_pools: Optional[WorkspaceMemoryPools],
):
    """
    Builds the model with the provided executor.

    Parameters
    ----------
    mod : tvm.IRModule
        The relay module corresponding to this model.
    tvm_target : str
        The target for which to compile. Can be a plain string or
        a path.
    executor : Executor
        The graph executor to build the model if use_vm is not True
    runtime : Runtime
        The runtime configuration.
    params : dict
        A parameter dictionary for the model.
    use_vm: bool
        Whether to use the VM to compile the model as opposed to the graph executor
    mod_name: str
        The module name
    workspace_pools : WorkspaceMemoryPools, optional
        Forwarded to relay.build as workspace_memory_pools (graph executor
        path only; the VM path does not use it).

    Returns
    -------
    The built module, either from relay.vm.compile (VM) or relay.build.
    """
    if not use_vm:
        logger.debug("building with relay build")
        return relay.build(
            mod,
            target=tvm_target,
            executor=executor,
            runtime=runtime,
            params=params,
            mod_name=mod_name,
            workspace_memory_pools=workspace_pools,
        )

    logger.debug("building with vm compile")
    return relay.vm.compile(mod, target=tvm_target, params=params)
def add_tir_to_dumps(config, dumps):
    """Register a debug lowering pass that collects TIR functions as strings.

    An empty list is installed under ``dumps["tir"]`` and a prim_func pass is
    appended to the ``tir.add_lower_pass`` entry of ``config`` (mutating the
    existing list in place when present) so each lowered TIR function gets
    recorded during the build.
    """
    dump_key = "tir"
    dumps[dump_key] = []
    # Phase 3 is the final TIR phase before codegen.
    final_phase = 3

    @tvm.tir.transform.prim_func_pass(opt_level=0)
    def _collect_tir(tir_func, _, __):
        dumps[dump_key].append(str(tir_func))
        return tir_func

    lower_passes = config.get("tir.add_lower_pass", [])
    lower_passes.append((final_phase, _collect_tir))
    config["tir.add_lower_pass"] = lower_passes
    return config, dumps
def save_dumps(module_name: str, dumps: Dict[str, str], dump_root: str = "."):
    """
    Serialize dump files to the disk.

    Each entry in ``dumps`` is written to ``<dump_root>/<module_name>.<format>``.

    Parameters
    ----------
    module_name : str
        File name, referring to the module that generated
        the dump contents
    dumps : dict
        The output contents to be saved into the files
    dump_root : str, optional
        Path in which dump files will be created
    """
    for dump_format, dump_contents in dumps.items():
        dump_name = f"{module_name}.{dump_format}"
        # Use an explicit encoding so the dump files are identical across
        # platforms (the default text encoding is locale-dependent).
        with open(Path(dump_root, dump_name), "w", encoding="utf-8") as f:
            f.write(dump_contents)
def dump_operation_offloads(mod: tvm.ir.IRModule, initial_mod: tvm.ir.IRModule, dump_path: str):
    """This helper function forms a line-by-line output of the initial Relay lines,
    indicating which operations are ported to which target,
    and indicating the composite that includes those operations;
    the 'generic' target refers to operations uploaded to the host, e.g.
    'target1 <- target1.qnn_conv2d'
    'target1 <- %0 = qnn.conv2d(%tfl.quantize, %v_param_1, ...'
    'target1 <- %1 = nn.bias_add(%0, %v_param_2, axis=3);'
    'target1 <- %2 = qnn.requantize(%1, meta[relay.Constant]...'
    'target2 <- target2.reshape'
    'target2 <- %3 = reshape(%2, newshape=[1, 1001]);'
    'generic <- %4 = nn.pad(%3, -128f, pad_width=[[0, 0], [1, 1]...'

    Parameters
    ----------
    mod : tvm.ir.IRModule
        The partitioned IRModule with external global functions.
    initial_mod : tvm.ir.IRModule
        The initial IRModule that gets generated from a relay frontend.
    dump_path: str
        Value of the "dump_offloads" compiler attribute.
        Could be dash ("-") or file path or empty string for
        printing to console, file or doing nothing respectively.
    """
    print_to_console = dump_path == "-"
    save_to_file = all([dump_path != "-", dump_path != ""])
    if print_to_console or save_to_file:
        # Maps span-based function IDs to (compiler_name, op_name) pairs.
        operations_distribution = analyze_operations_distribution(mod)
        # Annotation callback for astext(): appends target/op/id info to every
        # printed Relay expression so it can be parsed back out below.
        def annotate_f(x):
            ret = ""
            if isinstance(x, relay.Call):
                # if there is no x.span.source_name.name in operations_distribution,
                # this could mean that the span was not copied during the application of passes
                # to the Relay, in which case we can not associate the initial Relay string
                # with the resulting Relay call
                source_name = x.span.source_name.name
                suffix = tvm.relay.transform.suffixes.SUFFIX_STRING
                result = re.search(r"(.*)(" + suffix + r")(.*)", source_name)
                func_id = result.group(1)
                if func_id in operations_distribution:
                    compiler_name, op_name = operations_distribution[func_id]
                    ret = (
                        f", compiler_name: {compiler_name}, op_name: {op_name}, "
                        f"func_id: {func_id}"
                    )
                else:
                    ret = ", compiler_name: unknown, op_name: unknown, func_id: unknown"
            elif isinstance(x, (relay.Tuple, relay.TupleGetItem)):
                ret = ", compiler_name: none, op_name: none, func_id: none"
            return ret
        initial_relay_astext = initial_mod.astext(show_meta_data=False, annotate=annotate_f).split(
            "\n"
        )
        # funcs_list is a list of internal composite/function IDs.
        # funcs_list helps keep the order of lines from the initial Relay.
        funcs_list = []
        # target_statistic is a mapping of the target name to the
        # number of initial Relay calls offloaded on the target
        target_statistic = defaultdict(int)
        # funcs_dict is a mapping of the generated analyze_operations_distribution
        # internal composite/function IDs to a list, where:
        # 1st element is
        # (1a): "generic"|"unknown"|"none"* or
        # (1b): specific target name, like "ethos-u" or "cmsis-nn"
        # 2nd element is
        # (2a): corresponding initial Relay line for the case (1a) or
        # (2b): the name of the target composite function in the other case (1b)
        # 3rd element or subsequent ones are presented only for the case (2b)
        # and are the initial Relay's lines included in the corresponding
        # target composite function
        #
        # *Description of what is meant by "generic"|"unknown"|"none":
        # "generic" means that operation will be run on a host
        # "unknown" means that unique identifier of this Relay line not found in the partitioned
        # Relay and therefore not present in the operations_distribution dictionary
        # "none" means that this Relay line is not relay.Call
        funcs_dict = {}
        # Here we group together initial Relay lines from the one composite
        counter = itertools.count()
        for s in initial_relay_astext:
            # Parse the annotation appended by annotate_f above.
            result = re.search(
                r"(compiler_name: )(.*)(, op_name: )(.*)(, func_id: )((.*)(?=;)|(.*))", s
            )
            if result:
                target_name = result.group(2)
                op_name = result.group(4)
                func_id = result.group(6)
                if target_name != "none":
                    target_statistic[target_name] += 1
                # create an identifier for each "unknown" or "none" case to keep the lines order
                if func_id == "unknown" or func_id == "none" or target_name == "generic":
                    func_id = str(next(counter) * -1)
                if func_id not in funcs_dict:
                    funcs_list.append(func_id)
                    funcs_dict[func_id] = [target_name]
                    if target_name not in ["unknown", "generic", "none"]:
                        funcs_dict[func_id].append(op_name)
                # Strip the annotation before storing the printable Relay line.
                s = re.sub(r", compiler_name: (.*)", "", s).lstrip()
                funcs_dict[func_id].append(s)
        # Here we prepare the output for printing.
        # The output in most cases keeps the original order of the Relay lines
        # but some lines are moved to be in the corresponding composite group
        output = []
        total = 0
        output.append("Total number of operators and distribution by targets")
        output.append("Total:")
        for target, statistic in target_statistic.items():
            total += statistic
            output.append(f"{target}: {statistic}")
        # Patch the per-target counts back into the header lines.
        output[1] += f" {total}"
        output[len(target_statistic) + 1] += "\n"
        for func_id in funcs_list:
            _list = funcs_dict[func_id]
            if _list[0] != "none":
                output.append(f"{_list[0]:<15}<-{' ':5}{_list[1]}")
            else:
                output.append(f"{' ':>22}{_list[1]}")
            if _list[0] == "unknown":
                output.append(
                    "Warning: The above line means that some pass(es) \
in Relay partitioning"
                )
                output.append("do not copy the span when the call is recreated")
                output.append(
                    "and a line from initial Relay could not be associated \
with the resulting Relay"
                )
            for el in _list[2:]:
                output.append(f"{_list[0]:<15}<-{' ':10}{el}")
        if print_to_console:
            print("\n" + "\n".join(output))
        if save_to_file:
            file_path = os.path.abspath(dump_path)
            os.makedirs(os.path.dirname(file_path), exist_ok=True)
            with open(file_path, "w") as f:
                f.write("\n".join(output))
                f.write("\n")
| 27,001 | 38.133333 | 99 | py |
tvm | tvm-main/python/tvm/driver/tvmc/runner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=consider-using-from-import
"""
Provides support to run compiled networks both locally and remotely.
"""
from contextlib import ExitStack
import logging
import pathlib
from typing import Dict, Optional, Union
from tarfile import ReadError
import argparse
import sys
import json
import numpy as np
import tvm
from tvm import rpc
from tvm.runtime import vm
from tvm.autotvm.measure import request_remote
from tvm.contrib import graph_executor as executor
from tvm.contrib.debugger import debug_executor
from tvm.runtime import profiler_vm
from tvm.relay.param_dict import load_param_dict
from . import TVMCException
from .arguments import TVMCSuppressedArgumentParser
from .project import (
get_project_options,
get_and_check_options,
get_project_dir,
)
from .main import register_parser
from .model import TVMCPackage, TVMCResult
from .result_utils import get_top_results
from .tracker import tracker_host_port_from_cli
try:
import tvm.micro.project as project
from tvm.micro.project import TemplateProjectError
from tvm.micro.project_api.client import ProjectAPIServerNotFoundError
SUPPORT_MICRO = True
except (ImportError, AttributeError) as exception:
SUPPORT_MICRO = False
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
@register_parser
def add_run_parser(subparsers, main_parser, json_params):
    """Include parser for 'run' subcommand.

    After registering the common options, this hook does a second,
    suppressed parse of the command line: if the user selected
    '--device micro', the parser is augmented with project-specific
    options obtained from the micro Project API.
    """
    # Use conflict_handler='resolve' to allow '--list-options' option to be properly overridden
    # when augmenting the parser with the micro device options (i.e. when '--device micro').
    parser = subparsers.add_parser("run", help="run a compiled module", conflict_handler="resolve")
    parser.set_defaults(func=drive_run)
    # TODO --device needs to be extended and tested to support other targets,
    # like 'webgpu', etc (@leandron)
    parser.add_argument(
        "--device",
        choices=["cpu", "cuda", "cl", "metal", "vulkan", "rocm", "micro"],
        default="cpu",
        help="target device to run the compiled module. Defaults to 'cpu'",
    )
    parser.add_argument(
        "--fill-mode",
        choices=["zeros", "ones", "random"],
        default="random",
        help="fill all input tensors with values. In case --inputs/-i is provided, "
        "they will take precedence over --fill-mode. Any remaining inputs will be "
        "filled using the chosen fill mode. Defaults to 'random'",
    )
    parser.add_argument("-i", "--inputs", help="path to the .npz input file")
    parser.add_argument("-o", "--outputs", help="path to the .npz output file")
    parser.add_argument(
        "--print-time",
        action="store_true",
        help="record and print the execution time(s). Enabling print-time will result "
        " in (1 + repeat * number) executions of the model. (non-micro devices only)",
    )
    parser.add_argument(
        "--print-top",
        metavar="N",
        type=int,
        help="print the top n values and indices of the output tensor",
    )
    parser.add_argument(
        "--profile",
        action="store_true",
        help="generate profiling data from the runtime execution. "
        "Using --profile requires the Graph Executor Debug enabled on TVM. "
        "Profiling may also have an impact on inference time, "
        "making it take longer to be generated. (non-micro devices only)",
    )
    parser.add_argument("-v", "--verbose", action="count", default=0, help="increase verbosity.")
    parser.add_argument(
        "--end-to-end",
        action="store_true",
        help="Measure data transfers as well as model execution. This can provide a "
        "more realistic performance measurement in many cases. Requires "
        "'--print-time' to be specified.",
    )
    parser.add_argument(
        "--repeat",
        metavar="N",
        type=int,
        default=1,
        help="How many times to repeat the run. Requires '--print-time' to be "
        "specified. Defaults to '1'",
    )
    parser.add_argument(
        "--number",
        metavar="N",
        type=int,
        default=1,
        help="The number of runs to measure within each repeat. Requires "
        "'--print-time' to be specified. Defaults to '1'",
    )
    parser.add_argument(
        "--rpc-key",
        help="the RPC tracker key of the target device. (non-micro devices only)",
    )
    parser.add_argument(
        "--rpc-tracker",
        help="hostname (required) and port (optional, defaults to 9090) of the RPC tracker, "
        "e.g. '192.168.0.100:9999'. (non-micro devices only)",
    )
    parser.add_argument(
        "PATH",
        help="path to the compiled module file or to the project directory if '--device micro' "
        "is selected.",
    )
    parser.add_argument(
        "--list-options",
        action="store_true",
        help="show all run options and option choices when '--device micro' is selected. "
        "(micro devices only)",
    )
    # Probe the command line quietly: exit early unless the user asked
    # for a micro device, in which case the parser must be augmented.
    disposable_parser = TVMCSuppressedArgumentParser(main_parser)
    try:
        known_args, _ = disposable_parser.parse_known_args()
    except TVMCException:
        return
    if vars(known_args).get("device") != "micro":
        # No need to augment the parser for micro targets.
        return
    if SUPPORT_MICRO is False:
        sys.exit(
            "'--device micro' is not supported. "
            "Please build TVM with micro support (USE_MICRO ON)!"
        )
    project_dir = get_project_dir(known_args.PATH)
    try:
        project_ = project.GeneratedProject.from_directory(project_dir, None)
    except ProjectAPIServerNotFoundError:
        sys.exit(f"Error: Project API server not found in {project_dir}!")
    except TemplateProjectError:
        sys.exit(
            "Error: Project directory error. That usually happens when model.tar is not found."
        )
    project_info = project_.info()
    options_by_method = get_project_options(project_info)
    mlf_path = project_info["model_library_format_path"]
    parser.formatter_class = (
        argparse.RawTextHelpFormatter
    )  # Set raw help text so customized help_text format works
    parser.set_defaults(valid_options=options_by_method["open_transport"], mlf_path=mlf_path)
    # '--project-option' is mandatory only when at least one transport option is required.
    required = any([opt["required"] for opt in options_by_method["open_transport"]])
    nargs = "+" if required else "*"
    help_text_by_option = [opt["help_text"] for opt in options_by_method["open_transport"]]
    help_text = "\n\n".join(help_text_by_option) + "\n\n"
    parser.add_argument(
        "--project-option", required=required, metavar="OPTION=VALUE", nargs=nargs, help=help_text
    )
    # Replaces the generic '--list-options' registered above (conflict_handler="resolve").
    parser.add_argument(
        "--list-options",
        action="help",
        help="show this help message with platform-specific options and exit.",
    )
    # Apply any pre-configured defaults last; explicit CLI flags still win.
    for one_entry in json_params:
        parser.set_defaults(**one_entry)
def drive_run(args):
    """Invoke runner module with command line arguments

    Parameters
    ----------
    args: argparse.Namespace
        Arguments from command line parser.
    """
    path = pathlib.Path(args.PATH)
    options = None
    project_dir = None
    if args.device == "micro":
        # If it's a micro device, then grab the model.tar path from Project API instead.
        # args.PATH will be used too since it points to the project directory. N.B.: there is no
        # way to determine the model.tar path from the project dir or vice-versa (each platform
        # is free to put model.tar wherever it's convenient).
        project_dir = path
        path = pathlib.Path(args.mlf_path)
        # Check for options unavailable for micro targets.
        if args.rpc_key or args.rpc_tracker:
            raise TVMCException(
                "--rpc-key and/or --rpc-tracker can't be specified for micro targets."
            )
        # NOTE(review): this branch is unreachable — we are inside the
        # `args.device == "micro"` guard, so the condition below can never
        # hold. Likely intended for a different code path; confirm upstream.
        if args.device != "micro":
            raise TVMCException(
                f"Device '{args.device}' not supported. "
                "Only device 'micro' is supported to run a model in MLF, "
                "i.e. when '--device micro'."
            )
        if args.profile:
            raise TVMCException("--profile is not currently supported for micro devices.")
        if args.print_time:
            raise TVMCException("--print-time is not currently supported for micro devices.")
        # Get and check options for micro targets.
        options = get_and_check_options(args.project_option, args.valid_options)
    else:
        # Check for options only available for micro targets.
        if args.list_options:
            raise TVMCException(
                "--list-options is only availabe on micro targets, i.e. when '--device micro'."
            )
    try:
        tvmc_package = TVMCPackage(package_path=path, project_dir=project_dir)
    except IsADirectoryError:
        raise TVMCException(f"File {path} must be an archive, not a directory.")
    except FileNotFoundError:
        raise TVMCException(f"File {path} does not exist.")
    except ReadError:
        raise TVMCException(f"Could not read model from archive {path}!")
    rpc_hostname, rpc_port = tracker_host_port_from_cli(args.rpc_tracker)
    try:
        inputs = np.load(args.inputs) if args.inputs else {}
    except IOError as ex:
        raise TVMCException("Error loading inputs file: %s" % ex)
    result = run_module(
        tvmc_package,
        args.device,
        hostname=rpc_hostname,
        port=rpc_port,
        rpc_key=args.rpc_key,
        inputs=inputs,
        fill_mode=args.fill_mode,
        benchmark=args.print_time,
        repeat=args.repeat,
        number=args.number,
        profile=args.profile,
        end_to_end=args.end_to_end,
        options=options,
    )
    if args.print_time:
        stat_table = result.format_times()
        # print here is intentional
        print(stat_table)
    if args.print_top:
        top_results = get_top_results(result, args.print_top)
        # print here is intentional
        print(top_results)
    if args.outputs:
        # Save the outputs
        result.save(args.outputs)
def get_input_info(graph_str: str, params: Dict[str, tvm.nd.NDArray]):
    """Return the 'shape' and 'dtype' dictionaries for the input
    tensors of a compiled module.

    .. note::
        We can't simply get the input tensors from a TVM graph
        because weight tensors are treated equivalently. Therefore, to
        find the input tensors we look at the 'arg_nodes' in the graph
        (which are either weights or inputs) and check which ones don't
        appear in the params (where the weights are stored). These nodes
        are therefore inferred to be input tensors.

    .. note::
        There exists a more recent API to retrieve the input information
        directly from the module. However, this isn't supported when using
        with RPC due to a lack of support for Array and Map datatypes.
        Therefore, this function exists only as a fallback when RPC is in
        use. If RPC isn't being used, please use the more recent API.

    Parameters
    ----------
    graph_str : str
        JSON graph of the module serialized as a string.
    params : dict
        Parameter dictionary mapping name to value.

    Returns
    -------
    shape_dict : dict
        Shape dictionary - {input_name: tuple}.
    dtype_dict : dict
        dtype dictionary - {input_name: dtype}.
    """
    shape_dict = {}
    dtype_dict = {}
    params_dict = load_param_dict(params)
    # Use a set so the membership test in the loop below is O(1); with a list
    # the loop over arg_nodes would be quadratic in the number of parameters.
    param_names = set(params_dict.keys())
    graph = json.loads(graph_str)
    for node_id in graph["arg_nodes"]:
        node = graph["nodes"][node_id]
        # If a node is not in the params, infer it to be an input node
        name = node["name"]
        if name not in param_names:
            shape_dict[name] = graph["attrs"]["shape"][1][node_id]
            dtype_dict[name] = graph["attrs"]["dltype"][1][node_id]
    return shape_dict, dtype_dict
def generate_tensor_data(shape: tuple, dtype: str, fill_mode: str):
    """Generate data to produce a tensor of given shape and dtype.
    Random data generation depends on the dtype. For dtypes containing
    "int8", random integers in the range [0, 128) are generated. For all
    other types, random floats are generated in the range [-1, 1) and
    then cast to the appropriate dtype.
    This is used to quickly generate some data to input the models, as
    a way to check that compiled module is sane for running.
    Parameters
    ----------
    shape : tuple
        The shape of the tensor.
    dtype : str
        The dtype of the tensor.
    fill_mode : str
        The fill-mode to use, either "zeros", "ones" or "random".
    Returns
    -------
    tensor : np.array
        The generated tensor as a np.array.
    """
    if fill_mode == "zeros":
        return np.zeros(shape=shape, dtype=dtype)
    if fill_mode == "ones":
        return np.ones(shape=shape, dtype=dtype)
    if fill_mode == "random":
        # randint's high bound is exclusive, so 8-bit types stay in range.
        if "int8" in dtype:
            return np.random.randint(128, size=shape, dtype=dtype)
        return np.random.uniform(-1, 1, size=shape).astype(dtype)
    raise TVMCException("unknown fill-mode: {}".format(fill_mode))
def make_inputs_dict(
    shape_dict: tvm.container.Map,
    dtype_dict: tvm.container.Map,
    inputs: Optional[Dict[str, np.ndarray]] = None,
    fill_mode: str = "random",
):
    """Assemble the complete inputs dictionary for a graph.
    User-supplied data from 'inputs' is used where available; every
    remaining input tensor is filled with generated data according to
    the chosen fill-mode.
    Parameters
    ----------
    shape_dict : Map
        Shape dictionary - {input_name: tuple}.
    dtype_dict : Map
        dtype dictionary - {input_name: dtype}.
    inputs : dict, optional
        A dictionary that maps input names to numpy values.
    fill_mode : str, optional
        The fill-mode to use when generating tensor data.
        Can be either "zeros", "ones" or "random".
    Returns
    -------
    inputs_dict : dict
        Complete inputs dictionary - {input_name: np.array}.
    """
    logger.debug("creating inputs dict")
    user_inputs = {} if inputs is None else inputs
    # Reject any user-supplied name that is not an actual graph input.
    for supplied_name in user_inputs:
        if supplied_name not in shape_dict.keys():
            raise TVMCException(
                "the input tensor '{}' is not in the graph. Expected inputs: '{}'".format(
                    supplied_name, list(shape_dict.keys())
                )
            )
    # Build the final dict: user data where present, generated data elsewhere.
    inputs_dict = {}
    for graph_input in shape_dict:
        if graph_input in user_inputs:
            logger.debug("setting input '%s' with user input data", graph_input)
            inputs_dict[graph_input] = user_inputs[graph_input]
            continue
        # container.ShapeTuple -> tuple
        input_shape = tuple(shape_dict[graph_input])
        # container.String -> str
        input_dtype = str(dtype_dict[graph_input])
        logger.debug(
            "generating data for input '%s' (shape: %s, dtype: %s), using fill-mode '%s'",
            graph_input,
            input_shape,
            input_dtype,
            fill_mode,
        )
        inputs_dict[graph_input] = generate_tensor_data(input_shape, input_dtype, fill_mode)
    return inputs_dict
def run_module(
    tvmc_package: TVMCPackage,
    device: str,
    hostname: Optional[str] = None,
    port: Union[int, str] = 9090,
    rpc_key: Optional[str] = None,
    inputs: Optional[Dict[str, np.ndarray]] = None,
    fill_mode: str = "random",
    benchmark: bool = False,
    repeat: int = 10,
    number: int = 10,
    profile: bool = False,
    end_to_end: bool = False,
    options: dict = None,
):
    """Run a compiled graph executor module locally or remotely with
    optional input values.
    If input tensors are not specified explicitly, they can be filled
    with zeroes, ones or random data.
    Parameters
    ----------
    tvmc_package: TVMCPackage
        The compiled model package object that will be run.
    device: str,
        the device (e.g. "cpu" or "cuda") to be targeted by the RPC
        session, local or remote).
    hostname : str, optional
        The hostname of the target device on which to run.
    port : int, optional
        The port of the target device on which to run.
    rpc_key : str, optional
        The tracker key of the target device. If this is set, it
        will be assumed that remote points to a tracker.
    inputs : dict, optional
        A dictionary that maps input names to numpy values. If not provided,
        inputs will be generated using the fill_mode argument.
    fill_mode : str, optional
        The fill-mode to use when generating data for input tensors.
        Valid options are "zeros", "ones" and "random".
        Defaults to "random".
    benchmark : bool, optional
        Whether to benchmark the execution of the module. Enabling benchmark will
        result in (1 + repeat * number) executions of the model.
    repeat : int, optional
        How many times to repeat the run. Requires `benchmark` to be set to True.
    number : int, optional
        The number of runs to measure within each repeat.
        Requires `benchmark` to be set to True.
    profile : bool
        Whether to profile the run with the debug executor.
    end_to_end : bool
        Whether to measure the time of memory copies as well as model
        execution. Turning this on can provide a more realistic estimate
        of how long running the model in production would take.
        Requires `benchmark` to be set to True.
    Returns
    -------
    TVMCResult
        The results of the run, including the output data.
    """
    if not isinstance(tvmc_package, TVMCPackage):
        raise TVMCException(
            "This model doesn't seem to have been compiled yet. "
            "Try calling tvmc.compile on the model before running it."
        )
    # The ExitStack owns the (optional) micro session entered below so that
    # it is closed when this block exits.
    with ExitStack() as stack:
        # Currently only two package formats are supported: "classic" and
        # "mlf". The later can only be used for micro targets, i.e. with microTVM.
        if device == "micro":
            if tvmc_package.type != "mlf":
                raise TVMCException(f"Model {tvmc_package.package_path} is not a MLF archive.")
            project_dir = get_project_dir(tvmc_package.project_dir)
            # This is guaranteed to work since project_dir was already checked when
            # building the dynamic parser to accommodate the project options, so no
            # checks are in place when calling GeneratedProject.
            project_ = project.GeneratedProject.from_directory(project_dir, options)
        else:
            if tvmc_package.type == "mlf":
                raise TVMCException(
                    "You're trying to run a model saved using the Model Library Format (MLF). "
                    "MLF can only be used to run micro device ('--device micro')."
                )
        # Session selection: remote RPC (with or without a tracker), a micro
        # target session, or a local session.
        if hostname:
            if isinstance(port, str):
                port = int(port)
            # Remote RPC
            if rpc_key:
                logger.debug("Running on remote RPC tracker with key %s.", rpc_key)
                session = request_remote(rpc_key, hostname, port, timeout=1000)
            else:
                logger.debug("Running on remote RPC with no key.")
                session = rpc.connect(hostname, port)
        elif device == "micro":
            # Remote RPC (running on a micro target)
            logger.debug("Running on remote RPC (micro target).")
            try:
                session = tvm.micro.Session(project_.transport())
                stack.enter_context(session)
            # NOTE(review): bare `except` swallows the original error (including
            # KeyboardInterrupt); consider `except Exception as exc: raise ... from exc`.
            except:
                raise TVMCException("Could not open a session with the micro target.")
        else:
            # Local
            logger.debug("Running a local session.")
            session = rpc.LocalSession()
        # Micro targets don't support uploading a model. The model to be run
        # must be already flashed into the micro target before one tries
        # to run it. Hence skip model upload for micro targets.
        if device != "micro":
            session.upload(tvmc_package.lib_path)
            lib = session.load_module(tvmc_package.lib_name)
        # TODO expand to other supported devices, as listed in tvm.rpc.client (@leandron)
        logger.debug("Device is %s.", device)
        if device == "cuda":
            dev = session.cuda()
        elif device == "cl":
            dev = session.cl()
        elif device == "metal":
            dev = session.metal()
        elif device == "vulkan":
            dev = session.vulkan()
        elif device == "rocm":
            dev = session.rocm()
        elif device == "micro":
            dev = session.device
            lib = session.get_system_lib()
        else:
            assert device == "cpu"
            dev = session.cpu()
        # VM executor path: inputs must be supplied explicitly by the caller.
        if tvmc_package.type == "vm":
            assert inputs is not None, "vm runner requires inputs to be provided as a dict"
            input_tensor = {}
            for e, i in inputs.items():
                input_tensor[e] = tvm.nd.array(i, dev)
            if profile:
                logger.debug("Creating vm with profile enabled.")
                exe = profiler_vm.VirtualMachineProfiler(lib, dev)
                res = exe.profile(**input_tensor, func_name="main")
                # This print is intentional
                print(res)
                # NOTE(review): in this profile path neither `outputs` nor `times`
                # is ever assigned, so the `TVMCResult(outputs, times)` below will
                # raise NameError — confirm whether vm+profile is meant to return
                # a result at all.
            else:
                exe = vm.VirtualMachine(lib, dev)
                exe_outputs = exe.invoke("main", **input_tensor)
                if benchmark:
                    times = exe.benchmark(
                        dev,
                        **input_tensor,
                        func_name="main",
                        repeat=repeat,
                        number=number,
                        end_to_end=end_to_end,
                    )
                else:
                    exe.run(**input_tensor)
                    times = []
                # Special handling if the output only has a single value
                if not isinstance(exe_outputs, list):
                    exe_outputs = [exe_outputs]
                outputs = {}
                for i, val in enumerate(exe_outputs):
                    output_name = "output_{}".format(i)
                    outputs[output_name] = val.numpy()
        else:
            # Graph/AOT executor path.
            # TODO(gromero): Adjust for micro targets.
            if profile:
                logger.debug("Creating runtime with profiling enabled.")
                module = debug_executor.create(tvmc_package.graph, lib, dev, dump_root="./prof")
            else:
                if device == "micro":
                    logger.debug("Creating runtime (micro) with profiling disabled.")
                    if tvmc_package.executor_type == "aot":
                        module = tvm.micro.create_local_aot_executor(session)
                    else:
                        module = tvm.micro.create_local_graph_executor(tvmc_package.graph, lib, dev)
                else:
                    logger.debug("Creating runtime with profiling disabled.")
                    module = executor.create(tvmc_package.graph, lib, dev)
            if tvmc_package.executor_type == "graph":
                logger.debug("Loading params into the runtime module.")
                module.load_params(tvmc_package.params)
            logger.debug("Collecting graph input shape and type:")
            if isinstance(session, tvm.rpc.client.RPCSession):
                # RPC does not support datatypes such as Array and Map,
                # fallback to obtaining input information from graph json.
                shape_dict, dtype_dict = get_input_info(tvmc_package.graph, tvmc_package.params)
            else:
                shape_dict, dtype_dict = module.get_input_info()
            logger.debug("Graph input shape: %s", shape_dict)
            logger.debug("Graph input type: %s", dtype_dict)
            # Fill any inputs the caller did not supply (see make_inputs_dict).
            inputs_dict = make_inputs_dict(shape_dict, dtype_dict, inputs, fill_mode)
            logger.debug("Setting inputs to the module.")
            module.set_input(**inputs_dict)
            # Run must be called explicitly if profiling
            if profile:
                logger.info("Running the module with profiling enabled.")
                report = module.profile()
                # This print is intentional
                print(report)
            if not benchmark or device == "micro":
                # TODO(gromero): Fix time_evaluator() for micro targets. Once it's
                # fixed module.benchmark() can be used instead and this if/else can
                # be removed.
                module.run()
                times = []
            else:
                # Call the benchmarking function of the executor.
                # Optionally measure e2e data transfers from the
                # CPU to device memory overheads (e.g. PCIE
                # overheads if the device is a discrete GPU).
                if end_to_end:
                    dev = session.cpu()
                times = module.benchmark(dev, number=number, repeat=repeat, end_to_end=end_to_end)
            logger.debug("Collecting the output tensors.")
            num_outputs = module.get_num_outputs()
            outputs = {}
            for i in range(num_outputs):
                output_name = "output_{}".format(i)
                outputs[output_name] = module.get_output(i).numpy()
        return TVMCResult(outputs, times)
| 26,506 | 36.02095 | 100 | py |
tvm | tvm-main/python/tvm/driver/tvmc/result_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This file contains utility functions for processing the outputs
of TVMC models. These utilities are likely to be task specific,
overtime more will be added to support more machine learning tasks.
Examples
--------
The following code shows how one might postprocess
the output of a classification model.
.. code-block:: python
result = tvmc.run(tvmc_package, device="cpu")
top_results = result_utils.get_top_results(max_results=5)
"""
import numpy as np
from .model import TVMCResult
def get_top_results(result: TVMCResult, max_results: int):
    """Return the top n results from the output tensor.
    This function is primarily for image classification and will
    not necessarily generalize.
    Parameters
    ----------
    result : TVMCResult
        The output of a TVMCModel
    max_results : int
        Number of results to return
    Returns
    -------
    top_results : np.array
        Results array of shape (2, n).
        The first row is the indices and the second is the values.
    """
    # Rank the first row of "output_0" without mutating the stored output.
    scores = np.asarray(result.outputs["output_0"])[0]
    top_indices = scores.argsort()[-max_results:][::-1]
    # Stack the winning indices over their scores; numpy promotes both
    # rows to a common dtype.
    return np.array([top_indices, scores[top_indices]])
| 2,072 | 32.983607 | 67 | py |
tvm | tvm-main/python/tvm/driver/tvmc/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin,wrong-import-position
"""
TVMC - TVM driver command-line interface
"""
class TVMCException(Exception):
    """Base class for errors raised by the TVMC driver."""
class TVMCImportError(TVMCException):
    """TVMC exception subclass raised for import failures."""
from . import micro
from . import runner
from . import autotuner
from . import compiler
from . import result_utils
from .frontends import load_model as load
from .compiler import compile_model as compile
from .runner import run_module as run
from .autotuner import tune_model as tune
from .model import TVMCModel, TVMCPackage, TVMCResult
| 1,362 | 32.243902 | 62 | py |
tvm | tvm-main/python/tvm/driver/tvmc/composite_target.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Provides support to composite target on TVMC.
"""
import logging
# Make sure Vitis AI codegen is registered
import tvm.contrib.target.vitis_ai # pylint: disable=unused-import
from tvm.relay.op.contrib.arm_compute_lib import partition_for_arm_compute_lib
from tvm.relay.op.contrib.ethosn import partition_for_ethosn
from tvm.relay.op.contrib.cmsisnn import partition_for_cmsisnn
from tvm.relay.op.contrib.ethosu import partition_for_ethosu
from tvm.relay.op.contrib.bnns import partition_for_bnns
from tvm.relay.op.contrib.vitis_ai import partition_for_vitis_ai
from tvm.relay.op.contrib.clml import partition_for_clml
from tvm.driver.tvmc import TVMCException
# pylint: disable=invalid-name
logger = logging.getLogger("TVMC")
# Global dictionary to map targets
#
# Options
# -------
# config_key : str
# The configuration key to be used in the PassContext (if any).
# pass_pipeline : Callable
# A function to transform a Module before compilation, mainly used
# for partitioning for the target currently.
# Maps each composite-target name to its codegen configuration; the meaning
# of "config_key" and "pass_pipeline" is described in the Options comment
# above.
REGISTERED_CODEGEN = {
    "compute-library": {
        "config_key": None,
        "pass_pipeline": partition_for_arm_compute_lib,
    },
    "cmsis-nn": {
        "config_key": "relay.ext.cmsisnn.options",
        "pass_pipeline": partition_for_cmsisnn,
    },
    "ethos-n": {
        "config_key": "relay.ext.ethos-n.options",
        "pass_pipeline": partition_for_ethosn,
    },
    "ethos-u": {
        "config_key": "relay.ext.ethos-u.options",
        "pass_pipeline": partition_for_ethosu,
    },
    "bnns": {
        "config_key": None,
        "pass_pipeline": partition_for_bnns,
    },
    "vitis-ai": {
        "config_key": "relay.ext.vitis_ai.options",
        "pass_pipeline": partition_for_vitis_ai,
    },
    "clml": {
        "config_key": None,
        "pass_pipeline": partition_for_clml,
    },
}
def get_codegen_names():
    """List the name of every codegen registered with TVMC.
    Returns
    -------
    list of str
        all registered targets
    """
    return [codegen_name for codegen_name in REGISTERED_CODEGEN]
def get_codegen_by_target(name):
    """Look up the codegen entry registered under ``name``.
    Parameters
    ----------
    name : str
        The name of the target for which the codegen info should be retrieved.
    Returns
    -------
    dict
        requested target codegen information
    """
    try:
        codegen = REGISTERED_CODEGEN[name]
    except KeyError:
        raise TVMCException("Composite target %s is not defined in TVMC." % name)
    return codegen
| 3,269 | 28.727273 | 81 | py |
tvm | tvm-main/python/tvm/meta_schedule/extracted_task.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Extracted tasks from high-level IR."""
from typing import List
from tvm._ffi import register_object
from tvm.ir import IRModule
from tvm.runtime import Object
from tvm.target import Target
from . import _ffi_api
@register_object("meta_schedule.ExtractedTask")
class ExtractedTask(Object):
    """A tuning task extracted from the high-level IR
    Parameters
    ----------
    task_name : str
        The name of the task extracted
    mod : IRModule
        The high-level IR
    target: Target
        Target information
    dispatched : List[IRModule]
        A list of low-level IRs that the high-level IR could potentially dispatch to
    weight : int
        The weight of the task
    """
    task_name: str
    mod: IRModule
    # NOTE(review): `target` is documented above and passed to the constructor
    # but was missing from the attribute annotations; added for consistency.
    target: Target
    dispatched: List[IRModule]
    weight: int
    def __init__(
        self,
        task_name: str,
        mod: IRModule,
        target: Target,
        dispatched: List[IRModule],
        weight: int,
    ) -> None:
        # Construct the underlying C++ ExtractedTask object through the FFI.
        self.__init_handle_by_constructor__(
            _ffi_api.ExtractedTask, # type: ignore # pylint: disable=no-member
            task_name,
            mod,
            target,
            dispatched,
            weight,
        )
| 1,989 | 28.701493 | 84 | py |
tvm | tvm-main/python/tvm/meta_schedule/arg_info.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The argument information"""
from typing import Any, List, Union
from tvm._ffi import register_object
from tvm.ir import IRModule
from tvm.runtime import DataType, Object, ShapeTuple
from tvm.tir import PrimFunc
from . import _ffi_api
from .utils import _json_de_tvm
@register_object("meta_schedule.ArgInfo")
class ArgInfo(Object):
    """Argument information"""
    def as_json(self) -> Any:
        """Converts the ArgInfo to its corresponding JSON representation."""
        # The FFI call returns TVM containers; _json_de_tvm converts them into
        # plain Python objects.
        return _json_de_tvm(_ffi_api.ArgInfoAsJSON(self)) # type: ignore # pylint: disable=no-member
    @staticmethod
    def from_json(json_obj: Any) -> "ArgInfo":
        """Parse the argument information from a JSON object.
        Parameters
        ----------
        json_obj : Any
            The json object to parse.
        Returns
        -------
        parsed : ArgInfo
            The argument information parsed.
        """
        return _ffi_api.ArgInfoFromJSON(json_obj) # type: ignore # pylint: disable=no-member
    @staticmethod
    def from_prim_func(func: PrimFunc) -> List["ArgInfo"]:
        """Extract a list of the argument information from PrimFunc.
        Parameters
        ----------
        func : PrimFunc
            The PrimFunc to get argument information from.
        Returns
        -------
        extracted : List[ArgInfo]
            An array of the argument information derived.
        """
        return _ffi_api.ArgInfoFromPrimFunc(func) # type: ignore # pylint: disable=no-member
    @staticmethod
    def from_entry_func(mod: IRModule, remove_preproc: bool = True) -> List["ArgInfo"]:
        """Extract a list of the argument information from the entry func of an IRModule.
        Parameters
        ----------
        mod : IRModule
            The IRModule to get argument information from.
        remove_preproc : bool
            Whether to remove the preprocessing blocks.
        Returns
        -------
        extracted : List[ArgInfo]
            An array of the argument information derived.
        """
        return _ffi_api.ArgInfoFromEntryFunc(mod, remove_preproc) # type: ignore # pylint: disable=no-member
@register_object("meta_schedule.TensorInfo")
class TensorInfo(ArgInfo):
    """Tensor argument information
    Parameters
    ----------
    dtype : DataType
        The data type of the tensor.
    shape : ShapeTuple
        The shape of the tensor.
    """
    dtype: DataType
    shape: ShapeTuple
    def __init__(
        self,
        dtype: DataType,
        shape: Union[ShapeTuple, List[int]],
    ) -> None:
        """Constructor
        Parameters
        ----------
        dtype : DataType
            The data type of the tensor.
        shape : ShapeTuple
            The shape of the tensor.
        """
        # Normalize a plain Python list into TVM's ShapeTuple container.
        normalized_shape = shape if isinstance(shape, ShapeTuple) else ShapeTuple(shape)
        self.__init_handle_by_constructor__(
            _ffi_api.TensorInfo, # type: ignore # pylint: disable=no-member
            dtype,
            normalized_shape,
        )
| 3,887 | 29.857143 | 109 | py |
tvm | tvm-main/python/tvm/meta_schedule/trace_apply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Specialized applications of trace"""
from ..tir.schedule import Schedule, Trace
from ..target import Target
from . import _ffi_api
def schedule_using_anchor_trace(sch: Schedule, anchor_trace: Trace, target: Target) -> None:
    """Apply the trace from a TIR module whose anchor block is the same but fused elementwise op
    blocks differ. This function can be used for transferring a trace tuned on a conv2d -> add
    subgraph to other subgraphs having the same conv2d workload, for example. We call such trace
    an "anchor trace". Those blocks that are not scheduled by the given anchor trace will be either
    inlined or parallelized.
    Parameters
    ----------
    sch : Schedule
        The target schedule
    anchor_trace: Trace
        The trace generated for other TIR module having the same anchor block
    target : tvm.target.Target
        The compilation target
    """
    # Thin wrapper: the scheduling logic lives in the C++ FFI implementation.
    _ffi_api.ScheduleUsingAnchorTrace(sch, anchor_trace, target) # type: ignore
| 1,761 | 43.05 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/relay_integration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""MetaSchedule-Relay integration"""
from contextlib import contextmanager
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence, Set, Tuple, Union
# isort: off
from typing_extensions import Literal
# isort: on
import numpy as np # type: ignore
from tvm import nd
from tvm._ffi import get_global_func
from tvm.ir import IRModule, transform
from tvm.ir.instrument import PassInstrument
from tvm.runtime import NDArray
from tvm.target import Target
from .builder import Builder
from .cost_model import CostModel
from .database import Database
from .extracted_task import ExtractedTask
from .logging import get_loggers_from_work_dir
from .measure_callback import MeasureCallback
from .profiler import Profiler
from .runner import Runner
from .search_strategy import SearchStrategy
from .space_generator import SpaceGenerator
from .task_scheduler import TaskScheduler
from .tune import tune_tasks
from .tune_context import TuneContext
from .utils import fork_seed
if TYPE_CHECKING:
from tvm import relay
_extract_task = get_global_func( # pylint: disable=invalid-name
"relay.backend.MetaScheduleExtractTask",
allow_missing=True,
)
@contextmanager
def _autotvm_silencer():
    """Temporarily silence autotvm warnings, restoring the flag on exit."""
    from tvm import autotvm  # pylint: disable=import-outside-toplevel

    previous = autotvm.GLOBAL_SCOPE.silent
    autotvm.GLOBAL_SCOPE.silent = True
    try:
        yield
    finally:
        autotvm.GLOBAL_SCOPE.silent = previous
def _normalize_params(
    mod: IRModule,
    target: Union[Target, str],
    params: Optional[Dict[str, NDArray]],
    pass_config: Mapping[str, Any],
    executor: Optional["relay.backend.Executor"],
    runtime: Optional["relay.backend.Runtime"],
) -> Tuple[
    IRModule,
    Target,
    Dict[str, NDArray],
    Dict[str, Any],
    Optional["relay.backend.Executor"],
    Optional["relay.backend.Runtime"],
]:
    """Canonicalize the arguments shared by the relay tuning entry points."""
    from tvm import relay  # pylint: disable=import-outside-toplevel

    # A bare relay.Function is wrapped into an IRModule; a target string is
    # parsed into a Target object.
    if isinstance(mod, relay.Function):
        mod = IRModule.from_expr(mod)
    if not isinstance(target, Target):
        target = Target(target)
    # Convert any numpy parameter values to TVM NDArrays.
    relay_params = {
        name: nd.array(value) if isinstance(value, np.ndarray) else value
        for name, value in (params or {}).items()
    }
    if executor is None:
        executor = relay.backend.Executor("graph")
    if runtime is None:
        runtime = relay.backend.Runtime("cpp")
    # The module's "executor" attribute wins over the argument when present.
    if mod.get_attr("executor") is None:
        mod = mod.with_attr("executor", executor)
    else:
        executor = mod.get_attr("executor")
    return mod, target, relay_params, dict(pass_config), executor, runtime
def extract_tasks(
    mod: IRModule,
    target: Union[Target, str],
    params: Optional[Dict[str, NDArray]],
    *,
    opt_level: int = 3,
    pass_config: Mapping[str, Any] = MappingProxyType(
        {
            "relay.backend.use_meta_schedule": True,
            "relay.backend.tir_converter": "default",
        }
    ),
    executor: Optional["relay.backend.Executor"] = None,
    runtime: Optional["relay.backend.Runtime"] = None,
    module_equality: str = "structural",
    disabled_pass: Optional[Union[List[str], Set[str], Tuple[str]]] = None,
    instruments: Optional[Sequence[PassInstrument]] = None,
) -> List[ExtractedTask]:
    """Extract tuning tasks from a relay program.
    Parameters
    ----------
    mod : IRModule
        The module or function to tune
    target : tvm.target.Target
        The compilation target
    params : Optional[Dict[str, tvm.runtime.NDArray]]
        The associated parameters of the program
    opt_level : int
        The optimization level of the compilation
    pass_config : Mapping[str, Any]
        The pass configuration
    executor : Optional[relay.backend.Executor]
        The executor to use
    runtime : Optional[relay.backend.Runtime]
        The runtime to use
    module_equality : Optional[str]
        A string to specify the module equality testing and hashing method.
        It must be one of the followings:
          - "structural": Use StructuralEqual/Hash
          - "ignore-ndarray": Same as "structural", but ignore ndarray raw data during
                              equality testing and hashing.
          - "anchor-block": Apply equality testing and hashing on the anchor block extracted from a
                            given module. The "ignore-ndarray" variant is used for the extracted
                            blocks or in case no anchor block is found.
                            For the definition of the anchor block, see tir/analysis/analysis.py.
    disabled_pass : Optional[Union[List[str], Set[str], Tuple[str]]]
        The list of disabled passes
    instruments : Optional[Sequence[PassInstrument]]
        The list of pass instrument implementations.
    Returns
    -------
    tasks: List[ExtractedTask]
        The tasks extracted from this network
    """
    # pylint: disable=import-outside-toplevel
    from tvm import autotvm

    # pylint: enable=import-outside-toplevel
    mod, target, params, pass_config, _ex, _rt = _normalize_params(
        mod,
        target,
        params,
        pass_config,
        executor,
        runtime,
    )
    # For non-CUDA targets with no explicit autotvm dispatch context, fall
    # back to a TOPHub context (presumably pre-tuned schedules — confirm
    # against autotvm.tophub); otherwise use an empty context.
    if target.kind.name != "cuda" and isinstance(
        autotvm.DispatchContext.current, autotvm.FallbackContext
    ):
        tophub_context = autotvm.tophub.context(target)
    else:
        tophub_context = autotvm.utils.EmptyContext()
    # Extraction runs under the profiler timer, with autotvm warnings
    # silenced and the requested pass configuration applied.
    with Profiler.timeit("TaskExtraction"):
        with target, _autotvm_silencer(), tophub_context:
            with transform.PassContext(
                opt_level=opt_level,
                config=pass_config,
                disabled_pass=disabled_pass,
                instruments=instruments,
            ):
                return list(_extract_task(mod, target, params, module_equality))
def extracted_tasks_to_tune_contexts(
    extracted_tasks: List[ExtractedTask],
    work_dir: str,
    space: SpaceGenerator.SpaceGeneratorType = "post-order-apply",
    strategy: SearchStrategy.SearchStrategyType = "evolutionary",
    num_tuning_cores: Union[Literal["physical", "logical"], int] = "physical",
    seed: Optional[int] = None,
) -> Tuple[List[TuneContext], List[float]]:
    """Convert ExtractedTask to TuneContext.
    Parameters
    ----------
    extracted_tasks : List[ExtractedTask]
        The tasks to be converted
    work_dir : str
        The working directory to store logs and databases
    space : SpaceGenerator.SpaceGeneratorType
        The space generator to use.
    strategy : SearchStrategy.SearchStrategyType
        The search strategy to use.
    num_tuning_cores : Union[Literal["physical", "logical"], int]
        The number of CPU cores to use during tuning.
    seed : Optional[int]
        The random seed to use.
    Returns
    -------
    tasks : List[TuneContext]
        The converted tasks
    task_weights : List[float]
        The weights of the tasks
    """
    tasks: List[TuneContext] = []
    task_weights: List[float] = []
    # Pair every task with its own per-task logger (named after the task)
    # and an independent random state forked from `seed`.
    for task, logger, rand_state in zip(
        extracted_tasks,
        get_loggers_from_work_dir(work_dir, [t.task_name for t in extracted_tasks]),
        fork_seed(seed, n=len(extracted_tasks)),
    ):
        tasks.append(
            TuneContext(
                # Only the first dispatched (low-level) module is tuned.
                mod=task.dispatched[0],
                target=task.target,
                space_generator=space,
                search_strategy=strategy,
                task_name=task.task_name,
                logger=logger,
                rand_state=rand_state,
                num_threads=num_tuning_cores,
            ).clone()
        )
        task_weights.append(task.weight)
    return tasks, task_weights
def tune_relay(
    mod: IRModule,
    params: Dict[str, NDArray],
    target: Union[str, Target],
    work_dir: str,
    max_trials_global: int,
    *,
    max_trials_per_task: Optional[int] = None,
    num_trials_per_iter: int = 64,
    builder: Builder.BuilderType = "local",
    runner: Runner.RunnerType = "local",
    database: Database.DatabaseType = "json",
    cost_model: CostModel.CostModelType = "xgb",
    measure_callbacks: MeasureCallback.CallbackListType = "default",
    task_scheduler: TaskScheduler.TaskSchedulerType = "gradient",
    space: SpaceGenerator.SpaceGeneratorType = "post-order-apply",
    strategy: SearchStrategy.SearchStrategyType = "evolutionary",
    seed: Optional[int] = None,
    module_equality: str = "structural",
    num_tuning_cores: Union[Literal["physical", "logical"], int] = "physical",
    disabled_pass: Optional[Union[List[str], Set[str], Tuple[str]]] = None,
    instruments: Optional[Sequence[PassInstrument]] = None,
) -> Database:
    """Tune a Relay program end to end and return the database of tuning records.

    Parameters
    ----------
    mod : IRModule
        The Relay module to tune.
    params : Dict[str, NDArray]
        The weight parameters associated with the module.
    target : Union[str, Target]
        The compilation target.
    work_dir : str
        The directory used to store tuning logs and records.
    max_trials_global : int
        The global budget of measurement trials.
    max_trials_per_task : Optional[int]
        The per-task budget of measurement trials.
    num_trials_per_iter : int
        The number of trials per tuning iteration.
    builder : Builder.BuilderType
        The builder implementation to use.
    runner : Runner.RunnerType
        The runner implementation to use.
    database : Database.DatabaseType
        The database implementation to use.
    cost_model : CostModel.CostModelType
        The cost model implementation to use.
    measure_callbacks : MeasureCallback.CallbackListType
        The measurement callbacks to use.
    task_scheduler : TaskScheduler.TaskSchedulerType
        The task scheduler implementation to use.
    space : SpaceGenerator.SpaceGeneratorType
        The design-space generator to use.
    strategy : SearchStrategy.SearchStrategyType
        The search strategy to use.
    seed : Optional[int]
        The random seed.
    module_equality : str
        The module equality/hash method; one of:
        - "structural": StructuralEqual/Hash;
        - "ignore-ndarray": as "structural" but ndarray raw data is ignored;
        - "anchor-block": equality/hashing on the anchor block extracted from the
          module ("ignore-ndarray" is used for the extracted blocks, or when no
          anchor block exists; see tir/analysis/analysis.py).
    num_tuning_cores : Union[Literal["physical", "logical"], int]
        The number of CPU cores used during tuning.
    disabled_pass : Optional[Union[List[str], Set[str], Tuple[str]]]
        The passes disabled during task extraction.
    instruments : Optional[Sequence[PassInstrument]]
        The pass instrument implementations.

    Returns
    -------
    database : Database
        The database that contains all tuning records.
    """
    # Step 1: pull tunable tasks out of the Relay program.
    relay_tasks = extract_tasks(
        mod,
        target,
        params,
        module_equality=module_equality,
        disabled_pass=disabled_pass,
        instruments=instruments,
    )
    # Step 2: wrap each task in a TuneContext with its own logger and seed.
    tune_contexts, weights = extracted_tasks_to_tune_contexts(
        extracted_tasks=relay_tasks,
        work_dir=work_dir,
        space=space,
        strategy=strategy,
        seed=seed,
        num_tuning_cores=num_tuning_cores,
    )
    # Step 3: hand everything to the task scheduler for the actual search.
    return tune_tasks(
        tasks=tune_contexts,
        task_weights=weights,
        work_dir=work_dir,
        max_trials_global=max_trials_global,
        max_trials_per_task=max_trials_per_task,
        num_trials_per_iter=num_trials_per_iter,
        builder=builder,
        runner=runner,
        database=database,
        cost_model=cost_model,
        measure_callbacks=measure_callbacks,
        task_scheduler=task_scheduler,
        module_equality=module_equality,
    )
def compile_relay(
    database: Database,
    mod: IRModule,
    target: Union[Target, str],
    params: Optional[Dict[str, NDArray]],
    *,
    backend: Literal["graph", "vm"] = "graph",
    opt_level: int = 3,
    pass_config: Mapping[str, Any] = MappingProxyType(
        {
            "relay.backend.use_meta_schedule": True,
            "relay.backend.tir_converter": "default",
        }
    ),
    executor: Optional["relay.backend.Executor"] = None,
    disabled_pass: Optional[Union[List[str], Set[str], Tuple[str]]] = None,
    runtime: Optional["relay.backend.Runtime"] = None,
    instruments: Optional[Sequence[PassInstrument]] = None,
):
    """Compile a Relay program against the records in a MetaSchedule database.

    Parameters
    ----------
    database : Database
        The database of tuning records to compile with.
    mod : IRModule
        The Relay program to be compiled.
    target : tvm.target.Target
        The compilation target.
    params : Optional[Dict[str, tvm.runtime.NDArray]]
        The weight parameters associated with the program.
    backend : str
        The backend to use; one of "graph" or "vm".
    opt_level : int
        The optimization level of the compilation.
    pass_config : Mapping[str, Any]
        The pass configuration.
    executor : Optional[relay.backend.Executor]
        The executor passed to relay.build; not supported by RelayVM.
    disabled_pass : Optional[Union[List[str], Set[str], Tuple[str]]]
        The list of disabled passes.
    runtime : Optional[relay.backend.Runtime]
        The runtime passed to relay.build; not supported by RelayVM.
    instruments : Optional[Sequence[PassInstrument]]
        The pass instrument implementations.

    Returns
    -------
    lib : Union[Module, tvm.runtime.vm.Executable]
        The built runtime module or vm Executable for the given relay workload.
    """
    # pylint: disable=import-outside-toplevel
    from tvm import relay

    # pylint: enable=import-outside-toplevel
    mod, target, params, pass_config, executor, runtime = _normalize_params(
        mod, target, params, pass_config, executor, runtime
    )
    # Make lowering consult the tuned records in the attached database.
    pass_config.setdefault("relay.backend.use_meta_schedule_dispatch", True)
    with Profiler.timeit("PostTuningCompilation"):
        with target, _autotvm_silencer(), database:
            with transform.PassContext(
                opt_level=opt_level,
                config=pass_config,
                disabled_pass=disabled_pass,
                instruments=instruments,
            ):
                if backend == "vm":
                    return relay.vm.compile(mod, target=target, params=params)
                if backend == "graph":
                    return relay.build(
                        mod, target=target, params=params, executor=executor, runtime=runtime
                    )
                raise ValueError(f"Unknown backend: {backend}")
def is_meta_schedule_enabled() -> bool:
    """Check whether meta schedule is turned on in the current pass context.

    Returns
    -------
    enabled: bool
        Whether the meta schedule is enabled
    """
    current_config = transform.PassContext.current().config
    return current_config.get("relay.backend.use_meta_schedule", False)
| 16,173 | 34.16087 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/logging.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Logging interface in MetaSchedule"""
import logging
import logging.config
import os
import os.path as osp
from logging import Logger
from typing import Any, Callable, Dict, List, Optional
def get_logger(name: str) -> Logger:
    """Create or fetch the python logger registered under ``name``.

    This is a thin wrapper over :func:`logging.getLogger`.

    Parameters
    ----------
    name : str
        The name of the logger.

    Returns
    -------
    logger : Logger
        The logger instance.
    """
    logger = logging.getLogger(name)
    return logger
def get_logging_func(logger: Logger) -> Optional[Callable[[int, str, int, str], None]]:
    """Build a callback that routes a message to ``logger`` at a given level.

    Parameters
    ----------
    logger : Logger
        The logger instance; when None, no callback is produced.

    Returns
    -------
    result : Optional[Callable]
        A ``(level, filename, lineno, msg)`` callback, or None when no logger
        was supplied.
    """
    if logger is None:
        return None
    dispatch = {
        logging.DEBUG: logger.debug,
        logging.INFO: logger.info,
        logging.WARNING: logger.warning,
        logging.ERROR: logger.error,
        # logging.FATAL intentionally unsupported
    }

    def logging_func(level: int, filename: str, lineo: int, msg: str):
        if level < 0:
            # Negative level is a sentinel: clear the notebook/console output.
            from IPython.display import (  # type: ignore # pylint: disable=import-outside-toplevel
                clear_output,
            )

            clear_output(wait=True)
        else:
            prefix = f"[{os.path.basename(filename)}:{lineo}] "
            dispatch[level](prefix + msg)

    return logging_func
def create_loggers(
    log_dir: str,
    params: List[Dict[str, Any]],
    logger_config: Optional[Dict[str, Any]] = None,
    disable_existing_loggers: bool = False,
):
    """Create loggers from configuration.

    Builds a ``logging.config.dictConfig``-style dictionary with one console
    handler plus file handlers, expands the ``{log_dir}`` / ``{logger_name}``
    placeholders once per entry of ``params``, and applies it.

    Parameters
    ----------
    log_dir : str
        The directory where log files are written.
    params : List[Dict[str, Any]]
        One substitution dict per logger; its keys (e.g. ``log_dir``,
        ``logger_name``) fill the ``{...}`` placeholders in the templates below.
    logger_config : Optional[Dict[str, Any]]
        An optional base config to extend. NOTE: it is mutated in place;
        defaults are only filled via ``setdefault`` so caller entries win.
    disable_existing_loggers : bool
        Forwarded to ``dictConfig``: whether pre-existing loggers are disabled.
    """
    if logger_config is None:
        config = {}
    else:
        config = logger_config
    config.setdefault("loggers", {})
    config.setdefault("handlers", {})
    config.setdefault("formatters", {})
    # The shared "tvm.meta_schedule" logger feeds both the console and a file.
    global_logger_name = "tvm.meta_schedule"
    global_logger = logging.getLogger(global_logger_name)
    if global_logger.level is logging.NOTSET:
        global_logger.setLevel(logging.DEBUG)
    # Console verbosity mirrors whatever level the global logger already has.
    console_logging_level = logging._levelToName[  # pylint: disable=protected-access
        global_logger.level
    ]
    config["loggers"].setdefault(
        global_logger_name,
        {
            "level": logging.DEBUG,
            # Preserve any handlers attached before this call, then add ours.
            "handlers": [handler.get_name() for handler in global_logger.handlers]
            + [global_logger_name + ".console", global_logger_name + ".file"],
            "propagate": False,
        },
    )
    # "{logger_name}" is a template: expanded once per entry of `params` below.
    config["loggers"].setdefault(
        "{logger_name}",
        {
            "level": "DEBUG",
            "handlers": [
                "{logger_name}.file",
            ],
            "propagate": False,
        },
    )
    config["handlers"].setdefault(
        global_logger_name + ".console",
        {
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stdout",
            "formatter": "tvm.meta_schedule.standard_formatter",
            "level": console_logging_level,
        },
    )
    config["handlers"].setdefault(
        global_logger_name + ".file",
        {
            "class": "logging.FileHandler",
            "filename": "{log_dir}/" + __name__ + ".task_scheduler.log",
            "mode": "a",
            "level": "DEBUG",
            "formatter": "tvm.meta_schedule.standard_formatter",
        },
    )
    config["handlers"].setdefault(
        "{logger_name}.file",
        {
            "class": "logging.FileHandler",
            "filename": "{log_dir}/{logger_name}.log",
            "mode": "a",
            "level": "DEBUG",
            "formatter": "tvm.meta_schedule.standard_formatter",
        },
    )
    config["formatters"].setdefault(
        "tvm.meta_schedule.standard_formatter",
        {
            "format": "%(asctime)s [%(levelname)s] %(message)s",
            "datefmt": "%Y-%m-%d %H:%M:%S",
        },
    )
    # set up dictConfig loggers
    p_config = {"version": 1, "disable_existing_loggers": disable_existing_loggers}
    for k, v in config.items():
        if k in ["formatters", "handlers", "loggers"]:
            # Only these three sections contain "{...}" templates to expand.
            p_config[k] = _batch_parameterize_config(v, params)  # type: ignore
        else:
            p_config[k] = v
    logging.config.dictConfig(p_config)
    # check global logger
    if global_logger.level not in [logging.DEBUG, logging.INFO]:
        global_logger.warning(
            "Logging level set to %s, please set to logging.INFO"
            " or logging.DEBUG to view full log.",
            logging._levelToName[global_logger.level],  # pylint: disable=protected-access
        )
    global_logger.info("Logging directory: %s", log_dir)
def _batch_parameterize_config(
    config: Dict[str, Any],
    params: List[Dict[str, str]],
) -> Dict[str, Any]:
    """Expand a configuration template once per parameter set.

    Parameters
    ----------
    config : Dict[str, Any]
        The config dict whose names/values may contain ``{...}`` placeholders.
    params : List[Dict[str, str]]
        The substitution dicts to expand the template with.

    Returns
    -------
    result : Dict[str, Any]
        The union of all expansions; for a given expanded name, the first
        parameter set that produces it wins.
    """
    expanded: Dict[str, Any] = {}
    for template_name, template_cfg in config.items():
        for param_set in params:
            name = template_name.format(**param_set)
            if name in expanded:
                continue  # first expansion of a given name wins
            expanded[name] = _parameterize_config(template_cfg, param_set)
    return expanded
def _parameterize_config(
    config: Dict[str, Any],
    params: Dict[str, str],
) -> Dict[str, Any]:
    """Recursively substitute ``{...}`` placeholders in a config dict.

    String keys and values are formatted with ``params``; dict values are
    handled recursively; list values are formatted element-wise; everything
    else is passed through unchanged.

    Parameters
    ----------
    config : Dict[str, Any]
        The config dict to expand.
    params : Dict[str, str]
        The substitution mapping.

    Returns
    -------
    result : Dict[str, Any]
        A new dict with all placeholders substituted.
    """
    parameterized: Dict[str, Any] = {}
    for key, value in config.items():
        new_key = key.format(**params) if isinstance(key, str) else key
        if isinstance(value, str):
            new_value = value.format(**params)
        elif isinstance(value, dict):
            new_value = _parameterize_config(value, params)
        elif isinstance(value, list):
            new_value = [item.format(**params) for item in value]
        else:
            new_value = value
        parameterized[new_key] = new_value
    return parameterized
def get_loggers_from_work_dir(
    work_dir: str,
    task_names: List[str],
) -> List[Logger]:
    """Create one logger per task, all writing under ``<work_dir>/logs``.

    Parameters
    ----------
    work_dir : str
        The work directory.
    task_names : List[str]
        The list of task names.

    Returns
    -------
    loggers : List[Logger]
        The list of loggers, one per task name, in order.
    """
    log_dir = osp.join(work_dir, "logs")
    os.makedirs(log_dir, exist_ok=True)
    # Zero-pad the task index so file names sort lexicographically.
    index_width = len(str(len(task_names) - 1))
    pattern = __name__ + ".task_{i:0" + str(index_width) + "d}_{name}"
    logger_names = [
        pattern.format(i=i, name=task_name) for i, task_name in enumerate(task_names)
    ]
    create_loggers(
        log_dir=log_dir,
        params=[{"log_dir": log_dir, "logger_name": n} for n in logger_names],
    )
    return [get_logger(n) for n in logger_names]
| 7,876 | 28.837121 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities for meta schedule"""
import ctypes
import os
import shutil
from typing import Any, Callable, List, Optional, Union
import numpy as np # type: ignore
import psutil # type: ignore
from tvm._ffi import get_global_func, register_func
from tvm.error import TVMError
from tvm.ir import Array, IRModule, Map
from tvm.rpc import RPCSession
from tvm.runtime import PackedFunc, String
from tvm.tir import FloatImm, IntImm
def derived_object(cls: type) -> type:
    """A decorator to register derived subclasses for TVM objects.

    Parameters
    ----------
    cls : type
        The derived class to be registered.

    Returns
    -------
    cls : type
        The decorated TVM object.

    Example
    -------
    .. code-block:: python

        @register_object("meta_schedule.PyRunner")
        class _PyRunner(meta_schedule.Runner):
            def __init__(self, f_run: Callable = None):
                self.__init_handle_by_constructor__(_ffi_api.RunnerPyRunner, f_run)

        class PyRunner:
            _tvm_metadata = {
                "cls": _PyRunner,
                "methods": ["run"]
            }
            def run(self, runner_inputs):
                raise NotImplementedError

        @derived_object
        class LocalRunner(PyRunner):
            def run(self, runner_inputs):
                ...
    """
    import functools  # pylint: disable=import-outside-toplevel
    import weakref  # pylint: disable=import-outside-toplevel

    # NOTE: `base` and `cls` are free variables here; `base` is bound further
    # below, before `_extract` is first invoked from TVMDerivedObject.__init__.
    def _extract(inst: Any, name: str):
        """Extract function from intrinsic class."""

        def method(*args, **kwargs):
            return getattr(inst, name)(*args, **kwargs)

        if getattr(base, name) is getattr(cls, name) and name != "__str__":
            # for task scheduler return None means calling default function
            # otherwise it will trigger a TVMError of method not implemented
            # on the c++ side when you call the method, __str__ not required
            return None
        return method

    assert isinstance(cls.__base__, type)
    assert hasattr(
        cls, "_tvm_metadata"
    ), "Please use the user-facing method overriding class, i.e., PyRunner."
    base = cls.__base__
    metadata = getattr(base, "_tvm_metadata")
    fields = metadata.get("fields", [])
    methods = metadata.get("methods", [])

    class TVMDerivedObject(metadata["cls"]):  # type: ignore
        """The derived object to avoid cyclic dependency."""

        def __init__(self, *args, **kwargs):
            """Constructor."""
            self.handle = None
            self._inst = cls(*args, **kwargs)
            super().__init__(
                # the constructor's parameters, builder, runner, etc.
                *[getattr(self._inst, name) for name in fields],
                # the function methods, init_with_tune_context, build, run, etc.
                *[_extract(self._inst, name) for name in methods],
            )
            # for task scheduler hybrid funcs in c++ & python side
            # using weakref to avoid cyclic dependency
            self._inst._outer = weakref.ref(self)

        def __getattr__(self, name: str):
            """Bridge the attribute function."""
            # Prefer the wrapped python instance; fall back to the TVM object.
            try:
                return self._inst.__getattribute__(name)
            except AttributeError:
                return super(TVMDerivedObject, self).__getattr__(name)

        def __setattr__(self, name, value):
            # Route all writes except the wrapper's own bookkeeping fields
            # ("_inst", "key", "handle") to the wrapped python instance.
            if name not in ["_inst", "key", "handle"]:
                self._inst.__setattr__(name, value)
            else:
                super(TVMDerivedObject, self).__setattr__(name, value)

    # Make the wrapper impersonate the user class for introspection purposes.
    functools.update_wrapper(TVMDerivedObject.__init__, cls.__init__)  # type: ignore
    TVMDerivedObject.__name__ = cls.__name__
    TVMDerivedObject.__doc__ = cls.__doc__
    TVMDerivedObject.__module__ = cls.__module__
    # Re-expose classmethods/staticmethods of the user class on the wrapper.
    for key, value in cls.__dict__.items():
        if isinstance(value, (classmethod, staticmethod)):
            setattr(TVMDerivedObject, key, value)
    return TVMDerivedObject
@register_func("meta_schedule.cpu_count")
def _cpu_count_impl(logical: bool = True) -> int:
    """Count the CPUs available on this machine.

    Parameters
    ----------
    logical : bool = True
        Count logical CPUs when True, physical cores otherwise.

    Returns
    -------
    cpu_count : int
        The number of logical or physical CPUs, never less than 1.

    Note
    ----
    The meta schedule search infra intentionally does not adopt the following
    TVM conventions:
    - C++ API `tvm::runtime::threading::MaxConcurrency()`
    - Environment variable `TVM_NUM_THREADS` or
    - Environment variable `OMP_NUM_THREADS`
    These control the runtime behavior of generated kernels rather than the
    host-side search; honoring them here could interfere with profiling of
    generated kernels when measuring locally.
    """
    # psutil.cpu_count may return None when the count cannot be determined.
    available = psutil.cpu_count(logical=logical)
    return available if available else 1
def cpu_count(logical: bool = True) -> int:
    """Public wrapper over the registered CPU-counting helper.

    Parameters
    ----------
    logical : bool = True
        Count logical CPUs when True, physical cores otherwise.

    Returns
    -------
    cpu_count : int
        The number of logical or physical CPUs in the system.

    Note
    ----
    The meta schedule search infra intentionally does not adopt the following
    TVM conventions:
    - C++ API `tvm::runtime::threading::MaxConcurrency()`
    - Environment variable `TVM_NUM_THREADS` or
    - Environment variable `OMP_NUM_THREADS`
    These control the runtime behavior of generated kernels rather than the
    host-side search; honoring them here could interfere with profiling of
    generated kernels when measuring locally.
    """
    return _cpu_count_impl(logical=logical)
@register_func("meta_schedule.using_ipython")
def _using_ipython() -> bool:
    """Check whether the current process runs in a Jupyter (ZMQ) IPython shell.

    Returns
    -------
    result : bool
        True only when the interactive shell is a ``ZMQInteractiveShell``.
    """
    try:
        shell_name = get_ipython().__class__.__name__  # type: ignore
    except NameError:  # get_ipython only exists inside an IPython session
        return False
    return shell_name == "ZMQInteractiveShell"
@register_func("meta_schedule.print_interactive_table")
def print_interactive_table(data: str) -> None:
    """Render a serialized MetaSchedule performance table in a notebook.

    Parameters
    ----------
    data : str
        The serialized performance table from MetaSchedule table printer.
    """
    import pandas as pd  # type: ignore # pylint: disable=import-outside-toplevel
    from IPython.display import display  # type: ignore # pylint: disable=import-outside-toplevel

    pd.set_option("display.max_rows", None)
    pd.set_option("display.max_colwidth", None)
    rows = []
    for line in data.strip().split("\n"):
        if set(line) == {"-"}:
            continue  # skip horizontal separator rows made entirely of dashes
        # Drop the text before the first "|"; keep the remaining cells.
        rows.append(line.split("|")[1:])
    # The first surviving row carries the column headers.
    display(pd.DataFrame(rows[1:], columns=rows[0]))
def get_global_func_with_default_on_worker(
    name: Union[None, str, Callable],
    default: Callable,
) -> Callable:
    """Get the registered global function on the worker process.

    Parameters
    ----------
    name : Union[None, str, Callable]
        If given a string, retrieve the function in TVM's global registry;
        If given a python function, return it as it is;
        Otherwise, return `default`.
    default : Callable
        The function to be returned if `name` is None.

    Returns
    -------
    result : Callable
        The retrieved global function or `default` if `name` is None

    Raises
    ------
    ValueError
        If `name` is a string that is not registered on the worker process.
    """
    if name is None:
        return default
    if callable(name):
        return name
    try:
        return get_global_func(name)
    except TVMError as error:
        # BUG FIX: this message was a plain string, so the literal "{name}"
        # placeholder was shown to users instead of the function name.
        raise ValueError(
            f"Function '{name}' is not registered on the worker process. "
            "The build function and export function should be registered in the worker process. "
            "Note that the worker process is only aware of functions registered in TVM package, "
            "if there are extra functions to be registered, "
            "please send the registration logic via initializer."
        ) from error
def get_global_func_on_rpc_session(
    session: RPCSession,
    name: str,
    extra_error_msg: Optional[str] = None,
) -> PackedFunc:
    """Fetch a PackedFunc from the global registry of an RPC session.

    Parameters
    ----------
    session : RPCSession
        The RPCSession to be retrieved from
    name : str
        The name of the PackedFunc
    extra_error_msg : Optional[str]
        Extra information to provide in the error message

    Returns
    -------
    result : PackedFunc
        The result

    Raises
    ------
    AttributeError
        If the function is not found on the remote RPC server.
    """
    try:
        func = session.get_function(name)
    except AttributeError as error:
        message = f'Unable to find function "{name}" on the remote RPC server.'
        if extra_error_msg:
            message = f"{message} {extra_error_msg}"
        raise AttributeError(message) from error
    return func
@register_func("meta_schedule.remove_build_dir")
def remove_build_dir(artifact_path: str) -> None:
    """Recursively delete the build directory that contains ``artifact_path``."""
    build_dir = os.path.dirname(artifact_path)
    shutil.rmtree(build_dir)
def _json_de_tvm(obj: Any) -> Any:
    """Unpack a TVM nested container into plain python JSON values.

    Parameters
    ----------
    obj : Any
        The TVM nested container to be unpacked.

    Returns
    -------
    result : Any
        The unpacked json object.

    Raises
    ------
    TypeError
        If the value is of a type with no JSON representation.
    """
    # NOTE: python int/float must be checked before IntImm/FloatImm so that
    # native numbers are returned untouched.
    if obj is None:
        return None
    if isinstance(obj, (int, float)):
        return obj
    if isinstance(obj, (IntImm, FloatImm)):
        return obj.value
    if isinstance(obj, (str, String)):
        return str(obj)
    if isinstance(obj, Array):
        return [_json_de_tvm(item) for item in obj]
    if isinstance(obj, Map):
        return {_json_de_tvm(key): _json_de_tvm(value) for key, value in obj.items()}
    raise TypeError("Not supported type: " + str(type(obj)))
def shash2hex(mod: IRModule) -> str:
    """Compute the structural hash of a module as a string.

    Parameters
    ----------
    mod : IRModule
        The module to be hashed.

    Returns
    -------
    result : str
        The structural hash of the module.
    """
    shash_func = get_global_func("meta_schedule._SHash2Hex")
    return str(shash_func(mod))
def _get_default_str(obj: Any) -> str:
    """Default repr for derived objects: ``meta_schedule.<Class>(<hex address>)``."""
    # pylint: disable=protected-access
    outer_handle = obj._outer().handle  # type: ignore
    # pylint: enable=protected-access
    return f"meta_schedule.{obj.__class__.__name__}({_to_hex_address(outer_handle)})"
def _to_hex_address(handle: ctypes.c_void_p) -> str:
    """Render a ctypes handle as a hexadecimal address string.

    Parameters
    ----------
    handle : ctypes.c_void_p
        The handle to be converted.

    Returns
    -------
    result : str
        The hexadecimal address of the handle.
    """
    address = ctypes.cast(handle, ctypes.c_void_p).value
    return hex(address)
def fork_seed(seed: Optional[int], n: int) -> List[int]:
    """Fork one optional seed into ``n`` independent seeds in [1, 2**30).

    Parameters
    ----------
    seed : Optional[int]
        The master seed; None lets numpy pick a nondeterministic one.
    n : int
        The number of seeds to produce.

    Returns
    -------
    seeds : List[int]
        ``n`` python ints drawn from a ``RandomState`` seeded with ``seed``.
    """
    rng = np.random.RandomState(seed=seed)
    return rng.randint(1, 2 ** 30, size=n).tolist()
| 12,150 | 31.060686 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.meta_schedule"""
from .._ffi import _init_api

# Expose every C++-registered "meta_schedule.*" packed function as an
# attribute of this module, so sibling modules can call them directly.
_init_api("meta_schedule", __name__)  # pylint: disable=protected-access
| 925 | 43.095238 | 72 | py |
tvm | tvm-main/python/tvm/meta_schedule/tir_integration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""MetaSchedule-TIR integration"""
from typing import List, Mapping, Optional, Tuple, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm import ir, tir
from tvm.target import Target
from .builder import Builder
from .cost_model import CostModel
from .database import Database
from .logging import get_loggers_from_work_dir
from .measure_callback import MeasureCallback
from .runner import Runner
from .search_strategy import SearchStrategy
from .space_generator import SpaceGenerator
from .task_scheduler import TaskScheduler
from .tune import tune_tasks
from .tune_context import TuneContext, _normalize_mod
from .utils import fork_seed
def tune_tir(  # pylint: disable=too-many-locals
    mod: Union[ir.IRModule, tir.PrimFunc],
    target: Union[str, Target],
    work_dir: str,
    max_trials_global: int,
    *,
    max_trials_per_task: Optional[int] = None,
    num_trials_per_iter: int = 64,
    builder: Builder.BuilderType = "local",
    runner: Runner.RunnerType = "local",
    database: Database.DatabaseType = "json",
    cost_model: CostModel.CostModelType = "xgb",
    measure_callbacks: MeasureCallback.CallbackListType = "default",
    task_scheduler: TaskScheduler.TaskSchedulerType = "gradient",
    space: SpaceGenerator.SpaceGeneratorType = "post-order-apply",
    strategy: SearchStrategy.SearchStrategyType = "evolutionary",
    num_tuning_cores: Union[Literal["physical", "logical"], int] = "physical",
    seed: Optional[int] = None,
    module_equality: str = "structural",
    special_space: Optional[Mapping[str, SpaceGenerator.SpaceGeneratorType]] = None,
) -> Database:
    """Tune a TIR function or an IRModule of TIR functions.

    Parameters
    ----------
    mod : Union[ir.IRModule, tir.PrimFunc]
        The TIR IRModule to tune.
    target : Union[str, Target]
        The target to tune for.
    work_dir : str
        The working directory.
    max_trials_global : int
        The maximum number of trials to run globally.
    max_trials_per_task : Optional[int]
        The maximum number of trials to run per task.
    num_trials_per_iter : int
        The number of trials to run per iteration
    builder : Builder.BuilderType
        The builder.
    runner : Runner.RunnerType
        The runner.
    database : Database.DatabaseType
        The database.
    cost_model : CostModel.CostModelType
        The cost model.
    measure_callbacks : MeasureCallback.CallbackListType
        The measure callbacks.
    task_scheduler : TaskScheduler.TaskSchedulerType
        The task scheduler.
    space : SpaceGenerator.SpaceGeneratorType
        The space generator used for every task not listed in `special_space`.
    strategy : SearchStrategy.SearchStrategyType
        The search strategy.
    num_tuning_cores : Union[Literal["physical", "logical"], int]
        The number of CPU cores to use during tuning.
    seed : Optional[int]
        The seed for the random number generator; forked per task.
    module_equality : Optional[str]
        A string to specify the module equality testing and hashing method.
    special_space : Optional[Mapping[str, SpaceGenerator.SpaceGeneratorType]]
        A mapping from task name to a special space generator for that task;
        a None entry (or a None default `space`) excludes the task entirely.

    Returns
    -------
    database : Database
        The database with all tuning records
    """
    if isinstance(mod, tir.PrimFunc):
        mod = _normalize_mod(mod)
    # Collect every PrimFunc in the module as a (name, func) tuning task.
    named_tasks: List[Tuple[str, tir.PrimFunc]] = []
    for gv, func in mod.functions.items():  # pylint: disable=invalid-name
        if isinstance(func, tir.PrimFunc):
            named_tasks.append((gv.name_hint, func))
    # Sort by name so task order (and thus logger/seed assignment) is deterministic.
    named_tasks.sort(key=lambda x: x[0])
    task_names = [x for x, _ in named_tasks]
    tasks: List[TuneContext] = []
    for task_name, task_func, logger, rand_state in zip(
        task_names,
        [x for _, x in named_tasks],
        get_loggers_from_work_dir(work_dir, task_names),
        fork_seed(seed, n=len(named_tasks)),
    ):
        # A per-task space overrides the default; None skips the task.
        if special_space and task_name in special_space:
            task_space = special_space[task_name]
        else:
            task_space = space
        if task_space is None:
            continue
        tasks.append(
            TuneContext(
                mod=task_func,
                target=target,
                space_generator=task_space,
                search_strategy=strategy,
                task_name=task_name,
                rand_state=rand_state,
                num_threads=num_tuning_cores,
                logger=logger,
            ).clone()
        )
    # All tasks are weighted equally.
    return tune_tasks(
        tasks=tasks,
        task_weights=[1.0] * len(tasks),
        work_dir=work_dir,
        max_trials_global=max_trials_global,
        max_trials_per_task=max_trials_per_task,
        num_trials_per_iter=num_trials_per_iter,
        builder=builder,
        runner=runner,
        database=database,
        cost_model=cost_model,
        measure_callbacks=measure_callbacks,
        task_scheduler=task_scheduler,
        module_equality=module_equality,
    )
def compile_tir(
    database: Database,
    mod: Union[ir.IRModule, tir.PrimFunc],
    target: Union[Target, str],
) -> tir.Schedule:
    """Look up the best recorded schedule for a TIR workload.

    Parameters
    ----------
    database : Database
        The database of tuning records.
    mod : Union[ir.IRModule, tir.PrimFunc]
        The TIR workload whose schedule is requested.
    target : Union[str, Target]
        The target the records were tuned for.

    Returns
    -------
    sch : tir.Schedule
        The best schedule found in the database.
    """
    normalized_mod = _normalize_mod(mod)
    if isinstance(target, Target):
        target_obj = target
    else:
        target_obj = Target(target)
    return database.query_schedule(normalized_mod, target_obj, workload_name="main")
| 6,515 | 34.221622 | 84 | py |
tvm | tvm-main/python/tvm/meta_schedule/tune.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The core tuning API"""
from typing import List, Optional
from .builder import Builder
from .cost_model import CostModel
from .database import Database
from .measure_callback import MeasureCallback
from .runner import Runner
from .task_scheduler import TaskScheduler
from .tune_context import TuneContext
def tune_tasks(
    *,
    tasks: List[TuneContext],
    task_weights: List[float],
    work_dir: str,
    max_trials_global: int,
    max_trials_per_task: Optional[int] = None,
    num_trials_per_iter: int = 64,
    builder: Builder.BuilderType = "local",
    runner: Runner.RunnerType = "local",
    database: Database.DatabaseType = "json",
    cost_model: CostModel.CostModelType = "xgb",
    measure_callbacks: MeasureCallback.CallbackListType = "default",
    task_scheduler: TaskScheduler.TaskSchedulerType = "gradient",
    module_equality: str = "structural",
) -> Database:
    """Tune a list of tasks with a task scheduler.

    Parameters
    ----------
    tasks : List[TuneContext]
        The list of tasks to tune.
    task_weights : List[float]
        The weight of each task.
    work_dir : str
        The working directory.
    max_trials_global : int
        The maximum number of trials to run globally.
    max_trials_per_task : Optional[int]
        The maximum number of trials to run per task;
        falls back to `max_trials_global` when omitted.
    num_trials_per_iter : int
        The number of trials to run per iteration.
    builder : Builder.BuilderType
        The builder, or a spec accepted by `Builder.create`.
    runner : Runner.RunnerType
        The runner, or a spec accepted by `Runner.create`.
    database : Database.DatabaseType
        The database, or a spec accepted by `Database.create`.
    cost_model : CostModel.CostModelType
        The cost model, or a spec accepted by `CostModel.create`.
    measure_callbacks : MeasureCallback.CallbackListType
        The measure callbacks.
    task_scheduler : TaskScheduler.TaskSchedulerType
        The task scheduler.
    module_equality : Optional[str]
        A string specifying the module equality testing and hashing method.
        It must be one of:
        - "structural": Use StructuralEqual/Hash
        - "ignore-ndarray": Same as "structural", but ignore ndarray raw data during
          equality testing and hashing.
        - "anchor-block": Apply equality testing and hashing on the anchor block
          extracted from a given module. The "ignore-ndarray" variant is used for the
          extracted blocks or in case no anchor block is found. For the definition of
          the anchor block, see tir/analysis/analysis.py.

    Returns
    -------
    database : Database
        The database holding all tuning records.
    """
    if not tasks:
        raise ValueError("No tasks to tune.")
    if len(tasks) != len(task_weights):
        raise ValueError(
            f"Length of tasks ({len(tasks)}) and task_weights ({len(task_weights)}) do not match."
        )
    num_cores = tasks[0].num_threads
    if max_trials_per_task is None:
        max_trials_per_task = max_trials_global
    # Normalize each component: turn spec strings into concrete objects.
    if not isinstance(builder, Builder):
        builder = Builder.create(builder, max_workers=num_cores)
    if not isinstance(runner, Runner):
        runner = Runner.create(runner, max_workers=num_cores)
    if database == "json":
        # Only the default JSON database is rooted at the working directory.
        database = Database.create(database, work_dir=work_dir, module_equality=module_equality)
    elif not isinstance(database, Database):
        database = Database.create(database, module_equality=module_equality)
    if not isinstance(cost_model, CostModel):
        cost_model = CostModel.create(cost_model, num_tuning_cores=num_cores)
    # A single callback is promoted to a list; "default" expands to the standard set.
    if isinstance(measure_callbacks, MeasureCallback):
        measure_callbacks = [measure_callbacks]
    elif measure_callbacks == "default":
        measure_callbacks = MeasureCallback.create(measure_callbacks)
    if not isinstance(task_scheduler, TaskScheduler):
        task_scheduler = TaskScheduler.create(task_scheduler)
    # Delegate the actual tuning loop to the task scheduler.
    task_scheduler.tune(
        tasks=tasks,
        task_weights=task_weights,
        max_trials_global=max_trials_global,
        max_trials_per_task=max_trials_per_task,
        num_trials_per_iter=num_trials_per_iter,
        builder=builder,
        runner=runner,
        measure_callbacks=measure_callbacks,
        database=database,
        cost_model=cost_model,
    )
    return database
| 5,052 | 37.572519 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Package `tvm.meta_schedule`. The meta schedule infrastructure."""
from . import (
arg_info,
builder,
cost_model,
database,
feature_extractor,
measure_callback,
mutator,
postproc,
relay_integration,
runner,
schedule,
schedule_rule,
search_strategy,
space_generator,
tir_integration,
trace_apply,
)
from .builder import Builder
from .cost_model import CostModel
from .database import Database
from .extracted_task import ExtractedTask
from .feature_extractor import FeatureExtractor
from .measure_callback import MeasureCallback
from .mutator import Mutator
from .postproc import Postproc
from .profiler import Profiler
from .relay_integration import is_meta_schedule_enabled
from .runner import Runner
from .schedule_rule import ScheduleRule
from .search_strategy import MeasureCandidate, SearchStrategy
from .space_generator import SpaceGenerator
from .task_scheduler import TaskScheduler
from .tir_integration import tune_tir
from .tune import tune_tasks
from .tune_context import TuneContext
from .utils import derived_object
| 1,880 | 33.2 | 68 | py |
tvm | tvm-main/python/tvm/meta_schedule/tune_context.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta Schedule tuning context."""
from typing import TYPE_CHECKING, List, Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm import IRModule
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.target import Target
from tvm.tir import PrimFunc, Schedule
from . import _ffi_api
from .logging import Logger, get_logger, get_logging_func
from .utils import cpu_count
if TYPE_CHECKING:
from .cost_model import CostModel
from .database import Database
from .runner import RunnerResult
from .search_strategy import MeasureCandidate, SearchStrategy
from .space_generator import SpaceGenerator
def _normalize_mod(mod: Union[PrimFunc, IRModule]) -> IRModule:
    """Normalize the input to an IRModule.

    A bare PrimFunc is wrapped into a single-function IRModule named "main";
    a single-function IRModule whose entry is not named "main" is renamed.
    Modules with multiple functions are returned unchanged.

    Parameters
    ----------
    mod : Union[PrimFunc, IRModule]
        The input to normalize.

    Returns
    -------
    mod : IRModule
        The normalized module.

    Raises
    ------
    TypeError
        If `mod` is neither a PrimFunc nor an IRModule.
    """
    if isinstance(mod, PrimFunc):
        if not (mod.attrs and "global_symbol" in mod.attrs):
            mod = mod.with_attr("global_symbol", "main")
        mod = mod.with_attr("tir.noalias", True)
        mod = IRModule({"main": mod})
    if not isinstance(mod, IRModule):
        raise TypeError(f"Expected `mod` to be PrimFunc or IRModule, but gets: {mod}")
    func_names = mod.get_global_vars()
    # Only rename when the module has exactly one function. The original code
    # unconditionally unpacked `(func_name,) = func_names`, which raised
    # ValueError for any module with more than one global var and made the
    # `len(func_names) == 1` check below unreachable.
    if len(func_names) == 1:
        (func_name,) = func_names
        if func_name.name_hint != "main":
            mod = IRModule({"main": mod[func_name]})
    return mod
@register_object("meta_schedule.TuneContext")
class TuneContext(Object):
    """The tune context class is designed to contain all resources for a tuning task.

    Parameters
    ----------
    mod : Optional[IRModule] = None
        The workload to be optimized.
    target : Optional[Target] = None
        The target to be optimized for.
    space_generator : Union[None, ScheduleFnType, SpaceGenerator] = None
        The design space generator.
    search_strategy : Union[None, SearchStrategy] = None
        The search strategy. If None, the strategy is left blank.
    task_name : str = "main"
        The name of the tuning task.
    rand_state : int = -1
        The random state. Needs to be an integer in [1, 2^31-1];
        -1 means seeding with a random number.
    num_threads : Union[int, "physical", "logical"] = "physical"
        The number of threads to be used; the strings "physical" and
        "logical" mean the physical / logical CPU count respectively.
    logger : Optional[Logger] = None
        The logger for the tuning task; defaults to this module's logger.
    """

    # Fields mirrored from the underlying C++ TuneContext object.
    mod: Optional[IRModule]
    target: Optional[Target]
    space_generator: Optional["SpaceGenerator"]
    search_strategy: Optional["SearchStrategy"]
    task_name: str
    logger: Optional[Logger]
    rand_state: int
    num_threads: int

    def __init__(
        self,
        mod: Optional[IRModule] = None,
        *,
        target: Union[Target, str, None] = None,
        space_generator: Union["SpaceGenerator.SpaceGeneratorType", None] = None,
        search_strategy: Union["SearchStrategy.SearchStrategyType", None] = None,
        task_name: str = "main",
        rand_state: int = -1,
        num_threads: Union[int, Literal["physical", "logical"]] = "physical",
        logger: Optional[Logger] = None,
    ):
        # Deferred imports to avoid circular imports at module load time.
        # pylint: disable=import-outside-toplevel
        import tvm.tir.tensor_intrin # pylint: disable=unused-import
        from .search_strategy import SearchStrategy
        from .space_generator import SpaceGenerator
        # pylint: enable=import-outside-toplevel
        # Normalize every argument to the concrete object the FFI expects.
        if isinstance(mod, PrimFunc):
            mod = _normalize_mod(mod)
        if target is not None:
            if not isinstance(target, Target):
                target = Target(target)
        if space_generator is not None:
            if not isinstance(space_generator, SpaceGenerator):
                space_generator = SpaceGenerator.create(space_generator)
        if search_strategy is not None:
            if not isinstance(search_strategy, SearchStrategy):
                search_strategy = SearchStrategy.create(search_strategy)
        if logger is None:
            logger = get_logger(__name__)
        if not isinstance(num_threads, int):
            if num_threads == "physical":
                num_threads = cpu_count(logical=False)
            elif num_threads == "logical":
                num_threads = cpu_count(logical=True)
            else:
                raise ValueError(
                    f"Invalid num_threads: {num_threads}, "
                    "should be either an integer, 'physical', or 'logical'"
                )
        # Argument order here must match the C++ TuneContext constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.TuneContext, # type: ignore # pylint: disable=no-member
            mod,
            target,
            space_generator,
            search_strategy,
            task_name,
            num_threads,
            rand_state,
            get_logging_func(logger),
        )
        _ffi_api.TuneContextInitialize(self) # type: ignore # pylint: disable=no-member

    def generate_design_space(self) -> List[Schedule]:
        """Generate design spaces given a module.

        Delegated to self.space_generator.generate_design_space with self.mod

        Returns
        -------
        design_spaces : List[tvm.tir.Schedule]
            The generated design spaces, i.e., schedules.
        """
        if self.mod is None:
            raise ValueError("`mod` is not provided. Please construct TuneContext with `mod`")
        if self.space_generator is None:
            raise ValueError(
                "space_generator is not provided."
                "Please construct TuneContext with space_generator"
            )
        return self.space_generator.generate_design_space(self.mod)

    def pre_tuning(
        self,
        max_trials: int,
        num_trials_per_iter: int = 64,
        design_spaces: Optional[List[Schedule]] = None,
        database: Optional["Database"] = None,
        cost_model: Optional["CostModel"] = None,
    ) -> None:
        """A method to be called for SearchStrategy to do necessary preparation before tuning.

        Delegated to self.search_strategy.pre_tuning.

        Parameters
        ----------
        max_trials : int
            The maximum number of trials to be executed.
        num_trials_per_iter : int = 64
            The number of trials to be executed per iteration.
        design_spaces : Optional[List[tvm.tir.Schedule]]
            The design spaces used during tuning process.
            If None, use the outcome of `self.generate_design_space()`.
        database : Optional[Database] = None
            The database used during tuning process.
            If None, and the search strategy is `EvolutionarySearch`,
            then use `tvm.meta_schedule.database.MemoryDatabase`.
        cost_model : Optional[CostModel] = None
            The cost model used during tuning process.
            If None, and the search strategy is `EvolutionarySearch`,
            then use `tvm.meta_schedule.cost_model.RandomModel`.
        """
        # Deferred imports to avoid circular imports at module load time.
        # pylint: disable=import-outside-toplevel
        from .cost_model import RandomModel
        from .database import MemoryDatabase
        from .search_strategy import EvolutionarySearch
        # pylint: enable=import-outside-toplevel
        if self.search_strategy is None:
            raise ValueError(
                "search_strategy is not provided."
                "Please construct TuneContext with search_strategy"
            )
        if design_spaces is None:
            design_spaces = self.generate_design_space()
        # Evolutionary search requires a database and a cost model; fall back
        # to in-memory / random implementations when none are given.
        if database is None:
            if isinstance(self.search_strategy, EvolutionarySearch):
                database = MemoryDatabase() # type: ignore
        if cost_model is None:
            if isinstance(self.search_strategy, EvolutionarySearch):
                cost_model = RandomModel() # type: ignore
        return self.search_strategy.pre_tuning(
            max_trials,
            num_trials_per_iter,
            design_spaces,
            database,
            cost_model,
        )

    def post_tuning(self) -> None:
        """A method to be called for SearchStrategy to do necessary cleanup after tuning.

        Delegated to self.search_strategy.post_tuning.
        """
        if self.search_strategy is None:
            raise ValueError(
                "search_strategy is not provided."
                "Please construct TuneContext with search_strategy"
            )
        return self.search_strategy.post_tuning()

    def generate_measure_candidates(self) -> Optional[List["MeasureCandidate"]]:
        """Generate a batch of measure candidates from design spaces for measurement.

        Delegated to self.search_strategy.generate_measure_candidates.

        Returns
        -------
        measure_candidates : Optional[List[IRModule]]
            The measure candidates generated, None if search is finished.
        """
        if self.search_strategy is None:
            raise ValueError(
                "search_strategy is not provided."
                "Please construct TuneContext with search_strategy"
            )
        return self.search_strategy.generate_measure_candidates()

    def notify_runner_results(
        self,
        measure_candidates: List["MeasureCandidate"],
        results: List["RunnerResult"],
    ) -> None:
        """Update the state in SearchStrategy with profiling results.

        Delegated to self.search_strategy.notify_runner_results.

        Parameters
        ----------
        measure_candidates : List[MeasureCandidate]
            The measure candidates for update.
        results : List[RunnerResult]
            The profiling results from the runner.
        """
        if self.search_strategy is None:
            raise ValueError(
                "search_strategy is not provided."
                "Please construct TuneContext with search_strategy"
            )
        return self.search_strategy.notify_runner_results(measure_candidates, results)

    def clone(self) -> "TuneContext":
        """Clone the TuneContext.

        Returns
        -------
        cloned_context : TuneContext
            The cloned TuneContext.
        """
        return _ffi_api.TuneContextClone(self) # type: ignore # pylint: disable=no-member
| 10,916 | 36.90625 | 94 | py |
tvm | tvm-main/python/tvm/meta_schedule/profiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=used-before-assignment
"""A context manager that profiles tuning time cost for different parts."""
from contextlib import contextmanager
from typing import Dict, Optional
from tvm._ffi import register_object
from tvm.runtime import Object
from . import _ffi_api
@register_object("meta_schedule.Profiler")
class Profiler(Object):
    """Tuning time profiler.

    Use as a context manager; the accumulated results are retrieved via
    `get` (seconds per entry) or `table` (formatted string).
    """

    def __init__(self) -> None:
        self.__init_handle_by_constructor__(
            _ffi_api.Profiler,  # type: ignore # pylint: disable=no-member
        )

    def get(self) -> Dict[str, float]:
        """Get the profiling results in seconds"""
        return _ffi_api.ProfilerGet(self)  # type: ignore # pylint: disable=no-member

    def table(self) -> str:
        """Get the profiling results in a table format"""
        return _ffi_api.ProfilerTable(self)  # type: ignore # pylint: disable=no-member

    def __enter__(self) -> "Profiler":
        """Entering the scope of the context manager"""
        _ffi_api.ProfilerEnterWithScope(self)  # type: ignore # pylint: disable=no-member
        return self

    def __exit__(self, ptype, value, trace) -> None:
        """Exiting the scope of the context manager"""
        _ffi_api.ProfilerExitWithScope(self)  # type: ignore # pylint: disable=no-member

    @staticmethod
    def current() -> Optional["Profiler"]:
        """Get the current profiler."""
        return _ffi_api.ProfilerCurrent()  # type: ignore # pylint: disable=no-member

    @staticmethod
    def timeit(name: str):
        """Timeit a block of code, attributing its cost to `name`.

        Returns a context manager: `with Profiler.timeit("phase"): ...`
        """

        @contextmanager
        def _timeit():
            # Pre-initialize `f` so the `finally` clause is well-defined even
            # if ProfilerTimedScope raises. Previously, `f` was only assigned
            # inside the `try`, so a failure there triggered a NameError in
            # `finally` that masked the original exception (the file-level
            # `pylint: disable=used-before-assignment` papered over this).
            f = None
            try:
                f = _ffi_api.ProfilerTimedScope(name)  # type: ignore # pylint: disable=no-member
                yield
            finally:
                if f:
                    f()

        return _timeit()
| 2,636 | 35.123288 | 97 | py |
tvm | tvm-main/python/tvm/meta_schedule/search_strategy/evolutionary_search.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Evolutionary Search Strategy"""
from tvm._ffi import register_object
from .. import _ffi_api
from .search_strategy import SearchStrategy
@register_object("meta_schedule.EvolutionarySearch")
class EvolutionarySearch(SearchStrategy):
    """
    Evolutionary search strategy. It seeds an initial population of traces
    from measured samples and randomly generated samples, evolves the
    population over several genetic-algorithm iterations via mutation, and
    finally picks candidates eps-greedily.

    Parameters
    ----------
    population_size : int
        The initial population of traces from measured samples and randomly generated samples.
    init_measured_ratio : float
        The ratio of measured samples in the initial population.
    init_min_unmeasured : int
        The minimal size of unmeasured population in the initial sampling.
    max_fail_count : int
        The maximum number of failure during initial sampling.
    genetic_num_iters : int
        The number of iterations for genetic algorithm.
    genetic_mutate_prob : float
        The probability of mutation.
    genetic_max_fail_count : int
        The maximum number to retry mutation.
    eps_greedy : float
        The ratio of greedy selected samples in the final picks.
    """

    population_size: int
    init_measured_ratio: float
    init_min_unmeasured: int
    max_fail_count: int
    genetic_num_iters: int
    genetic_mutate_prob: float
    genetic_max_fail_count: int
    eps_greedy: float

    def __init__(
        self,
        *,
        population_size: int = 512,
        init_measured_ratio: float = 0.2,
        init_min_unmeasured: int = 50,
        max_fail_count: int = 5,
        genetic_num_iters: int = 4,
        genetic_mutate_prob: float = 0.85,
        genetic_max_fail_count: int = 10,
        eps_greedy: float = 0.05,
    ) -> None:
        """Constructor"""
        # Argument order must match the C++ constructor's parameter order.
        self.__init_handle_by_constructor__(
            _ffi_api.SearchStrategyEvolutionarySearch, # type: ignore # pylint: disable=no-member
            population_size,
            init_measured_ratio,
            init_min_unmeasured,
            max_fail_count,
            genetic_num_iters,
            genetic_mutate_prob,
            genetic_max_fail_count,
            eps_greedy,
        )
| 2,954 | 35.036585 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/search_strategy/search_strategy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Meta Schedule search strategy that generates the measure
candidates for measurement.
"""
from typing import TYPE_CHECKING, Callable, List, Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.tir.schedule import Schedule
from .. import _ffi_api
from ..arg_info import ArgInfo
from ..runner import RunnerResult
if TYPE_CHECKING:
from ..cost_model import CostModel
from ..database import Database
from ..tune_context import TuneContext
@register_object("meta_schedule.MeasureCandidate")
class MeasureCandidate(Object):
    """A candidate produced by a search strategy, to be sent for measurement.

    Parameters
    ----------
    sch : tvm.tir.Schedule
        The schedule to be measured.
    args_info : List[ArgInfo]
        The argument information.
    """

    sch: Schedule
    args_info: List[ArgInfo]

    def __init__(self, sch: Schedule, args_info: List[ArgInfo]) -> None:
        """Build a measure candidate from a schedule and its argument info.

        Parameters
        ----------
        sch : tvm.tir.Schedule
            The schedule to be measured.
        args_info : List[ArgInfo]
            The argument information.
        """
        self.__init_handle_by_constructor__(
            _ffi_api.MeasureCandidate,  # type: ignore # pylint: disable=no-member
            sch,
            args_info,
        )
@register_object("meta_schedule.SearchStrategy")
class SearchStrategy(Object):
    """Base class of all search strategies, which generate the measure candidates."""

    # A strategy may be given either as an instance or as a shorthand string.
    SearchStrategyType = Union[
        "SearchStrategy",
        Literal[
            "replay-func",
            "replay-trace",
            "evolutionary",
        ],
    ]

    def _initialize_with_tune_context(self, context: "TuneContext") -> None:
        """Initialize the search strategy with a tuning context.

        Parameters
        ----------
        context : TuneContext
            The tuning context for initialization.
        """
        _ffi_api.SearchStrategyInitializeWithTuneContext(  # type: ignore # pylint: disable=no-member
            self, context
        )

    def pre_tuning(
        self,
        max_trials: int,
        num_trials_per_iter: int,
        design_spaces: List[Schedule],
        database: Optional["Database"] = None,
        cost_model: Optional["CostModel"] = None,
    ) -> None:
        """Prepare the strategy before the tuning loop starts.

        Parameters
        ----------
        max_trials : int
            The maximum number of trials.
        num_trials_per_iter : int
            The number of trials per iteration.
        design_spaces : List[tvm.tir.Schedule]
            The design spaces used during the tuning process.
        database : Optional[Database] = None
            The database used during the tuning process.
        cost_model : Optional[CostModel] = None
            The cost model used during the tuning process.
        """
        _ffi_api.SearchStrategyPreTuning(  # type: ignore # pylint: disable=no-member
            self, max_trials, num_trials_per_iter, design_spaces, database, cost_model
        )

    def post_tuning(self) -> None:
        """Clean the strategy up after the tuning loop finishes."""
        _ffi_api.SearchStrategyPostTuning(self)  # type: ignore # pylint: disable=no-member

    def generate_measure_candidates(self) -> Optional[List[MeasureCandidate]]:
        """Generate a batch of measure candidates from the design spaces.

        Returns
        -------
        measure_candidates : Optional[List[IRModule]]
            The measure candidates generated, or None if the search is finished.
        """
        return _ffi_api.SearchStrategyGenerateMeasureCandidates(self)  # type: ignore # pylint: disable=no-member

    def notify_runner_results(
        self,
        measure_candidates: List[MeasureCandidate],
        results: List[RunnerResult],
    ) -> None:
        """Feed profiling results back into the search strategy.

        Parameters
        ----------
        measure_candidates : List[MeasureCandidate]
            The measure candidates for update.
        results : List[RunnerResult]
            The profiling results from the runner.
        """
        _ffi_api.SearchStrategyNotifyRunnerResults(  # type: ignore # pylint: disable=no-member
            self, measure_candidates, results
        )

    def clone(self) -> "SearchStrategy":
        """Return a clone of this search strategy.

        Returns
        -------
        cloned : SearchStrategy
            The cloned search strategy.
        """
        return _ffi_api.SearchStrategyClone(self)  # type: ignore # pylint: disable=no-member

    @staticmethod
    def create(  # pylint: disable=keyword-arg-before-vararg
        kind: Literal[
            "evolutionary",
            "replay-trace",
            "replay-func",
        ] = "evolutionary",
        *args,
        **kwargs,
    ) -> "SearchStrategy":
        """Create a search strategy."""
        from . import (  # pylint: disable=import-outside-toplevel
            EvolutionarySearch,
            ReplayFunc,
            ReplayTrace,
        )

        # Dispatch the shorthand name to the corresponding constructor.
        constructors = {
            "evolutionary": EvolutionarySearch,
            "replay-trace": ReplayTrace,
            "replay-func": ReplayFunc,
        }
        if kind not in constructors:
            raise ValueError(f"Unknown SearchStrategy: {kind}")
        return constructors[kind](*args, **kwargs)  # type: ignore
# Module-level convenience alias so callers can write
# `search_strategy.create(...)` without naming the class.
create = SearchStrategy.create # pylint: disable=invalid-name
@register_object("meta_schedule.PySearchStrategy")
class _PySearchStrategy(SearchStrategy):
    """
    A TVM object search strategy to support customization on the python side.
    This is NOT the user facing class for function overloading inheritance.
    See also: PySearchStrategy
    """

    def __init__(
        self,
        f_initialize_with_tune_context: Callable = None,
        f_pre_tuning: Callable = None,
        f_post_tuning: Callable = None,
        f_generate_measure_candidates: Callable = None,
        f_notify_runner_results: Callable = None,
        f_clone: Callable = None,
    ):
        """Constructor.

        Parameters
        ----------
        f_initialize_with_tune_context : Callable
            Python callback backing `_initialize_with_tune_context`.
        f_pre_tuning : Callable
            Python callback backing `pre_tuning`.
        f_post_tuning : Callable
            Python callback backing `post_tuning`.
        f_generate_measure_candidates : Callable
            Python callback backing `generate_measure_candidates`.
        f_notify_runner_results : Callable
            Python callback backing `notify_runner_results`.
        f_clone : Callable
            Python callback backing `clone`.
        """
        # All callbacks are forwarded verbatim to the FFI constructor.
        self.__init_handle_by_constructor__(
            _ffi_api.SearchStrategyPySearchStrategy,  # type: ignore # pylint: disable=no-member
            f_initialize_with_tune_context,
            f_pre_tuning,
            f_post_tuning,
            f_generate_measure_candidates,
            f_notify_runner_results,
            f_clone,
        )
class PySearchStrategy:
    """
    An abstract search strategy with customized methods on the python-side.
    This is the user facing class for function overloading inheritance.
    Note: @derived_object is required for proper usage of any inherited class.
    """

    # Consumed when the subclass is wrapped via @derived_object: `cls` names
    # the FFI-backed wrapper class and `methods` lists the overridable methods
    # forwarded to it.
    _tvm_metadata = {
        "cls": _PySearchStrategy,
        "methods": [
            "_initialize_with_tune_context",
            "pre_tuning",
            "post_tuning",
            "generate_measure_candidates",
            "notify_runner_results",
            "clone",
        ],
    }

    def _initialize_with_tune_context(self, context: "TuneContext") -> None:
        """Initialize the search strategy with tuning context.

        Parameters
        ----------
        context : TuneContext
            The tuning context for initialization.
        """
        raise NotImplementedError

    def pre_tuning(
        self,
        max_trials: int,
        num_trials_per_iter: int,
        design_spaces: List[Schedule],
        database: Optional["Database"] = None,
        cost_model: Optional["CostModel"] = None,
    ) -> None:
        """Pre-tuning for the search strategy.

        Parameters
        ----------
        max_trials : int
            The maximum number of trials.
        num_trials_per_iter : int
            The number of trials per iteration.
        design_spaces : List[Schedule]
            The design spaces for pre-tuning.
        database : Optional[Database] = None
            The database used during the tuning process.
        cost_model : Optional[CostModel] = None
            The cost model used during the tuning process.
        """
        raise NotImplementedError

    def post_tuning(self) -> None:
        """Post-tuning for the search strategy."""
        raise NotImplementedError

    def generate_measure_candidates(self) -> Optional[List[MeasureCandidate]]:
        """Generate measure candidates from design spaces for measurement.

        Returns
        -------
        measure_candidates : Optional[List[IRModule]]
            The measure candidates generated, None if finished.
        """
        raise NotImplementedError

    def notify_runner_results(
        self,
        measure_candidates: List[MeasureCandidate],
        results: List[RunnerResult],
    ) -> None:
        """Update the search strategy with profiling results.

        Parameters
        ----------
        measure_candidates : List[MeasureCandidate]
            The measure candidates for update.
        results : List[RunnerResult]
            The profiling results from the runner.
        """
        raise NotImplementedError

    def clone(self) -> SearchStrategy:
        """Clone the search strategy.

        Returns
        -------
        strategy : SearchStrategy
            The cloned search strategy.
        """
        raise NotImplementedError
| 9,872 | 29.472222 | 113 | py |
tvm | tvm-main/python/tvm/meta_schedule/search_strategy/replay_trace.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Replay Trace Search Strategy"""
from tvm._ffi import register_object
from .. import _ffi_api
from .search_strategy import SearchStrategy
@register_object("meta_schedule.ReplayTrace")
class ReplayTrace(SearchStrategy):
    """A strategy that replays traces from the design space after removing
    their decisions, so each replay re-generates those decisions at random.

    Parameters
    ----------
    max_fail_count : int
        Max number of failures during trace replaying.
    """

    max_fail_count: int

    def __init__(self, max_fail_count: int = 100):
        """Construct the strategy with the given failure budget."""
        self.__init_handle_by_constructor__(
            _ffi_api.SearchStrategyReplayTrace,  # type: ignore # pylint: disable=no-member
            max_fail_count,
        )
| 1,582 | 34.977273 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/search_strategy/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.search_strategy package.
Meta Schedule search strategy utilizes the design spaces given
to generate measure candidates.
"""
from .evolutionary_search import EvolutionarySearch
from .replay_func import ReplayFunc
from .replay_trace import ReplayTrace
from .search_strategy import MeasureCandidate, PySearchStrategy, SearchStrategy, create
| 1,150 | 41.62963 | 87 | py |
tvm | tvm-main/python/tvm/meta_schedule/search_strategy/replay_func.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Replay Func Search Strategy"""
from tvm._ffi import register_object
from .. import _ffi_api
from .search_strategy import SearchStrategy
@register_object("meta_schedule.ReplayFunc")
class ReplayFunc(SearchStrategy):
    """
    Replay Func Search Strategy is a search strategy that generates measure candidates by
    calling a design space generator and transform the design space.

    Note
    ----
    This strategy takes no constructor parameters; trial counts are supplied
    by the tuning driver through `pre_tuning`.
    """

    def __init__(self):
        """Constructor"""
        self.__init_handle_by_constructor__(
            _ffi_api.SearchStrategyReplayFunc, # type: ignore # pylint: disable=no-member
        )
| 1,554 | 35.162791 | 90 | py |
tvm | tvm-main/python/tvm/meta_schedule/postproc/disallow_async_strided_mem_copy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that checks if the IRModule has any strided memory copies"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.DisallowAsyncStridedMemCopy")
class DisallowAsyncStridedMemCopy(Postproc):
    """A postprocessor rejecting schedules that perform async strided memory copies."""

    def __init__(self) -> None:
        """Constructor."""
        ctor = _ffi_api.PostprocDisallowAsyncStridedMemCopy  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor)
| 1,352 | 41.28125 | 101 | py |
tvm | tvm-main/python/tvm/meta_schedule/postproc/postproc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta Schedule Postproc."""
from typing import TYPE_CHECKING, Callable, List
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.tir.schedule import Schedule
from .. import _ffi_api
from ..utils import _get_default_str
if TYPE_CHECKING:
from ..tune_context import TuneContext
@register_object("meta_schedule.Postproc")
class Postproc(Object):
    """Rules to apply a postprocessor to a schedule."""
    def _initialize_with_tune_context(self, context: "TuneContext") -> None:
        """Initialize the postprocessor with a tune context.
        Parameters
        ----------
        context : TuneContext
            The tuning context for initializing the postprocessor.
        """
        _ffi_api.PostprocInitializeWithTuneContext(  # type: ignore # pylint: disable=no-member
            self, context
        )
    def apply(self, sch: Schedule) -> bool:
        """Apply a postprocessor to the given schedule.
        Parameters
        ----------
        sch : tvm.tir.Schedule
            The schedule to be post processed.
        Returns
        -------
        result : bool
            Whether the postprocessor was successfully applied.
        """
        return _ffi_api.PostprocApply(self, sch)  # type: ignore # pylint: disable=no-member
    def clone(self) -> "Postproc":
        """Clone the postprocessor.
        Returns
        -------
        cloned_postproc : Postproc
            The cloned postprocessor.
        """
        return _ffi_api.PostprocClone(self)  # type: ignore # pylint: disable=no-member
    @staticmethod
    def create(kind: Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"]) -> List["Postproc"]:
        """Create a list of default postprocessors.
        Parameters
        ----------
        kind : Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"]
            The kind of the postprocessors.
        Returns
        -------
        postprocs : List[Postproc]
            The list of postprocessors.
        """
        funcs = {
            # pylint: disable=no-member
            "llvm": _ffi_api.PostprocDefaultLLVM,  # type: ignore
            "cuda": _ffi_api.PostprocDefaultCUDA,  # type: ignore
            "cuda-tensorcore": _ffi_api.PostprocDefaultCUDATensorCore,  # type: ignore
            "hexagon": _ffi_api.PostprocDefaultHexagon,  # type: ignore
            # pylint: enable=no-member
        }
        # Direct dict lookup instead of a linear scan over funcs.items().
        if kind in funcs:
            return funcs[kind]()
        raise ValueError(f"Unsupported kind {kind} for postproc creation.")
# Module-level convenience alias: `postproc.create(...)` is `Postproc.create(...)`.
create = Postproc.create  # pylint: disable=invalid-name
@register_object("meta_schedule.PyPostproc")
class _PyPostproc(Postproc):
    """
    A TVM object post processor to support customization on the python side.
    This is NOT the user facing class for function overloading inheritance.
    See also: PyPostproc
    """

    def __init__(
        self,
        f_initialize_with_tune_context: Callable = None,
        f_apply: Callable = None,
        f_clone: Callable = None,
        f_as_string: Callable = None,
    ):
        """Constructor."""
        # Collect the python-side callbacks and hand them to the FFI constructor.
        callbacks = (
            f_initialize_with_tune_context,
            f_apply,
            f_clone,
            f_as_string,
        )
        self.__init_handle_by_constructor__(
            _ffi_api.PostprocPyPostproc,  # type: ignore # pylint: disable=no-member
            *callbacks,
        )
class PyPostproc:
    """
    An abstract post processor with customized methods on the python-side.
    This is the user facing class for function overloading inheritance.
    Note: @derived_object is required for proper usage of any inherited class.
    """
    # Metadata consumed by @derived_object: the backing TVM object class and the
    # method names forwarded to the C++ side as packed-func callbacks.
    _tvm_metadata = {
        "cls": _PyPostproc,
        "methods": ["_initialize_with_tune_context", "apply", "clone", "__str__"],
    }
    def _initialize_with_tune_context(self, context: "TuneContext") -> None:
        """Initialize the postprocessor with a tune context.
        Parameters
        ----------
        context : TuneContext
            The tuning context for initializing the postprocessor.
        """
        raise NotImplementedError
    def apply(self, sch: Schedule) -> bool:
        """Apply a postprocessor to the given schedule.
        Parameters
        ----------
        sch : Schedule
            The schedule to be post processed.
        Returns
        -------
        result : bool
            Whether the postprocessor was successfully applied.
        """
        raise NotImplementedError
    def clone(self) -> "Postproc":
        """Clone the postprocessor.
        Returns
        -------
        cloned_postproc : Postproc
            The cloned postprocessor.
        """
        raise NotImplementedError
    def __str__(self) -> str:
        """Get the post processor as string with name.
        Return
        ------
        result : str
            Get the post processor as string with name.
        """
        return _get_default_str(self)
| 5,802 | 29.223958 | 96 | py |
tvm | tvm-main/python/tvm/meta_schedule/postproc/rewrite_parallel_vectorize_unroll.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that applies parallelization, vectorization and auto unrolling
according to the annotation of each block"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.RewriteParallelVectorizeUnroll")
class RewriteParallelVectorizeUnroll(Postproc):
    """A postprocessor applying parallelization, vectorization and auto unrolling
    as dictated by each block's annotations."""

    def __init__(self) -> None:
        """Constructor."""
        ctor = _ffi_api.PostprocRewriteParallelVectorizeUnroll  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor)
| 1,457 | 41.882353 | 104 | py |
tvm | tvm-main/python/tvm/meta_schedule/postproc/verify_vtcm_limit.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that verifies the VTCM usage of a given schedule."""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.VerifyVTCMLimit")
class VerifyVTCMLimit(Postproc):
    """Checks that a schedule's VTCM usage stays within the configured limit."""

    def __init__(self) -> None:
        """Constructor."""
        ctor = _ffi_api.PostprocVerifyVTCMLimit  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor)
| 1,311 | 40 | 89 | py |
tvm | tvm-main/python/tvm/meta_schedule/postproc/disallow_dynamic_loop.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that checks if the IRModule has any loop with non-constant extent"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.DisallowDynamicLoop")
class DisallowDynamicLoop(Postproc):
    """Rejects IRModules containing a loop whose extent is not a constant."""

    def __init__(self) -> None:
        """Constructor."""
        ctor = _ffi_api.PostprocDisallowDynamicLoop  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor)
| 1,342 | 40.96875 | 93 | py |
tvm | tvm-main/python/tvm/meta_schedule/postproc/verify_gpu_code.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that verifies if the GPU code is correct"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.VerifyGPUCode")
class VerifyGPUCode(Postproc):
    """Verifies that the generated GPU code is correct."""

    def __init__(self) -> None:
        """Constructor."""
        ctor = _ffi_api.PostprocVerifyGPUCode  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor)
| 1,274 | 38.84375 | 87 | py |
tvm | tvm-main/python/tvm/meta_schedule/postproc/rewrite_unbound_block.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that adds thread binding to unbound blocks"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.RewriteUnboundBlock")
class RewriteUnboundBlock(Postproc):
    """Adds thread bindings to blocks that are left unbound."""

    def __init__(self, max_threadblocks: int = 256) -> None:
        """Constructor.

        Parameters
        ----------
        max_threadblocks : int
            Cap on the number of thread blocks (enforced on the C++ side).
        """
        ctor = _ffi_api.PostprocRewriteUnboundBlock  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor, max_threadblocks)
| 1,356 | 38.911765 | 93 | py |
tvm | tvm-main/python/tvm/meta_schedule/postproc/rewrite_tensorize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that tensorize related components."""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.RewriteTensorize")
class RewriteTensorize(Postproc):
    """A postprocessor that applies tensorization to annotated blocks.
    Parameters
    ----------
    vectorize_init_loop : bool
        Whether or not vectorize the initialization loop produced by DecomposeReduction
    """
    def __init__(self, vectorize_init_loop: bool = False) -> None:
        """Constructor.

        Parameters
        ----------
        vectorize_init_loop : bool
            Whether or not vectorize the initialization loop produced by
            DecomposeReduction.
        """
        self.__init_handle_by_constructor__(
            _ffi_api.PostprocRewriteTensorize,  # type: ignore # pylint: disable=no-member
            vectorize_init_loop,
        )
| 1,498 | 37.435897 | 90 | py |
tvm | tvm-main/python/tvm/meta_schedule/postproc/rewrite_reduction_block.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that rewrites reduction block by moving the init block out."""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.RewriteReductionBlock")
class RewriteReductionBlock(Postproc):
    """Rewrites a reduction block by hoisting its init block out."""

    def __init__(self) -> None:
        """Constructor."""
        ctor = _ffi_api.PostprocRewriteReductionBlock  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor)
| 1,336 | 40.78125 | 95 | py |
tvm | tvm-main/python/tvm/meta_schedule/postproc/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The tvm.meta_schedule.postproc package."""
from .disallow_dynamic_loop import DisallowDynamicLoop
from .disallow_async_strided_mem_copy import DisallowAsyncStridedMemCopy
from .postproc import Postproc, PyPostproc
from .rewrite_cooperative_fetch import RewriteCooperativeFetch
from .rewrite_layout import RewriteLayout
from .rewrite_parallel_vectorize_unroll import RewriteParallelVectorizeUnroll
from .rewrite_reduction_block import RewriteReductionBlock
from .rewrite_tensorize import RewriteTensorize
from .rewrite_unbound_block import RewriteUnboundBlock
from .verify_gpu_code import VerifyGPUCode
from .verify_vtcm_limit import VerifyVTCMLimit
| 1,437 | 48.586207 | 77 | py |
tvm | tvm-main/python/tvm/meta_schedule/postproc/rewrite_layout.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that rewrites the layout of input tensor"""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.RewriteLayout")
class RewriteLayout(Postproc):
    """Rewrites the layout of the input tensor."""

    def __init__(self) -> None:
        """Constructor."""
        ctor = _ffi_api.PostprocRewriteLayout  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor)
| 1,275 | 37.666667 | 87 | py |
tvm | tvm-main/python/tvm/meta_schedule/postproc/rewrite_cooperative_fetch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A postprocessor that rewrites the cooperative fetch annotation to actual
vectorized cooperative fetching in loop bindings."""
from tvm._ffi.registry import register_object
from .. import _ffi_api
from .postproc import Postproc
@register_object("meta_schedule.RewriteCooperativeFetch")
class RewriteCooperativeFetch(Postproc):
    """Turns cooperative-fetch annotations into actual vectorized cooperative
    fetching in loop bindings."""

    def __init__(self) -> None:
        """Constructor."""
        ctor = _ffi_api.PostprocRewriteCooperativeFetch  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor)
| 1,445 | 40.314286 | 97 | py |
tvm | tvm-main/python/tvm/meta_schedule/database/json_database.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The default database that uses a JSON File to store tuning records"""
import os.path as osp
from typing import Optional
from tvm._ffi import register_object
from .. import _ffi_api
from .database import Database
@register_object("meta_schedule.JSONDatabase")
class JSONDatabase(Database):
    """Database class backed by JSON files.

    Parameters
    ----------
    path_workload : str
        The path to the workload table.
    path_tuning_record : str
        The path to the tuning record table.
    module_equality : str
        The module equality testing and hashing method, one of:
        - "structural": use StructuralEqual/Hash.
        - "ignore-ndarray": like "structural", but ndarray raw data is ignored
          during equality testing and hashing.
        - "anchor-block": equality testing and hashing are applied to the anchor
          block extracted from the given module; the "ignore-ndarray" variant is
          used for the extracted blocks or when no anchor block is found.
        For the definition of the anchor block, see tir/analysis/analysis.py.
    """

    # Path of the JSON file holding the workload table.
    path_workload: str
    # Path of the JSON file holding the tuning-record table.
    path_tuning_record: str

    def __init__(
        self,
        path_workload: Optional[str] = None,
        path_tuning_record: Optional[str] = None,
        *,
        work_dir: Optional[str] = None,
        allow_missing: bool = True,
        module_equality: str = "structural",
    ) -> None:
        """Constructor.

        Parameters
        ----------
        path_workload : Optional[str] = None
            The path to the workload table; defaults to
            `$work_dir/database_workload.json` when `work_dir` is given.
        path_tuning_record : Optional[str] = None
            The path to the tuning record table; defaults to
            `$work_dir/database_tuning_record.json` when `work_dir` is given.
        work_dir : Optional[str] = None
            The work directory used to derive any unspecified path.
        allow_missing : bool
            Whether to create a new file when the given path is not found.
        module_equality : str
            The module equality testing and hashing method.
        """
        # Derive missing paths from work_dir, then insist both are resolved.
        if path_workload is None and work_dir is not None:
            path_workload = osp.join(work_dir, "database_workload.json")
        if path_workload is None:
            raise ValueError("`path_workload` is not specified.")
        if path_tuning_record is None and work_dir is not None:
            path_tuning_record = osp.join(work_dir, "database_tuning_record.json")
        if path_tuning_record is None:
            raise ValueError("`path_tuning_record` is not specified.")
        self.__init_handle_by_constructor__(
            _ffi_api.DatabaseJSONDatabase,  # type: ignore # pylint: disable=no-member
            path_workload,
            path_tuning_record,
            allow_missing,
            module_equality,
        )
| 3,827 | 40.16129 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/database/memory_database.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A database that stores TuningRecords in memory"""
from tvm._ffi import register_object
from .. import _ffi_api
from .database import Database
@register_object("meta_schedule.MemoryDatabase")
class MemoryDatabase(Database):
    """An in-memory database.

    Parameters
    ----------
    module_equality : str
        The module equality testing and hashing method, one of:
        - "structural": use StructuralEqual/Hash.
        - "ignore-ndarray": like "structural", but ndarray raw data is ignored
          during equality testing and hashing.
        - "anchor-block": equality testing and hashing are applied to the anchor
          block extracted from the given module; the "ignore-ndarray" variant is
          used for the extracted blocks or when no anchor block is found.
        For the definition of the anchor block, see tir/analysis/analysis.py.
    """

    def __init__(
        self,
        module_equality: str = "structural",
    ) -> None:
        """Constructor."""
        ctor = _ffi_api.DatabaseMemoryDatabase  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor, module_equality)
| 2,067 | 40.36 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/database/schedule_fn_database.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A database for injecting handcrafted schedule functions."""
from typing import Callable
from tvm._ffi import register_object
from tvm.tir import Schedule
from .. import _ffi_api
from .database import Database
@register_object("meta_schedule.ScheduleFnDatabase")
class ScheduleFnDatabase(Database):
    """A database for injecting handcrafted schedule functions.

    Parameters
    ----------
    schedule_fn : Callable[[Schedule], bool]
        The scheduling function; it receives a TIR schedule and returns a
        boolean telling whether the schedule is committed to the database.
    module_equality : str
        The module equality testing and hashing method, one of:
        - "structural": use StructuralEqual/Hash.
        - "ignore-ndarray": like "structural", but ndarray raw data is ignored
          during equality testing and hashing.
        - "anchor-block": equality testing and hashing are applied to the anchor
          block extracted from the given module; the "ignore-ndarray" variant is
          used for the extracted blocks or when no anchor block is found.
        For the definition of the anchor block, see tir/analysis/analysis.py.
    """

    def __init__(
        self,
        schedule_fn: Callable[[Schedule], bool],
        module_equality: str = "structural",
    ) -> None:
        """Constructor."""
        ctor = _ffi_api.DatabaseScheduleFnDatabase  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor, schedule_fn, module_equality)
| 2,455 | 41.344828 | 99 | py |
tvm | tvm-main/python/tvm/meta_schedule/database/union_database.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A database consists of multiple databases."""
from tvm._ffi import register_object
from .. import _ffi_api
from .database import Database
@register_object("meta_schedule.UnionDatabase")
class UnionDatabase(Database):
    """A database built from several underlying databases; each query is
    answered with the best record found among all of them, letting users guide
    IR rewriting with the databases' combined knowledge.

    Examples
    --------
    The examples below demonstrate the use cases of, and differences between,
    UnionDatabase and OrderedUnionDatabase.

    Assumption:
    * db1, db2 do not have tuning records for the target workload.
    * Each of db3, db4, db5 has tuning records r3, r4, r5 for target workload respectively.

    .. code-block:: python

        #### Case 1. `UnionDatabase`:
        merged_db = ms.database.UnionDatabase(
            db1, # no record
            db2, # no record
            db3, # has r3
            db4  # has r4
        )
        # returns the better one between r3 and r4
        merged_db.query_tuning_record(..., target_workload)

        ### Case 2. `OrderedUnionDatabase`
        merged_db = ms.database.OrderedUnionDatabase(
            db1, # no record
            db2, # no record
            db3, # has r3
            db4  # has r4
        )
        # returns r3
        merged_db.query_tuning_record(..., target_workload)

        ### Case 3. Mix-use scenario
        merged_db = ms.database.UnionDatabase(
            db1, # no record
            db2, # no record
            db3, # has r3
            ms.database.OrderedUnionDatabase( # returns r4
                db4, # has r4
                db5, # has r5
            )
        )
        # returns the better one between r3 and r4
        merged_db.query_tuning_record(..., target_workload)

        ### Case 4. Another mix-use scenario
        merged_db = ms.database.UnionDatabase(
            db1, # no record
            db2, # no record
            db3, # has r3
            ms.database.UnionDatabase( # returns best one between r4 and r5
                db4, # has r4
                db5, # has r5
            )
        )
        # returns the best one among r3, r4 and r5
        merged_db.query_tuning_record(..., target_workload)

        ### Case 5. Yet another mix-use scenario
        merged_db = ms.database.OrderedUnionDatabase(
            db1, # no record
            db2, # no record
            ms.database.UnionDatabase( # returns best one between r3 and r4
                db3, # has r3
                db4, # has r4
            )
            db5, # has r5
        )
        # returns the better one between r3 and r4
        merged_db.query_tuning_record(..., target_workload)
    """

    def __init__(self, *databases: Database) -> None:
        """Construct a merged database from multiple databases.

        Parameters
        ----------
        *databases : Database
            The databases to combine.
        """
        ctor = _ffi_api.DatabaseUnionDatabase  # type: ignore # pylint: disable=no-member
        self.__init_handle_by_constructor__(ctor, databases)
| 3,715 | 31.884956 | 98 | py |
tvm | tvm-main/python/tvm/meta_schedule/database/database.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TuningRecord database"""
from typing import Any, Callable, List, Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
from tvm._ffi import register_object
from tvm.ir.module import IRModule
from tvm.runtime import Object
from tvm.target import Target
from tvm.tir.schedule import Schedule, Trace
from .. import _ffi_api
from ..arg_info import ArgInfo
from ..utils import _json_de_tvm
@register_object("meta_schedule.Workload")
class Workload(Object):
    """A workload, i.e. an IRModule and its structural hash.
    Parameters
    ----------
    mod : IRModule
        The workload's IRModule
    """
    # The IRModule this workload wraps (exposed by the underlying TVM object).
    mod: IRModule
    def __init__(self, mod: IRModule) -> None:
        """Construct a workload from an IRModule via the C++-side constructor."""
        self.__init_handle_by_constructor__(
            _ffi_api.Workload,  # type: ignore # pylint: disable=no-member
            mod,
        )
    def as_json(self) -> Any:
        """Export the workload to JSON as a python object.
        Returns
        -------
        json : Any
            The JSON serialized as a python object (e.g. a Dict or List).
            Use json.dumps() to get the associated json string.
        """
        # FFI returns TVM container objects; _json_de_tvm converts them to
        # plain python dict/list/str so json.dumps can serialize them.
        return _json_de_tvm(_ffi_api.WorkloadAsJSON(self))  # type: ignore # pylint: disable=no-member
    @staticmethod
    def from_json(json_obj: Any) -> "Workload":
        """Create a workload from a json object.
        Parameters
        ----------
        json_obj : Any
            The json object to parse (as produced by json.loads()).
        Returns
        -------
        workload : Workload
            The parsed workload.
        """
        return _ffi_api.WorkloadFromJSON(json_obj)  # type: ignore # pylint: disable=no-member
@register_object("meta_schedule.TuningRecord")
class TuningRecord(Object):
    """The class of tuning records.
    Parameters
    ----------
    trace : tvm.tir.schedule.Trace
        The trace of the tuning record.
    workload : Workload
        The workload of the tuning record.
    run_secs : Optional[List[float]]
        The run time of the tuning record.
    target : Optional[Target]
        The target of the tuning record.
    args_info : Optional[List[ArgInfo]]
        The argument information of the tuning record.
    """
    trace: Trace
    workload: Workload
    run_secs: Optional[List[float]]
    target: Optional[Target]
    args_info: Optional[List[ArgInfo]]
    def __init__(  # type: ignore # pylint: disable=too-many-arguments
        self,
        trace: Trace,
        workload: Workload,
        run_secs: Optional[List[float]] = None,
        target: Optional[Target] = None,
        args_info: Optional[List[ArgInfo]] = None,
    ) -> None:
        """Construct a tuning record; delegates to the C++-side constructor.
        Positional order must match the FFI signature below.
        """
        self.__init_handle_by_constructor__(
            _ffi_api.TuningRecord,  # type: ignore # pylint: disable=no-member
            trace,
            workload,
            run_secs,
            target,
            args_info,
        )
    def as_measure_candidate(self) -> Any:
        """Generate a measure candidate given an initial IR module and a trace
        stored in the tuning record.
        Returns
        -------
        candidate : MeasureCandidate
            A generated candidate.
        """
        return _ffi_api.TuningRecordAsMeasureCandidate(self)  # type: ignore # pylint: disable=no-member
    def as_json(self) -> Any:
        """Export the tuning record to JSON as a python object.
        Returns
        -------
        json : Any
            The JSON serialized as a python object (e.g. a Dict or List).
            Use json.dumps() to get the associated json string.
        """
        return _json_de_tvm(_ffi_api.TuningRecordAsJSON(self))  # type: ignore # pylint: disable=no-member
    @staticmethod
    def from_json(json_obj: Any, workload: Workload) -> "TuningRecord":
        """Create a tuning record from a json object.
        Parameters
        ----------
        json_obj : Any
            The json object to parse.
        workload : Workload
            The workload the record belongs to (not stored in the JSON itself).
        Returns
        -------
        tuning_record : TuningRecord
            The parsed tuning record.
        """
        return _ffi_api.TuningRecordFromJSON(json_obj, workload)  # type: ignore # pylint: disable=no-member
@register_object("meta_schedule.Database")
class Database(Object):
    """The abstract database interface.
    A database stores tuning records keyed by workload and answers
    best-record queries for a given IRModule and target.
    """
    # Accepted spellings for a database argument: an instance, or the name of
    # a built-in kind understood by `Database.create`.
    DatabaseType = Union["Database", Literal["json", "memory"]]
    def has_workload(self, mod: IRModule) -> bool:
        """Check if the database has the given workload.
        Parameters
        ----------
        mod : IRModule
            The IRModule to be searched for.
        Returns
        -------
        result : bool
            Whether the database has the given workload.
        """
        return _ffi_api.DatabaseHasWorkload(self, mod)  # type: ignore # pylint: disable=no-member
    def commit_workload(self, mod: IRModule) -> Workload:
        """Commit a workload to the database if missing.
        Parameters
        ----------
        mod : IRModule
            The IRModule to be searched for or added.
        Returns
        -------
        workload : Workload
            The workload corresponding to the given IRModule.
        """
        return _ffi_api.DatabaseCommitWorkload(self, mod)  # type: ignore # pylint: disable=no-member
    def commit_tuning_record(self, record: TuningRecord) -> None:
        """Commit a tuning record to the database.
        Parameters
        ----------
        record : TuningRecord
            The tuning record to add.
        """
        _ffi_api.DatabaseCommitTuningRecord(self, record)  # type: ignore # pylint: disable=no-member
    def get_top_k(self, workload: Workload, top_k: int) -> List[TuningRecord]:
        """Get the top K valid tuning records of given workload from the database.
        Parameters
        ----------
        workload : Workload
            The workload to be searched for.
        top_k : int
            The number of top records to get.
        Returns
        -------
        top_k_records : List[TuningRecord]
            The top K records (may be fewer than K if the database is small).
        """
        return _ffi_api.DatabaseGetTopK(self, workload, top_k)  # type: ignore # pylint: disable=no-member
    def get_all_tuning_records(self) -> List[TuningRecord]:
        """Get all the tuning records from the database.
        Returns
        -------
        tuning_records : List[TuningRecord]
            All tuning records from the database.
        """
        return _ffi_api.DatabaseGetAllTuningRecords(self)  # type: ignore # pylint: disable=no-member
    def __len__(self) -> int:
        """Get the number of records in the database.
        Returns
        -------
        num_records : int
            The number of records in the database
        """
        return _ffi_api.DatabaseSize(self)  # type: ignore # pylint: disable=no-member
    def query_tuning_record(
        self,
        mod: IRModule,
        target: Target,
        workload_name: str,
    ) -> Optional[TuningRecord]:
        """Query the best record of the given workload from the database.
        Parameters
        ----------
        mod : IRModule
            The IRModule to be searched for.
        target : Target
            The target to be searched for.
        workload_name : str
            The name of the workload to be searched for.
        Returns
        -------
        tuning_record : Optional[TuningRecord]
            The best record of the given workload; None if not found.
        """
        return _ffi_api.DatabaseQueryTuningRecord(self, mod, target, workload_name)  # type: ignore # pylint: disable=no-member
    def query_schedule(
        self,
        mod: IRModule,
        target: Target,
        workload_name: str,
    ) -> Optional[Schedule]:
        """Query the best schedule of the given workload from the database.
        Parameters
        ----------
        mod : IRModule
            The IRModule to be searched for.
        target : Target
            The target to be searched for.
        workload_name : str
            The name of the workload to be searched for.
        Returns
        -------
        schedule : Optional[tvm.tir.Schedule]
            The best schedule of the given workload; None if not found.
        """
        return _ffi_api.DatabaseQuerySchedule(self, mod, target, workload_name)  # type: ignore # pylint: disable=no-member
    def query_ir_module(
        self,
        mod: IRModule,
        target: Target,
        workload_name: str,
    ) -> Optional[IRModule]:
        """Query the best IRModule of the given workload from the database.
        Parameters
        ----------
        mod : IRModule
            The IRModule to be searched for.
        target : Target
            The target to be searched for.
        workload_name : str
            The name of the workload to be searched for.
        Returns
        -------
        ir_module : Optional[IRModule]
            The best IRModule of the given workload; None if not found.
        """
        return _ffi_api.DatabaseQueryIRModule(self, mod, target, workload_name)  # type: ignore # pylint: disable=no-member
    def dump_pruned(self, destination: "Database") -> None:
        """Dump the pruned database to files of JSONDatabase format.
        Parameters
        ----------
        destination : Database
            The destination database to be dumped to.
        """
        # The FFI call returns nothing; `return` kept for byte-compatibility.
        return _ffi_api.DatabaseDumpPruned(  # type: ignore # pylint: disable=no-member
            self, destination
        )
    def query(
        self,
        mod: IRModule,
        target: Target,
        *,
        workload_name: str = "main",
        kind: Union[
            Literal["schedule"],
            Literal["record"],
            Literal["ir_module"],
        ] = "schedule",
    ) -> Union[Schedule, IRModule, TuningRecord]:
        """Query the database to retrieve the best optimization outcome of the given workload.
        Parameters
        ----------
        mod : IRModule
            The IRModule to be searched for.
        target : Target
            The target to be searched for.
        workload_name : str = "main"
            The name of the workload to be searched for.
        kind : str = "schedule" | "record" | "ir_module"
            The kind of the optimization outcome to be returned.
        Returns
        -------
        result : Union[tvm.tir.Schedule, IRModule, TuningRecord]
            The best optimization outcome of the given workload.
        Raises
        ------
        ValueError
            If `kind` is not one of the three supported strings.
        """
        if kind == "schedule":
            return self.query_schedule(mod, target, workload_name)
        if kind == "record":
            return self.query_tuning_record(mod, target, workload_name)
        if kind == "ir_module":
            return self.query_ir_module(mod, target, workload_name)
        raise ValueError(f'Unknown kind: {kind}. Candidates are: "schedule", "record", "ir_module"')
    def __enter__(self) -> "Database":
        """Entering the scope of the context manager"""
        _ffi_api.DatabaseEnterWithScope(self)  # type: ignore # pylint: disable=no-member
        return self
    def __exit__(self, ptype, value, trace) -> None:
        """Exiting the scope of the context manager"""
        _ffi_api.DatabaseExitWithScope(self)  # type: ignore # pylint: disable=no-member
    @staticmethod
    def current() -> Optional["Database"]:
        """Get the current database under scope."""
        return _ffi_api.DatabaseCurrent()  # type: ignore # pylint: disable=no-member
    @staticmethod
    def create(  # pylint: disable=keyword-arg-before-vararg
        kind: Union[
            Literal[
                "json",
                "memory",
                "union",
                "ordered_union",
            ],
            Callable[[Schedule], bool],
        ] = "json",
        *args,
        **kwargs,
    ) -> "Database":
        """Create a Database.
        Parameters
        ----------
        kind : str = "json" | "memory" | "union" | "ordered_union" | Callable[[tvm.tir.Schedule],
        bool]
            The kind of the database to be created. The following kinds are supported:
            "json", "memory", "union", "ordered_union", and a custom schedule function.
            Remaining *args/**kwargs are forwarded to the chosen constructor.
        Returns
        -------
        database : Database
            The created database.
        Raises
        ------
        ValueError
            If `kind` is neither callable nor one of the supported names.
        """
        # Local import avoids a circular dependency between this module and
        # the concrete database implementations in the same package.
        from . import (  # pylint: disable=import-outside-toplevel
            JSONDatabase,
            MemoryDatabase,
            OrderedUnionDatabase,
            ScheduleFnDatabase,
            UnionDatabase,
        )
        if callable(kind):
            return ScheduleFnDatabase(kind, *args, **kwargs)  # type: ignore
        if kind == "json":
            return JSONDatabase(*args, **kwargs)
        if kind == "memory":
            return MemoryDatabase(*args, **kwargs)  # type: ignore
        if kind == "union":
            return UnionDatabase(*args, **kwargs)  # type: ignore
        if kind == "ordered_union":
            return OrderedUnionDatabase(*args, **kwargs)  # type: ignore
        raise ValueError(f"Unknown Database: {kind}")
# Module-level alias so users can call the factory directly, e.g.
# `tvm.meta_schedule.database.create("json", ...)`.
create = Database.create  # pylint: disable=invalid-name
@register_object("meta_schedule.PyDatabase")
class _PyDatabase(Database):
    """
    A TVM object database to support customization on the python side.
    This is NOT the user facing class for function overloading inheritance.
    See also: PyDatabase
    """
    def __init__(
        self,
        # PEP 484: parameters defaulting to None must be annotated Optional
        # (the previous implicit-Optional `Callable = None` form is invalid).
        f_has_workload: Optional[Callable] = None,
        f_commit_workload: Optional[Callable] = None,
        f_commit_tuning_record: Optional[Callable] = None,
        f_get_top_k: Optional[Callable] = None,
        f_get_all_tuning_records: Optional[Callable] = None,
        f_query_tuning_record: Optional[Callable] = None,
        f_query_schedule: Optional[Callable] = None,
        f_query_ir_module: Optional[Callable] = None,
        f_size: Optional[Callable] = None,
        module_equality: str = "structural",
    ):
        """Constructor.
        Parameters
        ----------
        f_has_workload, ..., f_size : Optional[Callable]
            Python callbacks forwarded positionally to the C++-side
            PyDatabase constructor; a None entry leaves that hook unset.
        module_equality : str
            Name of the module-equality strategy (default "structural");
            presumably controls workload deduplication — confirm in C++ side.
        """
        self.__init_handle_by_constructor__(
            _ffi_api.DatabasePyDatabase,  # type: ignore # pylint: disable=no-member
            f_has_workload,
            f_commit_workload,
            f_commit_tuning_record,
            f_get_top_k,
            f_get_all_tuning_records,
            f_query_tuning_record,
            f_query_schedule,
            f_query_ir_module,
            f_size,
            module_equality,
        )
class PyDatabase:
    """
    User-facing base class for implementing a database in pure Python.
    Subclass it, override the abstract methods below, and decorate the
    subclass with @derived_object so calls are routed to the overrides.
    """
    # Wiring consumed by @derived_object: the TVM-side wrapper class plus the
    # overridable methods, in the order the wrapper's constructor expects.
    _tvm_metadata = {
        "cls": _PyDatabase,
        "methods": [
            "has_workload",
            "commit_workload",
            "commit_tuning_record",
            "get_top_k",
            "get_all_tuning_records",
            "query_tuning_record",
            "query_schedule",
            "query_ir_module",
            "__len__",
        ],
    }
    def has_workload(self, mod: IRModule) -> bool:
        """Tell whether a record for the given workload already exists.
        Parameters
        ----------
        mod : IRModule
            The IRModule to look up.
        Returns
        -------
        result : bool
            True iff the database already knows this workload.
        """
        raise NotImplementedError
    def commit_workload(self, mod: IRModule) -> Workload:
        """Register the IRModule as a workload, adding it if absent.
        Parameters
        ----------
        mod : IRModule
            The IRModule to look up or insert.
        Returns
        -------
        workload : Workload
            The workload entry corresponding to the IRModule.
        """
        raise NotImplementedError
    def commit_tuning_record(self, record: TuningRecord) -> None:
        """Store one tuning record in the database.
        Parameters
        ----------
        record : TuningRecord
            The record to store.
        """
        raise NotImplementedError
    def get_top_k(self, workload: Workload, top_k: int) -> List[TuningRecord]:
        """Return up to `top_k` best records for the given workload.
        Parameters
        ----------
        workload : Workload
            The workload whose records are requested.
        top_k : int
            Maximum number of records to return.
        Returns
        -------
        top_k_records : List[TuningRecord]
            The best records, at most `top_k` of them.
        """
        raise NotImplementedError
    def get_all_tuning_records(self) -> List[TuningRecord]:
        """Return every tuning record held by the database.
        Returns
        -------
        tuning_records : List[TuningRecord]
            All stored records.
        """
        raise NotImplementedError
    def query_tuning_record(
        self, mod: IRModule, target: Target, workload_name: Optional[str] = None
    ) -> Optional[TuningRecord]:
        """Look up the best tuning record for a workload.
        Default implementation: delegate to the TVM-side query helper.
        Parameters
        ----------
        mod : IRModule
            The IRModule to look up.
        target : Target
            The compilation target to look up.
        workload_name : Optional[str]
            The workload name to look up.
        Returns
        -------
        record : Optional[TuningRecord]
            The matching record, or None.
        """
        # self._outer() yields the TVM-side wrapper standing in for `self`.
        handle = self._outer()  # type: ignore # pylint: disable=no-member
        return _ffi_api.DatabaseQueryTuningRecord(  # type: ignore # pylint: disable=no-member
            handle, mod, target, workload_name
        )
    def query_schedule(
        self, mod: IRModule, target: Target, workload_name: Optional[str] = None
    ) -> Optional[Schedule]:
        """Look up the best schedule for a workload.
        Default implementation: delegate to the TVM-side query helper.
        Parameters
        ----------
        mod : IRModule
            The IRModule to look up.
        target : Target
            The compilation target to look up.
        workload_name : Optional[str]
            The workload name to look up.
        Returns
        -------
        schedule : Optional[Schedule]
            The matching schedule, or None.
        """
        # self._outer() yields the TVM-side wrapper standing in for `self`.
        handle = self._outer()  # type: ignore # pylint: disable=no-member
        return _ffi_api.DatabaseQuerySchedule(  # type: ignore # pylint: disable=no-member
            handle, mod, target, workload_name
        )
    def query_ir_module(
        self, mod: IRModule, target: Target, workload_name: Optional[str] = None
    ) -> Optional[IRModule]:
        """Look up the best IRModule for a workload.
        Default implementation: delegate to the TVM-side query helper.
        Parameters
        ----------
        mod : IRModule
            The IRModule to look up.
        target : Target
            The compilation target to look up.
        workload_name : Optional[str]
            The workload name to look up.
        Returns
        -------
        mod : Optional[IRModule]
            The matching IRModule, or None.
        """
        # self._outer() yields the TVM-side wrapper standing in for `self`.
        handle = self._outer()  # type: ignore # pylint: disable=no-member
        return _ffi_api.DatabaseQueryIRModule(  # type: ignore # pylint: disable=no-member
            handle, mod, target, workload_name
        )
    def __len__(self) -> int:
        """Return the number of records held by the database.
        Returns
        -------
        num_records : int
            The record count.
        """
        raise NotImplementedError
# --- dataset metadata (end of database.py): length=20,190 | avg_line_length=30.647335 | max_line_length=127 | ext=py ---
# --- repo: tvm | file: tvm-main/python/tvm/meta_schedule/database/__init__.py ---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The tvm.meta_schedule.database package.
The database that stores serialized tuning records and workloads
"""
from .database import Database, PyDatabase, TuningRecord, Workload, create
from .json_database import JSONDatabase
from .memory_database import MemoryDatabase
from .ordered_union_database import OrderedUnionDatabase
from .schedule_fn_database import ScheduleFnDatabase
from .union_database import UnionDatabase
# --- dataset metadata (end of __init__.py): length=1,209 | avg_line_length=43.814815 | max_line_length=74 | ext=py ---
# --- repo: tvm | file: tvm-main/python/tvm/meta_schedule/database/ordered_union_database.py ---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A database consists of multiple databases."""
from tvm._ffi import register_object
from .. import _ffi_api
from .database import Database
@register_object("meta_schedule.OrderedUnionDatabase")
class OrderedUnionDatabase(Database):
    """A database composed of multiple databases, allowing users to guide IR rewriting using
    combined knowledge of those databases. To each query, it returns the record from the first
    database that responds to the query.
    Examples
    --------
    Examples below demonstrate the usecases of and difference between UnionDatabase and
    OrderedUnionDatabase.
    Assumption:
    * db1, db2 do not have tuning records for the target workload.
    * Each of db3, db4, db5 has tuning records r3, r4, r5 for target workload respectively.
    .. code-block:: python
    #### Case 1. `UnionDatabase`:
    merged_db = ms.database.UnionDatabase(
        db1, # no record
        db2, # no record
        db3, # has r3
        db4 # has r4
    )
    # returns the better one between r3 and r4
    merged_db.query_tuning_record(..., target_workload)
    ### Case 2. `OrderedUnionDatabase`
    merged_db = ms.database.OrderedUnionDatabase(
        db1, # no record
        db2, # no record
        db3, # has r3
        db4 # has r4
    )
    # returns r3
    merged_db.query_tuning_record(..., target_workload)
    ### Case 3. Mix-use scenario
    merged_db = ms.database.UnionDatabase(
        db1, # no record
        db2, # no record
        db3, # has r3
        ms.database.OrderedUnionDatabase( # returns r4
            db4, # has r4
            db5, # has r5
        )
    )
    # returns the better one between r3 and r4
    merged_db.query_tuning_record(..., target_workload)
    ### Case 4. Another mix-use scenario
    merged_db = ms.database.UnionDatabase(
        db1, # no record
        db2, # no record
        db3, # has r3
        ms.database.UnionDatabase( # returns best one between r4 and r5
            db4, # has r4
            db5, # has r5
        )
    )
    # returns the best one among r3, r4 and r5
    merged_db.query_tuning_record(..., target_workload)
    ### Case 5. Yet another mix-use scenario
    merged_db = ms.database.OrderedUnionDatabase(
        db1, # no record
        db2, # no record
        ms.database.UnionDatabase( # returns best one between r3 and r4
            db3, # has r3
            db4, # has r4
        ),
        db5, # has r5
    )
    # returns the better one between r3 and r4
    merged_db.query_tuning_record(..., target_workload)
    """
    def __init__(self, *databases: Database) -> None:
        """Construct a merged database from multiple databases.
        Databases are consulted in the order given; the first responder wins.
        Parameters
        ----------
        *databases : Database
            The list of databases to combine.
        """
        self.__init_handle_by_constructor__(
            _ffi_api.DatabaseOrderedUnionDatabase,  # type: ignore # pylint: disable=no-member
            databases,
        )
# --- dataset metadata (end of ordered_union_database.py): length=3,752 | avg_line_length=32.212389 | max_line_length=94 | ext=py ---
# --- dataset-viewer residue (not part of any source file): "Subsets and Splits —
# No community queries yet — The top public SQL queries from the community will
# appear here once available." ---