repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/python/tvm/script/parser/ir/parser.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The base parser for ir module"""
from ...ir_builder import ir as I
from .._core import Parser, dispatch, doc
class ModuleWithGlobalVars:
"""A Module that can add global vars during parsing, to support `Module.function` syntax."""
def __getattr__(self, attr):
# Customize the error message.
# NOTE: `__getattr__` is only called when the attribute access fails with an AttributeError
raise AttributeError(f"Cannot find the function `{attr}` in the current IRModule")
@dispatch.register(token="ir", type_name="ClassDef")
def _visit_class_def(self: Parser, node: doc.ClassDef) -> None:
    """The class definition visiting method for ir module.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.ClassDef
        The doc AST class definition node.
    """
    with self.var_table.with_frame():
        with I.ir_module():
            # Step 0. Add the class name to the var table
            fake_module = ModuleWithGlobalVars()
            self.var_table.add(node.name, fake_module)

            # Step 1. Visit non-function stmts, including but not limited to
            # 1. `I.module_attrs`
            # 2. `I.module_global_infos`
            with self.with_dispatch_token("ir"):
                for stmt in node.body:
                    if not isinstance(stmt, doc.FunctionDef):
                        self.visit(stmt)

            # Step 2. Visit function stmts to declare the global vars.
            # Declaring every function before parsing any body lets functions
            # reference each other via `Module.function`.
            for stmt in node.body:
                if isinstance(stmt, doc.FunctionDef):
                    global_var = self.visit_tvm_declare_function(stmt)
                    fake_module.__setattr__(stmt.name, global_var)

            # Step 3. Visit and parse the functions
            with self.with_dispatch_token("ir"):
                for stmt in node.body:
                    if isinstance(stmt, doc.FunctionDef):
                        self.visit(stmt)
@dispatch.register(token="ir", type_name="Assign")
def _visit_assign(_self: Parser, _node: doc.Assign) -> None:
    """The assign visiting method for ir module.

    Deliberately a no-op: plain assignments at IRModule class scope carry no
    IR semantics, so they are ignored during parsing.

    Parameters
    ----------
    _self : Parser
        The visiting parser.

    _node : doc.Assign
        The doc AST assign node.
    """
@dispatch.register(token="ir", type_name="Expr")
def _visit_expr(self: Parser, node: doc.Expr) -> None:
    """The expression visiting method for ir module.

    Evaluates the expression statement for its side effects, e.g. calls such
    as `I.module_attrs(...)` appearing directly in the module class body.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.Expr
        The doc AST expression node.
    """
    self.eval_expr(node.value)
@dispatch.register(token="default", type_name="Assign")
def visit_assign(self: Parser, node: doc.Assign) -> None:
    """The default assign visiting method.

    Evaluates the right-hand side and binds the resulting value verbatim to
    the single assignment target, shadowing any existing binding of the name.
    """
    targets = node.targets
    if len(targets) != 1:
        self.report_error(node, "Consequential assignments like 'a = b = c' are not supported.")

    def _identity(_a, _b, _c, value):
        # Bind the evaluated value as-is, with no renaming or wrapping.
        return value

    self.eval_assign(
        target=targets[0],
        source=self.eval_expr(node.value),
        bind_value=_identity,
        allow_shadowing=True,
    )
| 3,809 | 33.636364 | 99 | py |
tvm | tvm-main/python/tvm/script/parser/ir/entry.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The entry point of TVM parser for ir module."""
import inspect
from typing import Type
from tvm.ir import IRModule
from .._core import parse, utils
def ir_module(mod: Type) -> IRModule:
    """The parsing method for ir module, by using `@ir_module` as decorator.

    Parameters
    ----------
    mod : Type
        The class to be parsed as ir module.

    Returns
    -------
    ir_module : IRModule
        The parsed ir module.

    Raises
    ------
    TypeError
        If the decorated object is not a class.
    """
    if inspect.isclass(mod):
        parsed = parse(mod, utils.inspect_class_capture(mod))
        # Carry the class name over so the parsed module identifies like the input.
        setattr(parsed, "__name__", mod.__name__)
        return parsed
    raise TypeError(f"Expect a class, but got: {mod}")


# Route parsing of decorated classes to the "ir" dispatch namespace.
setattr(ir_module, "dispatch_token", "ir")
| 1,480 | 29.22449 | 76 | py |
tvm | tvm-main/python/tvm/script/parser/ir/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The ir module parser"""
from ...ir_builder.ir import * # pylint: disable=redefined-builtin
from . import parser as _parser
from .entry import ir_module
__all__ = ["ir_module", "module_attrs"]
| 982 | 41.73913 | 67 | py |
tvm | tvm-main/python/tvm/script/parser/tir/parser.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The base parser for tir"""
import contextlib
import inspect
from functools import partial
from typing import Any, Union
import tvm
from tvm.ir import GlobalVar, PrimType
from tvm.tir import Buffer, IterVar, PrimExpr, Var
from ...ir_builder import ir as I
from ...ir_builder import tir as T
from ...ir_builder.base import IRBuilder
from ...ir_builder.base import IRBuilderFrame as Frame
from .._core import Parser, dispatch, doc
from ..core.parser import VarTable
from .entry import TIRMacro
def bind_with_value(self: Parser, node: doc.expr, var_name: str, value: Any) -> Any:
    """Bind a value produced by a with-statement to a variable name.

    e.g. binding i, j, k with T.grid(128, 128, 128), when parsing
    with T.grid(128, 128, 18) as i, j, k.

    Parameters
    ----------
    self : Parser
        The current parser.

    node : doc.expr
        The doc AST expression node for error reporting.

    var_name : str
        The variable name.

    value : Any
        The value to be bound with.

    Returns
    -------
    res : Any
        The bound value.
    """
    if isinstance(value, (list, tuple)):
        # Recurse element-wise, deriving names `<var_name>_0`, `<var_name>_1`, ...
        for index, item in enumerate(value):
            bind_with_value(self, node, f"{var_name}_{index}", item)
        return value
    if isinstance(value, (Buffer, Var)):
        IRBuilder.name(var_name, value)
        return value
    self.report_error(node, f"Do not know how to bind type: {type(value)} in with statement")
    raise NotImplementedError
def bind_for_value(self: Parser, node: doc.expr, var_name: str, value: Any) -> Any:
    """Bind a loop iteration value to a variable name while parsing a for-statement.

    e.g. binding i, j, k with T.grid(128, 128, 128), when parsing
    for i, j, k in T.grid(128, 128, 128).

    Parameters
    ----------
    self : Parser
        The current parser.

    node : doc.expr
        The doc AST expression node for error reporting.

    var_name : str
        The variable name.

    value : Any
        The value to be bound with.

    Returns
    -------
    res : Any
        The bound value.
    """
    if isinstance(value, (list, tuple)):
        # Recurse element-wise, deriving names `<var_name>_0`, `<var_name>_1`, ...
        for index, item in enumerate(value):
            bind_for_value(self, node, f"{var_name}_{index}", item)
        return value
    if isinstance(value, Var):
        IRBuilder.name(var_name, value)
        return value
    self.report_error(node, f"Do not know how to bind type: {type(value)} in for statement")
    raise NotImplementedError
def bind_assign_value(self: Parser, node: doc.expr, var_name: str, value: Any) -> Any:
    """Value binding methods when parsing assign statement.

    e.g. binding vi, vj, vk with T.axis.remap("SSR", [i, j, k]), when parsing
    vi, vj, vk = T.axis.remap("SSR", [i, j, k]).

    Parameters
    ----------
    self : Parser
        The current parser.

    node : doc.expr
        The doc AST expression node for error reporting.

    var_name : str
        The variable name.

    value : Any
        The value to be bound with.

    Returns
    -------
    res : Any
        The bound value.
    """
    if isinstance(value, T.meta_var):
        # Meta variables unwrap to their underlying python value.
        return value.value
    elif isinstance(value, (list, tuple)):
        # Recurse element-wise, deriving names `<var_name>_0`, `<var_name>_1`, ...
        for i, v in enumerate(value):
            bind_assign_value(self, node, f"{var_name}_{i}", v)
        return value
    elif isinstance(value, Frame):
        # Schedule the frame's __exit__ for when the enclosing scope closes,
        # then enter it immediately and name its result for the binding.
        value.add_callback(partial(value.__exit__, None, None, None))
        res = value.__enter__()
        IRBuilder.name(var_name, res)
        return res
    elif isinstance(value, (Buffer, IterVar)) or (
        isinstance(value, Var) and not self.var_table.exist(value)
    ):
        # Fresh IR objects are simply named; a Var already in the var table
        # falls through to the LetStmt case below instead of being renamed.
        IRBuilder.name(var_name, value)
        return value
    else:
        # Fallback: wrap the value in a LetStmt and bind the let-variable.
        value = tvm.runtime.convert(value)
        frame = T.LetStmt(value)
        var = frame.var
        IRBuilder.name(var_name, var)
        frame.add_callback(partial(frame.__exit__, None, None, None))
        frame.__enter__()
        return var
@dispatch.register(token="tir", type_name="For")
def visit_for(self: Parser, node: doc.For) -> None:
    """The for visiting method for tir.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.For
        The doc AST for node.
    """
    for_frame = self.eval_expr(node.iter)
    if not isinstance(for_frame, T.frame.ForFrame):
        self.report_error(
            node.iter,
            "Expect the for loop to be one of the following: "
            "range, T.serial, T.grid, T.parallel, T.vectorized, T.unroll, T.thread_binding",
        )
    with self.var_table.with_frame():
        with for_frame as iters:
            # Bind the loop target(s) to the iteration variables produced by
            # the frame, then parse the loop body inside the opened scopes.
            self.eval_assign(target=node.target, source=iters, bind_value=bind_for_value)
            self.visit_body(node.body)
@dispatch.register(token="tir", type_name="While")
def visit_while(self: Parser, node: doc.While) -> None:
    """The while visiting method for tir.

    Opens a fresh variable-table frame, evaluates the loop condition, and
    parses the loop body inside a `T.While` frame.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.While
        The doc AST while node.
    """
    with self.var_table.with_frame():
        condition = self.eval_expr(node.test)
        while_frame = T.While(condition)
        with while_frame:
            self.visit_body(node.body)
@dispatch.register(token="tir", type_name="Assign")
def visit_assign(self: Parser, node: doc.Assign) -> None:
    """The assign visiting method for tir.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.Assign
        The doc AST assign node.
    """
    if len(node.targets) != 1:
        self.report_error(node, "Consequential assignments like 'a = b = c' are not supported.")
    lhs = node.targets[0]

    if isinstance(node.value, doc.Subscript):
        # Collect the slice expressions on the RHS subscript so a missing
        # step can be filled in with an explicit `1` before evaluation.
        check_slices = []
        if isinstance(node.value.slice, doc.Slice):
            check_slices = [node.value.slice]
        elif isinstance(node.value.slice, doc.Tuple):
            for p in node.value.slice.elts:
                if isinstance(p, doc.Slice):
                    check_slices.append(p)
        for s in check_slices:
            if not s.step and s.upper and s.lower:
                # Synthesize a Constant node for step=1; its source position
                # is derived from the upper bound's location.
                s.step = doc.Constant(
                    1,
                    None,
                    1,
                    1,
                    s.upper.lineno,
                    s.upper.end_col_offset + 1,
                    s.upper.lineno,
                    s.upper.end_col_offset + 2,
                )

    rhs = self.eval_expr(node.value)
    if isinstance(lhs, doc.Subscript):
        # `A[i, j] = value` becomes a buffer store with evaluated indices.
        if isinstance(lhs.slice, doc.Tuple):
            indices = []
            for index in lhs.slice.elts:
                indices.append(self.eval_expr(index))
        else:
            indices = [self.eval_expr(lhs.slice)]
        T.buffer_store(self.eval_expr(lhs.value), rhs, indices)
    else:
        self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)
@dispatch.register(token="tir", type_name="AugAssign")
def visit_aug_assign(self: Parser, node: doc.AugAssign) -> None:
    """The augmented assign visiting method for tir.

    Desugars `lhs op= rhs` into `lhs = lhs op rhs` by evaluating both sides
    once, stashing them under temporary names, and re-evaluating a synthetic
    BinOp node built over those names.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.AugAssign
        The doc AST augmented assign node.
    """
    # Source positions reused for the synthetic AST nodes built below.
    lhs_pos = (
        node.target.lineno,
        node.target.col_offset,
        node.target.end_lineno,
        node.target.end_col_offset,
    )
    rhs_pos = (
        node.value.lineno,
        node.value.col_offset,
        node.value.end_lineno,
        node.value.end_col_offset,
    )
    # The target is first read (Load) to compute `lhs op rhs` ...
    node.target.ctx = doc.Load(*lhs_pos)
    with self.var_table.with_frame():
        lhs_name = "__tvm_tmp_value_aug_assign_lhs"
        rhs_name = "__tvm_tmp_value_aug_assign_rhs"
        lhs_expr = self.eval_expr(node.target)
        rhs_expr = self.eval_expr(node.value)
        self.var_table.add(lhs_name, lhs_expr)
        self.var_table.add(rhs_name, rhs_expr)
        op = doc.BinOp(
            doc.Name(lhs_name, doc.Load(*lhs_pos), *lhs_pos),
            node.op,
            doc.Name(rhs_name, doc.Load(*rhs_pos), *rhs_pos),
            *lhs_pos,
        )
        rhs = self.eval_expr(op)
    # ... and then written (Store) with the combined result.
    lhs = node.target
    lhs.ctx = doc.Store(*lhs_pos)
    if isinstance(lhs, doc.Subscript):
        # `A[i, j] op= value` becomes a buffer store with evaluated indices.
        if isinstance(lhs.slice, doc.Tuple):
            indices = []
            for index in lhs.slice.elts:
                indices.append(self.eval_expr(index))
        else:
            indices = [self.eval_expr(lhs.slice)]
        T.buffer_store(self.eval_expr(lhs.value), rhs, indices)
    else:
        self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)
@dispatch.register(token="tir", type_name="AnnAssign")
def visit_ann_assign(self: Parser, node: doc.AnnAssign) -> None:
    """The annotated assign visiting method for tir.

    Parses `name: annotation = value` into a LetStmt binding the annotated
    variable to the evaluated value.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.AnnAssign
        The doc AST annotated assign node.
    """
    lhs = node.target
    rhs = self.eval_expr(node.value)
    ann_var = self.visit_tvm_annotation(node.annotation)
    if not isinstance(ann_var, Var):
        self.report_error(node.annotation, "Annotation should be Var")
    self.eval_assign(target=lhs, source=ann_var, bind_value=bind_assign_value)
    # Enter the LetStmt frame now and schedule its exit for when the
    # enclosing scope closes, so the binding covers the rest of the block.
    frame = T.LetStmt(rhs, var=ann_var)
    frame.add_callback(partial(frame.__exit__, None, None, None))
    frame.__enter__()
@dispatch.register(token="tir", type_name="With")
def visit_with(self: Parser, node: doc.With) -> None:
    """The with visiting method for tir.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.With
        The doc AST with node.
    """
    # ExitStack closes every entered frame (and the var-table frame) in
    # reverse order when the with-body has been fully parsed.
    with contextlib.ExitStack() as stack:
        stack.enter_context(self.var_table.with_frame())
        for item in node.items:
            frame = self.eval_expr(item.context_expr)
            if not isinstance(frame, Frame):
                self.report_error(
                    item.context_expr, "Invalid context expression in the with-statement."
                )
            rhs = stack.enter_context(frame)
            if item.optional_vars is not None:
                # `with ... as name` binds the frame's result to `name`.
                self.eval_assign(target=item.optional_vars, source=rhs, bind_value=bind_with_value)
        self.visit_body(node.body)
@dispatch.register(token="tir", type_name="FunctionDef")
def visit_function_def(self: Parser, node: doc.FunctionDef) -> None:
    """The function definition visiting method for tir.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.FunctionDef
        The doc AST function definition node.
    """
    # Externally-supplied annotations are consumed only for this function;
    # they are cleared during the body parse and restored afterwards.
    supplied_annotation = self.function_annotations
    func_annotation = supplied_annotation.get(node.name, {})
    self.function_annotations = None
    with self.var_table.with_frame():
        # Inside a prim func, bare `range(...)` means `T.serial(...)`.
        self.var_table.add("range", T.serial)
        with T.prim_func():
            T.func_name(node.name)
            if node.returns is not None:
                ret_type = self.eval_expr(node.returns)
                if callable(ret_type):
                    # e.g. a dtype constructor like T.int32 — call it to get
                    # the concrete type.
                    ret_type = PrimType(ret_type().dtype)
                T.func_ret(ret_type)
            with self.with_dispatch_token("tir"):
                # TODO: handle different types of arguments:
                # - vararg: arg | None
                # - kwonlyargs: list[arg]
                # - kw_defaults: list[expr | None]
                # - kwarg: arg | None
                # - defaults: list[expr]
                # - posonlyargs: list[arg]
                for arg in node.args.args:
                    if arg.annotation is None:
                        self.report_error(arg, "Type annotation required for function parameters.")
                    try:
                        ann = self.eval_expr(arg.annotation)
                        if callable(ann):
                            ann = ann()
                    except Exception:  # pylint: disable=broad-except
                        # Fall back to the externally supplied annotation;
                        # re-raise when none is available.
                        ann = func_annotation.get(arg.arg, None)
                        if ann is None:
                            raise
                    param = T.arg(arg.arg, ann)
                    self.var_table.add(arg.arg, param)
                self.visit_body(node.body)
    self.function_annotations = supplied_annotation
@dispatch.register(token="tir", type_name="tvm_annotation")
def visit_tvm_annotation(self: Parser, node: doc.expr):
    """The TVM annotation visiting method for tir.

    Evaluates the annotation expression; when the result is callable (e.g. a
    dtype constructor such as `T.int32`), it is invoked to obtain the
    concrete annotation value.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.expr
        The doc AST expr node.
    """
    result = self.eval_expr(node)
    return result() if callable(result) else result
@dispatch.register(token="tir", type_name="Expr")
def visit_expr_stmt(self: Parser, node: doc.Expr) -> None:
    """The expr statement visiting method for tir.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.Expr
        The doc AST Expr node.
    """
    # A call whose callee is a TIRMacro is expanded in place rather than
    # evaluated as a regular expression.
    if isinstance(node.value, doc.Call):
        callee = self.eval_expr(node.value.func)
        if isinstance(callee, TIRMacro):
            return expand_macro(self, callee, node.value)

    res = self.eval_expr(node.value)
    if res is None:
        pass
    elif isinstance(res, Frame):
        # Enter the frame now; its exit is deferred to the end of the
        # enclosing scope via the callback.
        res.add_callback(partial(res.__exit__, None, None, None))
        res.__enter__()
    elif isinstance(res, PrimExpr):
        T.evaluate(res)
    elif isinstance(res, (int, bool)):
        T.evaluate(tvm.tir.const(res))
    elif isinstance(res, tvm.relay.Call) and not res.args:
        # Using GlobalVar.__call__ with no arguments is ambiguous, as
        # each IR has a different function Call representation. If
        # this occurs, convert to the TIR representation.
        T.evaluate(tvm.tir.call_tir(res.op))
    elif isinstance(res, str):
        # Ignore docstrings
        pass
    else:
        self.report_error(node, f"Parsing resulted in unexpected type {type(res)}")
    return None  # For pylint
@dispatch.register(token="tir", type_name="If")
def visit_if(self: Parser, node: doc.If) -> None:
    """The if visiting method for tir.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.If
        The doc AST if node.
    """
    with self.var_table.with_frame():
        with T.If(self.eval_expr(node.test)):
            # Each branch gets its own var-table frame so bindings made in
            # one branch do not leak into the other.
            with T.Then():
                with self.var_table.with_frame():
                    self.visit_body(node.body)
            if node.orelse:
                with T.Else():
                    with self.var_table.with_frame():
                        self.visit_body(node.orelse)
@dispatch.register(token="tir", type_name="Assert")
def visit_assert(self: Parser, node: doc.Assert) -> None:
    """The assert visiting method for tir.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.Assert
        The doc AST assert node.
    """
    cond = self.eval_expr(node.test)
    # NOTE(review): `node.msg` is None for a bare `assert cond` with no
    # message — presumably eval_expr/T.Assert rejects or handles that case;
    # confirm message-less asserts are covered upstream.
    msg = self.eval_expr(node.msg)
    # Enter the Assert frame now and schedule its exit for when the
    # enclosing scope closes, so the assert wraps the rest of the block.
    frame = T.Assert(cond, msg)
    frame.add_callback(partial(frame.__exit__, None, None, None))
    frame.__enter__()
@dispatch.register(token="tir", type_name="Return")
def visit_return(self: Parser, node: doc.Return) -> None:
    """The return visiting method for tir.

    TIR prim funcs do not return values through python `return`, so any
    return statement in the body is reported as a parse error.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.Return
        The doc AST return node.
    """
    self.report_error(node, "Return is not allowed.")
@dispatch.register(token="tir", type_name="tvm_declare_function")
def visit_tvm_declare_function(self: Parser, node: doc.FunctionDef) -> GlobalVar:
    """The function declaration step for tir.

    Declares the function in the enclosing IRModule (so other functions can
    reference it) without parsing its body yet.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.FunctionDef
        The doc AST function definition node.

    Returns
    -------
    res : GlobalVar
        The global var created for the declared function.
    """
    ret_type = None
    if node.returns is not None:
        ret_type = self.eval_expr(node.returns)
        if callable(ret_type):
            # e.g. a dtype constructor like T.int32 — call it for the type.
            ret_type = PrimType(ret_type().dtype)

    # Only ret_type is needed for func_signature.
    func_signature = tvm.tir.PrimFunc([], None, ret_type=ret_type)
    return I.decl_function(node.name, func_signature)
def expand_macro(self: Parser, callee: TIRMacro, call: doc.Call) -> None:
    """Bind arguments to the macro invocation to the parameters in the macro definition,
    and pass the macro body for further parsing.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    callee : TIRMacro
        The macro being expanded.

    call : doc.Call
        The doc AST call node invoking the macro.
    """
    assert isinstance(callee, TIRMacro), f"Unexpected macro type {type(callee)}"

    def find_macro_def(name: str, decl_list: doc.AST) -> Union[doc.FunctionDef, Any]:
        # Locate the macro's own FunctionDef inside its parsed source AST.
        for decl in decl_list:
            if isinstance(decl, doc.FunctionDef) and decl.name == name:
                return decl
        return None

    macro_def = find_macro_def(callee.__name__, callee.source_ast.body)
    assert macro_def is not None, f"Invalid macro AST for {callee.__name__}"
    # `macro_def` is the FunctionDef of the macro.
    args = [self.eval_expr(arg) for arg in call.args]
    kwargs = {kw.arg: self.eval_expr(kw.value) for kw in call.keywords}
    # Bind call-site arguments to the macro's signature, including defaults.
    param_binding = inspect.signature(callee.func).bind(*args, **kwargs)
    param_binding.apply_defaults()
    local_vars = param_binding.arguments

    if callee.hygienic:
        # If the macro was hygienic, construct new var_table with a single frame that
        # contains the captured environment, and process the macro's body with that
        # frame.
        saved_var_table = self.var_table
        self.var_table = VarTable()
        with self.var_table.with_frame():
            for k, v in callee.closure_vars.items():
                self.var_table.add(k, v)
            for k, v in local_vars.items():
                self.var_table.add(k, v)
            self.visit_body(macro_def.body)
        # Restore the caller's var table after expansion.
        self.var_table = saved_var_table
    else:
        # Otherwise, dynamically resolve symbols in the macro's body.
        with self.var_table.with_frame():
            for k, v in local_vars.items():
                self.var_table.add(k, v)
            self.visit_body(macro_def.body)
| 18,780 | 30.886248 | 99 | py |
tvm | tvm-main/python/tvm/script/parser/tir/entry.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The entry point of TVM parser for tir."""
import inspect
from typing import Any, Callable, Dict, Union
from tvm.ir.base import deprecated
from tvm.tir import Buffer, PrimFunc
from ...ir_builder.tir import buffer, ptr
from .._core import doc, parse, parse_macro, utils
def prim_func(func: Callable) -> Union[PrimFunc, Callable]:
    """The parsing method for tir prim func, by using `@prim_func` as decorator.

    Parameters
    ----------
    func : Callable
        The function to be parsed as prim func.

    Returns
    -------
    res : Union[PrimFunc, Callable]
        The parsed tir prim func.

    Raises
    ------
    TypeError
        If the decorated object is not a function.
    """
    if not inspect.isfunction(func):
        raise TypeError(f"Expect a function, but got: {func}")
    # Functions defined inside a class body are parsed later as part of the
    # enclosing IRModule, so they are returned untouched here.
    if utils.is_defined_in_class(inspect.stack(), func):
        return func
    parsed = parse(func, utils.inspect_function_capture(func))
    # Carry the original name over so the parsed func identifies like the input.
    setattr(parsed, "__name__", func.__name__)
    return parsed


# Route parsing of decorated functions to the "tir" dispatch namespace.
setattr(prim_func, "dispatch_token", "tir")
# Semantics of TIR macros:
# - Function that is decorated with @T.macro can have any parameters that
# follow Python syntax, i.e. positional, keyword, etc. Type annotations
# are not required, but are allowed.
# - Macro use follows the same syntax as a function call.
# For `macro_name(arg1, arg2, arg3, ...)`, the values are substituted into
# the body of the macro, and the body with the substituted values is then
# inserted at the point where the call to the macro is located.
class TIRMacro:
    """Representation of T.macro."""

    def __init__(
        self,
        source_ast: doc.AST,
        source_txt: str,
        closure_vars: Dict[str, Any],
        func: Callable,
        hygienic: bool,
    ) -> None:
        # Parsed AST and raw source text of the macro definition.
        self.source_ast = source_ast
        self.source_txt = source_txt
        # Environment captured at definition time (used for hygienic expansion).
        self.closure_vars = closure_vars
        self.func = func
        # Whether body symbols resolve at the definition site (True) or the
        # use site (False).
        self.hygienic = hygienic

    def __repr__(self):
        # A macro prints as its original source text.
        return self.source_txt
def macro(*args, hygienic: bool = True) -> Callable:
    """Decorator for macro definitions.

    Parameters
    ----------
    hygienic: bool
        Specifies whether the macro is hygienic or not.
        A macro is hygienic if all symbols used in the macro's body are resolved
        to values from the location of the macro definition. A non-hygienic macro
        will have its symbols resolved to values at the time of the macro's use.

    Example:
    ```
    import tvm
    from tvm.script import tir as T

    x_value = 128

    @T.macro(hygienic=True)
    def static_capture(A, B):
        B[()] = A[x_value]          ### x_value binds to 128

    @T.macro(hygienic=False)
    def dynamic_capture(A, B):
        B[()] = A[x_value]          ### x_value will bind at the time of use

    @T.prim_func
    def use1(A: T.Buffer((1024,), "int32"), B: T.Buffer((), "int32")) -> None:
        for x_value in T.serial(10):
            static_capture(A, B)    ### Produces B[()] = A[128]

    @T.prim_func
    def use2(A: T.Buffer((1024,), "int32"), B: T.Buffer((), "int32")) -> None:
        for x_value in T.serial(10):
            dynamic_capture(A, B)   ### Produces B[()] = A[x_value]
    ```
    """

    def _decorator(func: Callable) -> TIRMacro:
        source_ast, source_txt, closure_vars = parse_macro(
            func, utils.inspect_function_capture(func)
        )
        obj = TIRMacro(source_ast, source_txt, closure_vars, func, hygienic)
        obj.__name__ = func.__name__
        # We don't need to explicitly store the return value anywhere.
        # This function is a decorator, so the return value will replace
        # the function definition (to which the decorator it is applied)
        # in that function's name space.
        return obj

    # Support both the bare `@T.macro` form and the called
    # `@T.macro(...)` / `@T.macro(hygienic=...)` forms.
    if len(args) == 0:
        return _decorator
    if len(args) == 1 and inspect.isfunction(args[0]):
        return _decorator(args[0])

    raise ValueError(
        "Invalid use of T.macro. Usage: @T.macro, @T.macro(), @T.macro(hygienic=[True|False])"
    )
# There is no dispatch_token for macro, because macro doesn't invoke parser.
class BufferProxy:
    """Buffer proxy class for constructing tir buffer."""

    def __call__(
        self,
        shape,
        dtype="float32",
        data=None,
        strides=None,
        elem_offset=None,
        scope="global",
        align=0,
        offset_factor=0,
        buffer_type="",
        axis_separators=None,
    ) -> Buffer:
        # Forward everything to the ir_builder `buffer` constructor.
        return buffer(
            shape,
            dtype=dtype,
            data=data,
            strides=strides,
            elem_offset=elem_offset,
            scope=scope,
            align=align,
            offset_factor=offset_factor,
            buffer_type=buffer_type,
            axis_separators=axis_separators,
        )

    @deprecated("T.Buffer[...]", "T.Buffer(...)")
    def __getitem__(self, keys) -> Buffer:
        # Legacy `T.Buffer[...]` subscript form, kept for compatibility.
        if not isinstance(keys, tuple):
            return self(keys)
        # Heuristic: when the second element is not a dtype string, the whole
        # tuple is the shape; otherwise unpack it as (shape, dtype, ...).
        if len(keys) >= 2 and not isinstance(keys[1], str):
            return self(keys)
        return self(*keys)  # type: ignore[attr-defined] # pylint: disable=no-member
class PtrProxy:
    """Ptr proxy class for constructing tir pointer."""

    @deprecated("T.Ptr(...)", "T.handle(...)")
    def __call__(self, dtype, storage_scope="global"):
        # A callable dtype (e.g. T.int32) is invoked to obtain its dtype string.
        if callable(dtype):
            dtype = dtype().dtype
        return ptr(dtype, storage_scope)  # type: ignore[attr-defined] # pylint: disable=no-member

    @deprecated("T.Ptr[...]", "T.handle(...)")
    def __getitem__(self, keys):
        # Legacy `T.Ptr[...]` subscript form, kept for compatibility.
        if not isinstance(keys, tuple):
            return self(keys)
        return self(*keys)


# Singleton proxies exposed as `T.Buffer` / `T.Ptr` in tvmscript.
Buffer = BufferProxy()  # pylint: disable=invalid-name
Ptr = PtrProxy()  # pylint: disable=invalid-name
| 6,560 | 31.161765 | 98 | py |
tvm | tvm-main/python/tvm/script/parser/tir/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The tir parser"""
from typing import TYPE_CHECKING
from ...ir_builder.tir import * # pylint: disable=redefined-builtin
from ...ir_builder.tir import ir as _tir
from . import operation as _operation
from . import parser as _parser
from .entry import Buffer, Ptr
if TYPE_CHECKING:
    # pylint: disable=invalid-name
    # Define prim_func and make it type check as static method
    # so most tvmscript won't trigger pylint error here.
    prim_func = staticmethod
else:
    # At runtime, import the real parser entry points.
    from .entry import prim_func, macro

# Public API: the ir_builder surface plus the parser entry points above.
__all__ = _tir.__all__ + ["Buffer", "Ptr", "prim_func", "macro"]
| 1,367 | 37 | 68 | py |
tvm | tvm-main/python/tvm/script/parser/tir/operation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The tir expression operation registration"""
from typing import Type
from tvm import tir
from tvm._ffi.runtime_ctypes import DataType, DataTypeCode
from tvm.tir import IntImm
from tvm.tir.expr import FloatImm
from .._core import OpMethod, doc, register_op
def _register_expr_op(ty: Type): # pylint: disable=invalid-name
ty._dispatch_type = ty # pylint: disable=protected-access
def _and(a, b):
if isinstance(a, bool):
a = IntImm("bool", a)
if isinstance(b, bool):
b = IntImm("bool", b)
if DataType(a.dtype).lanes > 1 or DataType(b.dtype).lanes > 1:
return a & b
else:
return tir.And(a, b)
def _or(a, b):
if isinstance(a, bool):
a = IntImm("bool", a)
if isinstance(b, bool):
b = IntImm("bool", b)
if DataType(a.dtype).lanes > 1 or DataType(b.dtype).lanes > 1:
return a | b
else:
return tir.Or(a, b)
def _get_type_str(dtype: str):
if DataType(dtype).lanes == 1:
return dtype
index = dtype.find("x")
return dtype[0:index]
def _auto_broadcast(a, b, op):
if isinstance(a, int):
if hasattr(b, "dtype"):
if (
DataType(b.dtype).type_code == DataTypeCode.INT
or DataType(b.dtype).type_code == DataTypeCode.UINT
):
a = IntImm(_get_type_str(b.dtype), a)
elif DataType(b.dtype).type_code == DataTypeCode.FLOAT:
a = FloatImm(_get_type_str(b.dtype), a)
elif isinstance(b, float):
a = FloatImm("float32", a)
else:
a = IntImm("int32", a)
elif isinstance(a, float):
if DataType(b.dtype).type_code == DataTypeCode.FLOAT:
a = FloatImm(_get_type_str(b.dtype), a)
else:
a = FloatImm("float32", a)
assert isinstance(a, tir.PrimExpr), "Operand should be a PrimExpr."
if isinstance(b, int):
if (
DataType(a.dtype).type_code == DataTypeCode.INT
or DataType(a.dtype).type_code == DataTypeCode.UINT
):
b = IntImm(_get_type_str(a.dtype), b)
elif DataType(a.dtype).type_code == DataTypeCode.FLOAT:
b = FloatImm(_get_type_str(a.dtype), b)
elif isinstance(b, float):
b = FloatImm(_get_type_str(a.dtype), b)
if DataType(a.dtype).lanes == DataType(b.dtype).lanes:
return op(a, b)
elif DataType(a.dtype).lanes == 1 and DataType(a.dtype).lanes != DataType(b.dtype).lanes:
broadcast_a = tir.Broadcast(a, DataType(b.dtype).lanes)
return op(broadcast_a, b)
elif DataType(b.dtype).lanes == 1 and DataType(a.dtype).lanes != DataType(b.dtype).lanes:
broadcast_b = tir.Broadcast(b, DataType(a.dtype).lanes)
return op(a, broadcast_b)
else:
raise TypeError("do not know how to deal with it.")
    def _eq(a, b):
        """a == b with scalar coercion / lane broadcasting (see _auto_broadcast)."""
        return _auto_broadcast(a, b, tir.EQ)

    def _ne(a, b):
        """a != b with scalar coercion / lane broadcasting."""
        return _auto_broadcast(a, b, tir.NE)

    def _lt(a, b):
        """a < b with scalar coercion / lane broadcasting."""
        return _auto_broadcast(a, b, tir.LT)

    def _le(a, b):
        """a <= b with scalar coercion / lane broadcasting."""
        return _auto_broadcast(a, b, tir.LE)

    def _gt(a, b):
        """a > b with scalar coercion / lane broadcasting."""
        return _auto_broadcast(a, b, tir.GT)

    def _ge(a, b):
        """a >= b with scalar coercion / lane broadcasting."""
        return _auto_broadcast(a, b, tir.GE)
    def r(op: Type, i: int, m: OpMethod):  # pylint: disable=invalid-name
        """Shortcut: register method `m` for operand slot `i` of AST op `op` on `ty`."""
        register_op(ty, op, i)(m)
    # NOTE(review): `i` appears to select the operand slot (0 = lhs, 1 = rhs)
    # passed through to register_op — confirm against register_op's contract.
    for i in [0, 1]:
        # Case 1. binop
        # doc.Add <-- is overloaded
        # doc.Sub <-- is overloaded
        # doc.Mult <-- is overloaded
        # doc.Div <-- is overloaded
        # doc.FloorDiv <-- is overloaded
        # doc.Mod <-- is overloaded
        # doc.LShift <-- is overloaded
        # doc.RShift <-- is overloaded
        # doc.BitOr <-- is overloaded
        # doc.BitXor <-- is overloaded
        # doc.BitAnd <-- is overloaded
        # doc.MatMult <-- not implemented
        # doc.Pow <-- not implemented
        # Case 2. cmpop
        r(doc.Eq, i, _eq)
        r(doc.NotEq, i, _ne)
        r(doc.Lt, i, _lt)
        r(doc.LtE, i, _le)
        r(doc.Gt, i, _gt)
        r(doc.GtE, i, _ge)
        # doc.Is <-- not implemented
        # doc.IsNot <-- not implemented
        # doc.In <-- not implemented
        # doc.NotIn <-- not implemented
        # Case 3. boolop
        r(doc.And, i, _and)
        r(doc.Or, i, _or)
    # Unary ops only have a single operand slot.
    for i in [0]:
        # Case 4. unaryop
        # doc.Invert <-- is overloaded
        r(doc.Not, i, tir.Not)
        # doc.UAdd <-- is overloaded
        # doc.USub <-- is overloaded


# Install the handlers for both expression-like types used by the parser.
_register_expr_op(tir.PrimExpr)
_register_expr_op(tir.IterVar)
| 5,588 | 33.5 | 97 | py |
tvm | tvm-main/python/tvm/autotvm/record.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=superfluous-parens, redefined-outer-name, redefined-outer-name,pointless-string-statement
# pylint: disable=consider-using-enumerate,invalid-name
"""Tuning record and serialization format"""
import argparse
import base64
from io import TextIOBase
import logging
import pickle
import json
import time
from typing import Union
import os
import itertools
from collections import OrderedDict
import numpy as np
from .. import build, lower
from ..target import Target
from ..contrib import popen_pool
from .. import __version__
from . import task
from .task import ConfigEntity, ApplyHistoryBest
from .measure import MeasureInput, MeasureResult
# Version tag written into every serialized record (see encode/decode).
AUTOTVM_LOG_VERSION = 0.2
# Ensures the "old log format" warning is emitted at most once per process.
_old_version_warning = True
logger = logging.getLogger("autotvm")

# Python-2 compatibility shims: on Python 3 `unicode`/`long` do not exist, so
# `_unicode` becomes an empty tuple (isinstance always False) and `_long` is int.
try:  # convert unicode to str for python2
    _unicode = unicode
except NameError:
    _unicode = ()

try:
    _long = long
except NameError:
    _long = int
def measure_str_key(inp, include_config=True):
    """Build a unique string key identifying a MeasureInput.

    Parameters
    ----------
    inp: autotvm.measure.MeasureInput
        input for the measure
    include_config: bool, optional
        whether includes config in the str key

    Returns
    -------
    key: str
        The str representation of key
    """
    parts = [str(inp.target), inp.task.name, str(inp.task.args), str(inp.task.kwargs)]
    parts.append(str(inp.config) if include_config else "")
    return "".join(parts)
def encode(inp, result, protocol="json"):
    """encode (MeasureInput, MeasureResult) pair to a string

    Parameters
    ----------
    inp: autotvm.measure.MeasureInput
    result: autotvm.measure.MeasureResult
        pair of input/result
    protocol: str
        log protocol, json or pickle

    Returns
    -------
    row: str
        a row in the logger file
    """

    if protocol == "json":
        json_dict = {
            "input": (str(inp.target), inp.task.name, inp.task.args, inp.task.kwargs),
            "config": inp.config.to_json_dict(),
            "result": (
                # Failed measurements are logged with a huge sentinel cost so
                # they are never picked as "best" when replayed.
                result.costs if result.error_no == 0 else (1e9,),
                result.error_no,
                result.all_cost,
                result.timestamp,
            ),
            "version": AUTOTVM_LOG_VERSION,
            "tvm_version": __version__,
        }
        return json.dumps(json_dict)
    if protocol == "pickle":
        # Tab-separated fields; pickled parts are base64-encoded so the whole
        # row stays a single printable line.
        row = (
            str(inp.target),
            str(
                base64.b64encode(
                    pickle.dumps([inp.task.name, inp.task.args, inp.task.kwargs])
                ).decode()
            ),
            str(base64.b64encode(pickle.dumps(inp.config)).decode()),
            str(base64.b64encode(pickle.dumps(tuple(result))).decode()),
            str(AUTOTVM_LOG_VERSION),
            str(__version__),
        )
        return "\t".join(row)

    raise RuntimeError("Invalid log protocol: " + protocol)
def decode(row, protocol="json"):
    """Decode encoded record string to python object

    Parameters
    ----------
    row : str
        a row in the logger file

    protocol : str
        log protocol, json or pickle

    Returns
    -------
    ret : tuple(autotvm.measure.MeasureInput, autotvm.measure.MeasureResult), or None
        The tuple of input and result, or None if input uses old version log format.
    """
    # pylint: disable=unused-variable
    global _old_version_warning

    if protocol == "json":
        row = json.loads(row)
        # Version-0.1 records carry a top-level "v" field; warn once, skip them.
        if "v" in row and row["v"] == 0.1:
            if _old_version_warning:
                logger.warning("AutoTVM log version 0.1 is no longer supported.")
                _old_version_warning = False
            return None
        tgt, task_name, task_args, task_kwargs = row["input"]
        tgt = str(tgt)
        # Rewrite the deprecated "-target" flag before parsing the target string.
        if "-target" in tgt:
            logger.warning('"-target" is deprecated, use "-mtriple" instead.')
            tgt = tgt.replace("-target", "-mtriple")
        tgt = Target(str(tgt))

        def clean_json_to_python(x):
            """1. Convert all list in x to tuple (hashable)
            2. Convert unicode to str for python2
            """
            if isinstance(x, list):
                return tuple([clean_json_to_python(a) for a in x])
            if isinstance(x, _unicode):
                return str(x)
            if isinstance(x, (_long, int)):
                return int(x)
            return x

        tsk = task.Task(clean_json_to_python(task_name), clean_json_to_python(task_args))
        config = ConfigEntity.from_json_dict(row["config"])
        inp = MeasureInput(tgt, tsk, config)
        result = MeasureResult(*[tuple(x) if isinstance(x, list) else x for x in row["result"]])
        # Cache the mean cost on the config so downstream ranking avoids recompute.
        config.cost = np.mean(result.costs)

        return inp, result

    if protocol == "pickle":
        items = row.split("\t")
        # Old pickle rows had 4 fields (no version/tvm_version); not supported.
        if len(items) == 4:
            if _old_version_warning:
                logger.warning("AutoTVM log version 0.1 is no longer supported.")
                _old_version_warning = False
            return None
        tgt = Target(items[0])
        task_tuple = pickle.loads(base64.b64decode(items[1].encode()))
        config = pickle.loads(base64.b64decode(items[2].encode()))
        result = MeasureResult(*pickle.loads(base64.b64decode(items[3].encode())))
        config.cost = np.mean(result.costs)

        tsk = task.Task(task_tuple[0], task_tuple[1])
        return MeasureInput(tgt, tsk, config), result

    raise RuntimeError("Invalid log protocol: " + protocol)
def load_from_buffer(file: TextIOBase):
    """Generator: load records from buffer.
    This is a generator that yields the records.

    Skips empty lines, comment lines (leading '#'), and rows in an
    unsupported old log format (decode returns None for those).

    Parameters
    ----------
    file: io.TextIOBase

    Yields
    ------
    input: autotvm.measure.MeasureInput
    result: autotvm.measure.MeasureResult
    """
    for line in file:
        if not line or line.startswith("#"):
            continue
        record = decode(line)
        if record is not None:
            yield record
def load_from_file(filepath: Union[str, bytes, os.PathLike]):
    """Generator: load records from path.
    This is a generator that yields the records.

    Parameters
    ----------
    filepath: str, bytes, or os.PathLike
        Path of the log file to read.

    Yields
    ------
    input: autotvm.measure.MeasureInput
    result: autotvm.measure.MeasureResult
    """
    with open(filepath) as f:
        # Delegate to load_from_buffer so the line-filtering/decoding logic
        # lives in exactly one place instead of being duplicated here.
        yield from load_from_buffer(f)
def split_workload(in_file, clean=True):
    """Split a log file into separate files, each of which contains only a single workload

    This function can also delete duplicated records in log file.
    Output files are written next to the input as ``<in_file>.NNN.wkl``.

    Parameters
    ----------
    in_file: str
        input filename
    clean: bool
        whether delete duplicated items
    """
    tic = time.time()
    # Read all rows up front; the context manager closes the handle
    # (the original leaked an open file object here).
    with open(in_file) as fin:
        lines = fin.readlines()

    logger.info("start converting...")
    pool = popen_pool.PopenPoolExecutor()
    lines = [rec for rec in pool.map(decode, lines) if rec is not None]
    logger.info("map done %.2f", time.time() - tic)

    # Group records by workload key (task identity, config excluded).
    wkl_dict = OrderedDict()
    for inp, res in lines:
        wkl = measure_str_key(inp, False)
        wkl_dict.setdefault(wkl, []).append([inp, res])

    if clean:
        for i, (k, v) in enumerate(wkl_dict.items()):
            # clean duplicated items: keep the first record per full key
            # (config included), drop the rest
            added = set()
            cleaned = []
            for inp, res in v:
                str_key = measure_str_key(inp)
                if str_key in added:
                    continue
                added.add(str_key)
                cleaned.append([inp, res])

            # write to file
            logger.info("Key: %s\tValid: %d\tDup: %d\t", k, len(cleaned), len(v) - len(cleaned))
            # BUGFIX: derive the output name from the `in_file` parameter; the
            # original used the CLI-only global `args.i`, which raised
            # NameError whenever this function was called as a library API.
            with open(in_file + f".{i:03d}.wkl", "w") as fout:
                for inp, res in cleaned:
                    fout.write(encode(inp, res) + "\n")
    else:
        for i, (k, v) in enumerate(wkl_dict.items()):
            logger.info("Key: %s\tNum: %d", k, len(v))
            with open(in_file + f".{i:03d}.wkl", "w") as fout:
                for inp, res in v:
                    fout.write(encode(inp, res) + "\n")
def pick_best(in_file, out_file):
    """
    Pick the best entries from a file and store them to another file.
    This function distills the useful log entries from a large log file.
    If out_file already exists, the best entries from both
    in_file and out_file will be saved.

    Parameters
    ----------
    in_file: str
        The filename of input
    out_file: str or file
        The filename of output
    """
    context = load_from_file(in_file)
    if os.path.isfile(out_file):
        out_context = load_from_file(out_file)
        context = itertools.chain(context, out_context)
    # tee the stream: one pass feeds ApplyHistoryBest to find the best keys,
    # the clone is replayed to copy the matching records verbatim.
    context, context_clone = itertools.tee(context)
    best_context = ApplyHistoryBest(context)
    best_set = set()

    for v in best_context.best_by_model.values():
        best_set.add(measure_str_key(v[0]))

    for v in best_context.best_by_targetkey.values():
        best_set.add(measure_str_key(v[0]))

    logger.info("Extract %d best records from the %s", len(best_set), in_file)

    fout = open(out_file, "w") if isinstance(out_file, str) else out_file
    try:
        for inp, res in context_clone:
            if measure_str_key(inp) in best_set:
                fout.write(encode(inp, res) + "\n")
                # Each key is written at most once.
                best_set.remove(measure_str_key(inp))
    finally:
        # BUGFIX: close the handle we opened ourselves; the original leaked it.
        # A caller-supplied file object remains the caller's responsibility.
        if isinstance(out_file, str):
            fout.close()
"""
Usage:
This record executable module has three modes.
* Print log file in readable format
e.g. python -m tvm.autotvm.record --mode read --i collect_conv.log --begin 0 --end 5 --ir --code
* Extract history best from a large log file
e.g. python -m tvm.autotvm.record --mode pick --i collect.log
* Split a log file into separate files, each of which contains only a single wkl
e.g. python -m tvm.autotvm.record --mode split --i collect.log
"""
if __name__ == "__main__":
    # CLI entry point; see the "Usage" string above for the three modes.
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", choices=["read", "pick", "split"], default="read")
    parser.add_argument("--i", type=str, help="input file")
    parser.add_argument("--o", type=str, default=None, help="output file")
    parser.add_argument("--begin", type=int, default=0)
    parser.add_argument("--end", type=int, default=5)
    parser.add_argument("--ir", action="store_true")
    parser.add_argument("--code", action="store_true")
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)

    if args.mode == "pick":
        # Distill the best records; default output is "<input>.best.log".
        args.o = args.o or args.i + ".best.log"
        pick_best(args.i, args.o)
    elif args.mode == "read":
        # Pretty-print records [begin, end); optionally dump lowered IR / code.
        for i, (inp, result) in enumerate(load_from_file(args.i)):
            if args.begin <= i < args.end:
                with inp.target:
                    s, arg_bufs = inp.task.instantiate(inp.config)

                print("")
                print(inp.target, inp.task, inp.config)
                print(result)

                if args.ir:
                    with inp.target:
                        print(lower(s, arg_bufs, simple_mode=True))

                if args.code:
                    with inp.target:
                        func = build(s, arg_bufs)
                        print(func.imported_modules[0].get_source())
    elif args.mode == "split":
        split_workload(args.i)
| 12,207 | 31.211082 | 107 | py |
tvm | tvm-main/python/tvm/autotvm/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Utilities"""
import logging
import time
import numpy as np
import tvm.arith
from tvm.tir import expr
from tvm.contrib.popen_pool import PopenPoolExecutor
logger = logging.getLogger("autotvm")
class EmptyContext(object):
    """A no-op context manager: does nothing on enter or exit."""

    def __enter__(self):
        return None

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Returning None lets any exception propagate unchanged.
        return None
def get_rank(values):
    """get rank of items

    Parameters
    ----------
    values: Array

    Returns
    -------
    ranks: Array of int
        the rank of this item in the input (the largest value ranks first)
    """
    # Sort descending (argsort of the negated values), then invert the
    # permutation so ranks[i] is the position of values[i] in that order.
    order = np.argsort(-values)
    ranks = np.empty_like(order)
    ranks[order] = np.arange(len(order))
    return ranks
def pool_map(func, args, batch_size, verbose=False, pool=None):
    """A wrapper of multiprocessing.pool.Pool.map to support small-batch mapping
    for large argument list. This can reduce memory usage

    Parameters
    ----------
    func: Func(arg) -> np.ndarray
        mapping function
    args: List
        list of arguments
    batch_size: int
        batch size in mapping
    verbose: bool, optional
        whether print progress
    pool: multiprocessing.Pool, optional
        pool objection

    Returns
    -------
    converted numpy array
    """

    ret = None
    tic = time.time()
    local_pool = pool or PopenPoolExecutor()
    if verbose:
        logger.info("mapping begin")
    for i in range(0, len(args), batch_size):
        if verbose:
            logger.info("mapping %d/%d elapsed %.2f", i, len(args), time.time() - tic)
        # Map one batch at a time and concatenate incrementally, so only one
        # batch of intermediate results is held at once.
        tmp = np.array(local_pool.map(func, args[i : i + batch_size]))
        ret = tmp if ret is None else np.concatenate((ret, tmp))
    if verbose:
        logger.info("mapping done")
    if not pool:
        # Only shut down a pool we created ourselves.
        # NOTE(review): assumes PopenPoolExecutor exposes close() — confirm.
        local_pool.close()
    return ret
def get_func_name(func):
    """Get name of a function

    Parameters
    ----------
    func: Function
        The function

    Returns
    -------
    name: str
        The name
    """
    # Prefer an explicit `func_name` attribute; otherwise use __name__.
    return getattr(func, "func_name", func.__name__)
def get_const_int(exp):
    """Verifies expr is integer and get the constant value.

    Parameters
    ----------
    exp : tvm.Expr or int
        The input expression.

    Returns
    -------
    out_value : int
        The output.
    """
    # Plain Python ints pass straight through.
    if isinstance(exp, int):
        return exp
    # Try to fold a symbolic expression down to a constant immediate.
    if not isinstance(exp, expr.IntImm):
        exp = tvm.arith.Analyzer().simplify(exp)
    if not isinstance(exp, expr.IntImm):
        raise ValueError("Expect value to be constant int")
    return exp.value
def get_const_tuple(in_tuple):
    """Verifies input tuple is IntImm or Var, returns tuple of int or Var.

    Parameters
    ----------
    in_tuple : tuple of Expr
        The input.

    Returns
    -------
    out_tuple : tuple of int
        The output.
    """
    ret = []
    for elem in in_tuple:
        if isinstance(elem, expr.Var):
            # Symbolic vars are passed through untouched.
            ret.append(elem)
        elif not isinstance(elem, (expr.IntImm, int)):
            ana = tvm.arith.Analyzer()
            elem = ana.simplify(elem)
            if not isinstance(elem, expr.IntImm):
                ret.append(elem)
            else:
                # BUGFIX: elements that simplified to a constant previously
                # fell through without being appended, silently shortening
                # the returned tuple. Convert and keep them.
                ret.append(get_const_int(elem))
        else:
            ret.append(get_const_int(elem))
    return tuple(ret)
# Prefix characters in ascending power-of-1000 order, starting at yocto
# (1e-24); the space character denotes "no prefix" (scale 1).
SI_PREFIXES = "yzafpn\xb5m kMGTPEZY"
YOCTO_EXP10 = -24


def format_si_prefix(x, si_prefix):
    """Express ``x`` in the unit named by the SI prefix character ``si_prefix``.

    E.g. ``format_si_prefix(2000, "k") == 2.0``. Raises ValueError if the
    prefix character is unknown.
    """
    exponent = SI_PREFIXES.index(si_prefix) * 3 + YOCTO_EXP10
    return float(x) / 10**exponent
| 4,283 | 24.2 | 86 | py |
tvm | tvm-main/python/tvm/autotvm/tophub.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
"""
TopHub: Tensor Operator Hub
To get the best performance, we typically need auto-tuning for the specific devices.
TVM releases pre-tuned parameters in TopHub for some common networks and hardware targets.
TVM will download these parameters for you when you call relay.build.
"""
import logging
from os import getenv
import sys
from pathlib import Path
from tvm.ir.container import Array
from .task import ApplyHistoryBest
from ..target import Target
from ..contrib.download import download
from .record import load_from_file
from .utils import EmptyContext
# environment variable to read TopHub location
AUTOTVM_TOPHUB_LOC_VAR = "TOPHUB_LOCATION"
# default location of TopHub
AUTOTVM_TOPHUB_DEFAULT_LOC = "https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub"
# value of AUTOTVM_TOPHUB_LOC_VAR to specify to not read from TopHub
AUTOTVM_TOPHUB_NONE_LOC = "NONE"
# root path to store TopHub files
AUTOTVM_TOPHUB_ROOT_PATH = Path(Path("~").expanduser(), ".tvm", "tophub")
# the version of each package
PACKAGE_VERSION = {
"arm_cpu": "v0.08",
"llvm": "v0.04",
"cuda": "v0.10",
"rocm": "v0.05",
"opencl": "v0.04",
"mali": "v0.06",
"intel_graphics": "v0.02",
"vta": "v0.10",
"amd_apu": "v0.01",
"adreno": "v0.01",
}
logger = logging.getLogger("autotvm")
def _alias(name):
"""convert alias for some packages"""
table = {
"vtacpu": "vta",
"webgpu": "opencl",
"vulkan": "opencl",
"nvptx": "cuda",
"amd_apu": "amd_apu",
"adreno": "adreno",
}
return table.get(name, name)
def _get_tophub_location():
    """Return the TopHub download location, honoring the env-var override.

    An explicitly-set empty string is respected (only an unset variable
    falls back to the default location).
    """
    override = getenv(AUTOTVM_TOPHUB_LOC_VAR, None)
    if override is None:
        return AUTOTVM_TOPHUB_DEFAULT_LOC
    return override
def context(target, extra_files=None):
    """Return the dispatch context with pre-tuned parameters.
    This function will load the corresponding *.log files in AUTOTVM_TOPHUB_ROOT_PATH.
    If cannot find them, it will download them from TopHub github repo.
    Users can also add their own files in argument `extra_files`.

    Parameters
    ----------
    target: Target or List of Target
        The compilation targets
    extra_files: list of str, optional
        Extra log files to load
    """
    tophub_location = _get_tophub_location()
    if tophub_location == AUTOTVM_TOPHUB_NONE_LOC:
        # TopHub explicitly disabled via env var: use a no-op context.
        return EmptyContext()

    best_context = ApplyHistoryBest([])

    targets = target if isinstance(target, (Array, list, tuple)) else [target]

    for tgt in targets:
        if isinstance(tgt, str):
            tgt = Target(tgt)

        # The TOPHUB file names rely on Target's device or kind. Both these types of
        # information exist in Target.keys, but rules of filling this filed is not explicitly
        # defined, we are afraid to rely only on Target.keys. At the same time Target.device
        # is filled only if device was pointed explicitly in target string, that is not mandatory
        # and in some cases we need to get information about device from Target.keys
        # In priority order we verify:
        # 1) Target.device
        # 2) Target.keys
        # 3) Target.kind
        possible_names = []
        device = tgt.attrs.get("device", "")
        if device != "":
            possible_names.append(_alias(device))
        possible_names.extend(tgt.keys)
        possible_names.append(tgt.kind.name)

        all_packages = list(PACKAGE_VERSION.keys())
        for name in possible_names:
            name = _alias(name)
            if name in all_packages:
                # Skip packages that cannot be found locally or downloaded.
                if not check_backend(tophub_location, name):
                    continue

                filename = f"{name}_{PACKAGE_VERSION[name]}.log"
                best_context.load(Path(AUTOTVM_TOPHUB_ROOT_PATH, filename))
                break  # only load one file to avoid some fallback template mismatch problem

    if extra_files:
        for filename in extra_files:
            best_context.load(filename)
    return best_context
def check_backend(tophub_location, backend):
    """Check whether have pre-tuned parameters of the certain target.
    If not, will download it.

    Parameters
    ----------
    tophub_location: str
        The location to download TopHub parameters from
    backend: str
        The name of backend.

    Returns
    ----------
    success: bool
        Whether the check is successful.
    """
    backend = _alias(backend)
    assert backend in PACKAGE_VERSION, f'Cannot find backend "{backend}" in TopHub'

    version = PACKAGE_VERSION[backend]
    package_name = f"{backend}_{version}.log"
    if Path(AUTOTVM_TOPHUB_ROOT_PATH, package_name).is_file():
        return True

    # The original carried a dead Python-2 fallback (`import urllib2`);
    # the exception class actually lives in urllib.error on Python 3.
    # pylint: disable=import-outside-toplevel
    from urllib.error import URLError

    try:
        download_package(tophub_location, package_name)
        return True
    except URLError as e:
        # Best-effort download: report and let callers fall back gracefully.
        logging.warning("Failed to download tophub package for %s: %s", backend, e)
        return False
def download_package(tophub_location, package_name):
    """Download pre-tuned parameters of operators for a backend

    Parameters
    ----------
    tophub_location: str
        The location to download TopHub parameters from

    package_name: str
        The name of package
    """
    # Make sure the local TopHub cache directory exists.
    root = Path(AUTOTVM_TOPHUB_ROOT_PATH)
    root.mkdir(parents=True, exist_ok=True)

    url = f"{tophub_location}/{package_name}"
    logger.info("Download pre-tuned parameters package from %s", url)
    download(url, Path(root, package_name), overwrite=True)
# global cache for load_reference_log
REFERENCE_LOG_CACHE = {}
def load_reference_log(backend, model, workload_name):
    """Load reference log from TopHub to support fallback in template.
    Template will use these reference logs to choose fallback config.

    Parameters
    ----------
    backend: str
        The backend name
    model: str
        The name of the device model
    workload_name: str
        The name of the workload. (The first item in the workload tuple)
    """

    backend = _alias(backend)
    if backend not in PACKAGE_VERSION:
        return []
    version = PACKAGE_VERSION[backend]
    package_name = f"{backend}_{version}.log"
    filename = Path(AUTOTVM_TOPHUB_ROOT_PATH, package_name)

    global REFERENCE_LOG_CACHE
    key = (backend, model, workload_name)

    if key not in REFERENCE_LOG_CACHE:
        tmp = []
        # If TOPHUB_LOCATION is not AUTOTVM_TOPHUB_NONE_LOC,
        # Download the config file from tophub if not exists.
        if not Path(filename).exists():
            tophub_location = _get_tophub_location()
            if tophub_location != AUTOTVM_TOPHUB_NONE_LOC:
                download_package(tophub_location, package_name)
        if Path(filename).is_file():  # in case download failed
            # First pass: count records per device model and see whether the
            # requested model appears at all.
            find = False
            inp = None
            counts = {}
            for inp, res in load_from_file(filename):
                counts[inp.target.model] = counts.get(inp.target.model, 0) + 1
                if model == inp.target.model:
                    find = True
                    break
            # if device model is not find, use the device model with the most tuned workloads
            if not find and counts:
                model = max(counts.items(), key=lambda k: k[1])[0]

            # Second pass: collect the records for the chosen model/workload.
            for inp, res in load_from_file(filename):
                if model == inp.target.model and inp.task.workload[0] == workload_name:
                    tmp.append((inp, res))
        REFERENCE_LOG_CACHE[key] = tmp

    return REFERENCE_LOG_CACHE[key]
| 8,399 | 32.333333 | 97 | py |
tvm | tvm-main/python/tvm/autotvm/database.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=consider-using-enumerate, invalid-name, use-list-literal
"""
Database of MeasureInput/MeasureResult pair.
This can be used for replaying measurement.
"""
import os
from .record import encode, decode, measure_str_key
class Database(object):
    """Abstract interface for a store of (MeasureInput, MeasureResult) records.

    Concrete subclasses supply the storage backend; this base class only
    fixes the API used for measurement replay.
    """

    def load(self, inp, get_all=False):
        """Look up the result(s) previously recorded for a measurement input.

        Parameters
        ----------
        inp: MeasureInput
            to be translated into key for RedisDB
        get_all: bool, optional
            Whether the latest result (or all matching results) should be returned

        Returns
        -------
        rec: MeasureResult if previously saved, otherwise None
        """
        raise NotImplementedError()

    def save(self, inp, res, extend=False):
        """Record a result under the key derived from a measurement input.

        Parameters
        ----------
        inp: MeasureInput
            to be translated into key for RedisDB
        res: MeasureResult
            to associate with key
        extend:
            Whether to extend existing MeasureResults if they exist
        """
        raise NotImplementedError()
def filter_inputs(db, measure_inputs, retry=False):
    """
    Filter a measure_inputs batch based on saved db results

    Parameters
    ----------
    db: Database
        database object
    measure_inputs: Array of MeasureInput
        measure_inputs as expected in measure_batch
    retry: bool
        whether to retry if the saved result is a failure

    Returns
    -------
    partial_results: Array of MeasureResult
        a full list of result, where None denotes no corresponding saved result
    unsaved: Array of MeasureInput
        a list that only contains unsaved inputs
    """
    partial_results = []
    unsaved = []
    for inp in measure_inputs:
        saved = db.load(inp)
        # A missing record — or a failed one when `retry` is set — must be
        # (re)measured: queue it and leave a None placeholder in the results.
        needs_measure = saved is None or (retry and saved.error_no != 0)
        if needs_measure:
            unsaved.append(inp)
            partial_results.append(None)
        else:
            partial_results.append(saved)
    return partial_results, unsaved
class RedisDatabase(Database):
    """
    Redis version of record database
    """

    # Redis logical DB indices used by different deployments.
    REDIS_PROD = 15
    REDIS_LOCA = 14
    REDIS_TEST = 13  # for unit test
    REDIS_NIGHT_TEMP = 12  # for nightly report (will be flushed after every workload)

    # Separator between multiple encoded records stored under one key.
    MAGIC_SPLIT = "$"

    def __init__(self, db_index=REDIS_PROD):
        # pylint: disable=import-outside-toplevel
        import redis

        # The unit-test DB always talks to localhost; otherwise the host
        # comes from the TVM_FLEET_HOST environment variable.
        if db_index == RedisDatabase.REDIS_TEST:
            host = "127.0.0.1"
        else:
            host = os.environ.get("TVM_FLEET_HOST")
        self.db = redis.StrictRedis(host=host, port=6379, db=db_index)
        self.db_index = db_index

    def set(self, key, value):
        # Raw write-through to redis.
        self.db.set(key, value)

    def get(self, key):
        # redis returns bytes; normalize to str (None stays None).
        current = self.db.get(key)
        return current.decode() if isinstance(current, bytes) else current

    def load(self, inp, get_all=False):
        # See Database.load. Returns the newest result (by timestamp),
        # all results when get_all=True, or None when the key is absent.
        current = self.get(measure_str_key(inp))
        if current is not None:
            records = [decode(x) for x in current.split(RedisDatabase.MAGIC_SPLIT)]
            results = [rec[1] for rec in records if rec is not None]
            if get_all:
                return results
            return max(results, key=lambda result: result.timestamp)
        return current

    def save(self, inp, res, extend=False):
        # See Database.save. With extend=True the new record is appended to
        # any existing ones; otherwise the key is overwritten.
        current = self.get(measure_str_key(inp))
        if not extend or current is None:
            self.set(measure_str_key(inp), RedisDatabase.MAGIC_SPLIT.join([encode(inp, res)]))
        else:
            current = current.split(RedisDatabase.MAGIC_SPLIT)
            self.set(
                measure_str_key(inp), RedisDatabase.MAGIC_SPLIT.join(current + [encode(inp, res)])
            )

    def filter(self, func):
        """
        Dump all of the records that match the given rule

        Parameters
        ----------
        func: callable
            The signature of the function is (MeasureInput, [MeasureResult]) -> bool

        Returns
        -------
        list of records in tuple (MeasureInput, MeasureResult) matching the rule

        Examples
        --------
        get records for a target
        >>> db.filter(lambda inp, results: "cuda" in inp.target.keys)
        get records with errors
        >>> db.filter(lambda inp, results: any(r.error_no != 0 for r in results))
        """
        matched_records = list()
        # may consider filtering in iterator in the future
        for key in self.db.keys():
            current = self.get(key)
            try:
                records = [decode(x) for x in current.split(RedisDatabase.MAGIC_SPLIT)]
                records = [rec for rec in records if rec is not None]
            except TypeError:  # got a badly formatted/old format record
                continue

            if not records:
                continue
            inps, results = zip(*records)
            # All records under one key share the same input; keep the
            # newest result for it.
            inp = inps[0]
            if not func(inp, results):
                continue
            result = max(results, key=lambda res: res.timestamp)
            matched_records.append((inp, result))
        return matched_records

    def flush(self):
        # Drops EVERY key in the selected redis logical DB.
        self.db.flushdb()
class DummyDatabase(RedisDatabase):
    """
    Plain-dict implementation of the record database, intended for testing.
    Reuses RedisDatabase's load/save/filter logic but swaps the backing
    store for an in-process Python dictionary.
    """

    def __init__(self):
        # pylint: disable=super-init-not-called
        # Deliberately skip RedisDatabase.__init__: no redis connection here.
        self.db = {}

    def set(self, key, value):
        self.db[key] = value

    def flush(self):
        # Rebind rather than clear(), matching the original semantics
        # (outstanding references to the old dict are left untouched).
        self.db = {}
| 6,389 | 30.170732 | 98 | py |
tvm | tvm-main/python/tvm/autotvm/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The auto-tuning module of tvm
This module includes:
* Tuning space definition API
* Efficient auto-tuners
* Tuning result and database support
* Distributed measurement to scale up tuning
"""
from . import database
from . import feature
from . import measure
from . import record
from . import task
from . import tuner
from . import utils
from . import env
from . import tophub
# some shortcuts
from .measure import (
measure_option,
MeasureInput,
MeasureResult,
MeasureErrorNo,
LocalBuilder,
LocalRunner,
RPCRunner,
)
from .tuner import callback
from .task import (
get_config,
create,
ConfigSpace,
ConfigEntity,
register_topi_compute,
register_topi_schedule,
template,
DispatchContext,
FallbackContext,
ApplyHistoryBest as apply_history_best,
ApplyGraphBest as apply_graph_best,
ApplyFixedConfig as apply_fixed_config,
)
from .env import GLOBAL_SCOPE
| 1,724 | 25.136364 | 62 | py |
tvm | tvm-main/python/tvm/autotvm/env.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=global-variable-not-assigned
"""Global configuration/variable scope for autotvm"""
class AutotvmGlobalScope(object):
    """Process-wide autotvm state (tuning-mode and verbosity flags).

    Instantiating the class installs the new instance as the active scope in
    ``AutotvmGlobalScope.current``; the previously-active scope is kept in
    ``_old``.
    """

    current = None

    def __init__(self):
        # Remember the previously-active scope, then install self as current.
        self._old = AutotvmGlobalScope.current
        AutotvmGlobalScope.current = self

        self.silent = False
        self.in_tuning = False

    def deep_copy(self, global_scope):
        """Deep copy from another instance of AutotvmGlobalScope."""
        self._old = AutotvmGlobalScope.current
        self.in_tuning = global_scope.in_tuning
        self.silent = global_scope.silent
GLOBAL_SCOPE = AutotvmGlobalScope()
def reset_global_scope(global_scope):
    """Reset global autotvm state. This is needed to initialize PopenPool workers."""
    # Copy the parent's flags into the worker's existing GLOBAL_SCOPE object
    # (so modules holding a reference to it see the update), then make the
    # parent's scope object the active one.
    global GLOBAL_SCOPE
    GLOBAL_SCOPE.deep_copy(global_scope)
    AutotvmGlobalScope.current = global_scope
| 1,674 | 33.183673 | 85 | py |
tvm | tvm-main/python/tvm/autotvm/feature.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,
"""Extract feature of iter vars
There are two types of feature
1) Itervar feature
This feature is extracted based on loop variables.
Different loop structures will result in different shapes of feature
2) Curve sample feature (relation feature)
This feature is extracted by sampling relation curve.
This feature is invariant of loop structure.
"""
import struct
import numpy as np
import tvm._ffi
from tvm.target import Target
from tvm.driver import build_module
def ana_lower(sch, args, binds=None, simple_mode=True):
    """Lower a schedule for analysis, keeping every axis visible in the IR.

    Trivial (extent-1) loops are preserved and no vectorize/unroll/virtual-thread
    injection happens, so feature extraction sees the full loop nest.
    """
    sch = sch.normalize()
    # Phase 0: lower to a TIR module while keeping trivial loops alive.
    pass_ctx = tvm.transform.PassContext(config={"tir.debug_keep_trivial_loop": True})
    with pass_ctx:
        mod = build_module.schedule_to_module(sch, args, binds=binds)
    mod = tvm.tir.transform.StorageFlatten(64)(mod._move())
    mod = tvm.tir.transform.Simplify()(mod._move())
    assert simple_mode
    return mod["main"].body
# Resolve the C++ feature-extraction entry points once at import time.
# If the TVM runtime was built without them, replace all three with a stub
# that raises on first use instead of failing at import.
try:
    _get_buffer_curve_sample_flatten = tvm._ffi.get_global_func(
        "autotvm.feature.GetCurveSampleFeatureFlatten"
    )
    _get_itervar_feature = tvm._ffi.get_global_func("autotvm.feature.GetItervarFeature")
    _get_itervar_feature_flatten = tvm._ffi.get_global_func(
        "autotvm.feature.GetItervarFeatureFlatten"
    )
except ValueError as e:

    def raise_error(*args, **kwargs):  # pylint: disable=unused-argument
        # Deferred failure: only raised when a feature function is actually called.
        raise RuntimeError("Cannot load autotvm c++ API")

    _get_buffer_curve_sample_flatten = (
        _get_itervar_feature
    ) = _get_itervar_feature_flatten = raise_error
def get_itervar_feature(sch, args, take_log=False):
    """get features of iter vars

    Parameters
    ----------
    sch: tvm.te.schedule.Schedule
    args: Array of te.tensor.Tensor
        the buffer args for lower
    take_log: bool
        whether take log of numerical statics

    Returns
    -------
    features of every axis in the IR, see doc/features.md for detail
    """
    stmt = ana_lower(sch, args, simple_mode=True)
    raw_rows = _get_itervar_feature(stmt, take_log)
    # Unbox the TVM node values into plain Python lists.
    converted = []
    for row in raw_rows:
        axis = [[row[0][0].value, row[0][1]]]
        axis.extend([entry[0].value] + [v.value for v in entry[1:]] for entry in row[1:])
        converted.append(axis)
    return converted
def flatten_itervar_feature(fea):
    """flatten features into one-dimensional feature vectors

    Parameters
    ----------
    fea: list
        return value of get_itervar_feature

    Returns
    -------
    flatten_feature: np.ndarray
        one-dimensional vector
    """
    # Drop the per-axis header (axis[0]) and each pair's key (pair[0]),
    # then stitch the numeric tails together.
    chunks = [pair[1:] for axis in fea for pair in axis[1:]]
    return np.concatenate(chunks)
def get_itervar_feature_flatten(sch, args, take_log=True):
    """get flatten features of iter vars

    this is equivalent to get_itervar_feature + flatten_itervar_feature, but much faster.

    Parameters
    ----------
    sch: tvm.te.schedule.Schedule
    args: Array of te.tensor.Tensor
        the buffer args for lower
    take_log: bool
        whether take log of numerical statics

    Returns
    -------
    flatten_feature: np.ndarray
        one-dimensional vector
    """
    stmt = ana_lower(sch, args, simple_mode=True)
    packed = _get_itervar_feature_flatten(stmt, take_log)
    # The C++ side returns raw bytes: 4 bytes per float32 value.
    return struct.unpack(f"{len(packed) // 4}f", packed)
def get_flatten_name(fea):
    """Get names of feature after flatten.

    Parameters
    ----------
    fea: list or str
        return value of get_itervar_feature or a line of logfile

    Returns
    -------
    feature_names: Array of str
    """
    feature_name = {
        "_attr_": ["length", "nest_level", "topdown", "bottomup"] + [f"ann_{i}" for i in range(20)],
        "_arith_": ["add", "mul", "div"],
        "buf_touch": ["stride", "mod", "count", "reuse", "T_count", "T_reuse"],
    }
    if isinstance(fea, str):
        # A raw log line: decode it, re-instantiate the schedule, and extract features.
        # pylint: disable=import-outside-toplevel
        from .record import decode

        decoded = decode(fea)
        if decoded is None:
            raise ValueError("Unsupported AutoTVM log format")
        inp, _ = decoded
        with Target(inp.target):
            sch, sch_args = inp.template.instantiate(inp.config)
        fea = get_itervar_feature(sch, sch_args)

    names = []
    for ct, row in enumerate(fea):
        var_name = str(row[0][1])
        for pair in row[1:]:
            key = pair[0]
            # Unknown keys are buffer-touch entries.
            name_list = feature_name.get(key, feature_name["buf_touch"])
            for i, _ in enumerate(pair[1:]):
                names.append(f"f{ct}.{var_name}.{key}.{name_list[i]}")
    return names
def get_buffer_curve_sample_flatten(sch, args, sample_n=30):
    """
    Get flatten curve sample feature (relation feature)

    Parameters
    ----------
    sch: tvm.te.schedule.Schedule
    args: Array of te.tensor.Tensor
        the buffer args for lower
    sample_n: int
        number of sample points along one dimension

    Returns
    -------
    flatten_feature: np.ndarray
        one-dimensional vector
    """
    stmt = ana_lower(sch, args, simple_mode=True)
    packed = _get_buffer_curve_sample_flatten(stmt, sample_n, False)
    # Raw float32 bytes from the C++ side: 4 bytes per value.
    return struct.unpack(f"{len(packed) // 4}f", packed)
| 6,390 | 28.725581 | 100 | py |
tvm | tvm-main/python/tvm/autotvm/testing/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing utilities for autotvm"""
| 821 | 44.666667 | 62 | py |
tvm | tvm-main/python/tvm/autotvm/testing/tune_relay.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import json
import os
import warnings
from distutils.util import strtobool
import tvm
from tvm import autotvm
from tvm import meta_schedule as ms
from tvm import relay
from tvm.autotvm.graph_tuner import DPTuner
from tvm.autotvm.tuner import XGBTuner
from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
from tvm.meta_schedule.testing.relay_workload import get_network
from tvm.meta_schedule.testing.tune_utils import create_timer, generate_input_data
from tvm.support import describe
def _parse_args():
    """Parse and post-process command-line arguments for the tuning script.

    Returns the argparse namespace with `target` converted to a tvm Target,
    `input_shape` parsed from JSON, and an `rpc_config` added.
    """
    args = argparse.ArgumentParser()
    args.add_argument(
        "--workload",
        type=str,
        required=True,
        help="The name of the workload to tune. Supported models: "
        "https://github.com/apache/tvm/blob/main/python/tvm/meta_schedule/testing/relay_workload.py#L303-L322", # pylint: disable=line-too-long
    )
    args.add_argument(
        "--input-shape",
        type=str,
        required=True,
        help="The input shape of the workload. Example: '[1, 3, 224, 224]'",
    )
    args.add_argument(
        "--target",
        type=str,
        required=True,
        help="The target device to tune. "
        "Example: 'aws/cpu/c5.9xlarge', 'nvidia/nvidia-v100', 'nvidia/geforce-rtx-3090'",
    )
    args.add_argument(
        "--num-trials",
        type=int,
        required=True,
        help="The number of trials per kernel. Example: 800",
    )
    args.add_argument(
        "--rpc-host",
        type=str,
        required=True,
        help="The host address of the RPC tracker. Example: 192.168.6.66",
    )
    args.add_argument(
        "--rpc-port", type=int, required=True, help="The port of the RPC tracker. Example: 4445"
    )
    args.add_argument(
        "--rpc-key", type=str, required=True, help="The key of the RPC tracker. Example: '3090ti'"
    )
    args.add_argument(
        "--work-dir",
        type=str,
        required=True,
        help="The working directory to store the tuning logs. Example: '/tmp/tune_relay'",
    )
    args.add_argument(
        "--layout",
        type=str,
        default=None,
        help="The layout of the workload. Example: 'NCHW', 'NHWC'",
    )
    args.add_argument("--cache-dir", type=str, default=None)
    args.add_argument("--number", type=int, default=3)
    args.add_argument("--repeat", type=int, default=1)
    args.add_argument("--min-repeat-ms", type=int, default=100)
    args.add_argument(
        "--cpu-flush",
        type=lambda x: bool(strtobool(x)),
        help="example: True / False",
        required=True,
    )
    args.add_argument(
        "--graph-tuner",
        type=lambda x: bool(strtobool(x)),
        help="example: True / False",
        required=True,
    )
    args.add_argument(
        "--backend", type=str, choices=["graph", "vm"], help="example: graph / vm", required=True
    )
    parsed = args.parse_args()
    # Normalize string arguments into structured objects used by main().
    parsed.target = tvm.target.Target(parsed.target)
    parsed.input_shape = json.loads(parsed.input_shape)
    parsed.rpc_config = ms.runner.RPCConfig(
        tracker_host=parsed.rpc_host,
        tracker_port=parsed.rpc_port,
        tracker_key=parsed.rpc_key,
        session_timeout_sec=600,
    )
    return parsed
# Parsed once at import time; main() reads everything from this namespace.
ARGS = _parse_args()
def main():
    """Extract AutoTVM tasks from the workload, tune them over RPC, optionally
    run the graph tuner, compile with the best records, and benchmark remotely."""
    # Graph tuner and CPU cache flushing only make sense on llvm (CPU) targets.
    if ARGS.target.kind.name != "llvm" and ARGS.graph_tuner:
        raise ValueError("GraphTuner only supports llvm target")
    if ARGS.target.kind.name != "llvm" and ARGS.cpu_flush:
        raise ValueError("cpu_flush only supports llvm target")
    if ARGS.target.kind.name == "llvm" and not ARGS.cpu_flush:
        warnings.warn("cpu_flush is not enabled for llvm target")
    log_file = os.path.join(ARGS.work_dir, f"{ARGS.workload}.json")
    graph_opt_sch_file = os.path.join(ARGS.work_dir, f"{ARGS.workload}_graph_opt.log")
    measure_option = autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.RPCRunner(
            key=ARGS.rpc_key,
            host=ARGS.rpc_host,
            port=ARGS.rpc_port,
            number=ARGS.number,
            repeat=ARGS.repeat,
            min_repeat_ms=ARGS.min_repeat_ms,
            enable_cpu_cache_flush=ARGS.cpu_flush,
        ),
    )
    describe()
    print(f"Workload: {ARGS.workload}")
    mod, params, (input_name, input_shape, input_dtype) = get_network(
        ARGS.workload, ARGS.input_shape, layout=ARGS.layout, cache_dir=ARGS.cache_dir
    )
    input_info = [{"name": input_name, "shape": input_shape, "dtype": input_dtype}]
    input_data = {
        item["name"]: generate_input_data(item["shape"], item["dtype"]) for item in input_info
    }
    for item in input_info:
        print(f"  input_name : {item['name']}")
        print(f"  input_shape: {item['shape']}")
        print(f"  input_dtype: {item['dtype']}")
    with ms.Profiler() as profiler:
        with ms.Profiler.timeit("TaskExtraction"):
            # extract workloads from relay program
            tasks = autotvm.task.extract_from_program(
                mod["main"],
                target=ARGS.target,
                params=params,
                ops=(
                    relay.op.get("nn.conv2d"),
                    relay.op.get("nn.conv3d"),
                    relay.op.get("nn.conv2d_transpose"),
                    relay.op.get("nn.dense"),
                    relay.op.get("nn.batch_matmul"),
                ),
            )
            for i, task in enumerate(tasks):
                print(f"Task {i} {task.name}: {task}")
        with ms.Profiler.timeit("Tuning"):
            if ARGS.num_trials > 0:
                # Tune each extracted task independently with an XGBoost cost model.
                for i, task in enumerate(tasks):
                    prefix = f"[Task {i + 1:2d}/{len(tasks):2d}] "
                    tuner_obj = XGBTuner(task, loss_type="reg")
                    n_trial = min(len(task.config_space), ARGS.num_trials)
                    tuner_obj.tune(
                        n_trial=n_trial,
                        early_stopping=800,
                        measure_option=measure_option,
                        callbacks=[
                            autotvm.callback.progress_bar(n_trial, prefix=prefix),
                            autotvm.callback.log_to_file(log_file),
                        ],
                    )
                if ARGS.graph_tuner:
                    # Graph-level layout tuning on top of the kernel records.
                    executor = DPTuner(
                        graph=mod["main"],
                        input_shapes={input_name: input_shape},
                        records=log_file,
                        target_ops=[relay.op.get("nn.conv2d")],
                        target=ARGS.target,
                    )
                    executor.benchmark_layout_transform(min_exec_num=1000)
                    executor.run()
                    executor.write_opt_sch2record_file(graph_opt_sch_file)
        relay_build = {"graph": relay.build, "vm": relay.vm.compile}[ARGS.backend]
        with ms.Profiler.timeit("PostTuningCompilation"):
            if ARGS.graph_tuner:
                ctx = autotvm.apply_graph_best(graph_opt_sch_file)
            else:
                ctx = autotvm.apply_history_best(log_file)
            with ctx:
                print("compile...")
                with tvm.transform.PassContext(opt_level=3):
                    lib = relay_build(mod, target=ARGS.target, params=params)
    print("Tuning Time:")
    print(profiler.table())
    # Benchmark the tuned module on the remote device through the RPC tracker.
    run_module_via_rpc(
        rpc_config=ARGS.rpc_config,
        lib=lib,
        dev_type=ARGS.target.kind.name,
        args=input_data,
        continuation=create_timer(ARGS.backend),
        backend=ARGS.backend,
    )
# Script entry point.
if __name__ == "__main__":
    main()
| 8,433 | 35.991228 | 144 | py |
tvm | tvm-main/python/tvm/autotvm/tuner/xgboost_cost_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""XGBoost as cost model"""
import logging
import time
from typing import Dict
import numpy as np
from tvm.contrib.popen_pool import PopenPoolExecutor, StatusKind
from .. import feature
from ..utils import get_rank
from .metric import cover_curve, max_curve, recall_curve
from .model_based_tuner import CostModel, FeatureCache
# xgboost is imported lazily inside XGBoostCostModel.__init__ (stored in the
# module-level `xgb`); only the callback base class is needed at import time.
try:
    from xgboost.callback import TrainingCallback  # type: ignore
# pylint: disable=import-error
except ImportError:

    # Stand-in base class for xgboost versions without TrainingCallback (< 1.3).
    class TrainingCallback:  # type: ignore
        pass


# Placeholder; set to the real module on first XGBoostCostModel construction.
xgb = None

logger = logging.getLogger("autotvm")
class XGBoostCostModel(CostModel):
    """XGBoost as cost model

    Parameters
    ----------
    task: Task
        The tuning task
    feature_type: str, optional
        If is 'itervar', use features extracted from IterVar (loop variable).
        If is 'knob', use flatten ConfigEntity directly.
        If is 'curve', use sampled curve feature (relation feature).

        Note on choosing feature type:
        For single task tuning, 'itervar' and 'knob' are good.
        'itervar' is more accurate but 'knob' is much faster.
        There are some constraints on 'itervar', if you meet
        problems with feature extraction when using 'itervar',
        you can switch to 'knob'.

        For cross-shape tuning (e.g. many convolutions with different shapes),
        'itervar' and 'curve' has better transferability,
        'knob' is faster.

        For cross-device or cross-operator tuning, you can use 'curve' only.
    loss_type: str
        If is 'reg', use regression loss to train cost model.
        The cost model predicts the normalized flops.
        If is 'rank', use pairwise rank loss to train cost model.
        The cost model predicts relative rank score.
        If is 'rank-binary', use pairwise rank loss with binarized labels to train cost model.
        The cost model predicts relative rank score.
    num_threads: int, optional
        The number of threads.
    log_interval: int, optional
        If is not none, the cost model will print training log every `log_interval` iterations.
    upper_model: XGBoostCostModel, optional
        The upper model used in transfer learning
    """

    def __init__(
        self,
        task,
        feature_type,
        loss_type="reg",
        num_threads=None,
        log_interval=25,
        upper_model=None,
    ):
        global xgb
        super(XGBoostCostModel, self).__init__()
        try:
            if xgb is None:
                xgb = __import__("xgboost")
        except ImportError as e:
            # Chain the original error so the real import failure is visible.
            raise ImportError(
                "XGBoost is required for XGBoostCostModel. "
                "Please install its python package first. "
                "Help: (https://xgboost.readthedocs.io/en/latest/) "
            ) from e

        self.task = task
        self.target = task.target
        self.space = task.config_space
        self.fea_type = feature_type
        self.loss_type = loss_type
        self.num_threads = num_threads
        self.log_interval = log_interval

        if loss_type == "reg":
            self.xgb_params = {
                "max_depth": 3,
                "gamma": 0.0001,
                "min_child_weight": 1,
                "subsample": 1.0,
                "eta": 0.3,
                "lambda": 1.00,
                "alpha": 0,
                "objective": "reg:linear",
            }
        elif loss_type in ("rank", "rank-binary"):
            self.xgb_params = {
                "max_depth": 3,
                "gamma": 0.0001,
                "min_child_weight": 1,
                "subsample": 1.0,
                "eta": 0.3,
                "lambda": 1.00,
                "alpha": 0,
                "objective": "rank:pairwise",
            }
        else:
            raise RuntimeError("Invalid loss type: " + loss_type)

        self.xgb_params["verbosity"] = 0
        if num_threads:
            self.xgb_params["nthread"] = num_threads
        self.bst = None

        if feature_type == "itervar":
            self.feature_extract_func = _extract_itervar_feature_index
        elif feature_type == "knob":
            self.feature_extract_func = _extract_knob_feature_index
        elif feature_type == "curve":
            self.feature_extract_func = _extract_curve_feature_index
        else:
            raise RuntimeError("Invalid feature type " + feature_type)

        if upper_model:  # share a same feature cache with upper model
            self.feature_cache = upper_model.feature_cache
        else:
            self.feature_cache = FeatureCache()
        self.upper_model = upper_model
        self.feature_extra_ct = 0
        self.pool = None
        self.base_model = None

        self._sample_size = 0
        self._reset_pool(self.space, self.target, self.task)

    def _reset_pool(self, space, target, task):
        """reset processing pool for feature extraction"""
        if self.upper_model:  # base model will reuse upper model's pool,
            self.upper_model._reset_pool(space, target, task)
            return

        self._close_pool()

        self.pool = PopenPoolExecutor(
            max_workers=self.num_threads,
            initializer=_extract_popen_initializer,
            initargs=(space, target, task),
        )

    def _close_pool(self):
        """Drop the reference to the worker pool (workers shut down with it)."""
        if self.pool:
            self.pool = None

    def _get_pool(self):
        """Return the worker pool, delegating to the upper model when present."""
        if self.upper_model:
            return self.upper_model._get_pool()
        return self.pool

    def _base_model_discount(self):
        """Weight of the transferred base model; halves every 64 samples."""
        return 1.0 / (2 ** (self._sample_size / 64.0))

    def fit(self, xs, ys, plan_size):
        """Fit the cost model on measured (config index, throughput) pairs."""
        tic = time.time()
        self._reset_pool(self.space, self.target, self.task)

        x_train = self._get_feature(xs)
        y_train = np.array(ys)
        # Normalize throughputs to [0, 1]; guard against an all-zero batch.
        y_max = np.max(y_train)
        y_train = y_train / max(y_max, 1e-8)

        valid_index = y_train > 1e-6
        index = np.random.permutation(len(x_train))
        dtrain = xgb.DMatrix(x_train[index], y_train[index])
        self._sample_size = len(x_train)

        if self.base_model:
            discount = self._base_model_discount()
            if discount < 0.05:  # discard base model
                self.base_model.upper_model = None
                self.base_model = None
            else:
                dtrain.set_base_margin(discount * self.base_model.predict(xs, output_margin=True))

        self.bst = xgb.train(
            self.xgb_params,
            dtrain,
            num_boost_round=8000,
            callbacks=[
                CustomCallback(
                    stopping_rounds=20,
                    metric=f"tr-a-recall@{plan_size}",
                    evals=[(dtrain, "tr")],
                    maximize=True,
                    fevals=[xgb_average_recalln_curve_score(plan_size)],
                    verbose_eval=self.log_interval,
                    loss_type=self.loss_type,
                )
            ],
        )

        logger.debug(
            "XGB train: %.2f\tobs: %d\terror: %d\tn_cache: %d",
            time.time() - tic,
            len(xs),
            len(xs) - np.sum(valid_index),
            self.feature_cache.size(self.fea_type),
        )

    def fit_log(self, records, plan_size, min_seed_records=500):
        """Fit the cost model from a history log; returns False if too few samples."""
        tic = time.time()

        # filter data, only pick the data with a same task
        data = []
        for inp, res in records:
            if inp.task.name == self.task.name:
                data.append((inp, res))

        logger.debug("XGB load %d entries from history log file", len(data))

        # extract feature
        self._reset_pool(self.space, self.target, self.task)
        pool = self._get_pool()
        if self.fea_type == "itervar":
            feature_extract_func = _extract_itervar_feature_log
        elif self.fea_type == "knob":
            feature_extract_func = _extract_knob_feature_log
        elif self.fea_type == "curve":
            feature_extract_func = _extract_curve_feature_log
        else:
            raise RuntimeError("Invalid feature type: " + self.fea_type)
        result = pool.map_with_error_catching(feature_extract_func, data)
        result = list(result)  # store results so we can iterate through them twice

        # get maximum feature length
        fea_len = -1
        for res in result:
            if res.status != StatusKind.COMPLETE:
                continue
            x, _ = res.value
            fea_len = max(fea_len, x.shape[0])

        xs, ys = [], []
        for res in result:
            if res.status != StatusKind.COMPLETE:
                continue
            x, y = res.value
            # Features may not be the same size, pad them until they are
            if fea_len > len(x):
                xs.append(np.pad(x, (0, fea_len - len(x))))
            else:
                xs.append(x)
            ys.append(y)

        if len(xs) < min_seed_records:  # no enough samples
            return False

        xs, ys = np.array(xs), np.array(ys)
        x_train = xs
        y_train = ys
        y_max = np.max(y_train)
        y_train = y_train / max(y_max, 1e-8)

        index = np.random.permutation(len(x_train))
        dtrain = xgb.DMatrix(x_train[index], y_train[index])

        plan_size *= 2
        self.bst = xgb.train(
            self.xgb_params,
            dtrain,
            num_boost_round=400,
            callbacks=[
                CustomCallback(
                    stopping_rounds=100,
                    metric=f"tr-a-recall@{plan_size}",
                    evals=[(dtrain, "tr")],
                    maximize=True,
                    fevals=[xgb_average_recalln_curve_score(plan_size)],
                    verbose_eval=self.log_interval,
                    loss_type=self.loss_type,
                )
            ],
        )

        logger.debug("XGB train: %.2f\tobs: %d", time.time() - tic, len(xs))

        return True

    def predict(self, xs, output_margin=False):
        """Predict scores for config indexes `xs` with the trained booster."""
        feas = self._get_feature(xs)
        dtest = xgb.DMatrix(feas)

        if self.base_model:
            dtest.set_base_margin(
                self._base_model_discount() * self.base_model.predict(xs, output_margin=True)
            )

        return self.bst.predict(dtest, output_margin=output_margin)

    def load_basemodel(self, base_model):
        """Attach a pre-trained model for transfer learning."""
        self.base_model = base_model
        self.base_model._close_pool()
        self.base_model.upper_model = self

    def spawn_base_model(self):
        """Create a clone configured as the base model of this one."""
        return XGBoostCostModel(
            self.task, self.fea_type, self.loss_type, self.num_threads, self.log_interval, self
        )

    def _get_feature(self, indexes):
        """get features for indexes, run extraction if we do not have cache for them"""
        # free feature cache
        if self.feature_cache.size(self.fea_type) >= 100000:
            self.feature_cache.clear(self.fea_type)

        fea_cache = self.feature_cache.get(self.fea_type)

        indexes = np.array(indexes)
        need_extract = [x for x in indexes if x not in fea_cache]

        if need_extract:
            pool = self._get_pool()
            feas = pool.map_with_error_catching(self.feature_extract_func, need_extract)
            for i, fea in zip(need_extract, feas):
                fea_cache[i] = fea.value if fea.status == StatusKind.COMPLETE else None

        feature_len = -1
        for idx in indexes:
            if fea_cache[idx] is not None:
                feature_len = max(fea_cache[idx].shape[-1], feature_len)

        ret = np.empty((len(indexes), feature_len), dtype=np.float32)
        for i, ii in enumerate(indexes):
            t = fea_cache[ii]
            # Pad shorter feature vectors; failed extractions become all-zero rows.
            if t is not None and t.shape[0] < feature_len:
                t = np.pad(t, (0, feature_len - t.shape[0]))
            ret[i, :] = t if t is not None else 0
        return ret

    def __del__(self):
        self._close_pool()
# Global variables for passing arguments to extract functions.
# They are set once per worker process by _extract_popen_initializer and read
# by the module-level _extract_*_index functions.
_extract_space = None
_extract_target = None
_extract_task = None


def _extract_popen_initializer(space, target, task):
    """Seed the per-process globals used by the feature-extraction workers."""
    global _extract_space, _extract_target, _extract_task
    _extract_space = space
    _extract_target = target
    _extract_task = task
def _extract_itervar_feature_index(args):
    """extract iteration var feature for an index in extract_space"""
    cfg = _extract_space.get(args)
    with _extract_target:
        sched, buf_args = _extract_task.instantiate(cfg)
        vec = feature.get_itervar_feature_flatten(sched, buf_args, take_log=True)
    # Append the non-loop ("other option") knob values to the feature vector.
    return np.concatenate((vec, list(cfg.get_other_option().values())))
def _extract_itervar_feature_log(arg):
    """extract iteration var feature for log items"""
    inp, res = arg
    cfg = inp.config
    with inp.target:
        sched, buf_args = inp.task.instantiate(cfg)
        vec = feature.get_itervar_feature_flatten(sched, buf_args, take_log=True)
    x = np.concatenate((vec, list(cfg.get_other_option().values())))
    # Label: measured throughput (flops/sec); failed measurements score 0.
    y = inp.task.flop / np.mean(res.costs) if res.error_no == 0 else 0.0
    return x, y
def _extract_knob_feature_index(args):
    """extract knob feature for an index in extract_space"""
    # The flattened config entity itself is the feature vector.
    return _extract_space.get(args).get_flatten_feature()
def _extract_knob_feature_log(arg):
    """extract knob feature for log items"""
    inp, res = arg
    cfg = inp.config
    x = cfg.get_flatten_feature()
    if res.error_no != 0:
        return x, 0.0
    with inp.target:  # necessary, for calculating flops of this task
        inp.task.instantiate(cfg)
    return x, inp.task.flop / np.mean(res.costs)
def _extract_curve_feature_index(args):
    """extract sampled curve feature for an index in extract_space"""
    cfg = _extract_space.get(args)
    with _extract_target:
        sched, buf_args = _extract_task.instantiate(cfg)
        vec = feature.get_buffer_curve_sample_flatten(sched, buf_args, sample_n=20)
    merged = np.concatenate((vec, list(cfg.get_other_option().values())))
    return np.array(merged)
def _extract_curve_feature_log(arg):
    """extract sampled curve feature for log items"""
    inp, res = arg
    cfg = inp.config
    with inp.target:
        sched, buf_args = inp.task.instantiate(cfg)
        vec = feature.get_buffer_curve_sample_flatten(sched, buf_args, sample_n=20)
    x = np.concatenate((vec, list(cfg.get_other_option().values())))
    # Label: measured throughput (flops/sec); failed measurements score 0.
    y = inp.task.flop / np.mean(res.costs) if res.error_no == 0 else 0.0
    return x, y
def _binarize_evals(evals):
    """binarize evaluation labels

    Returns a copy of each (DMatrix, name, ...) eval tuple whose labels are
    thresholded at 0.5 into {0.0, 1.0}, for the 'rank-binary' loss.
    """
    bin_evals = []
    for evalset in evals:
        # binarize labels in xgb.dmatrix copy
        barray = evalset[0].get_data().copy()
        blabel = evalset[0].get_label().copy()
        blabel[blabel < 0.5] = 0.0
        blabel[blabel >= 0.5] = 1.0
        # pylint: disable=R1721
        bin_evals.append(tuple([xgb.DMatrix(barray, blabel)] + [e for e in evalset[1:]]))
    return bin_evals
class XGBoostCallback(TrainingCallback):
    """Base class for XGBoost callbacks.

    Bridges the legacy (< 1.3) callable-callback protocol to the modern
    TrainingCallback.after_iteration interface.
    """

    def __call__(self, env: "xgb.core.CallbackEnv"):
        # Compatibility with xgboost < 1.3
        return self.after_iteration(env.model, env.iteration, env.evaluation_result_list)

    def after_iteration(self, model: "xgb.Booster", epoch: int, evals_log: Dict):
        # Subclasses implement this; returning True stops training.
        raise NotImplementedError
class CustomCallback(XGBoostCallback):
    """
    Callback function for xgboost.
    Support custom evaluation function and early-stopping.
    """

    def __init__(
        self,
        stopping_rounds,
        metric,
        fevals,
        loss_type="reg",
        evals=(),
        log_file=None,
        maximize=False,
        verbose_eval=True,
        skip_every=2,
    ):
        """Init function"""
        # stopping_rounds: stop when `metric` has not improved for this many epochs.
        self.stopping_rounds = stopping_rounds
        self.metric = metric
        # e.g. "tr-a-recall@8" -> shortname "a" (used to sort the target metric first).
        self.metric_shortname = metric.split("-")[1]
        self.fevals = fevals
        self.evals = evals
        self.log_file = log_file
        self.maximize = maximize
        self.verbose_eval = verbose_eval
        self.loss_type = loss_type
        self.skip_every = skip_every
        # Early-stopping bookkeeping (best score/iteration/message), filled lazily.
        self.state = {}

    def after_iteration(self, model: "xgb.Booster", epoch: int, evals_log: Dict):
        """Run after each iteration.  Return True when training should stop."""
        # pylint:disable = import-outside-toplevel
        try:
            from xgboost.callback import _fmt_metric  # type: ignore
        except ImportError:
            # Compatibility with xgboost >= 1.6

            def _fmt_metric(value, show_stdv=True):
                """format metric string"""
                if len(value) == 2:
                    return f"{value[0]}:{value[1]:.5f}"
                if len(value) == 3:
                    if show_stdv:
                        return f"{value[0]}:{value[1]:.5f}+{value[2]:.5f}"
                    return f"{value[0]}:{value[1]:.5f}"
                raise ValueError("wrong metric value", value)

        ##### init state #####
        if not self.state:
            self.state["maximize_score"] = self.maximize
            self.state["best_iteration"] = 0
            if self.maximize:
                self.state["best_score"] = float("-inf")
            else:
                self.state["best_score"] = float("inf")

            assert model is not None
            # Resume best-so-far from booster attributes if a previous run set them.
            if model.attr("best_score") is not None:
                self.state["best_score"] = float(model.attr("best_score"))
                self.state["best_iteration"] = int(model.attr("best_iteration"))
                self.state["best_msg"] = model.attr("best_msg")
            else:
                model.set_attr(best_iteration=str(self.state["best_iteration"]))
                model.set_attr(best_score=str(self.state["best_score"]))
        res_dict = {}

        # Evaluate only every `skip_every`-th epoch to save time.
        if epoch % self.skip_every == 1:
            return False

        ##### evaluation #####
        mod_evals = self.evals
        if self.loss_type == "rank-binary":
            mod_evals = _binarize_evals(self.evals)

        if self.loss_type == "rank" and int(xgb.__version__[0]) >= 2:
            # since xgboost pr#8931
            raise RuntimeError(
                "Use 'rank-binary' instead of 'rank' loss_type with xgboost %s >= 2.0.0"
                % xgb.__version__
            )

        for feval in self.fevals:
            bst_eval = model.eval_set(mod_evals, epoch, feval)
            res = [x.split(":") for x in bst_eval.split()]
            for kv in res[1:]:
                res_dict[kv[0]] = [float(kv[1])]

        eval_res = []
        keys = list(res_dict.keys())
        # Sort so that entries containing the target metric shortname come first.
        keys.sort(key=lambda x: x if self.metric_shortname not in x else "a" + x)
        for key in keys:
            v = res_dict[key]
            eval_res.append([key] + v)

        ##### print eval result #####
        if (
            not isinstance(self.verbose_eval, bool)
            and self.verbose_eval
            and epoch % self.verbose_eval == 0
        ):
            infos = [f"XGB iter: {epoch:3d}"]
            for item in eval_res:
                if "null" in item[0]:
                    continue
                infos.append(f"{item[0]}: {item[1]:.6f}")

            logger.debug("\t".join(infos))
            if self.log_file:
                with open(self.log_file, "a") as fout:
                    fout.write("\t".join(infos) + "\n")

        ##### choose score and do early stopping #####
        score = None
        for item in eval_res:
            if item[0] == self.metric:
                score = item[1]
                break
        assert score is not None

        best_score = self.state["best_score"]
        best_iteration = self.state["best_iteration"]
        maximize_score = self.state["maximize_score"]

        if (maximize_score and score > best_score) or (not maximize_score and score < best_score):
            msg = f"[{epoch}] " + "\t".join([_fmt_metric(x) for x in eval_res])
            self.state["best_msg"] = msg
            self.state["best_score"] = score
            self.state["best_iteration"] = epoch
            # save the property to attributes, so they will occur in checkpoint.
            if model is not None:
                model.set_attr(
                    best_score=str(self.state["best_score"]),
                    best_iteration=str(self.state["best_iteration"]),
                    best_msg=self.state["best_msg"],
                )
        elif epoch - best_iteration >= self.stopping_rounds:
            best_msg = self.state["best_msg"]
            if self.verbose_eval:
                logger.debug("XGB stopped. Best iteration: %s ", best_msg)
            # Signal xgb.train to stop.
            return True

        return False
# feval wrapper for xgboost
def xgb_max_curve_score(N):
    """evaluate max curve score for xgb"""

    def feval(preds, labels):
        y = labels.get_label()
        # Rank configs by predicted score, best first.
        order = np.argsort(preds)[::-1]
        curve = max_curve(y[order])
        return f"Smax@{N}", curve[N] / np.max(y)

    return feval
def xgb_recalln_curve_score(N):
    """evaluate recall-n curve score for xgb"""

    def feval(preds, labels):
        y = labels.get_label()
        order = np.argsort(preds)[::-1]
        curve = recall_curve(get_rank(y[order]))
        return f"recall@{N}", curve[N]

    return feval
def xgb_average_recalln_curve_score(N):
    """evaluate average recall-n curve score for xgb"""

    def feval(preds, labels):
        y = labels.get_label()
        order = np.argsort(preds)[::-1]
        curve = recall_curve(get_rank(y[order]))
        # Mean of the first N points of the recall curve.
        return f"a-recall@{N}", np.sum(curve[:N]) / N

    return feval
def xgb_recallk_curve_score(N, topk):
    """evaluate recall-k curve score for xgb"""

    def feval(preds, labels):
        y = labels.get_label()
        order = np.argsort(preds)[::-1]
        curve = recall_curve(get_rank(y[order]), topk)
        return f"recall@{topk}", curve[N]

    return feval
def xgb_cover_curve_score(N):
    """evaluate cover curve score for xgb"""

    def feval(preds, labels):
        y = labels.get_label()
        order = np.argsort(preds)[::-1]
        curve = cover_curve(get_rank(y[order]))
        return f"cover@{N}", curve[N]

    return feval
def xgb_null_score(_):
    """empty score function for xgb"""

    def feval(preds, labels):  # pylint: disable=unused-argument
        # Constant placeholder metric.
        return "null", 0

    return feval
| 23,505 | 32.247525 | 98 | py |
tvm | tvm-main/python/tvm/autotvm/tuner/xgboost_tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tuner that uses xgboost as cost model"""
from .model_based_tuner import ModelBasedTuner, ModelOptimizer
from .xgboost_cost_model import XGBoostCostModel
from .sa_model_optimizer import SimulatedAnnealingOptimizer
class XGBTuner(ModelBasedTuner):
    """Tuner that uses xgboost as cost model

    Parameters
    ----------
    task: Task
        The tuning task
    plan_size: int
        The size of a plan. After `plan_size` trials, the tuner will refit a new cost model
        and do planing for the next `plan_size` trials.
    feature_type: str, optional
        If is 'itervar', use features extracted from IterVar (loop variable).
        If is 'knob', use flatten ConfigEntity directly.
        If is 'curve', use sampled curve feature (relation feature).

        Note on choosing feature type:
        For single task tuning, 'itervar' and 'knob' are good.
        'itervar' is more accurate but 'knob' is much faster.
        There are some constraints on 'itervar', if you meet
        problems with feature extraction when using 'itervar',
        you can switch to 'knob'.

        For cross-shape tuning (e.g. many convolutions with different shapes),
        'itervar' and 'curve' has better transferability,
        'knob' is faster.

        For cross-device or cross-operator tuning, you can use 'curve' only.
    loss_type: str
        If is 'reg', use regression loss to train cost model.
        The cost model predicts the normalized flops.
        If is 'rank', use pairwise rank loss to train cost model.
        The cost model predicts relative rank score.
        If is 'rank-binary', use pairwise rank loss with binarized labels to train cost model.
        The cost model predicts relative rank score.
    num_threads: int, optional
        The number of threads.
    optimizer: str or ModelOptimizer, optional
        If is 'sa', use a default simulated annealing optimizer.
        Otherwise it should be a ModelOptimizer object.
    diversity_filter_ratio: int or float, optional
        If is not None, the tuner will first select
        top-(plan_size * diversity_filter_ratio) candidates according to the cost model
        and then pick batch_size of them according to the diversity metric.
    log_interval: int = 50
        The verbose level.
        If is 0, output nothing.
        Otherwise, output debug information every `verbose` iterations.
    """

    def __init__(
        self,
        task,
        plan_size=64,
        feature_type="itervar",
        loss_type="reg",
        num_threads=None,
        optimizer="sa",
        diversity_filter_ratio=None,
        log_interval=50,
    ):
        # The cost model logs half as often as the optimizer/tuner.
        cost_model = XGBoostCostModel(
            task,
            feature_type=feature_type,
            loss_type=loss_type,
            num_threads=num_threads,
            log_interval=log_interval // 2,
        )
        if optimizer == "sa":
            optimizer = SimulatedAnnealingOptimizer(task, log_interval=log_interval)
        else:
            # Fixed message: the original implicit string concatenation produced
            # "...a supported name stringor a ModelOptimizer object."
            assert isinstance(
                optimizer, ModelOptimizer
            ), "Optimizer must be a supported name string or a ModelOptimizer object."
        super(XGBTuner, self).__init__(
            task, cost_model, optimizer, plan_size, diversity_filter_ratio
        )

    def tune(self, *args, **kwargs):  # pylint: disable=arguments-differ
        """Run tuning, then release the cost model's worker pool."""
        super(XGBTuner, self).tune(*args, **kwargs)
        # manually close pool to avoid multiprocessing issues
        self.cost_model._close_pool()  # pylint: disable=protected-access
| 4,304 | 37.783784 | 94 | py |
tvm | tvm-main/python/tvm/autotvm/tuner/ga_tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=consider-using-enumerate,invalid-name,abstract-method
"""Tuner with genetic algorithm"""
import numpy as np
from .tuner import Tuner
class GATuner(Tuner):
    """Tuner with genetic algorithm.
    This tuner does not have a cost model so it always run measurement on real machines.
    This tuner expands the :code:`ConfigEntity` as gene.
    Parameters
    ----------
    pop_size: int
        number of genes in one generation
    elite_num: int
        number of elite to keep
    mutation_prob: float
        probability of mutation of a knob in a gene
    """
    def __init__(self, task, pop_size=100, elite_num=3, mutation_prob=0.1):
        super(GATuner, self).__init__(task)
        # algorithm configurations
        self.pop_size = pop_size
        self.elite_num = elite_num
        self.mutation_prob = mutation_prob
        assert elite_num <= pop_size, "The number of elites must be less than population size"
        # random initialization
        # Clamp both sizes so a small search space cannot be over-sampled.
        self.pop_size = min(self.pop_size, len(self.space))
        self.elite_num = min(self.pop_size, self.elite_num)
        self.visited = set(self.space.sample_ints(self.pop_size))
        # current generation
        # A "gene" is the knob-vector form of a config index (space.point2knob).
        self.genes = [self.space.point2knob(idx) for idx in self.visited]
        self.scores = []
        self.elites = []
        self.elite_scores = []
        # Cursor into self.genes: next gene to hand out for measurement.
        self.trial_pt = 0
    def next_batch(self, batch_size):
        # Serve up to batch_size configs from the current generation; the
        # cursor wraps modulo pop_size.
        ret = []
        while len(ret) < batch_size and self.has_next():
            gene = self.genes[self.trial_pt % self.pop_size]
            self.trial_pt += 1
            ret.append(self.space.get(self.space.knob2point(gene)))
        return ret
    def update(self, inputs, results):
        # Fitness = FLOPS for successful measurements, 0 for failures.
        for inp, res in zip(inputs, results):
            if res.error_no == 0:
                y = inp.task.flop / np.mean(res.costs)
                self.scores.append(y)
            else:
                self.scores.append(0.0)
        # Once the whole generation is scored, breed the next generation.
        if len(self.scores) >= len(self.genes) and len(self.visited) < len(self.space):
            next_genes = []
            # There is no reason to crossover or mutate since the size of the unvisited
            # is no larger than the size of the population.
            if len(self.space) - len(self.visited) <= self.pop_size:
                for idx in range(self.space.range_length):
                    if self.space.is_index_valid(idx) and idx not in self.visited:
                        next_genes.append(self.space.point2knob(idx))
                        self.visited.add(idx)
            else:
                # Current generation plus preserved elites compete together.
                genes = self.genes + self.elites
                scores = np.array(self.scores[: len(self.genes)] + self.elite_scores)
                # reserve elite
                self.elites, self.elite_scores = [], []
                elite_indexes = np.argpartition(scores, -self.elite_num)[-self.elite_num :]
                for ind in elite_indexes:
                    self.elites.append(genes[ind])
                    self.elite_scores.append(scores[ind])
                # Roulette-wheel parent selection: probability ~ score.
                indices = np.arange(len(genes))
                scores += 1e-8
                scores /= np.max(scores)
                probs = scores / np.sum(scores)
                while len(next_genes) < self.pop_size:
                    # cross over
                    p1, p2 = np.random.choice(indices, size=2, replace=False, p=probs)
                    p1, p2 = genes[p1], genes[p2]
                    point = np.random.randint(len(self.space.dims))
                    tmp_gene = p1[:point] + p2[point:]
                    # mutation
                    for j, dim in enumerate(self.space.dims):
                        if np.random.random() < self.mutation_prob:
                            tmp_gene[j] = np.random.randint(dim)
                    # Keep only offspring that map to a valid config index.
                    if self.space.is_index_valid(self.space.knob2point(tmp_gene)):
                        next_genes.append(tmp_gene)
                        self.visited.add(self.space.knob2point(tmp_gene))
            self.genes = next_genes
            self.trial_pt = 0
            self.scores = []
    def has_next(self):
        # Work remains while unvisited points exist beyond the genes already
        # handed out in the current generation.
        return len(self.visited) - (len(self.genes) - self.trial_pt) < len(self.space)
    def load_history(self, data_set, min_seed_records=500):
        # GA keeps no cost model, so prior records provide no transferable state.
        pass
| 5,041 | 39.015873 | 94 | py |
tvm | tvm-main/python/tvm/autotvm/tuner/sa_model_optimizer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=consider-using-enumerate, invalid-name, invalid-sequence-index
"""
Cost model optimizer based on simulated annealing
"""
import heapq
import logging
import time
import numpy as np
from .model_based_tuner import ModelOptimizer
logger = logging.getLogger("autotvm")
class SimulatedAnnealingOptimizer(ModelOptimizer):
    """parallel simulated annealing optimization algorithm
    Parameters
    ----------
    task: Task
        The tuning task
    n_iter: int
        The number of iterations of simulated annealing
    temp: float or Array of float
        If is a single float, then use a constant temperature.
        If is an Array, then perform linear cooling from temp[0] to temp[1]
    early_stop: int, optional
        Stop iteration if the optimal set do not change in `early_stop` rounds
    log_interval: int, optional
        Print log every `log_interval` iterations
    """
    def __init__(
        self,
        task,
        n_iter=500,
        temp=(1, 0),
        persistent=True,
        parallel_size=128,
        early_stop=50,
        log_interval=50,
    ):
        super(SimulatedAnnealingOptimizer, self).__init__()
        self.task = task
        self.n_iter = n_iter
        self.temp = temp
        # When True, walker positions are carried over between find_maximums calls.
        self.persistent = persistent
        # Number of parallel SA walkers, capped by the config-space size.
        self.parallel_size = min(parallel_size, len(self.task.config_space))
        # `early_stop or 1e9` treats None/0 as "never stop early".
        self.early_stop = early_stop or 1e9
        self.log_interval = log_interval
        self.points = None
    def find_maximums(self, model, num, exclusive):
        """Find up to `num` high-scoring points of `model` in the config space,
        never returning points listed in `exclusive`.
        """
        tic = time.time()
        temp, n_iter, early_stop, log_interval = (
            self.temp,
            self.n_iter,
            self.early_stop,
            self.log_interval,
        )
        # Resume previous walker positions if persistent, else sample fresh ones.
        if self.persistent and self.points is not None:
            points = self.points
        else:
            points = self.task.config_space.sample_ints(self.parallel_size)
        scores = model.predict(points)
        # build heap and insert initial points
        # Min-heap of (score, point). Placeholder entries use distinct negative
        # point ids so the `in_heap` membership set cannot collide with real indices.
        heap_items = [(float("-inf"), -1 - i) for i in range(num)]
        heapq.heapify(heap_items)
        in_heap = set(exclusive)
        in_heap.update([x[1] for x in heap_items])
        for s, p in zip(scores, points):
            if s > heap_items[0][0] and p not in in_heap:
                pop = heapq.heapreplace(heap_items, (s, p))
                in_heap.remove(pop[1])
                in_heap.add(p)
        k = 0
        k_last_modify = 0
        # Constant temperature, or linear cooling from temp[0] down to temp[1].
        if isinstance(temp, (tuple, list, np.ndarray)):
            t = temp[0]
            cool = 1.0 * (temp[0] - temp[1]) / (n_iter + 1)
        else:
            t = temp
            cool = 0
        while k < n_iter and k < k_last_modify + early_stop:
            # Propose a random-walk neighbor for every walker in parallel.
            new_points = np.empty_like(points)
            for i, p in enumerate(points):
                new_points[i] = self.task.config_space.random_walk(p)
            new_scores = model.predict(new_points)
            # Metropolis acceptance: improvements always accepted; degradations
            # accepted with probability exp(delta / t) (capped via minimum(..., 1)).
            ac_prob = np.exp(np.minimum((new_scores - scores) / (t + 1e-5), 1))
            ac_index = np.random.random(len(ac_prob)) < ac_prob
            points[ac_index] = new_points[ac_index]
            scores[ac_index] = new_scores[ac_index]
            for s, p in zip(new_scores, new_points):
                if s > heap_items[0][0] and p not in in_heap:
                    pop = heapq.heapreplace(heap_items, (s, p))
                    in_heap.remove(pop[1])
                    in_heap.add(p)
                    k_last_modify = k
            k += 1
            t -= cool
            if log_interval and k % log_interval == 0:
                t_str = f"{t:.2f}"
                logger.debug(
                    "SA iter: %d\tlast_update: %d\tmax-0: %.2f\tmax-1: %.2f\ttemp: %s\t"
                    "elapsed: %.2f",
                    k,
                    k_last_modify,
                    heap_items[0][0],
                    np.max([v for v, _ in heap_items]),
                    t_str,
                    time.time() - tic,
                )
        # Sort maxima by descending score and drop unfilled -inf placeholders.
        heap_items.sort(key=lambda item: -item[0])
        heap_items = [x for x in heap_items if x[0] >= 0]
        logger.debug(
            "SA iter: %d\tlast_update: %d\telapsed: %.2f", k, k_last_modify, time.time() - tic
        )
        logger.debug("SA Maximums: %s", heap_items)
        if self.persistent:
            self.points = points
        return [x[1] for x in heap_items]
| 5,165 | 32.115385 | 94 | py |
tvm | tvm-main/python/tvm/autotvm/tuner/droplet_turner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tuner with droplet algorithm"""
import logging
import numpy as np
from scipy import stats
from .tuner import Tuner
LOGGER = logging.getLogger("autotvm")
class DropletTuner(Tuner):
    """Tuner with droplet algorithm.
    Parameters
    ----------
    start_position: list of int
        position initial of the space, the default is [0, 0, ..., 0]
    pvalue: float
        statistical value to confidence level, the default is 0.05
    """
    def __init__(self, task, start_position=None, pvalue=0.05):
        super(DropletTuner, self).__init__(task)
        # space info
        self.space = task.config_space
        # Length of each knob dimension of the config space.
        self.dims = []
        for _, v in self.space.space_map.items():
            self.dims.append(len(v))
        # start position
        start_position = [0] * len(self.dims) if start_position is None else start_position
        # best_choice = (space index, knob vector, measured costs); the sentinel
        # cost [99999] guarantees the first real measurement replaces it.
        self.best_choice = (-1, [0] * len(self.dims), [99999])
        self.visited = set([self.space.knob2point(start_position)])
        self.execution, self.total_execution, self.batch = 1, max(self.dims), 16
        self.pvalue, self.step = pvalue, 1
        # Queue of (space index, knob vector) candidates awaiting measurement.
        self.next = [(self.space.knob2point(start_position), start_position)]
    def num_to_bin(self, value, factor=1):
        # Zero-pad `value`'s binary form to len(self.dims) digits and scale each
        # bit by `factor`, producing one signed per-dimension offset.
        bin_format = str(0) * (len(self.dims) - len(bin(value)[2:])) + bin(value)[2:]
        return [int(i) * factor for i in bin_format]
    def search_space(self, factor=1):
        # All non-zero offset combinations in {+factor, 0, -factor} directions.
        search_space = []
        for i in range(2 ** len(self.dims) - 1, 0, -1):
            search_space += [self.num_to_bin(i, factor)] + [self.num_to_bin(i, -factor)]
        return search_space
    def next_pos(self, new_positions):
        "returns the neighbors of the best solution"
        next_set = []
        for p in new_positions:
            if len(next_set) > self.batch:
                break
            # Apply the offset to the best knob vector: wrap positive overflow
            # per dimension, clamp non-positive values to 0.
            new_p = [
                (x + y) % self.dims[i] if (x + y > 0) else 0
                for i, (x, y) in enumerate(zip(p, self.best_choice[1]))
            ]
            idx_p = self.space.knob2point(new_p)
            if idx_p not in self.visited:
                self.visited.add(idx_p)
                next_set.append((idx_p, new_p))
        return next_set
    def p_value(self, elem_1, elem_2):
        # Two-sample t-test; with fewer than two samples significance cannot be
        # computed, so optimistically report a significant difference.
        if len(elem_1) <= 1 or len(elem_2) <= 1:
            return True
        return stats.ttest_ind(np.array(elem_1), np.array(elem_2)).pvalue <= self.pvalue
    def next_batch(self, batch_size):
        ret, self.batch = [], batch_size
        for i in range(batch_size):
            if i >= len(self.next):
                break
            if self.space.is_index_valid(self.next[i][0]):
                ret.append(self.space.get(self.next[i][0]))
        return ret
    def speculation(self):
        # Gradient descending direction prediction and search space filling
        while len(self.next) < self.batch and self.execution < self.total_execution:
            self.execution += self.step
            self.next += self.next_pos(self.search_space(self.execution))
    def update(self, inputs, results):
        found_best_pos = False
        for i, (_, res) in enumerate(zip(inputs, results)):
            try:
                # Adopt a new best only if the mean cost improves AND the
                # difference is statistically significant.
                if np.mean(self.best_choice[2]) > np.mean(res.costs) and self.p_value(
                    self.best_choice[2], res.costs
                ):
                    self.best_choice = (self.next[i][0], self.next[i][1], res.costs)
                    found_best_pos = True
            except TypeError:
                # res.costs holds an error tuple for failed measurements.
                LOGGER.debug("Solution is not valid")
                continue
            else:
                continue
        # NOTE(review): the -1 end bound drops the last pending candidate;
        # `self.next[self.batch :]` looks intended -- confirm before relying on it.
        self.next = self.next[self.batch : -1]
        if found_best_pos:
            # Restart neighborhood exploration around the new best position.
            self.next += self.next_pos(self.search_space())
            self.execution = 1
            self.speculation()
    def has_next(self):
        return len(self.next) > 0
    def load_history(self, data_set, min_seed_records=500):
        pass
| 4,703 | 35.75 | 91 | py |
tvm | tvm-main/python/tvm/autotvm/tuner/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A tuner takes a task as input. It proposes some promising :any:`ConfigEntity`
in the :any:`ConfigSpace` and measure them on the real hardware. Then it
proposed the next batch of :any:`ConfigEntity` according to the measure results.
This tuning loop is repeated.
"""
from . import callback
from .tuner import Tuner
from .index_based_tuner import GridSearchTuner, RandomTuner
from .ga_tuner import GATuner
from .xgboost_tuner import XGBTuner
from .droplet_turner import DropletTuner
| 1,273 | 38.8125 | 80 | py |
tvm | tvm-main/python/tvm/autotvm/tuner/callback.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=consider-using-enumerate,invalid-name
"""Namespace of callback utilities of AutoTVM"""
import sys
import time
import logging
import numpy as np
from .. import record
from ..utils import format_si_prefix
logger = logging.getLogger("autotvm")
def log_to_file(file_out, protocol="json"):
    """Log the tuning records into file.
    The rows of the log are stored in the format of autotvm.record.encode.
    Parameters
    ----------
    file_out : File or str
        The file to log to.
    protocol: str, optional
        The log protocol. Can be 'json' or 'pickle'
    Returns
    -------
    callback : callable
        Callback function to do the logging.
    """
    # pylint: disable=import-outside-toplevel
    from pathlib import Path

    # Normalize Path objects up front so the callback's isinstance(str) check
    # treats them as file names rather than file-like objects.
    if isinstance(file_out, Path):
        file_out = str(file_out)

    def _callback(_, inputs, results):
        """Append one encoded record line per measured (input, result) pair."""
        lines = [record.encode(inp, res, protocol) + "\n" for inp, res in zip(inputs, results)]
        if isinstance(file_out, str):
            with open(file_out, "a") as f:
                f.writelines(lines)
        else:
            file_out.writelines(lines)

    return _callback
def log_to_database(db):
    """Save the tuning records to a database object.
    Parameters
    ----------
    db: Database
        The database
    """

    def _callback(_, inputs, results):
        """Persist each measured (input, result) pair via ``db.save``."""
        for pair in zip(inputs, results):
            db.save(*pair)

    return _callback
class Monitor(object):
    """Tuning callback that records per-trial throughput and wall-clock times."""

    def __init__(self):
        self.scores = []
        self.timestamps = []

    def __call__(self, tuner, inputs, results):
        for inp, res in zip(inputs, results):
            # A failed measurement contributes a zero score.
            score = inp.task.flop / np.mean(res.costs) if res.error_no == 0 else 0
            self.scores.append(score)
            self.timestamps.append(res.timestamp)

    def reset(self):
        """Discard all collected statistics."""
        self.scores, self.timestamps = [], []

    def trial_scores(self):
        """get scores (currently is flops) of all trials"""
        return np.array(self.scores)

    def trial_timestamps(self):
        """get wall clock time stamp of all trials"""
        return np.array(self.timestamps)
def progress_bar(total, prefix="", si_prefix="G"):
    """Display progress bar for tuning
    Parameters
    ----------
    total: int
        The total number of trials
    prefix: str
        The prefix of output message
    si_prefix: str
        SI prefix for flops
    """
    class _Context(object):
        """Context to store local variables"""
        def __init__(self):
            self.best_flops = 0
            self.cur_flops = 0
            self.ct = 0
            self.total = total
        def __del__(self):
            # Terminate the progress line when the context is garbage-collected.
            if logger.level < logging.DEBUG:  # only print progress bar in non-debug mode
                sys.stdout.write(" Done.\n")
    ctx = _Context()
    tic = time.time()
    # Validate si_prefix argument
    format_si_prefix(0, si_prefix)
    # Print the initial (all-zero) progress line immediately.
    if logger.level < logging.DEBUG:  # only print progress bar in non-debug mode
        sys.stdout.write(
            "\r%s Current/Best: %7.2f/%7.2f %sFLOPS | Progress: (%d/%d) "
            "| %.2f s" % (prefix, 0, 0, si_prefix, 0, total, time.time() - tic)
        )
        sys.stdout.flush()
    def _callback(tuner, inputs, results):
        # Count every measured config; `flops` keeps the last successful value
        # in the batch (0 if the whole batch failed).
        ctx.ct += len(inputs)
        flops = 0
        for inp, res in zip(inputs, results):
            if res.error_no == 0:
                flops = inp.task.flop / np.mean(res.costs)
        # NOTE(review): this gate uses isEnabledFor() while the ones above read
        # logger.level directly -- they can disagree under propagated config.
        if not logger.isEnabledFor(logging.DEBUG):  # only print progress bar in non-debug mode
            ctx.cur_flops = flops
            ctx.best_flops = tuner.best_flops
            sys.stdout.write(
                "\r%s Current/Best: %7.2f/%7.2f %sFLOPS | Progress: (%d/%d) "
                "| %.2f s"
                % (
                    prefix,
                    format_si_prefix(ctx.cur_flops, si_prefix),
                    format_si_prefix(ctx.best_flops, si_prefix),
                    si_prefix,
                    ctx.ct,
                    ctx.total,
                    time.time() - tic,
                )
            )
            sys.stdout.flush()
    return _callback
| 5,258 | 28.055249 | 95 | py |
tvm | tvm-main/python/tvm/autotvm/tuner/tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, no-self-use, invalid-name
"""Base class of tuner"""
import logging
import tempfile
import numpy as np
from ..measure import MeasureInput, create_measure_batch
from ..utils import format_si_prefix
from ..env import GLOBAL_SCOPE
logger = logging.getLogger("autotvm")
class Tuner(object):
    """Base class for tuners
    Parameters
    ----------
    task: autotvm.task.Task
        Tuning Task
    """

    def __init__(self, task, **kwargs):
        self.param = kwargs
        self.recorder = None
        self.task = task
        self.space = self.task.config_space
        # keep the current best
        self.best_config = None
        self.best_flops = 0
        self.best_measure_pair = None
        self.best_iter = 0
        # Consecutive-failure count that triggers switching to debug logging.
        self.error_ct_threshold = 150
        # time to leave
        self.ttl = None
        self.n_trial = None
        self.early_stopping = None

    def has_next(self):
        """Whether has next untried config in the space
        Returns
        -------
        has_next: bool
        """
        raise NotImplementedError()

    def next_batch(self, batch_size):
        """get the next batch of configs to be measure on real hardware
        Parameters
        ----------
        batch_size: int
            The size of the batch
        Returns
        -------
        a batch of configs
        """
        raise NotImplementedError()

    def update(self, inputs, results):
        """Update parameters of the tuner according to measurement results
        Parameters
        ----------
        inputs: Array of autotvm.measure.MeasureInput
            The input for measurement
        results: Array of autotvm.measure.MeasureResult
            result for measurement
        """

    def tune(self, n_trial, measure_option, early_stopping=None, callbacks=(), si_prefix="G"):
        """Begin tuning
        Parameters
        ----------
        n_trial: int
            Maximum number of configs to try (measure on real hardware)
        measure_option: dict
            The options for how to measure generated code.
            You should use the return value ot autotvm.measure_option for this argument.
        early_stopping: int, optional
            Early stop the tuning when not finding better configs in this number of trials
        callbacks: List of callable
            A list of callback functions. The signature of callback function is
            (Tuner, List of MeasureInput, List of MeasureResult)
            with no return value. These callback functions will be called on
            every measurement pair. See autotvm/tuner/callback.py for some examples.
        si_prefix: str
            One of tvm.autotvm.utils.SI_PREFIXES. The SI prefix to use when reporting FLOPS.
        """
        measure_batch = create_measure_batch(self.task, measure_option)
        n_parallel = getattr(measure_batch, "n_parallel", 1)
        # None/0 disables early stopping (1e9 is effectively "never").
        early_stopping = early_stopping or 1e9
        self.n_trial = n_trial
        self.early_stopping = early_stopping
        # Validate si_prefix arg
        format_si_prefix(0, si_prefix)
        old_level = logger.level
        GLOBAL_SCOPE.in_tuning = True
        i = error_ct = 0
        errors = []
        while i < n_trial:
            if not self.has_next():
                break
            configs = self.next_batch(min(n_parallel, n_trial - i))
            inputs = [MeasureInput(self.task.target, self.task, config) for config in configs]
            results = measure_batch(inputs)
            # keep best config
            for k, (inp, res) in enumerate(zip(inputs, results)):
                config = inp.config
                if res.error_no == 0:
                    flops = inp.task.flop / np.mean(res.costs)
                    error_ct = 0
                    result_msg = res
                else:
                    flops = 0
                    error_ct += 1
                    # On failure, `costs` carries a (traceback, exception) pair.
                    tb, error = res.costs
                    if isinstance(error, str):
                        errors.append(tb + "\n" + error)
                    else:
                        errors.append(tb + "\n" + str(error))
                    result_msg = errors[-1]
                if flops > self.best_flops:
                    self.best_flops = flops
                    self.best_config = config
                    self.best_measure_pair = (inp, res)
                    self.best_iter = i + k
                logger.debug(
                    "No: %d\t%sFLOPS: %.2f/%.2f\tresult: %s\t%s",
                    i + k + 1,
                    si_prefix,
                    format_si_prefix(flops, si_prefix),
                    format_si_prefix(self.best_flops, si_prefix),
                    result_msg,
                    config,
                )
            i += len(results)
            self.ttl = min(early_stopping + self.best_iter, n_trial) - i
            self.update(inputs, results)
            for callback in callbacks:
                callback(self, inputs, results)
            if i >= self.best_iter + early_stopping:
                logger.debug("Early stopped. Best iter: %d.", self.best_iter)
                break
            if error_ct > self.error_ct_threshold:
                logging.basicConfig()
                logger.warning("Too many errors happen in the tuning. Switching to debug mode.")
                logger.setLevel(logging.DEBUG)
            else:
                logger.setLevel(old_level)
        # Every trial errored out: dump the collected tracebacks for debugging.
        if error_ct == i:
            _, f = tempfile.mkstemp(prefix="tvm_tuning_errors_", suffix=".log", text=True)
            with open(f, "w") as file:
                file.write("\n".join(errors))
            # Fixed: use the module's "autotvm" logger (as everywhere else in
            # this class) instead of the root logger via logging.warning().
            logger.warning(
                "Could not find any valid schedule for task %s. "
                "A file containing the errors has been written to %s.",
                self.task,
                f,
            )
        GLOBAL_SCOPE.in_tuning = False
        del measure_batch

    def reset(self):
        """reset the status of tuner"""
        self.best_config = None
        self.best_flops = 0
        self.best_measure_pair = None

    def load_history(self, data_set, min_seed_records=500):
        """load history data for transfer learning
        Parameters
        ----------
        data_set: Array of (autotvm.measure.MeasureInput, autotvm.measure.MeasureResult) pair
            Previous tuning records
        min_seed_records: int
            Defaults to 500. Indicates the minimum number of records to
            train the tuner with. If there are less than `min_seed_records`
            number of records in `data_set`, no training of the tuner
            will be done.
        """
        raise NotImplementedError()

    def set_error_threshold(self, threshold):
        """Modify error counter threshold, which controls switch to debug mode
        Parameters
        ----------
        threshold: New threshold value
        """
        self.error_ct_threshold = threshold
| 7,786 | 32.856522 | 96 | py |
tvm | tvm-main/python/tvm/autotvm/tuner/index_based_tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=abstract-method
"""Grid search tuner and random tuner"""
from .tuner import Tuner
class IndexBaseTuner(Tuner):
    """Base class for index based tuner
    This type of tuner determine the next batch of configs based on config indices.
    Parameters
    ----------
    task: autotvm.task.Task
        The tuning task
    range_idx: Optional[Tuple[int, int]]
        A tuple of index range that this tuner can select from [begin_idx, end_idx]
    """

    def __init__(self, task, range_idx=None):
        super(IndexBaseTuner, self).__init__(task)
        assert range_idx is None or isinstance(
            range_idx, tuple
        ), "range_idx must be None or (int, int)"
        self.visited = []
        # Default to the whole config space when no explicit range is given.
        self.begin_idx, self.end_idx = range_idx or (0, self.space.range_length - 1)
        # Fixed message: the check allows zero, so the index must be non-negative.
        assert self.begin_idx >= 0, "Start index must be non-negative"
        self.end_idx += 1  # Further end_idx is exclusive
        # Fixed message grammar ("must be less the space range length ").
        assert (
            self.end_idx <= self.space.range_length
        ), "End index must not exceed the space range length"
        self.range_length = self.end_idx - self.begin_idx
        assert self.range_length > 0, "Index range must be positive"
        # Number of valid (measurable) configs within [begin_idx, end_idx).
        self.visited_max = self.space.subrange_length(self.begin_idx, self.end_idx)

    def has_next(self):
        """Return True while unvisited valid configs remain in the range."""
        return len(self.visited) < self.visited_max

    def load_history(self, data_set, min_seed_records=500):
        # Index-based tuners have no model state; history provides nothing.
        pass
class GridSearchTuner(IndexBaseTuner):
    """Enumerate the search space in a grid search order"""

    def __init__(self, task, range_idx=None):
        super(GridSearchTuner, self).__init__(task, range_idx)
        # Start from the first valid index at or after begin_idx.
        self.index = self.begin_idx
        if not self.space.is_index_valid(self.index):
            self.index = self.space.get_next_index(
                self.index, start=self.begin_idx, end=self.end_idx
            )

    def next_batch(self, batch_size):
        batch = []
        while self.has_next() and len(batch) < batch_size:
            # Record the current index, emit its config, then advance the cursor.
            self.visited.append(self.index)
            batch.append(self.space.get(self.index))
            self.index = self.space.get_next_index(
                self.index, start=self.begin_idx, end=self.end_idx
            )
        return batch
class RandomTuner(IndexBaseTuner):
    """Enumerate the search space in a random order
    Parameters
    ----------
    task: autotvm.task.Task
        Tuning Task
    range_idx: Optional[Tuple[int, int]]
        A tuple of index range to random
    """

    def next_batch(self, batch_size):
        batch = []
        while self.has_next() and len(batch) < batch_size:
            # Draw an unvisited random index from [begin_idx, end_idx).
            idx = self.space.get_rand_index(self.begin_idx, self.end_idx, to_exclude=self.visited)
            self.visited.append(idx)
            batch.append(self.space.get(idx))
        return batch
| 3,586 | 34.166667 | 100 | py |
tvm | tvm-main/python/tvm/autotvm/tuner/metric.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Metrics for evaluating tuning process"""
import numpy as np
from ..utils import get_rank
def max_curve(trial_scores):
    """f(n) = max([s[i] fo i < n])
    Parameters
    ----------
    trial_scores: Array of float
        the score of i th trial
    Returns
    -------
    curve: Array of float
        function values
    """
    scores = np.asarray(trial_scores, dtype=np.float64)
    ret = np.maximum.accumulate(scores)
    # Preserve the original -1e9 floor on the running maximum.
    np.maximum(ret, -1e9, out=ret)
    return ret
def mean_curve(trial_scores):
    """f(n) = mean([s[i] fo i < n])
    Parameters
    ----------
    trial_scores: Array of float
        the score of i th trial
    Returns
    -------
    curve: Array of float
        function values
    """
    scores = np.asarray(trial_scores, dtype=np.float64)
    # Running mean = prefix sum divided by prefix length.
    return np.cumsum(scores) / np.arange(1, len(scores) + 1)
def recall_curve(trial_ranks, top=None):
    """
    if top is None, f(n) = sum([I(rank[i] < n) for i < n]) / n
    if top is K, f(n) = sum([I(rank[i] < K) for i < n]) / K
    Parameters
    ----------
    trial_ranks: Array of int
        the rank of i th trial in labels
    top: int or None
        top-n recall
    Returns
    -------
    curve: Array of float
        function values
    """
    if not isinstance(trial_ranks, np.ndarray):
        trial_ranks = np.array(trial_ranks)
    if top is not None:
        # Fixed threshold: prefix-count of ranks below `top`, shifted by one
        # because position i only looks at trials strictly before i.
        hits = np.cumsum(trial_ranks < top)
        ret = np.zeros(len(trial_ranks))
        ret[1:] = hits[:-1] / top
        return ret
    # Varying threshold (<= i): no closed-form prefix sum, compute per position.
    return np.array(
        [np.sum(trial_ranks[:i] <= i) / (i + 1) for i in range(len(trial_ranks))]
    )
def cover_curve(trial_ranks):
    """
    f(n) = max k s.t. {1,2,...,k} is a subset of {ranks[i] for i < n}

    Parameters
    ----------
    trial_ranks: Array of int
        the rank of i th trial in labels

    Returns
    -------
    curve: Array of float
        function values
    """
    n = len(trial_ranks)
    curve = np.empty(n)
    seen = set()
    prefix = 0  # largest k such that ranks 0..k-1 have all been seen
    for idx, rank in enumerate(trial_ranks):
        seen.add(rank)
        while prefix in seen:
            prefix += 1
        curve[idx] = prefix
    return curve / n
def average_recall(preds, labels, N):
    """evaluate average recall-n for predictions and labels"""
    # Order trials by predicted score (best first), then look up where each
    # lands in the ground-truth ordering.
    order = np.argsort(preds)[::-1]
    true_ranks = get_rank(labels[order])
    curve = recall_curve(true_ranks)
    return np.sum(curve[:N]) / N
| 3,309 | 24.859375 | 69 | py |
tvm | tvm-main/python/tvm/autotvm/tuner/model_based_tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return,invalid-name,consider-using-enumerate,abstract-method
"""Base class for model-based tuner
This type of tuner will fit a cost model and use some optimization methods to
find optimums points of cost model in space.
"""
import gc
import numpy as np
from .tuner import Tuner
from ..env import GLOBAL_SCOPE
class FeatureCache(object):
    """Shared store of extracted features, keyed by feature type.

    Several cost models can reuse the same extracted features through one
    instance of this class, avoiding repeated extraction work.
    """

    def __init__(self):
        self.feature_cache = {}

    def get(self, key):
        """Return the cache dict for `key`, creating an empty one on first use.

        Parameters
        ----------
        key: str
            The key of a feature type

        Returns
        -------
        fea_cache: dict
            cache dictionary
        """
        return self.feature_cache.setdefault(key, {})

    def size(self, key):
        """Return how many entries are cached under `key` (0 if unknown).

        Parameters
        ----------
        key: str
            The key of a feature type

        Returns
        -------
        n: int
        """
        return len(self.feature_cache.get(key, ()))

    def clear(self, key):
        """Drop every cached feature for `key` and trigger a GC pass.

        Raises KeyError if `key` has never been cached.
        """
        self.feature_cache.pop(key)
        self.feature_cache[key] = {}
        gc.collect()
class CostModel(object):
    """Abstract interface for models that estimate how fast a config runs.

    Concrete subclasses implement every method below; this base class only
    defines the contract and raises ``NotImplementedError`` everywhere.
    """

    def __init__(self):
        pass

    def fit(self, xs, ys, plan_size):
        """Train the model on measured samples.

        Parameters
        ----------
        xs: Array of int
            indexes of configs in the config space
        ys: Array of float
            The speed (flop, float number operations per second)
        plan_size: int
            The plan size of tuner
        """
        raise NotImplementedError()

    def fit_log(self, records, plan_size, min_seed_records=500):
        """Train the model from recorded tuning logs.

        Parameters
        ----------
        records: Array of Tuple(MeasureInput, MeasureResult)
            The tuning records
        plan_size: int
            The plan size of tuner
        min_seed_records: int
            Defaults to 500. Minimum number of records required to train;
            with fewer records in `data_set` no training is performed.
        """
        raise NotImplementedError()

    def predict(self, xs, output_margin=False):
        """Estimate the speed of the given configs.

        Parameters
        ----------
        xs: Array of int
            The indexes of configs to predict
        output_margin: bool, optional
            Whether output the untransformed margin.
            When a model is used as base model, it should output untransformed margin

        Returns
        -------
        preds: Array of float
            The prediction
        """
        raise NotImplementedError()

    def load_basemodel(self, base_model):
        """Load a previously-fitted base model for transfer learning.

        Parameters
        ----------
        base_model: CostModel
            base model
        """
        raise NotImplementedError()

    def spawn_base_model(self):
        """Create an untrained clone with identical hyperparameters.

        The clone is used to fit history data in transfer learning.

        Returns
        -------
        model: CostModel
            A model with the same hyperparameter (argument)
        """
        raise NotImplementedError()
class ModelOptimizer(object):
    """Abstract interface for searching the maxima of a fitted cost model."""

    def __init__(self):
        pass

    def find_maximums(self, model, num, exclusive):
        """Locate the `num` highest-scoring points of a cost model.

        The cost model predicts GFLOPS, so tuning looks for maxima.

        Parameters
        ----------
        model: CostModel
            Cost model
        num: int
            The number of returned maximum points
        exclusive: set, optional
            The excluded set of this optimizer. Return results won't include any
            elements in this set.
        """
        raise NotImplementedError()
class ModelBasedTuner(Tuner):
    """Base class for model based tuner
    This type of tuner will fit a cost model and use an optimizer to
    find the maximums of the cost model as next trials
    Parameters
    ----------
    task: autotvm.task.Task
        The tuning task
    cost_model: CostModel
        The cost model that predicts the speed of a config (IR)
    model_optimizer:
        The optimizer to find local optimum points of cost model in tuning search space
    plan_size: int
        Tuner will re-fit model per `plan_size` new measure samples
    diversity_filter_ratio: int or float, optional
        If is not None, the tuner will first select
        top-(plan_size * diversity_filter_ratio) candidates according to the cost model
        and then pick plan_size of them according to the diversity metric.
    """
    def __init__(self, task, cost_model, model_optimizer, plan_size, diversity_filter_ratio=None):
        super(ModelBasedTuner, self).__init__(task)
        # space
        self.task = task
        self.target = task.target
        self.plan_size = plan_size
        self.cost_model = cost_model
        self.model_optimizer = model_optimizer
        self.diversity_filter_ratio = diversity_filter_ratio
        if self.diversity_filter_ratio:
            assert self.diversity_filter_ratio >= 1, (
                "Diversity filter ratio " "must be larger than one"
            )
        # trial plan: config indices proposed by the optimizer, consumed via trial_pt
        self.trials = []
        self.trial_pt = 0
        self.visited = set()
        # observed samples: config indices (xs) and measured speeds (ys)
        self.xs = []
        self.ys = []
        self.flops_max = 0.0
        # number of times the cost model has been (re-)fitted
        self.train_ct = 0
    def next_batch(self, batch_size):
        """Return up to `batch_size` unvisited configs to measure next.

        Planned trials are consumed first; near the end of the plan (or when
        no plan exists yet) indices are drawn at random (e-greedy).
        """
        ret = []
        while len(ret) < batch_size and self.has_next():
            # skip planned indices that were already visited or are invalid
            while self.trial_pt < len(self.trials):
                index = self.trials[self.trial_pt]
                if index not in self.visited and self.space.is_index_valid(index):
                    break
                self.trial_pt += 1
            if self.trial_pt >= len(self.trials) - int(0.05 * self.plan_size):
                # if the trial list is empty or
                # the tuner is doing the last 5% trials (e-greedy), choose randomly
                index = self.space.get_rand_index(to_exclude=self.visited)
            ret.append(self.space.get(index))
            self.visited.add(index)
        return ret
    def update(self, inputs, results):
        """Feed measurement results back; re-fit the model and re-plan once
        `plan_size` new samples have accumulated."""
        for inp, res in zip(inputs, results):
            index = inp.config.index
            if res.error_no == 0:
                self.xs.append(index)
                flops = inp.task.flop / np.mean(res.costs)
                self.flops_max = max(self.flops_max, flops)
                self.ys.append(flops)
            else:
                # failed measurement: record zero speed for this config
                self.xs.append(index)
                self.ys.append(0.0)
            # Usually the update function is called during the tune loop
            # after the index is already added to the visited set.
            # However, adding the index to visited again here enables us
            # to also use this update function to resume tuning progress in
            # case of interruption.
            assert self.space.is_index_valid(index)
            self.visited.add(index)
        # if we have enough new training samples
        if len(self.xs) >= self.plan_size * (self.train_ct + 1) and self.flops_max > 1e-6:
            self.cost_model.fit(self.xs, self.ys, self.plan_size)
            if self.diversity_filter_ratio:
                # over-sample candidates, then thin them with the submodular
                # diversity objective (scores are zeroed out: diversity only)
                candidate = self.model_optimizer.find_maximums(
                    self.cost_model, self.plan_size * self.diversity_filter_ratio, self.visited
                )
                scores = self.cost_model.predict(candidate)
                knobs = [self.space.point2knob(x) for x in candidate]
                pick_index = submodular_pick(0 * scores, knobs, self.plan_size, knob_weight=1)
                maximums = np.array(candidate)[pick_index]
            else:
                maximums = self.model_optimizer.find_maximums(
                    self.cost_model, self.plan_size, self.visited
                )
            self.trials = maximums
            self.trial_pt = 0
            self.train_ct += 1
    def load_history(self, data_set, min_seed_records=500):
        """Warm-start from previous tuning records (transfer learning)."""
        # set in_tuning as True to make the feature extraction consistent
        GLOBAL_SCOPE.in_tuning = True
        # fit base model
        base_model = self.cost_model.spawn_base_model()
        success = base_model.fit_log(data_set, self.plan_size, min_seed_records)
        if not success:
            GLOBAL_SCOPE.in_tuning = False
            return
        # use base model to select initial points
        if not self.trials:
            # no plan yet, use base model to select initial trials
            maximums = self.model_optimizer.find_maximums(base_model, self.plan_size, self.visited)
            self.trials = maximums
            self.trial_pt = 0
        self.cost_model.load_basemodel(base_model)
        GLOBAL_SCOPE.in_tuning = False
    def has_next(self):
        """True while some point in the search space has not been visited."""
        return len(self.visited) < len(self.space)
def submodular_pick(scores, knobs, n_pick, knob_weight=1.0):
    """Run greedy optimization to pick points with regard to both score and diversity.
    DiversityScore = knob_weight * number of unique knobs in the selected set
    Obj = sum(scores[i] for i in pick) + DiversityScore
    Note that this objective function is a monotone submodular function.

    Parameters
    ----------
    scores: Array of float
        score of every points
    knobs: Array of Array of int
        feature vector (tunable knobs) of every points
    n_pick: int
        number of points to pick
    knob_weight: float
        weight of an unique knob feature

    Returns
    -------
    pick: list of int
        indices of the picked points; at most min(n_pick, len(scores)) entries
    """
    n = len(scores)
    assert n == len(knobs)
    # Fix: an empty candidate set used to raise IndexError on knobs[0].
    if n == 0:
        return []
    n_knobs = len(knobs[0])
    # knobs_set[i] holds the knob values already covered at knob position i
    knobs_set = [set() for _ in range(n_knobs)]
    ret = []
    remain = list(range(n))
    for _ in range(n_pick):
        # Fix: stop early instead of crashing (remain.remove(-1) -> ValueError)
        # when fewer candidates than n_pick are available.
        if not remain:
            break
        max_x = -1
        max_delta = -1e9
        for x in remain:
            # marginal gain = score + weight per not-yet-covered knob value
            tmp_delta = scores[x]
            for i in range(n_knobs):
                if knobs[x][i] not in knobs_set[i]:
                    tmp_delta += knob_weight
            if tmp_delta > max_delta:
                max_delta, max_x = tmp_delta, x
        ret.append(max_x)
        remain.remove(max_x)
        for i in range(n_knobs):
            knobs_set[i].add(knobs[max_x][i])
    return ret
| 11,562 | 31.389356 | 99 | py |
tvm | tvm-main/python/tvm/autotvm/task/relay_integration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name, not-context-manager
"""
Decorator and utilities for the integration with TOPI and Relay
99.9% copy-paste of implementation by @MerryMercy
"""
import threading
import logging
import tvm
from tvm.autotvm.task.dispatcher import DispatchContext, FallbackContext
from tvm.target import Target
from .task import create
from .topi_integration import TaskExtractEnv
logger = logging.getLogger("autotvm")
# TODO(moreau89) find a more elegant way to lower for VTAs
def _lower(mod, target, params, opt_level=3):
    """Helper to lower VTA properly.

    Runs the relay compilation pipeline far enough for task extraction to
    observe the TOPI calls made during compilation; the build artifacts
    themselves are discarded (the function returns nothing).

    Parameters
    ----------
    mod : tvm.IRModule
        The module to lower.
    target : tvm.target.Target
        The compilation target.
    params : dict or None
        Parameters bound into the module before lowering.
    opt_level : int
        Relay optimization level used for the pass context.
    """
    # pylint: disable=import-outside-toplevel
    from tvm import relay
    from tvm.relay.backend import graph_executor_codegen
    if hasattr(target, "device_name") and target.device_name == "vta":
        import vta
        with vta.build_config(opt_level=opt_level, disabled_pass={"AlterOpLayout"}):
            mod, _ = relay.optimize(mod, target=target, params=params)
            grc = graph_executor_codegen.GraphExecutorCodegen(None, target)
            grc.codegen(mod, mod["main"])
            return
    # Alter op layout code has been written expecting that tuning is applied
    # without it, so we disable AlterOpLayout to maintain that behavior.
    with tvm.transform.PassContext(opt_level=opt_level, disabled_pass={"AlterOpLayout"}):
        compiler = relay.vm.VMCompiler()
        if params:
            compiler.set_params(params)
        compiler.lower(mod, target=target)
def extract_from_program(mod, params, target, target_host=None, ops=None):
    """Extract tuning tasks from a relay program.

    Convenience wrapper around :func:`extract_from_multiple_program` for a
    single module: it normalizes the target pair and delegates with
    one-element lists.

    Parameters
    ----------
    mod: tvm.IRModule or relay.function.Function
        The module or function to tune
    params: dict of str to numpy array
        The associated parameters of the program
    target: tvm.target.Target
        The compilation target
    target_host: tvm.target.Target
        The host compilation target
    ops: List[tvm.ir.Op] or None
        List of relay ops to be tuned. If not specified, all tunable ops will be extracted.

    Returns
    -------
    task: Array of autotvm.task.Task
        collected tasks
    """
    canon_target, _canon_host = Target.canon_target_and_host(target, target_host)
    return extract_from_multiple_program([mod], [params], canon_target, ops=ops)
def extract_from_multiple_program(mods, params, target, target_host=None, ops=None):
    """Extract tuning tasks from multiple relay programs.
    This function collects tuning tasks by building a list of programs
    with a "tracing" target and tracing all the calls to topi.
    Parameters
    ----------
    mods: List[tvm.IRModule] or List[relay.function.Function]
        The list of modules or functions to tune
    params: List of dict of str to numpy array
        The associated parameters of the programs
    target: tvm.target.Target
        The compilation target
    target_host: tvm.target.Target
        The host compilation target
    ops: List[tvm.ir.Op] or None
        List of relay ops to be tuned. If not specified, all tunable ops will be extracted.
    Returns
    -------
    task: Array of autotvm.task.Task
        collected tasks
    """
    # pylint: disable=import-outside-toplevel
    from tvm import relay
    from tvm import topi
    env = TaskExtractEnv.get()
    # merge target and target host
    target, target_host = Target.canon_target_and_host(target, target_host)
    # run compiler to collect all TOPI calls during compilation
    env.reset(ops)
    with env:
        # disable logger temporarily
        old_state = logger.disabled
        logger.disabled = True
        for mod, param in zip(mods, params):
            if isinstance(mod, relay.function.Function):
                mod = tvm.IRModule.from_expr(mod)
            assert isinstance(
                mod, tvm.IRModule
            ), "only support relay Module or Function to be tuned"
            # drop any cached compilation results so every TOPI call is re-traced
            relay.backend.te_compiler.get().clear()
            # wrap build call in thread to avoid multiprocessing problems
            build_thread = threading.Thread(target=_lower, args=(mod, target, param))
            build_thread.start()
            build_thread.join()
            relay.backend.te_compiler.get().clear()
        # Clear the warning message cache in FallbackContext
        if isinstance(DispatchContext.current, FallbackContext):
            DispatchContext.current.memory = {}
            DispatchContext.warning_messages = set()
        logger.disabled = old_state
    # create tasks for target
    tasks = []
    for task_name, args in env.get_tasks():
        try:
            tsk = create(task_name, args, target=target)
            tasks.append(tsk)
        except topi.InvalidShapeError:
            # best-effort: skip tasks whose shapes cannot be instantiated
            logger.warning("Invalid shape during AutoTVM task creation")
    return tasks
| 5,724 | 35.698718 | 92 | py |
tvm | tvm-main/python/tvm/autotvm/task/task.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,not-callable
"""Definition of task function.
Task can be constructed from tuple of func, args, and kwargs.
func is a state-less function, or a string that
registers the standard task.
"""
import functools
import numpy as np
from tvm import runtime
from tvm.ir import container
from tvm.target import Target
from tvm.te import placeholder, tensor
from tvm.tir import expr
from ..utils import get_const_int, get_const_tuple
from .dispatcher import ApplyConfig, DispatchContext
from .space import ConfigSpace
def _lookup_task(name):
    """Resolve a registered task template by name.

    Falls back to a MissingTask stub when the name is unknown, e.g. when
    the module that registers the template was never imported in this run;
    the stub raises a clear error only if it is actually invoked.
    """
    found = TASK_TABLE.get(name)
    if found is not None:
        return found
    return MissingTask(name)
def serialize_args(args):
    """serialize arguments of a topi function to a hashable tuple.

    Tensors become ("TENSOR", shape, dtype) triples; nested sequences are
    converted to tuples recursively; TIR immediates are unwrapped to their
    Python values.

    Parameters
    ----------
    args: list of hashable or Tensor
    """
    def _encode(x):
        # NOTE: the isinstance checks below are order-sensitive; e.g. TIR
        # immediates must be unwrapped before generic fallbacks apply.
        if isinstance(x, tensor.Tensor):
            return ("TENSOR", get_const_tuple(x.shape), x.dtype)
        if isinstance(x, (tuple, list, container.Array)):
            return tuple([_encode(a) for a in x])
        # bool is a subclass of int, so booleans also pass through here as-is
        if isinstance(x, (str, int, float, expr.Var, expr.Any)):
            return x
        if isinstance(x, (expr.StringImm, expr.IntImm, expr.FloatImm)):
            return x.value
        if isinstance(x, runtime.container.String):
            return str(x)
        if x is None:
            return None
        raise RuntimeError(
            f'Do not support type "{type(x)}" in argument. Consider to use'
            f"primitive types or tvm.tir.Var only"
        )
    ret = []
    for t in args:
        ret.append(_encode(t))
    return tuple(ret)
def deserialize_args(args):
    """The inverse function of :code:`serialize_args`.

    Rebuilds placeholder tensors from ("TENSOR", shape, dtype) tuples and
    passes every other argument through untouched.

    Parameters
    ----------
    args: list of hashable or Tensor
    """
    def _decode(item):
        if isinstance(item, tuple) and item[0] == "TENSOR":
            return placeholder(shape=item[1], dtype=item[2])
        return item

    return [_decode(item) for item in args]
def args_to_workload(args, task_name=None):
    """Convert argument list to hashable workload tuple.

    This function will convert list to tuple, tvm node to python value and
    flatten te.tensor.Tensor to a tuple.

    Parameters
    ----------
    task_name : str
        The AutoTVM task name
    args : list of args
        The arguments to the function

    Returns
    -------
    ret: hashable
        The hashable value
    """
    workload = serialize_args(args)
    if task_name is None:
        return workload
    return (task_name,) + workload
class Task(object):
    """A Tunable Task
    Parameters
    ----------
    name: str
        The name of the task.
    args: Tuple
        Positional argument of func
    """
    def __init__(self, name, args):
        self.name = name
        self.args = args
        self.kwargs = {}  # currently unused
        # init null config space
        self.config_space = None
        self.func = _lookup_task(name)
        # auxiliary info, available after `init_space` is called
        self.flop = None
        self.target = None
        self.target_host = None
    @property
    def workload(self):
        """Hashable (name, *serialized_args) tuple identifying this task."""
        return (self.name,) + serialize_args(self.args)
    def instantiate(self, config):
        """Instantiate this task function (template) with a config.
        Returns corresponding schedule.
        Parameters
        ----------
        config: template.ConfigEntity
            parameter config for this template
        Returns
        -------
        sch: tvm.te.schedule.Schedule
            The tvm schedule
        arg_bufs: Array of te.tensor.Tensor
            The input/output buffers
        """
        config.flop = 0
        with ApplyConfig(config):
            sch, arg_bufs = self.func(*self.args, **self.kwargs)
        # cache the FLOP count on first instantiation; the template may have
        # set config.flop itself, otherwise estimate it from the schedule
        if not self.flop:
            config.flop = config.flop or compute_flop(sch)
            self.flop = config.flop
        return sch, arg_bufs
    def __getstate__(self):
        # custom pickle implementation is required for
        # some unpickable local task functions.
        # So we only pickle the name of the function
        # and restore the function by name when unpickling it.
        import cloudpickle  # pylint: disable=import-outside-toplevel
        # normalize targets before pickling so the restored task is canonical
        self.target, self.target_host = Target.canon_target_and_host(self.target, self.target_host)
        return {
            "name": self.name,
            "args": self.args,
            "kwargs": self.kwargs,
            "config_space": self.config_space,
            "flop": self.flop,
            "target": self.target,
            "target_host": self.target_host,
            "func": cloudpickle.dumps(self.func),
        }
    def __setstate__(self, state):
        import cloudpickle  # pylint: disable=import-outside-toplevel
        self.name = state["name"]
        self.args = state["args"]
        self.kwargs = state["kwargs"]
        self.config_space = state["config_space"]
        self.func = cloudpickle.loads(state["func"])
        self.flop = state["flop"]
        self.target, self.target_host = Target.canon_target_and_host(
            state["target"], state["target_host"]
        )
    def __repr__(self):
        return "Task(func_name=%s, args=%s, kwargs=%s, workload=%s)" % (
            self.name,
            self.args,
            self.kwargs,
            self.workload,
        )
# Global registry mapping task name -> TaskTemplate; populated by the
# _register_* helpers and queried by _lookup_task.
TASK_TABLE = {}
class TaskTemplate(object):
    """
    Task template is used to creates a tunable AutoTVM task.
    It can be defined by a pair of compute and schedule function using
    `_register_task_compute` and `_register_task_schedule`,
    or by a customized task creation function that is more flexible using
    `_register_customized_task`.
    Note that when customized func is registered, compute and schedule function
    will be ignored
    """
    def __init__(self):
        # populated later by the _register_* helpers
        self.fcompute = None
        self.fschedule = None
        self.fcustomized = None
    def __call__(self, *args, **kwargs):
        """Instantiate the template: the customized function takes priority
        over the compute/schedule pair when registered."""
        args = deserialize_args(args)
        if self.fcustomized is None:
            return self._default_func(*args, **kwargs)
        assert callable(self.fcustomized)
        return self.fcustomized(*args, **kwargs)
    def _default_func(self, *args, **kwargs):
        """Build (schedule, arg_bufs) from the registered compute/schedule pair."""
        assert callable(self.fcompute) and callable(self.fschedule)
        out = self.fcompute(*args, **kwargs)
        arg_bufs = [out] + self._get_inputs(out)
        s = self.fschedule([out])
        return s, arg_bufs
    @staticmethod
    def _get_inputs(out):
        """Collect placeholder (input) tensors reachable from `out` via BFS."""
        inputs = []
        queue = [out]
        hash_set = set()
        while queue:
            t = queue.pop(0)
            if isinstance(t.op, tensor.PlaceholderOp):
                inputs.append(t)
            else:
                # hash_set prevents enqueuing the same tensor twice
                input_tensors = [t for t in t.op.input_tensors if t not in hash_set]
                queue.extend(input_tensors)
                hash_set.update(input_tensors)
        return inputs
class MissingTask(TaskTemplate):
    """Placeholder template returned when a task name cannot be resolved.

    A lookup from _lookup_task() may fail because the module registering
    the requested task was never imported in this run; this stub defers
    the failure until the task is actually invoked.
    """

    def __init__(self, taskname: str):
        super().__init__()
        self._name = taskname

    def __call__(self, *args, **kwargs):
        message = (
            f"Attempting to invoke a missing task {self._name}."
            "It is possible that the function is registered in a "
            "Python module that is not imported in this run, or the log is out-of-date."
        )
        raise RuntimeError(message)
def _register_task_compute(name, func=None):
    """Register compute function to autotvm task

    Parameters
    ----------
    name: str
        The task name
    func: None or callable
        If it is None, return a decorator.
        If is callable, decorate this function.

    Returns
    -------
    decorator: callable
        A decorator
    """
    def _do_reg(f):
        entry = TASK_TABLE.setdefault(name, TaskTemplate())
        if entry.fcompute is not None:
            raise ValueError(f"Compute is already registered in autoTVM task {name}")
        entry.fcompute = f
        return f

    return _do_reg(func) if func else _do_reg
def _register_task_schedule(name, func=None):
    """Register schedule function to autotvm task

    Parameters
    ----------
    name: str
        The task name
    func: None or callable
        If it is None, return a decorator.
        If is callable, decorate this function.

    Returns
    -------
    decorator: callable
        A decorator
    """
    def _do_reg(f):
        entry = TASK_TABLE.setdefault(name, TaskTemplate())
        if entry.fschedule is not None:
            raise ValueError(f"Schedule is already registered in autoTVM task {name}")
        entry.fschedule = f
        return f

    return _do_reg(func) if func else _do_reg
def _register_customized_task(name, func=None):
    """Register a customized function to AutoTVM task.

    Parameters
    ----------
    name: str
        The task name
    func: None or callable
        If it is None, return a decorator.
        If is callable, decorate this function.

    Returns
    -------
    decorator: callable
        A decorator
    """
    def _do_reg(f):
        entry = TASK_TABLE.setdefault(name, TaskTemplate())
        if entry.fcustomized is not None:
            raise ValueError(f"Customized func is already registered in autoTVM task {name}")
        entry.fcustomized = f
        return f

    return _do_reg(func) if func else _do_reg
def template(task_name, func=None):
    """Decorate a function as a tunable schedule template.
    Parameters
    ----------
    task_name: str
        The task name
    func: None or callable
        A callable template function.
        If it is None, return a decorator.
        If is callable, decorate this function.
    Returns
    -------
    func: callable
        The decorated function
    Examples
    --------
    The following code is a tunable template for a blocked matrix multiplication
    .. code-block:: python
        @autotvm.template("matmul")
        def matmul(N, L, M, dtype):
            A = te.placeholder((N, L), name='A', dtype=dtype)
            B = te.placeholder((L, M), name='B', dtype=dtype)
            k = te.reduce_axis((0, L), name='k')
            C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name='C')
            s = te.create_schedule(C.op)
            # schedule
            y, x = s[C].op.axis
            k = s[C].op.reduce_axis[0]
            ##### define space begin #####
            cfg = autotvm.get_config()
            cfg.define_split("tile_y", y, num_outputs=2)
            cfg.define_split("tile_x", x, num_outputs=2)
            ##### define space end #####
            # schedule according to config
            yo, yi = cfg["tile_y"].apply(s, C, y)
            xo, xi = cfg["tile_x"].apply(s, C, x)
            s[C].reorder(yo, xo, k, yi, xi)
            return s, [A, B, C]
    """
    def _decorate(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            assert not kwargs, "Do not support kwargs in template function call"
            # look up the config chosen by the active dispatch context for
            # this workload and apply it while running the template body
            workload = args_to_workload(args, task_name)
            tgt = Target.current()
            cfg = DispatchContext.current.query(tgt, workload)
            with ApplyConfig(cfg):
                return f(*args, **kwargs)
        # also register the raw template so create() can instantiate it
        _register_customized_task(task_name, f)
        return wrapper
    if func:
        return _decorate(func)
    return _decorate
def create(task_name, args, target, target_host=None):
    """Create a tuning task and initialize its search space
    Parameters
    ----------
    task_name : str
        The AutoTVM task name
    args : List
        Positional arguments
    target : Target
        The compilation target
    target_host: Target, optional
        The compilation target for host side
    Returns
    -------
    tsk: Task
        a task object
    """
    args = serialize_args(args)
    ret = Task(task_name, args)
    target, target_host = Target.canon_target_and_host(target, target_host)
    # init config space
    ret.config_space = ConfigSpace()
    ctx = ApplyConfig(ret.config_space)
    # run the template once under the fresh ConfigSpace so that all knob
    # definitions inside the template populate the search space
    with ctx:
        with target:
            sch, _ = ret.func(*args)
            ret.config_space.code_hash = getattr(sch, "code_hash", None)
    ret.flop = ret.config_space.flop or compute_flop(sch)
    ret.target = target
    ret.target_host = target_host
    return ret
def get_config():
    """Get current config object

    Returns
    -------
    cfg: ConfigSpace or ConfigEntity
        The current config
    """
    current_target = Target.current(allow_none=True)
    return DispatchContext.current.query(current_target, None)
class FlopCalculationError(RuntimeError):
    """Raised when the FLOP count of a compute op cannot be estimated."""
def compute_flop(sch):
    """Calculate number of FLOP (floating number operations) of the compute ops in a schedule
    Parameters
    ----------
    sch: tvm.te.schedule.Schedule
        schedule
    Returns
    -------
    flop: int
        number of FLOP in this schedule
    """
    def _prod_length(axes):
        """compute product of the lengths of a list of axes"""
        try:
            num_iter = int(np.prod([get_const_int(axis.dom.extent) for axis in axes]))
        except ValueError:
            raise FlopCalculationError("The length of axis is not constant. ")
        return num_iter
    def _count_flop(exp):
        """compute flop for a single expression"""
        if isinstance(exp, expr.Reduce):
            num_iter = _prod_length(exp.axis)
            combiner = exp.combiner.result
            source = exp.source
            if len(combiner) != 1:
                raise FlopCalculationError("Found multiple output in the combiner of reduce op")
            if len(source) != 1:
                raise FlopCalculationError("Found multiple output in the source of reduce op")
            # reduce = per-iteration source flops + combine flops, repeated num_iter times
            return num_iter * (_count_flop(combiner[0]) + _count_flop(source[0]))
        # constants and loop variables cost nothing
        if isinstance(exp, (expr.FloatImm, expr.IntImm)):
            return 0
        if isinstance(exp, expr.Cast):
            return _count_flop(exp.value)
        if isinstance(exp, expr.Var):
            return 0
        if isinstance(
            exp,
            (
                expr.Add,
                expr.Sub,
                expr.Mul,
                expr.Div,
                expr.Mod,
                expr.FloorDiv,
                expr.FloorMod,
                expr.Max,
                expr.Min,
                expr.EQ,
                expr.NE,
                expr.LT,
                expr.LE,
                expr.GT,
                expr.GE,
                expr.And,
                expr.Or,
                expr.Not,
            ),
        ):
            # every arithmetic/logic node counts as one op plus its operands
            base = 1
            if isinstance(exp, expr.Not):  # unary
                return base + _count_flop(exp.a)
            return base + _count_flop(exp.a) + _count_flop(exp.b)
        if isinstance(exp, expr.Select):
            # only one branch executes, so take the max of the two branches
            return _count_flop(exp.condition) + max(
                _count_flop(exp.true_value), _count_flop(exp.false_value)
            )
        if isinstance(exp, expr.ProducerLoad):
            # Ignore flops from indexing expressions.
            return 0
        if isinstance(exp, expr.Call):
            return sum([_count_flop(x) for x in exp.args])
        raise FlopCalculationError("Found unsupported operator in the compute expr")
    def traverse(ops):
        """accumulate flops"""
        ret = 0
        for op in ops:
            if isinstance(op, tensor.ComputeOp):
                num_element = _prod_length(op.axis)
                body = op.body
                if len(body) != 1:
                    raise FlopCalculationError("Found multiple output in the compute")
                exp = body[0]
                ret += num_element * _count_flop(exp)
                # recurse into the producers of this op's inputs
                ret += traverse([t.op for t in op.input_tensors])
            elif isinstance(op, tensor.PlaceholderOp):
                pass
            else:
                raise FlopCalculationError(
                    f"{op.name} is not supported by autotvm. "
                    "Only support te.compute currently. "
                    "Other ops like tvm.te.scan/te.extern is not supported"
                )
        return ret
    try:
        ret = traverse(sch.outputs)
    except FlopCalculationError as exc:
        raise RuntimeError(
            "FLOP estimator fails for this operator. Error msg: "
            + str(exc)
            + ". Please use `cfg.add_flop` to manually set "
            "FLOP for this operator"
        )
    if ret == 0:
        raise RuntimeError(
            "Cannot find float number operation in this operator. "
            "Please use `cfg.add_flop` to manually set "
            "FLOP for this operator"
        )
    return ret
| 18,201 | 27.937997 | 99 | py |
tvm | tvm-main/python/tvm/autotvm/task/space.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-few-public-methods,invalid-name,unused-argument,arguments-differ
# pylint: disable=consider-using-enumerate,too-many-lines, invalid-sequence-index
"""
Template configuration space.
Each template function can be parameterized by a ConfigSpace.
The space is declared when we invoke the template function with ConfigSpace.
During evaluation, we pass in a ConfigEntity, which contains a specific
entity in the space. This entity contains deterministic parameters.
"""
from __future__ import absolute_import as _abs
import itertools
import functools
import math
from collections import namedtuple, OrderedDict
from random import randrange
import numpy as np
from tvm.te import schedule, thread_axis
from tvm.tir import expr
from tvm.autotvm.utils import get_const_int
# An axis handle: `space` is the TransformSpace that produced it and
# `index` is the position of this axis among that space's outputs.
Axis = namedtuple("Axis", ["space", "index"])
# Python 2 compatibility shim: `long` does not exist on Python 3.
try:
    _long = long
except NameError:
    _long = int
class InstantiationError(ValueError):
    """Raised (via cfg.raise_error) when a config is actively detected to be
    invalid while instantiating a template, e.g. too much unrolling or more
    threads than a block allows.
    """
class TransformSpace(object):
    """Base class for transform space

    TransformSpace is the node in the computation graph of axes.

    .. note::
        We can regard our schedule code as a transformation graph of axes.
        Starting from raw axes in the definition of te.compute, we can transform these axes
        by some operators. The operator includes 'split', 'reorder' and 'annotate'.
        Each operator has some tunable parameters (e.g. the split factor).
        Then the tuning process is just to find good parameters of these op.
        So all the combinations of the parameters of these op form our search space.

    Naming convention:
        We call the set of all possible values as XXXSpace. (XXX can be Split, Reorder, Config ...)
        We call a specific entity in a space as XXXEntity.
    """

    def __init__(self):
        self.ins = []          # input axes this transform consumes
        self.num_output = 0    # how many axes this transform produces
        self.entities = []     # all concrete entities in this space

    def __len__(self):
        """Number of entities in this space."""
        return len(self.entities)

    def __getitem__(self, index):
        """Look up one entity of the space.

        Parameters
        ----------
        index: int

        Returns
        -------
        transform entity
        """
        return self.entities[index]

    @staticmethod
    def get_num_output():
        """Number of output axes after this transform.

        Returns
        -------
        n: int
            number of output axes
        """
        return 0
class VirtualAxis(TransformSpace):
    """Axis placeholder in template

    Parameters
    ----------
    var: int or tvm.te.schedule.IterVar
        If is int, return a virtual axis whose length is the provided argument.
        If is IterVar, return a virtual axis whose length is extracted from
        the IterVar's extent domain.
    name: str
    """

    # class-wide counter used to synthesize unique default names
    name_ct = 0

    def __init__(self, var, name=None):
        super(VirtualAxis, self).__init__()
        self.num_output = 1
        if name is None:
            name = f"axis_{VirtualAxis.name_ct}"
            VirtualAxis.name_ct += 1
        self.name = name
        if isinstance(var, VirtualAxis):
            # copying another placeholder: inherit its length
            self.length = var.length
        elif isinstance(var, (int, _long)):
            self.length = var
        elif isinstance(var, schedule.IterVar):
            # take the IterVar's own name; -1 marks an unknown extent
            self.name = var.var.name
            self.length = -1 if var.dom is None else get_const_int(var.dom.extent)
        else:
            raise RuntimeError("Invalid type of axis: " + str(type(var)))

    @staticmethod
    def get_num_output(var, name=None):
        # a virtual axis always stands for exactly one output axis
        return 1

    def __repr__(self):
        return f"vaxis({self.name})"
def get_factors(n):
    """return all factors of an integer

    Parameters
    ----------
    n: int
        positive integer to factorize

    Returns
    -------
    factors: list
        List of all factors, sorted ascending
    """
    # An odd number has no even divisors, so probe odd candidates only.
    stride = 2 if n % 2 else 1
    found = set()
    for cand in range(1, int(math.sqrt(n)) + 1, stride):
        if n % cand == 0:
            # each small divisor pairs with its complementary large one
            found.add(cand)
            found.add(n // cand)
    return sorted(found)
def get_pow2s(n):
    """return all power-of-two numbers that are less or equal than the integer

    Parameters
    ----------
    n: int
        integer for reference (expected >= 1)

    Returns
    -------
    factors: list
        List of all power-of-two numbers
    """
    # Double from 1 upward instead of computing floor(log2(n)).
    powers = []
    current = 1
    while current <= n:
        powers.append(current)
        current *= 2
    return powers
class SplitSpace(TransformSpace):
    """Split an axis for several times

    Enumerates ways to split one axis into ``num_outputs`` sub-axes according
    to ``policy``; each candidate becomes a :class:`SplitEntity`.
    """

    def __init__(self, axes, policy, **kwargs):
        super(SplitSpace, self).__init__()
        # only the first axis is split; callers pass a single-element list
        axis = axes[0]
        self.policy = policy
        self.entities = []
        max_factor = kwargs.get("max_factor", 1 << 31)
        # optional per-entity predicate applied after enumeration
        fil = kwargs.get("filter", lambda x: True)
        self.product = axis.length
        self.num_output = kwargs.get("num_outputs", 0)
        assert self.num_output > 0
        if policy == "candidate":
            # user supplies explicit size vectors; no enumeration needed
            for size in kwargs["candidate"]:
                assert len(size) == self.num_output
                self.entities.append(SplitEntity(size))
        else:
            if policy == "verbose":
                # Include factors and power-of-twos. May generate tails.
                divisibles = get_factors(self.product)
                pow2s = get_pow2s(self.product)
                factors = [x for x in list(set(divisibles) | set(pow2s)) if x <= max_factor]
            elif policy == "factors":
                # Include divisible factors. Guarantee no tails.
                factors = [x for x in get_factors(self.product) if x <= max_factor]
            elif policy == "power2":
                # Include less, equal, and round-up power-of-two numbers. May generate tails.
                factors = [x for x in get_pow2s(self.product) if x <= max_factor]
            else:
                raise RuntimeError(f"Invalid policy: {policy}")
            # Enforce the product of all split factors equals to the axis length
            no_tail = kwargs.get("no_tail", policy == "factors")
            # Generate split entity by enumerating candidate factors.
            self.factors = factors
            self._generate_space(0, [None] * (self.num_output - 1), enforce_no_tail=no_tail)
        self.entities = list(filter(fil, self.entities))

    def _generate_space(self, now, tmp_stack, enforce_no_tail=False):
        """Generate space by DFS over candidate factors.

        ``tmp_stack`` holds the factors chosen so far (inner to outer); the
        outermost factor is implicit (-1) and absorbs the remaining extent.
        """
        if now == self.num_output - 1:
            prod = functools.reduce(lambda x, y: x * y, tmp_stack)
            if prod > self.product:
                return
            # accept exact divisions always; inexact ones only when tails allowed
            if self.product % prod == 0 or (not enforce_no_tail and prod < self.product):
                self.entities.append(SplitEntity([-1] + tmp_stack[::-1]))
        else:
            for factor in self.factors:
                tmp_stack[now] = factor
                self._generate_space(now + 1, tmp_stack, enforce_no_tail)

    @staticmethod
    def get_num_output(axes, policy, **kwargs):
        # a split produces exactly the requested number of sub-axes
        return kwargs["num_outputs"]

    def __repr__(self):
        return "Split(policy=%s, product=%d, num_outputs=%d) len=%d" % (
            self.policy,
            self.product,
            self.num_output,
            len(self),
        )
class SplitEntity(object):
    """
    A split operation with detailed parameters
    that can apply to an axis

    Parameters
    ----------
    size: Array of int
        the size of every axis after split.
        e.g. an axis of extent 128, we split it into 3 axes, a possible
        size is [4, 4, 8] (4x4x8 = 128).
    """

    def __init__(self, size):
        self.size = size

    def apply(self, sch, op, axis):
        """Apply split to an axis

        Parameters
        ----------
        sch: tvm.te.schedule.Schedule
            The tvm schedule
        op: tvm.te.Operation
            The stage to be applied
        axis: tvm.te.schedule.IterVar
            axis to split

        Returns
        -------
        axes : list of Axis
            The transformed axes.
        """
        outputs = []
        remaining = axis
        # Peel off one outer axis per configured dimension; the inner extent
        # at each step is the product of all sizes to the right.
        for pos in range(1, len(self.size)):
            inner_extent = int(np.prod(self.size[pos:]))
            outer, remaining = sch[op].split(remaining, inner_extent)
            outputs.append(outer)
        return outputs + [remaining]

    def __repr__(self):
        return str(self.size)
class ReorderSpace(TransformSpace):
    """The parameter space for ordering an array of axes

    Each entity is a permutation (a :class:`ReorderEntity`) of the input axes,
    generated according to ``policy``.
    """

    def __init__(self, axes, policy, **kwargs):
        super(ReorderSpace, self).__init__()
        self.ins = axes
        self.policy = policy
        self.num_output = len(axes)
        if policy == "identity":
            self.entities = [ReorderEntity(range(len(axes)))]
        elif policy == "all":
            self.entities = [ReorderEntity(x) for x in itertools.permutations(range(len(axes)))]
        elif policy == "interval_all":
            # permute only the axes in [begin, end); keep prefix/suffix fixed
            begin, end = kwargs["interval"]
            sub_space = list(itertools.permutations(range(begin, end)))
            prefix, suffix = tuple(range(begin)), tuple(range(end, len(axes)))
            self.entities = [ReorderEntity(prefix + x + suffix) for x in sub_space]
        elif policy == "candidate":
            # user gives explicit axis orderings; translate to index permutations
            candidate = kwargs["candidate"]
            for can in candidate:
                perm = [axes.index(x) for x in can]
                self.entities.append(ReorderEntity(perm))
        elif policy == "interleave":
            # interleave chains of spatial axes with chains of reduction axes;
            # the innermost spatial axis of each chain stays innermost
            spatial, reduce = kwargs["spatial"], kwargs["reduce"]
            spatial = [[axes.index(x) for x in ch] for ch in spatial]
            reduce = [[axes.index(x) for x in ch] for ch in reduce]
            outer_merged = self._merge_chain([x[:-1] for x in spatial])
            inner_merged = self._merge_chain([x[-1:] for x in spatial] + reduce)
            for o in outer_merged:
                for i in inner_merged:
                    self.entities.append(ReorderEntity(o + i))
        elif policy == "interleave_cuda":
            # like "interleave" but the last spatial axes are placed innermost
            # in their original order (typical thread-bound layout on CUDA)
            spatial, reduce = kwargs["spatial"], kwargs["reduce"]
            spatial = [[axes.index(x) for x in ch] for ch in spatial]
            reduce = [[axes.index(x) for x in ch] for ch in reduce]
            outer_merged = self._merge_chain([x[:-1] for x in spatial])
            reduce_merged = self._merge_chain(reduce)
            inner_merged = [x[-1] for x in spatial]
            for o in outer_merged:
                for r in reduce_merged:
                    self.entities.append(ReorderEntity(o + r + inner_merged))
        else:
            raise RuntimeError("Invalid policy: " + policy)

    @staticmethod
    def get_num_output(axes, policy, **kwargs):
        # reorder permutes axes but never changes their count
        return len(axes)

    def __repr__(self):
        return f"Reorder(policy={self.policy}) len={len(self)}"

    def _merge_chain(self, chains):
        """generate all combinations of merge some chains"""
        merged = []
        tmp_pt = [0] * len(chains)  # consumption pointer into each chain
        tmp_stack = []  # partial merged sequence built by the DFS
        size = np.sum([len(x) for x in chains])
        self._merge_dfs(chains, size, tmp_pt, tmp_stack, merged)
        return merged

    def _merge_dfs(self, chains, size, tmp_pt, tmp_stack, merged):
        # DFS that interleaves the chains while preserving each chain's
        # internal order; a complete interleaving is appended to `merged`
        if np.sum(tmp_pt) == size:
            merged.append(list(tmp_stack))
            return
        for i in range(len(chains)):
            # use i == np.argmax(....) here to take spatial order into consideration
            # if we don't want to consider spatial order, we can use tmp_pt[i] == np.max(....)
            if tmp_pt[i] < len(chains[i]) and (
                i == np.argmax([len(chains[x]) - tmp_pt[x] for x in range(len(chains))])
            ):
                tmp_stack.append(chains[i][tmp_pt[i]])
                tmp_pt[i] += 1
                self._merge_dfs(chains, size, tmp_pt, tmp_stack, merged)
                tmp_pt[i] -= 1
                tmp_stack.pop()
class ReorderEntity(object):
    """A reorder operation with detailed parameters that can apply to axes

    Parameters
    ----------
    perm: Array of int
        define the permutation
    """

    def __init__(self, perm):
        self.perm = perm

    def apply(self, sch, op, axes):
        """Apply reorder to an array of axes

        Parameters
        ----------
        sch: tvm.te.schedule.Schedule
            The tvm schedule
        op: tvm.te.Operation
            The stage to be applied
        axes: Array of tvm.te.schedule.IterVar
            axes to reorder

        Returns
        -------
        axes : list of Axis
            The transformed axes.
        """
        limit = len(axes)
        if limit == len(self.perm):
            ordered = [axes[idx] for idx in self.perm]
        else:
            # permutation was built for more axes than supplied:
            # silently drop the out-of-range positions
            ordered = [axes[idx] for idx in self.perm if idx < limit]
        sch[op].reorder(*ordered)
        return ordered

    def __repr__(self):
        return str(self.perm)
class AnnotateSpace(TransformSpace):
    """The parameter space for annotating an array of axes

    Each entity assigns one annotation string per axis (e.g. ``unroll``,
    ``vec``, thread bindings, or ``fuse``); see :class:`AnnotateEntity`.
    """

    def __init__(self, axes, policy, **kwargs):
        super(AnnotateSpace, self).__init__()
        self.ins = axes
        self.policy = policy
        self.num_output = len(axes)
        if policy == "bind_gpu":
            # deterministic binding: fuse extra outer axes, then bind the
            # innermost 6/4/2 axes to block/thread indices
            self.num_axis = len(axes)
            if self.num_axis >= 6:
                self.entities.append(
                    AnnotateEntity(
                        ["fuse"] * (self.num_axis - 6)
                        + [
                            "blockIdx.z",
                            "blockIdx.y",
                            "blockIdx.x",
                            "threadIdx.z",
                            "threadIdx.y",
                            "threadIdx.x",
                        ]
                    )
                )
            elif self.num_axis >= 4:
                self.entities.append(
                    AnnotateEntity(
                        ["fuse"] * (self.num_axis - 4)
                        + ["blockIdx.y", "blockIdx.x", "threadIdx.y", "threadIdx.x"]
                    )
                )
            elif self.num_axis >= 2:
                self.entities.append(
                    AnnotateEntity(["fuse"] * (self.num_axis - 2) + ["blockIdx.x", "threadIdx.x"])
                )
            else:
                raise RuntimeError("Unhandled case in bind_gpu")
        elif policy == "bind_gpu_virtual":
            # like bind_gpu but with an extra vthread level between blocks
            # and threads (9/6/3 innermost axes)
            self.num_axis = len(axes)
            if self.num_axis >= 9:
                self.entities.append(
                    AnnotateEntity(
                        ["fuse"] * (self.num_axis - 9)
                        + [
                            "blockIdx.z",
                            "blockIdx.y",
                            "blockIdx.x",
                            "vthread",
                            "vthread",
                            "vthread",
                            "threadIdx.z",
                            "threadIdx.y",
                            "threadIdx.x",
                        ]
                    )
                )
            elif self.num_axis >= 6:
                self.entities.append(
                    AnnotateEntity(
                        ["fuse"] * (self.num_axis - 6)
                        + [
                            "blockIdx.y",
                            "blockIdx.x",
                            "vthread",
                            "vthread",
                            "threadIdx.y",
                            "threadIdx.x",
                        ]
                    )
                )
            elif self.num_axis >= 3:
                self.entities.append(
                    AnnotateEntity(
                        ["fuse"] * (self.num_axis - 3) + ["blockIdx.x", "vthread", "threadIdx.x"]
                    )
                )
            else:
                raise RuntimeError("Unhandled case in bind_gpu")
        elif policy == "locate_cache":
            # choose `num_anchor` axes as compute_at anchors for caches
            self.num_axis = len(axes)
            num_anchor = kwargs["num_anchor"]
            self.anns = list(itertools.combinations(range(self.num_axis), num_anchor))
            self.entities = [AnnotateEntity(x) for x in self.anns]
        else:  # none, vec, unroll, try_vec, try_unroll, try_vec_unroll, ...
            # "try_" prefixes mean "none" is also an allowed choice per axis
            anns = policy.replace("try", "none").split("_")
            for ann in anns:
                if ann not in ["none", "unroll", "vec"]:
                    raise RuntimeError("Invalid policy: " + policy)
            self.num_axis = len(axes)
            self.anns = [anns] * self.num_axis
            self._generate_space(0, [""] * self.num_axis)

    def _generate_space(self, now, tmp_stack):
        """Generate space by DFS"""
        if now == self.num_axis:
            # only vectorize inner most dimension
            vec_ct = tmp_stack.count("vec")
            if vec_ct in (0, 1):
                self.entities.append(AnnotateEntity(list(tmp_stack)))
        else:
            for ann in self.anns[now]:
                tmp_stack[now] = ann
                self._generate_space(now + 1, tmp_stack)

    @staticmethod
    def get_num_output(axes, policy, **kwargs):
        # annotation decorates axes in place; their count is unchanged
        return len(axes)

    def __repr__(self):
        return f"Annotate(policy={self.policy}) len={len(self)}"
class AnnotateEntity(object):
    """An annotation operation with detailed parameters that can apply to axes

    Parameters
    ----------
    anns: Array of string
        The annotations of axes
    """

    # annotations whose string is exactly the thread-axis tag they bind to
    _BIND_ANNS = (
        "blockIdx.x",
        "blockIdx.y",
        "blockIdx.z",
        "threadIdx.x",
        "threadIdx.y",
        "threadIdx.z",
        "vthread",
    )

    def __init__(self, anns):
        self.anns = anns

    def apply(
        self, sch, op, axes, axis_lens=None, max_unroll=None, vec_size=None, cfg=None, source=None
    ):
        """Apply annotation to an array of axes

        Parameters
        ----------
        sch: tvm.te.schedule.Schedule
            The tvm schedule
        op: tvm.te.Operation
            The stage to be applied
        axes: Array of tvm.te.schedule.IterVar
            axis to split
        axis_lens: Array of int, optional
            the length of axes
        max_unroll: int, optional
            maximum unroll step
        vec_size: Array of int, optional
            valid vector lanes for vectorization
        cfg: ConfigEntity, optional
            cfg for recording error
        source: Array of Array tensor, optional
            source tensor for attaching cache

        Returns
        -------
        axes : list of tvm.te.schedule.IterVar
            The transformed axes
        """
        if source is not None:  # special case : attach cache_read/cache_write
            for src, anchor in zip(source, self.anns):
                for tensor in src:
                    sch[tensor].compute_at(sch[op], axes[anchor])
            return axes
        for i, ann in enumerate(self.anns):
            if ann == "none":
                continue
            if ann == "unroll":
                if max_unroll and axis_lens[i] > max_unroll:
                    cfg.raise_error("Too large factor for unrolling")
                sch[op].unroll(axes[i])
            elif ann == "vec":
                if vec_size and axis_lens[i] not in vec_size:
                    cfg.raise_error("Wrong size of lanes in vectorization")
                sch[op].vectorize(axes[i])
            elif ann in self._BIND_ANNS:
                # the annotation string doubles as the thread-axis tag
                sch[op].bind(axes[i], thread_axis(ann))
            elif ann == "fuse":
                # fuse this axis into the next; the result replaces axes[i + 1]
                assert i < len(axes) - 1
                axes[i + 1] = sch[op].fuse(axes[i], axes[i + 1])
            else:
                raise RuntimeError("Invalid annotation " + ann)
        return axes

    def __repr__(self):
        return str(self.anns)
class OtherOptionSpace(TransformSpace):
    """The parameter space for general option"""

    def __init__(self, axes, policy, **kwargs):
        super(OtherOptionSpace, self).__init__()
        # every user-supplied candidate value becomes one entity
        self.entities = [OtherOptionEntity(cand) for cand in kwargs["candidate"]]

    @staticmethod
    def get_num_output(axes, policy, **kwargs):
        # a general knob does not produce any axes
        return 0

    def __repr__(self):
        return f"OtherOption({self.entities}) len={len(self)}"
class OtherOptionEntity(object):
    """The parameter entity for general option, with a detailed value

    Holds the concrete value chosen for a knob defined via
    ``ConfigSpace.define_knob``.
    """

    def __init__(self, val):
        # the concrete knob value (any user-supplied candidate)
        self.val = val

    def __repr__(self):
        return str(self.val)
class ConfigSpace(object):
    """The configuration space of a schedule. Pass it as config in template to
    collect transformation space and build transform graph of axes
    """

    def __init__(self):
        # private dict to provide sugar
        self.space_map = OrderedDict()  # name -> space
        self._collect = True  # whether define_* calls should create new sub-spaces
        self._length = None  # lazy cache: number of valid indexes
        self._range_length = None  # lazy cache: product of all sub-space sizes
        self._dims = None  # lazy cache: size of each sub-space
        self._entity_map = OrderedDict()  # name -> entity
        self._constraints = []
        self.errors = []  # messages recorded via raise_error during instantiation
        self.code_hash = None
        self.flop = 0  # float-op statistics accumulated via add_flop
        self.cost = None
        self.is_fallback = False
        self._shared_filter = None  # predicate installed by multi_filter
        self._shared_filter_cache = None  # lazy per-index validity table

    @staticmethod
    def axis(var):
        """get a virtual axis (axis placeholder)

        Parameters
        ----------
        var: int or tvm.te.schedule.IterVar
            If is int, return an axis whose length is the provided argument.
            If is IterVar, return an axis whose length is extracted from the
            IterVar's extent domain.
        """
        return VirtualAxis(var)

    # reduction axes are declared exactly like spatial ones
    reduce_axis = axis

    def define_split(self, name, axis, policy="factors", **kwargs):
        """Define a new tunable knob which splits an axis into a list of axes

        Parameters
        ----------
        name: str
            name to index the entity of this space
        axis: tvm.te.schedule.IterVar
            axis to split
        policy: str
            name of policy.
            If is 'factors', the tuner will try all divisible factors.
            If is 'power2', the tuner will try power-of-two factors less or equal to the length.
            If is 'verbose', the tuner will try all candidates in above two policies.
            If is 'candidate', try given candidates.
        **kwargs:
            extra arguments for policy

            ``max_factor``:
                the maximum split factor (`int`).
            ``filter``:
                see examples below for how to use filter (`Callable[[int], bool]`).
            ``num_outputs``:
                the total number of axis after split (`int`).
            ``no_tail``:
                should we only include divisible numbers as split factors (`bool`).
            ``candidate``:
                (policy=candidate) manual candidate list (`List`).

        Examples
        --------
        >>> # use custom candidates
        >>> cfg.define_split('tile_x', x, policy='candidate', num_outputs=3,
        >>>                  candidate=[[1, 4, 4], [4, 1, 4]])

        >>> # use a filter that only accepts the split scheme whose inner most tile is less then 4
        >>> cfg.define_split('tile_y', y, policy='factors', num_outputs=3,
        >>>                  filter=lambda x: x.size[-1] <= 4)
        """
        axes = [axis]
        return self._add_new_transform(SplitSpace, name, axes, policy, **kwargs)

    def define_reorder(self, name, axes, policy, **kwargs):
        """Define a new tunable knob which reorders a list of axes

        Parameters
        ----------
        name: str
            name to index the entity of this space
        axes: Array of tvm.te.schedule.IterVar
            axes to reorder
        policy: str
            name of policy
            If is 'identity', do an identity permutation.
            If is 'all', try all permutations.
            If is 'interval_all', try all permutations of an interval of axes.
            If is 'candidate', try listed candidate.
            If is 'interleave', interleave chains of spatial axes and chains of reduction axes.
        kwargs: dict
            extra arguments for policy
        """
        return self._add_new_transform(ReorderSpace, name, axes, policy, **kwargs)

    def define_annotate(self, name, axes, policy, **kwargs):
        """Define a new tunable knob which annotates a list of axes

        Parameters
        ----------
        name: str
            name to index the entity of this space
        axes: Array of tvm.te.schedule.IterVar
            axes to annotate
        policy: str
            name of policy
            If is 'unroll', unroll the axes.
            If is 'try_unroll', try to unroll the axes.
            If is 'try_unroll_vec', try to unroll or vectorize the axes.
            If is 'bind_gpu', bind the first few axes to gpu threads.
            If is 'locate_cache', choose n axes to attach shared/local cache.
        kwargs: dict
            extra arguments for policy
        """
        return self._add_new_transform(AnnotateSpace, name, axes, policy, **kwargs)

    def define_knob(self, name, candidate):
        """Define a tunable knob with a list of candidates

        Parameters
        ----------
        name: str
            name key of that option
        candidate: list
            list of candidates
        """
        return self._add_new_transform(OtherOptionSpace, name, [], None, candidate=candidate)

    def add_flop(self, flop):
        """Add float operation statistics for this tuning task

        Parameters
        ---------
        flop: int or float or IntImm or FloatImm
            number of float operations
        """
        if isinstance(flop, (expr.IntImm, expr.FloatImm)):
            flop = flop.value
        self.flop += float(flop)

    def raise_error(self, msg):
        """register error in config
        Using this to actively detect error when scheduling.
        Otherwise these error will occur during runtime, which
        will cost more time.

        Parameters
        ----------
        msg: str
        """
        self.errors.append(msg)

    def valid(self):
        """Check whether the config meets all the constraints

        .. note::

            This check should be called after instantiation of task,
            because the ConfigEntity/ConfigSpace collects errors during instantiation

        Returns
        -------
        valid: bool
            whether the config meets all the constraints
        """
        return not bool(self.errors)

    def is_index_valid(self, index):
        """Checks if the index satisfies the multi_filter condition

        Parameters
        ----------
        index: int
            index from the range of the space

        Returns
        -------
        valid: bool
            whether the index meets all the constraints
        """
        assert 0 <= index < self.range_length
        if self._shared_filter is None:
            return True
        if self._shared_filter_cache is None:
            self._make_shared_filter_cache()
        return self._shared_filter_cache[index]

    def multi_filter(self, filter):  # pylint: disable=redefined-builtin
        """The filter can restrict combination of parameters in difference to the knob filter,
        that restricts only single parameter

        Parameters
        ----------
        filter: function
            predicate with one argument (Callable[[int], bool])

        .. note::

            Using this filter causes additional restrictions on the use of __len__.
            Normally, it define the count of valid indexes and the range of space, but when
            multi_filter enabled, it requires to use __len__ for getting the count of valid
            indexes or range_length for the range of space. It is recommended to use:
            ``is_index_valid``, ``get_next_index``, ``get_rand_index`` to bypass the space

        Examples
        --------
        >>> # Pre-requisites
        >>> candidates = [[16, 64], [32, 32], [64, 16]]
        >>> filter = lambda v: v.size[0] != 16
        >>> multi_filter = lambda e: (e["tile_x"].size[0] + e["tile_y"].size[0]) <= 64

        >>> # Case 1 - without filtering
        >>> cfg.define_split("tile_x", x, num_outputs=2, policy="candidate", candidate=candidates)
        >>> cfg.define_split("tile_y", y, num_outputs=2, policy="candidate", candidate=candidates)
        >>> # [('tile_x', [16, 64]), ('tile_y', [16, 64])],None,0
        >>> # [('tile_x', [32, 32]), ('tile_y', [16, 64])],None,1
        >>> # [('tile_x', [64, 16]), ('tile_y', [16, 64])],None,2
        >>> # [('tile_x', [16, 64]), ('tile_y', [32, 32])],None,3
        >>> # [('tile_x', [32, 32]), ('tile_y', [32, 32])],None,4
        >>> # [('tile_x', [64, 16]), ('tile_y', [32, 32])],None,5
        >>> # [('tile_x', [16, 64]), ('tile_y', [64, 16])],None,6
        >>> # [('tile_x', [32, 32]), ('tile_y', [64, 16])],None,7
        >>> # [('tile_x', [64, 16]), ('tile_y', [64, 16])],None,8

        >>> # Case 2 - with filter
        >>> cfg.define_split("tile_x", x, num_outputs=2, policy="candidate", candidate=candidates,
        >>>                  filter=filter)
        >>> cfg.define_split("tile_y", y, num_outputs=2, policy="candidate", candidate=candidates,
        >>>                  filter=filter)
        >>> # [('tile_x', [32, 32]), ('tile_y', [32, 32])],None,0
        >>> # [('tile_x', [64, 16]), ('tile_y', [32, 32])],None,1
        >>> # [('tile_x', [32, 32]), ('tile_y', [64, 16])],None,2
        >>> # [('tile_x', [64, 16]), ('tile_y', [64, 16])],None,3

        >>> # Case 3 - with filter and multi_filter
        >>> cfg.define_split("tile_x", x, num_outputs=2, policy="candidate", candidate=candidates,
        >>>                  filter=filter)
        >>> cfg.define_split("tile_y", y, num_outputs=2, policy="candidate", candidate=candidates,
        >>>                  filter=filter)
        >>> cfg.multi_filter(filter=multi_filter)
        >>> # [('tile_x', [32, 32]), ('tile_y', [32, 32])],None,0
        """
        # only a collecting space (template definition pass) installs the filter
        if self._collect:
            self.clear_cache()
            self._shared_filter = filter

    @property
    def range_length(self):
        """Length of the index range in the space"""
        if self._range_length is None:
            self._range_length = int(np.prod([len(x) for x in self.space_map.values()]))
        return self._range_length

    @property
    def dims(self):
        """Dimensions in the space"""
        if self._dims is None:
            self._dims = [len(x) for x in self.space_map.values()]
        return self._dims

    def subrange_length(self, start, end):
        """Returns the number of valid indexes within the limited range from [start, end]

        Parameters
        ----------
        start: int
            start of subrange, inclusive
        end: int
            end of subrange, exclusive

        Returns
        -------
        count: int
            number of valid indexes
        """
        assert 0 <= start <= end <= self.range_length
        if self._shared_filter is None:
            return end - start
        if self._shared_filter_cache is None:
            self._make_shared_filter_cache()
        return self._shared_filter_cache[start:end].count(True)

    def get_rand_index(self, start=None, end=None, to_exclude=None):
        """Returns a random valid index unlisted to exclusion

        Parameters
        ----------
        start: int, optional
            specifying at which position to start, inclusive
        end: int, optional
            specifying at which position to end, exclusive
        to_exclude: list, optional
            determines unsuitable values

        Returns
        -------
        rand: int
            random index in the space

        .. note::

            Excluding all valid space indexes will lead to an infinite loop.
        """
        start = start or 0
        end = end or self.range_length
        # rejection sampling: retry until an index passes the multi_filter
        # and is not excluded
        while True:
            index = randrange(start, end)
            if self.is_index_valid(index) and index not in (to_exclude or []):
                return index

    def get_next_index(self, index, n=1, start=None, end=None):
        """Returns the nth valid next index or None if out of range

        Parameters
        ----------
        index: int
            specifying at which position to start, inclusive
        n: int, optional
            step by using to find the next index, for the opposite
            direction a negative number should be used
        start: list, optional
            start of subrange, inclusive
        end: list, optional
            end of subrange, exclusive

        Returns
        -------
        next: int
            next index in the space
        """
        assert n != 0
        start = start or 0
        end = end or self.range_length
        if self._shared_filter is None:
            # no filter: every index is valid, so just jump
            index += n
            if start <= index < end:
                return index
            return None
        # with a filter: walk one step at a time, counting only valid indexes
        trend = 1 if n > 0 else -1
        counter = abs(n)
        while counter != 0:
            index += trend
            if index < start or index >= end:
                return None
            if self.is_index_valid(index):
                counter -= 1
        return index

    def clear_cache(self):
        """Clears the cache of index validity"""
        # drop the (possibly huge) tuple before rebuilding other lazy caches
        del self._shared_filter_cache
        self._dims = None
        self._length = None
        self._range_length = None
        self._shared_filter_cache = None

    def _make_shared_filter_cache(self):
        """Evaluate the multi_filter predicate for every index and cache the results."""

        def apply(t):
            # decode flat index `t` into one entity per sub-space
            entities = OrderedDict()
            for name, space in self.space_map.items():
                entities[name] = space[t % len(space)]
                t //= len(space)
            return bool(self._shared_filter(entities))

        self._shared_filter_cache = tuple(apply(i) for i in range(self.range_length))
        self._length = self._shared_filter_cache.count(True)

    def point2knob(self, point):
        """Convert point form (single integer) to knob (vector)

        Parameters
        ----------
        point: int
            point to convert

        Returns
        -------
        knob: list
            knob representation of the point
        """
        knob = []
        for dim in self.dims:
            knob.append(point % dim)
            point //= dim
        return knob

    def knob2point(self, knob):
        """Convert knob form (vector) to point form (single integer)

        Parameters
        ----------
        knob: list
            knob to convert

        Returns
        -------
        point: int
            point of the knob representation
        """
        point = 0
        for j, k in enumerate(knob):
            point += int(np.prod(self.dims[:j])) * k
        return point

    def sample_ints(self, m):
        """
        Sample m different integer numbers from [0, self.range_length) without replacement
        This function is an alternative of `np.random.choice` when self.range_length > 2 ^ 32, in
        which case numpy does not work.

        Parameters
        ----------
        m: int
            The number of sampled int

        Returns
        -------
        ints: an numpy array of size m
        """
        assert m <= len(self)
        vis = set()
        while len(vis) < m:
            new = randrange(0, self.range_length)
            if self.is_index_valid(new):
                vis.add(new)
        return np.fromiter(vis, int, len(vis))

    def random_walk(self, point):
        """random walk as local transition

        Parameters
        ----------
        point: int
            index of the ConfigEntity

        Returns
        -------
        new_point: int
            new neighborhood index
        """
        # transform to knob form
        old_knob = self.point2knob(point)
        new_knob = old_knob.copy()
        new_point = self.knob2point(new_knob)
        # mutate one random dimension until we land on a different, valid point
        while new_knob == old_knob or not self.is_index_valid(new_point):
            from_i = np.random.randint(len(old_knob))
            to_v = np.random.randint(self.dims[from_i])
            new_knob[from_i] = to_v
            new_point = self.knob2point(new_knob)
        # transform to index form
        return new_point

    def _add_new_transform(self, space_class, name, axes, policy, **kwargs):
        """Add a new transform space in template"""
        # if we do not have tuned info (_collect == True) but defined KNOB value
        # for "default" scheduling before call of _add_new_transform, in this case
        # no need to create new space and override previously pointed KNOB values
        if kwargs.get("filter"):
            self.clear_cache()
        if self._collect and not (self.is_fallback and name in self._entity_map):
            # convert schedule axis to space definition axis
            axes = [x if isinstance(x, (VirtualAxis, Axis)) else self.axis(x) for x in axes]
            # add subspace (knob)
            space = space_class(axes, policy, **kwargs)
            self.space_map[name] = space
            self._entity_map[name] = space[0]
            return [Axis(space, i) for i in range(space.num_output)]
        return [Axis(None, i) for i in range(space_class.get_num_output(axes, policy, **kwargs))]

    def __len__(self):
        """Returns the number of valid indexes in the space"""
        if self._shared_filter is None:
            return self.range_length
        if self._shared_filter_cache is None:
            self._make_shared_filter_cache()
        return self._length

    def get(self, index):
        """Get a config entity with detailed parameters from this space

        Parameters
        ----------
        index: int
            index in the space

        Returns
        -------
        config: ConfigEntity
            config corresponds to the index
        """
        if index < 0 or index >= self.range_length:
            raise IndexError(f"Index out of range: size {self.range_length}, got index {index}")
        if not self.is_index_valid(index):
            raise IndexError(
                f"Index does not correspond to the multi-filter condition, got index {index}. "
                f"Use is_index_valid to pre-check"
            )
        # decode the flat index (mixed-radix) into one entity per sub-space
        entities = OrderedDict()
        t = index
        for name, space in self.space_map.items():
            entities[name] = space[t % len(space)]
            t //= len(space)
        ret = ConfigEntity(index, self.code_hash, entities, self._constraints)
        return ret

    def __iter__(self):
        return self._entity_map.__iter__()

    def __getitem__(self, name):
        """get the transform entity(knob) of this entity by name
        do not use this to get a ConfigEntity of this space (should use ConfigSpace.get instead)

        Parameters
        ----------
        name: str
            name of the transform
        """
        return self._entity_map[name]

    def __repr__(self):
        res = f"ConfigSpace (len={len(self)}, range_length={self.range_length}, space_map=\n"
        for i, (name, space) in enumerate(self.space_map.items()):
            res += f"  {i:2d} {name}: {space}\n"
        return res + ")"
# Mapping from annotation name to a small integer id; used by
# ConfigEntity.get_flatten_feature to one-hot encode annotations.
_ann_to_number = {
    "none": 0,
    "vec": 1,
    "unroll": 2,
    "blockIdx.x": 3,
    "blockIdx.y": 4,
    "blockIdx.z": 5,
    "threadIdx.x": 6,
    "threadIdx.y": 7,
    "threadIdx.z": 8,
    "vthread": 9,
    "fuse": 10,
}
class ConfigEntity(ConfigSpace):
    """A configuration with detailed parameters

    Parameters
    ----------
    index: int
        index of this config in space
    code_hash: str
        hash of schedule code
    entity_map: dict
        map name to transform entity
    constraints : list
        List of constraints
    """

    def __init__(self, index, code_hash, entity_map, constraints):
        super(ConfigEntity, self).__init__()
        self.index = index
        # a concrete entity never collects new sub-spaces during instantiation
        self._collect = False
        self._entity_map = entity_map
        self._space_map = None
        self._constraints = constraints
        self.code_hash = code_hash

    def get_flatten_feature(self):
        """flatten entities to a numerical one-dimensional feature vector

        Returns
        -------
        fea: np.array
            one dimensional float32 array
        """
        fea = []
        for _, v in self._entity_map.items():
            if isinstance(v, SplitEntity):
                fea.extend(v.size)
            elif isinstance(v, ReorderEntity):
                # use a naive way: directly copy the permutation
                fea.extend(v.perm)
            elif isinstance(v, AnnotateEntity):
                # one-hot encoding
                for ann in v.anns:
                    tmp = [0] * len(_ann_to_number)
                    tmp[_ann_to_number[ann]] = 1
                    fea.extend(tmp)
            elif isinstance(v, OtherOptionEntity):
                fea.append(v.val)
        return np.array(fea, dtype=np.float32)

    def get_other_option(self):
        """
        Returns
        -------
        other_option: dict
            other tunable parameters (tunable parameters defined by `cfg.define_knob`)
        """
        return {x: x.val for x in self._entity_map.values() if isinstance(x, OtherOptionEntity)}

    def to_json_dict(self):
        """convert to a json serializable dictionary

        Return
        ------
        json_dict: dict
            a json serializable dictionary
        """
        ret = {}
        ret["index"] = int(self.index)
        ret["code_hash"] = self.code_hash
        entity_map = []
        # encode each entity as (name, two-letter type tag, payload)
        for k, v in self._entity_map.items():
            if isinstance(v, SplitEntity):
                entity_map.append((k, "sp", v.size))
            elif isinstance(v, ReorderEntity):
                entity_map.append((k, "re", v.perm))
            elif isinstance(v, AnnotateEntity):
                entity_map.append((k, "an", v.anns))
            elif isinstance(v, OtherOptionEntity):
                entity_map.append((k, "ot", v.val))
            else:
                # BUGFIX: concatenating a non-str entity with "+" raised
                # TypeError instead of the intended RuntimeError; convert
                # the entity to str explicitly.
                raise RuntimeError("Invalid entity instance: " + str(v))
        ret["entity"] = entity_map
        return ret

    @staticmethod
    def from_json_dict(json_dict):
        """Build a ConfigEntity from json serializable dictionary

        Parameters
        ----------
        json_dict: dict
            Json serializable dictionary. This should be the return value
            of :any:`to_json_dict`.

        Returns
        -------
        config: ConfigEntity
            The corresponding config object
        """
        index = json_dict["index"]
        code_hash = json_dict["code_hash"]
        constraints = []
        entity_map = OrderedDict()
        for item in json_dict["entity"]:
            key, knob_type, knob_args = item
            # dispatch on the two-letter type tag written by to_json_dict
            if knob_type == "sp":
                entity = SplitEntity(knob_args)
            elif knob_type == "re":
                entity = ReorderEntity(knob_args)
            elif knob_type == "an":
                entity = AnnotateEntity(knob_args)
            elif knob_type == "ot":
                entity = OtherOptionEntity(knob_args)
            else:
                raise RuntimeError("Invalid config knob type: " + knob_type)
            entity_map[str(key)] = entity
        return ConfigEntity(index, code_hash, entity_map, constraints)

    def __repr__(self):
        # strip the "OrderedDict(" prefix and trailing ")" from the map repr
        return f"{str(self._entity_map)[12:-1]},{self.code_hash},{self.index}"
class FallbackConfigEntity(ConfigSpace):
"""The config entity created to support fallback"""
    def __init__(self):
        super(FallbackConfigEntity, self).__init__()
        # mark as fallback so define_* calls keep manually set knob values
        # instead of overriding them (see ConfigSpace._add_new_transform)
        self.is_fallback = True
def fallback_split(self, name, constraints):
"""Fallback a split knob
Parameters
----------
name: str
name of the knob
constraints: List of int
The maximum tile size for every dimension. Value `-1` means no constraint.
Examples
--------
If you use cfg.define_split('tile_0', 128, num_outputs=3),
Then cfg.fallback_split('tile_0', [-1, 8, 4]) will give you cfg['tile_0'].size = [4, 8, 4]
If you use cfg.define_split('tile_0', 49, num_outputs=3),
Then cfg.fallback_split('tile_0', [-1, 8, 4]) will give you cfg['tile_0'].size = [7, 7, 1]
"""
space = self.space_map[name]
assert isinstance(space, SplitSpace)
assert len(constraints) == space.num_output
# '-1' means no constraint
constraints = [x if x != -1 else 1e10 for x in constraints]
entity = self._entity_map[name]
now = space.product
for i in reversed(range(space.num_output)):
factors = get_factors(now)
find = len(factors) - 1
for j, f in enumerate(factors):
if f > constraints[i]:
find = j - 1
break
if find >= 0:
entity.size[i] = factors[find]
now //= factors[find]
else:
raise RuntimeError("Cannot find feasible fallback split entity for node: " + name)
def fallback_with_reference_log(self, ref_log):
"""A data driven fallback mechanism.
We use tuned parameters from TopHub as reference data.
For an unseen shape, we find the most similar tuned one from TopHub and
mimic its parameters.
Note that we are not matching by workload (e.g., input size, kernel size),
but instead matching by configuration space. The idea is that if two workloads have
similar configuration space, their optimal configurations are also likely to be similar.
Parameters
----------
ref_log: List of (autotvm.measure.MeasureInput, autotvm.measure.MeasureResult)
The reference log
"""
knob_names = [x for x in self.space_map.keys() if isinstance(self.space_map[x], SplitSpace)]
# find best match config in reference data by matching tiling factors
factor_list = []
for knob_name in knob_names:
factor_list.append(get_factors(self.space_map[knob_name].product))
best_match_cfg = None
best_match_score = 0
for inp, _ in ref_log:
match_score = 0
for i, knob_name in enumerate(knob_names):
factors = get_factors(int(np.prod(inp.config[knob_name].size)))
match_score += float(len(set(factor_list[i]).intersection(factors))) / len(
factor_list[i]
)
if match_score > best_match_score:
best_match_score, best_match_cfg = match_score, inp.config
if best_match_cfg is None:
return
# mimic its tiling strategy
for knob_name in knob_names:
constraint = list(best_match_cfg[knob_name].size)
constraint[0] = -1
self.fallback_split(knob_name, constraint)
# copy other knobs
for knob_name in self.space_map.keys():
if not isinstance(self.space_map[knob_name], SplitSpace):
self._entity_map[knob_name] = best_match_cfg[knob_name]
def __setitem__(self, name, entity):
"""set the entity(knob) of by name
Parameters
----------
name: str
name of the entity
entity: SplitEntity, ReorderEntity, AnnotateEntity, OtherOptionEntity
value of the entity
"""
self._entity_map[name] = entity
def __repr__(self):
return f"{str(self._entity_map)[12:-1]},{self.code_hash}"
| 48,934 | 32.865052 | 100 | py |
tvm | tvm-main/python/tvm/autotvm/task/dispatcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Template dispatcher module.
A dispatcher is a function that can contains multiple behaviors.
Its specific behavior is can be controlled by DispatchContext.
DispatchContext is used in two ways, usually via different implementation
of the DispatchContext base class.
- During search, we can use it to pass the current proposal from tuner.
- During evaluation, we can use it to set pick the best policy.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import as _abs
from io import TextIOBase
import logging
from os import PathLike
from pathlib import Path
from typing import List, Iterable, Tuple, Union
import numpy as np
from .space import FallbackConfigEntity
from .. import env as _env
from ..measure import MeasureInput, MeasureResult
logger = logging.getLogger("autotvm")
Records = Union[
Union[str, bytes, Path], # Path-like objects
TextIOBase, # File-like objects
Iterable[Tuple[MeasureInput, MeasureResult]],
]
class DispatchContext(object):
    """
    Base class of dispatch context.

    DispatchContext enables the target and workload
    specific dispatch mechanism for templates.
    """

    current = None
    # a set to prevent print duplicated message
    warning_messages = set()

    def __init__(self):
        self._old_ctx = DispatchContext.current

    def query(self, target, workload):
        """
        Query the context for the specific config of a template, falling back
        to the context that was active when this one was created if nothing
        is found locally.

        Parameters
        ----------
        target: Target
            The current target
        workload : Workload
            The current workload.

        Returns
        -------
        cfg : ConfigSpace
            The specific configuration.
        """
        cfg = self._query_inside(target, workload)
        if cfg is not None:
            return cfg
        # Not found here: delegate to the enclosing (outer) context.
        return self._old_ctx.query(target, workload)

    def update(self, target, workload, cfg):
        """
        Update the context with a specific config.

        Parameters
        ----------
        target: Target
            The current target
        workload : Workload
            The current workload.
        cfg : ConfigSpace
            The specific configuration.

        Note
        ----
        This interface is used when TVM replaces an operator in the graph.
        For example, the `AlterOpLayout` pass (enabled when `opt_level = 3`)
        replaces an `NCHW` convolution with an `NCHW[x]c` implementation on
        x86 CPUs. TOPI first queries the schedule using the original `NCHW`
        workload, then calls this method with the new `NCHW[x]c` workload so
        that later `NCHW[x]c` queries hit the dispatcher directly:

        .. code-block:: python

            @conv2d_alter_layout.register("cpu")
            def _alter_conv2d_layout(attrs, inputs, tinfo):
                workload = get_conv2d_workload(...)
                dispatch_ctx = autotvm.task.DispatchContext.current
                target = tvm.target.Target.current()
                config = dispatch_ctx.query(target, workload)

                # Get conv2d_NCHWc workload from config
                # new_workload = ...
                # new_inputs = ...
                # new_attrs = ...

                # Store altered operator's config
                dispatch_ctx.update(target, new_workload, config)
                return sym.contrib.conv2d_NCHWc(*new_inputs, **new_attrs)

        The same `config` may be stored for both workloads when they share
        schedule parameters; otherwise construct a new `ConfigEntity`.
        """
        raise NotImplementedError()

    def _query_inside(self, target, workload):
        """
        Look up the config for a template inside this context only — no
        fallback to the enclosing context.

        Parameters
        ----------
        target: Target
            The current target
        workload : Workload
            The current workload.

        Returns
        -------
        cfg : ConfigSpace
            The specific configuration, or None when not found here.
        """
        raise NotImplementedError()

    def __enter__(self):
        # Push: remember the enclosing context, make this one current.
        self._old_ctx = DispatchContext.current
        DispatchContext.current = self
        return self

    def __exit__(self, ptype, value, trace):
        # Pop: restore the enclosing context.
        DispatchContext.current = self._old_ctx
class ApplyConfig(DispatchContext):
    """Apply a deterministic config entity for all queries.

    Parameters
    ----------
    config : ConfigSpace or ConfigEntity
        The specific configuration we care about.
    """

    def __init__(self, config):
        super(ApplyConfig, self).__init__()
        self.workload = None
        self._config = config

    def _query_inside(self, target, workload):
        """Record the queried workload and answer with the fixed config."""
        self.workload = workload
        return self._config

    def update(self, target, workload, cfg):
        """Replace the stored config and remember the workload."""
        self.workload, self._config = workload, cfg
class ApplyFixedConfig(DispatchContext):
    """Apply a config of a deterministic schedule.
    This is used for building a single Relay operator with deterministic schedule
    for testing schedules at Relay level.

    Parameters
    ----------
    tasks : list[tvm.autotvm.task.task.Task]
        List of autoTVM tasks.
    schedule_names : str, List[str]
        Name of schedules to use.
    """

    def __init__(self, tasks, schedule_names: Union[str, List[str]]):
        super(ApplyFixedConfig, self).__init__()
        if isinstance(schedule_names, str):
            # Wrap the single name in a list. The previous
            # `list(schedule_names)` split the string into individual
            # characters, so the membership test in `_query_inside` could
            # never match a full schedule name.
            self._schedule_names = [schedule_names]
        elif isinstance(schedule_names, list):
            self._schedule_names = schedule_names
        else:
            # f-string: "+"-concatenation raised TypeError for non-string
            # inputs, masking the intended RuntimeError.
            raise RuntimeError(f"Incorrect type: {schedule_names}")
        self._tasks = tasks
        self.workload = None

    def _query_inside(self, target, workload):
        """Override query"""
        self.workload = workload

        # Create a config from the task matching this workload. Initialize to
        # None so a missing match raises RuntimeError below instead of
        # UnboundLocalError.
        config = None
        for task in self._tasks:
            if task.name == workload[0]:
                config = task.config_space.get(0)
                break
        if not config:
            raise RuntimeError(f"workload: {str(workload)} does not exist in {str(self._tasks)}")

        # Add low cost to the target schedule and high cost to others.
        if workload[0] in self._schedule_names:
            config.cost = 1e-6
        else:
            config.cost = 100000
        return config

    def update(self, target, workload, cfg):
        """Override update"""
        self.workload = workload
        self._config = cfg
class ApplyHistoryBest(DispatchContext):
    """
    Apply the history best config

    Parameters
    ----------
    records : None, Records, or iterator of Records objects, where a
              Records object is a path-like object, a file-like object,
              or an iterator of (MeasureInput, MeasureResult).

        Collection of tuning records. If multiple Records objects are passed, their
        contents will be merged.
    """

    def __init__(self, records: Union[None, Records, Iterable[Records]]):
        super(ApplyHistoryBest, self).__init__()

        # (target key, workload) -> best (MeasureInput, MeasureResult)
        self.best_by_targetkey = {}
        # (target model, workload) -> best (MeasureInput, MeasureResult)
        self.best_by_model = {}
        # configs set explicitly via update(); they take precedence in queries
        self._best_user_defined = {}

        if records:
            self.load(records)

    def load(self, records: Union[Records, Iterable[Records]]):
        """Load records to this dispatch context

        Parameters
        ----------
        records : str, list of str, or iterator of (autotvm.measure.MeasureInput,\
                  autotvm.measure.MeasureResult)

            Collection of tuning records. If multiple Records objects are passed, their
            contents will be merged.
        """
        # pylint: disable=import-outside-toplevel
        from ..record import load_from_file, load_from_buffer

        def _unpack_records(
            records: Union[Records, Iterable[Records]]
        ) -> List[Tuple[MeasureInput, MeasureResult]]:
            # A path-like or file-like object is loaded directly; otherwise
            # the input is an iterable that may mix (input, result) pairs
            # with further nested Records objects, flattened recursively.
            if isinstance(records, (str, bytes, PathLike)):
                return load_from_file(records)

            if isinstance(records, TextIOBase):
                return load_from_buffer(records)

            joint_records = []
            for record in records:
                if isinstance(record, Tuple) and isinstance(record[0], MeasureInput):
                    joint_records.append(record)
                else:
                    joint_records += _unpack_records(record)

            return joint_records

        flattened_records = _unpack_records(records)
        if not flattened_records:
            return

        best_by_targetkey = self.best_by_targetkey
        best_by_model = self.best_by_model

        counter = 0
        for inp, res in flattened_records:
            counter += 1
            # skip failed measurements
            if res.error_no != 0:
                continue

            # use target keys in tvm target system as key to build best map
            for k in inp.target.keys:
                key = (k, inp.task.workload)
                if key not in best_by_targetkey:
                    best_by_targetkey[key] = (inp, res)
                else:
                    # keep the record with the lower mean measured cost
                    _, other_res = best_by_targetkey[key]
                    if np.mean(other_res.costs) > np.mean(res.costs):
                        best_by_targetkey[key] = (inp, res)

            # use model as key to build best map
            key = (inp.target.model, inp.task.workload)
            if key not in best_by_model:
                # records for "unknown" models are not indexed by model name
                if inp.target.model != "unknown":
                    best_by_model[key] = (inp, res)
            else:
                _, other_res = best_by_model[key]
                if np.mean(other_res.costs) > np.mean(res.costs):
                    best_by_model[key] = (inp, res)

        logger.debug("Finish loading %d records", counter)

    def _query_inside(self, target, workload):
        # Lookup precedence: user-defined by model, best by model,
        # user-defined by target key, best by target key.
        if target is None:
            raise RuntimeError(
                "Need a target context to find the history best. "
                "Hint: If your target is llvm, use `with tvm.target.Target('llvm'):`"
                " above the dispatcher call. So does other target. "
            )

        # first try matching by model
        key = (target.model, workload)
        if key in self._best_user_defined:
            return self._best_user_defined[key]
        if key in self.best_by_model:
            inp, _ = self.best_by_model[key]
            return inp.config

        # then try matching by target key
        for k in target.keys:
            key = (k, workload)
            if key in self._best_user_defined:
                return self._best_user_defined[key]
            if key in self.best_by_targetkey:
                inp, _ = self.best_by_targetkey[key]
                return inp.config

        return None

    def update(self, target, workload, cfg):
        model = target.model
        key = (model, workload)
        # assume user provided config is the best
        cfg.cost = 0
        self._best_user_defined[key] = cfg

        # also register under every target key so both lookup paths hit it
        for k in target.keys:
            key = (k, workload)
            self._best_user_defined[key] = cfg
class FallbackContext(DispatchContext):
    """
    A fallback dispatch context. Any tunable template can be called under
    this context; it is the root of the dispatch-context stack.
    """

    def __init__(self):
        super(FallbackContext, self).__init__()
        self.memory = {}

    def _query_inside(self, target, workload):
        key = (str(target), workload)
        cached = self.memory.get(key)
        if cached is not None:
            return cached

        if not _env.GLOBAL_SCOPE.silent:
            msg = (
                f"Cannot find config for target={target}, workload={workload}. A fallback "
                f"configuration is used, which may bring great performance regression."
            )
            # Emit each distinct warning only once per process.
            if msg not in DispatchContext.warning_messages:
                DispatchContext.warning_messages.add(msg)
                logger.warning(msg)
        fallback_cfg = FallbackConfigEntity()

        # cache this config so repeated queries reuse the same entity
        self.memory[key] = fallback_cfg
        return fallback_cfg

    def clear_cache(self, target, workload):
        """Clear fallback cache. Pass the same argument as _query_inside to this function
        to clean the cache.

        Parameters
        ----------
        target: Target
            The current target
        workload : Workload
            The current workload.
        """
        self.memory.pop((str(target), workload), None)

    def update(self, target, workload, cfg):
        self.memory[(str(target), workload)] = cfg
# Install the fallback context as the root of the dispatch-context stack.
DispatchContext.current = FallbackContext()
def clear_fallback_cache(target, workload):
    """Clear fallback cache. Pass the same argument as _query_inside to this function
    to clean the cache.

    Parameters
    ----------
    target: Target
        The current target
    workload : Workload
        The current workload.

    Note
    ----
    This is used in alter_op_layout to clear the bad cache created before call topi compute function
    """
    # Walk up the context stack to the root FallbackContext and clear it.
    ctx = DispatchContext.current
    while not isinstance(ctx, FallbackContext):
        ctx = ctx._old_ctx
    ctx.clear_cache(target, workload)
class ApplyGraphBest(DispatchContext):
    """Load the graph level tuning optimal schedules.

    The input records should be in the ascending order of
    node index for target operator. Usually this can be obtained
    with graph tuner.

    This context maintains an internal counter to indicate the current
    node index.
    """

    def __init__(self, records: Records):
        """
        Parameters
        ----------
        records : str or iterator of (autotvm.measure.MeasureInput, autotvm.measure.MeasureResult)
            Collection of tuning records.
            If is str, then it should be the filename of a records log file.
            Each row of this file is an encoded record pair.
            Otherwise, it is an iterator.
        """
        # pylint: disable=import-outside-toplevel
        from ..record import load_from_file, load_from_buffer

        super(ApplyGraphBest, self).__init__()
        if isinstance(records, (str, bytes, PathLike)):
            records = load_from_file(records)
        elif isinstance(records, TextIOBase):
            records = load_from_buffer(records)
        else:
            records = list(records)

        self._records = list(records)
        # index of the next record to serve; advanced on every in-order query
        self._counter = 0
        # (target, workload) -> config cache for out-of-order / fallback queries
        self._global_cfg_dict = {}

    def _query_inside(self, target, workload):
        """
        Query the context to get config from records.

        Records are consumed sequentially: each call serves the next record,
        so queries must arrive in the same node order as the records.

        Parameters
        ----------
        target : Target
            The current target
        workload : Workload
            The current workload.

        Returns
        -------
        cfg : ConfigSpace
            The specific configuration.
        """
        if self._counter < len(self._records):
            cfg = self._records[self._counter][0].config
            wkl = self._records[self._counter][0].task.workload
            if workload is not None:
                assert wkl == workload
            self._counter += 1
            # also cache under (target, workload) for later repeat queries
            self.update(target, wkl, cfg)
            cfg.workload = wkl
            return cfg
        key = (str(target), workload)
        if key not in self._global_cfg_dict:
            msg = (
                f"Config for target={target}, workload={workload} is missing in ApplyGraphBest "
                f"context. A fallback configuration is used, which may bring great performance "
                f"regression."
            )
            logger.warning(msg)
            cfg = FallbackConfigEntity()
            self._global_cfg_dict[key] = cfg
        else:
            cfg = self._global_cfg_dict[key]
        return cfg

    def update(self, target, workload, cfg):
        key = (str(target), workload)
        self._global_cfg_dict[key] = cfg
| 17,019 | 31.419048 | 100 | py |
tvm | tvm-main/python/tvm/autotvm/task/topi_integration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name,unused-argument
"""
Decorators for registering tunable templates to TOPI.
These decorators can make your simple implementation be able to use different configurations
for different workloads.
Here we directly use all arguments to the TOPI call as "workload", so make sure all the arguments
(except tvm.te.Tensor) in you calls are hashable. For tvm.te.Tensor,
we will serialize it to a hashable tuple.
See tvm/topi/python/topi/arm_cpu/depthwise_conv2d.py for example usage.
"""
import functools
import tvm.te._ffi_api
from tvm.target import Target
from tvm.te import tensor
from .task import (
args_to_workload,
serialize_args,
DispatchContext,
_register_task_compute,
_register_task_schedule,
)
# Task extractor for relay program
class TaskExtractEnv:
    """Global environment for extracting tuning tasks from graph"""

    current = None
    registered = None

    def __init__(self, allow_duplicate=False):
        self.allow_duplicate = allow_duplicate
        self.wanted_relay_ops = None
        self.modified_funcs = []
        self.task_collection = []
        self.tracing = False

    def __enter__(self):
        # Start a fresh trace: clear previously collected tasks.
        self.task_collection = []
        self.tracing = True
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.tracing = False

    def reset(self, wanted_relay_ops=None):
        """Reset task collections

        Parameters
        ----------
        wanted_relay_ops: List of tvm.ir.Op
            The relay ops to be extracted
        """
        self.task_collection = []
        self.wanted_relay_ops = wanted_relay_ops

    def add_task(self, task_name, args):
        """Add AutoTVM task

        Parameters
        ----------
        task_name: str
            AutoTVM task name.

        args: tuple
            Arguments to the TOPI function.
        """
        entry = (task_name, serialize_args(args))
        # Skip duplicates unless duplicates were explicitly requested.
        if not self.allow_duplicate and entry in self.task_collection:
            return
        self.task_collection.append(entry)

    def get_tasks(self):
        """Get collected tasks

        Returns
        -------
        tasks: List of tuple(name, args)
            A list of tasks extracted from the graph
        """
        return self.task_collection

    @staticmethod
    def get(allow_duplicate=False):
        """Get the single instance of TaskExtractEnv

        Parameters
        ----------
        allow_duplicate : boolean
            Whether to fetch all workloads in the network,
            even though some of them are the same. This is
            useful for graph tuning.

        Returns
        -------
        env: TaskExtractEnv
            The single instance of TaskExtractEnv
        """
        if not TaskExtractEnv.current:
            TaskExtractEnv.current = TaskExtractEnv(allow_duplicate)
        else:
            # Reuse the singleton, but honor the requested duplicate policy.
            TaskExtractEnv.current.allow_duplicate = allow_duplicate
        return TaskExtractEnv.current
def register_topi_compute(task_name, func=None):
    """Register a tunable template for a topi compute function.

    The registration will wrap this topi compute to take `cfg` as the first argument,
    followed by the original argument list. It uses all its argument as workload and
    stores this "workload" to its final ComputeOp, which can be used to reconstruct
    "workload" in the following topi_schedule call.

    Parameters
    ----------
    task_name: str
        The AutoTVM task name

    func: None or callable
        If it is None, return a decorator.
        If is callable, decorate this function.

    Returns
    -------
    decorator: callable
        A decorator

    Examples
    --------
    See tvm/topi/python/topi/arm_cpu/depthwise_conv2d.py for example usage.
    """

    def _decorate(topi_compute):
        @functools.wraps(topi_compute)
        @_register_task_compute(task_name)
        def wrapper(*args, **kwargs):
            """wrapper function for topi compute"""
            assert not kwargs, "Do not support kwargs in template function call"
            # When tracing (task extraction), record this call as a task.
            task_env = TaskExtractEnv.current
            if task_env is not None and task_env.tracing:
                task_env.add_task(task_name, args)
            workload = args_to_workload(args, task_name)
            tgt = Target.current()
            # Ask the active dispatch context for the config of this workload.
            cfg = DispatchContext.current.query(tgt, workload)
            node = topi_compute(cfg, *args)

            # attach workload to return op: ops are immutable, so rebuild the
            # op through the FFI with a "workload" entry added to its attrs.
            op = node.op
            attrs = {}
            for k, v in node.op.attrs.items():
                attrs[k] = v
            attrs["workload"] = workload
            if isinstance(op, tensor.ComputeOp):
                op = tvm.te._ffi_api.ComputeOp(op.name, op.tag, attrs, op.axis, op.body)
            elif isinstance(op, tensor.ExternOp):
                op = tvm.te._ffi_api.ExternOp(
                    op.name,
                    op.tag,
                    attrs,
                    op.inputs,
                    op.input_placeholders,
                    op.output_placeholders,
                    op.body,
                )
            else:
                raise RuntimeError("Unsupported op type: " + str(type(op)))

            # Preserve the compute's output arity: single tensor in, single
            # tensor out; otherwise return one output per element.
            if isinstance(node, tensor.Tensor):
                return op.output(0)
            return [op.output(i) for i in range(len(node))]

        return wrapper

    if func:
        return _decorate(func)
    return _decorate
def register_topi_schedule(task_name, func=None):
    """Register a tunable template for a topi schedule function.

    The registration will wrap this topi schedule to take `cfg` as the first argument,
    followed by the original argument list.

    Note that this function will try to find "workload" from all the ComputeOp in the input.
    You can attach "workload" to your compute op by using :any:`register_topi_compute`.

    The task name has to be the same as that of the corresponding topi compute function.

    Parameters
    ----------
    task_name: str
        The AutoTVM task name

    func: None or callable
        If it is None, return a decorator.
        If is callable, decorate this function.

    Returns
    -------
    decorator: callable
        A decorator

    Examples
    --------
    See tvm/topi/python/topi/arm_cpu/depthwise_conv2d.py for example usage.
    """

    def _decorate(topi_schedule):
        @functools.wraps(topi_schedule)
        @_register_task_schedule(task_name)
        def wrapper(outs, *args, **kwargs):
            """wrapper function for topi schedule"""
            # Recover the workload attached to the output ops by the matching
            # register_topi_compute wrapper.
            workload = get_workload(outs, task_name)
            if workload is None:
                raise RuntimeError(
                    f"Cannot find TOPI workload {task_name}. "
                    "Is it registered with `register_topi_compute`?"
                )
            tgt = Target.current()
            cfg = DispatchContext.current.query(tgt, workload)
            return topi_schedule(cfg, outs, *args, **kwargs)

        return wrapper

    if func:
        return _decorate(func)
    return _decorate
def get_workload(outs, task_name=None):
    """Retrieve the workload from outputs.

    Performs a depth-first search over the producer ops of ``outs`` and
    returns the first "workload" attribute found (attached by
    :any:`register_topi_compute`). When ``task_name`` is given, only a
    workload whose task name matches is returned. Returns None if no
    matching workload is found.
    """
    visited = set()

    def traverse(tensors):
        """traverse all ops to find attached workload"""
        for t in tensors:
            op = t.op
            # each op is inspected at most once, even in diamond-shaped DAGs
            if op in visited:
                continue
            visited.add(op)
            # inputs first (depth-first), then this op's own attrs
            wkl = traverse(op.input_tensors)
            if wkl is not None:
                return wkl

            if "workload" in op.attrs:
                ret = args_to_workload(op.attrs["workload"])
                if task_name is None or ret[0] == task_name:
                    return ret
        return None

    outs = [outs] if isinstance(outs, tensor.Tensor) else outs
    return traverse(outs)
| 8,590 | 30.354015 | 97 | py |
tvm | tvm-main/python/tvm/autotvm/task/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Task is a tunable composition of template functions.
Tuner takes a tunable task and optimizes the joint configuration
space of all the template functions in the task.
This module defines the task data structure, as well as a collection(zoo)
of typical tasks of interest.
"""
from .task import (
Task,
create,
get_config,
args_to_workload,
template,
serialize_args,
deserialize_args,
)
from .space import ConfigSpace, ConfigEntity
from .code_hash import attach_code_hash, attach_code_hash_to_arg
from .dispatcher import (
DispatchContext,
ApplyConfig,
ApplyFixedConfig,
ApplyHistoryBest,
FallbackContext,
clear_fallback_cache,
ApplyGraphBest,
)
from .topi_integration import (
register_topi_compute,
register_topi_schedule,
TaskExtractEnv,
get_workload,
)
from .relay_integration import extract_from_program, extract_from_multiple_program
| 1,701 | 31.113208 | 82 | py |
tvm | tvm-main/python/tvm/autotvm/task/code_hash.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Decorator functions for hashing schedule code
code hashing is used to check the consistence of schedule code and the parameters loaded from log
"""
import functools
import inspect
import zlib
from tvm.te import schedule
def attach_code_hash(s):
    """Decorator for attaching a code hash to a schedule.

    The decorated function is executed, then a CRC32 hash of its own source
    code is stored on ``s.code_hash`` as a bare hex string.

    Parameters
    ----------
    s: Schedule
        tvm.te.schedule.Schedule to attach the hash to
    """

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            func(*args, **kwargs)
            source_lines, _ = inspect.getsourcelines(func)
            checksum = zlib.crc32("".join(source_lines).encode())
            # hex(...)[2:] drops the "0x" prefix.
            s.code_hash = hex(checksum)[2:]

        return wrapper

    return decorator
def attach_code_hash_to_arg(arg_idx=1):
    """Decorator for attaching a code hash to a schedule.

    After the decorated function runs, a CRC32 hash of its source code is
    stored on the schedule found at position ``arg_idx`` of its positional
    arguments, as a bare hex string.

    Parameters
    ----------
    arg_idx: int
        index of the argument (expected to be a Schedule) to attach the code
        hash to
    """

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            func(*args, **kwargs)
            target = args[arg_idx]
            assert isinstance(target, schedule.Schedule)
            source_lines, _ = inspect.getsourcelines(func)
            # hex(...)[2:] drops the "0x" prefix.
            target.code_hash = hex(zlib.crc32("".join(source_lines).encode()))[2:]

        return wrapper

    return decorator
| 2,137 | 29.112676 | 97 | py |
tvm | tvm-main/python/tvm/autotvm/graph_tuner/base_graph_tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-arguments,too-many-locals,too-many-statements,too-many-instance-attributes,too-many-branches,too-many-nested-blocks,invalid-name,unused-argument,unused-variable,no-member,no-value-for-parameter
"""Base class for graph tuner."""
import logging
from abc import abstractmethod
import numpy as np
from tvm import topi
import tvm
from tvm import te
from tvm import autotvm, relay
from tvm.autotvm.task import get_config
from tvm.autotvm.record import encode, load_from_file
from tvm.autotvm.measure import MeasureResult, MeasureInput
from tvm.target import Target
from ...target import Target
from .utils import (
is_boundary_node,
get_in_nodes,
get_out_nodes,
has_multiple_inputs,
bind_inputs,
expr2graph,
)
from ._base import INVALID_LAYOUT_TIME
from ._base import OPT_OUT_OP
def get_infer_layout(task_name):
    """Return the layout-inference function for an AutoTVM task name.

    Dispatches on the task-name prefix; raises ValueError for any task that
    is neither a conv2d nor a depthwise_conv2d variant.
    """
    # The two prefixes are mutually exclusive ("depthwise_conv2d" does not
    # start with "conv2d"), so the dispatch order does not matter.
    prefix_table = (
        ("depthwise_conv2d", topi.nn.depthwise_conv2d_infer_layout),
        ("conv2d", topi.nn.conv2d_infer_layout),
    )
    for prefix, infer_func in prefix_table:
        if task_name.startswith(prefix):
            return infer_func
    raise ValueError(f"Cannot find infer layout for task {task_name}")
@autotvm.template("layout_transform")
def layout_transform(*args):
    """Autotvm layout transform template.

    Wraps ``topi.layout_transform`` as an AutoTVM template so the time of a
    layout conversion can be measured like any other task.
    """
    cfg = get_config()
    # NOTE(review): -1 appears to flag that this template has no meaningful
    # FLOP count — confirm against ConfigSpace.add_flop semantics.
    cfg.add_flop(-1)
    data = args[0]
    out = topi.layout_transform(*args)
    sch = topi.generic.schedule_injective([out])
    return sch, [data, out]
class BaseGraphTuner(object):
"""Class to search schedules considering both kernel execution time and
layout transformation time.
Before creating a Graph Executor instance, schedule candidates for all kernels in
graph should be provided through tensor-level tuning.
"""
    def __init__(
        self,
        graph,
        input_shapes,
        records,
        target_ops,
        target,
        max_sch_num=20,
        dtype="float32",
        verbose=True,
        log_file="graph_tuner.log",
        log_level=logging.DEBUG,
        name="graph_tuner",
    ):
        """Create a GlobalTuner instance. Local schedule searching for all nodes with
        target_op in the input graph and layout transformation benchmark need to be
        executed before initialization.

        Parameters
        ----------
        graph : tvm.relay.function.Function
            Input graph

        input_shapes : dict of str to tuple.
            Input shapes of graph

        records : str or iterator of (MeasureInput, MeasureResult)
            Collection of kernel level tuning records.
            If it is str, then it should be the filename of a records log file.
            Each row of this file is an encoded record pair.
            Otherwise, it is an iterator.

        target_ops : List of tvm.ir.Op
            Target tuning operators.

        target : str or tvm.target
            Compilation target.

        max_sch_num : int, optional
            Maximum number of schedule candidates for each workload.

        dtype : str, optional
            Data type.

        verbose : bool, optional
            Whether log messages are also echoed to the console.

        log_file : str, optional
            graph tuner log file name

        log_level : int, optional
            Logging level passed to the ``logging`` module.

        name : str, optional
            Name of global tuner.
        """
        self._node_list = []
        self._layout_transform_perf_records = {}
        self._layout_transform_interlayer_cost = {}
        self._input_shapes = input_shapes
        self._target_ops = target_ops
        self._name = name
        self._max_sch_num = max_sch_num
        self._optimal_sch_dict = {}
        self._records = records
        self._dtype = dtype
        if isinstance(target, str):
            target = Target(target)
        self._target = target
        self._optimal_record_dict = {}

        # Set up logger; probe existing handlers so repeated instantiation of
        # a tuner with the same name does not duplicate log output.
        self._verbose = verbose
        self._logger = logging.getLogger(name + "_logger")
        need_file_handler = need_console_handler = True
        for handler in self._logger.handlers:
            if handler.__class__.__name__ == "FileHandler":
                need_file_handler = False
            if handler.__class__.__name__ == "StreamHandler":
                need_console_handler = False
        self._log_level = log_level
        self._log_file = log_file
        self._formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
        self._logger.setLevel(log_level)
        if need_file_handler:
            file_handler = logging.FileHandler(log_file)
            file_handler.setFormatter(self._formatter)
            self._logger.addHandler(file_handler)
        if self._verbose and need_console_handler:
            console_handler = logging.StreamHandler()
            console_handler.setFormatter(self._formatter)
            self._logger.addHandler(console_handler)
        self._logger.setLevel(log_level)
        self._logger.propagate = False

        # Generate workload and schedule dictionaries.
        if isinstance(graph, tvm.IRModule):
            graph = graph["main"]

        if isinstance(graph, relay.function.Function):
            node_dict = {}
            graph = bind_inputs(graph, input_shapes, dtype)
            expr2graph(graph, self._target_ops, node_dict, self._node_list, target)
        else:
            raise RuntimeError(f"Unsupported graph type: {type(graph)}")

        self._graph = graph
        self._in_nodes_dict = get_in_nodes(self._node_list, self._target_ops, input_shapes.keys())
        if len(self._in_nodes_dict) == 0:
            raise RuntimeError(
                f"Could not find any input nodes with whose "
                f"operator is one of {self._target_ops}"
            )
        self._out_nodes_dict = get_out_nodes(self._in_nodes_dict)
        self._fetch_cfg()
        self._opt_out_op = OPT_OUT_OP

        # Setup infer_layout for elemwise-like nodes
        # Note: graph tuner currently only supports tuning of single input and single output
        # op as target op, such as conv2d, dense and conv2d_transpose. In this case, we can
        # reuse infer_layout function from target ops for elemwise-like nodes. The behavior
        # is to modify the first tensor shape of input workload to the output shape of
        # elemwise-like node, and use infer_layout function from input op to generate layouts.
        input_names = self._input_shapes.keys()
        for idx in sorted(self._in_nodes_dict.keys()):
            if has_multiple_inputs(self._node_list, idx, input_names, self._opt_out_op):
                node_entry = self._node_list[idx]
                node_entry["topi_op"] = []
                node_entry["workloads"] = []
                for input_idx in self._in_nodes_dict[idx]:
                    input_node = self._node_list[input_idx]
                    if not is_boundary_node(input_node, input_names):
                        # Non-boundary input: inherit its topi op / workload,
                        # substituting this node's output shape for the first
                        # input tensor of the inherited workload.
                        input_topi_op = input_node["topi_op"][0]
                        node_entry["topi_op"].append(input_topi_op)
                        # Only replace the first input tensor
                        input_workload = input_node["workloads"][0]
                        first_tensor = input_workload[1]
                        dtype = first_tensor[-1]
                        new_shape = tuple([val.value for val in node_entry["types"][0].shape])
                        actual_workload = (
                            (input_workload[0],)
                            + (("TENSOR", new_shape, dtype),)
                            + input_workload[2:]
                        )
                        node_entry["workloads"].append(actual_workload)
                        if "record_candidates" not in node_entry:
                            node_entry["record_candidates"] = input_node["record_candidates"]
                    else:
                        # Boundary (graph input) node: keep placeholder slots
                        # so positions stay aligned with _in_nodes_dict.
                        node_entry["topi_op"].append(None)
                        node_entry["workloads"].append(None)
def _fetch_cfg(self):
    """Read and pre-process input schedules.

    Groups the tuning records by task workload, keeps only the fastest record
    per distinct (input layout, output layout) pair for each workload, and
    stores up to ``self._max_sch_num`` fastest candidates on each target-op
    node as ``node_entry["record_candidates"]``.
    """
    # Records may be given as a log-file path or as an in-memory iterable.
    if isinstance(self._records, str):
        records = load_from_file(self._records)
    else:
        records = self._records
    # Bucket records by task workload: workload -> [(MeasureInput, MeasureResult), ...]
    cfg_dict = {}
    for record in records:
        in_measure, _ = record
        workload = in_measure.task.workload
        if workload not in cfg_dict:
            cfg_dict[workload] = []
        cfg_dict[workload].append(record)

    # Nodes sharing a workload share candidates; cache to avoid recomputation.
    cache_dict = {}
    for key in self._in_nodes_dict:
        node_entry = self._node_list[key]
        if node_entry["op"] not in self._target_ops:
            continue
        workload = node_entry["workloads"][0]
        if workload in cache_dict:
            node_entry["record_candidates"] = cache_dict[workload]
            continue
        record_candidates = []
        infer_layout_func = get_infer_layout(node_entry["topi_op"][0])
        # layouts -> best (fastest) record seen so far for that layout pair
        layout_tracking_dict = {}
        for record in cfg_dict[workload]:
            in_measure, out_measure = record
            workload = in_measure.task.workload
            cfg = in_measure.config
            # For multiple cfgs which produces the same in/out layouts,
            # only the most efficient one is preserved.
            with self._target:
                layouts = infer_layout_func(workload, cfg)
                if layouts in layout_tracking_dict:
                    cost = out_measure.costs[0]
                    current_best_cost = layout_tracking_dict[layouts][1].costs[0]
                    if cost < current_best_cost:
                        layout_tracking_dict[layouts] = record
                else:
                    layout_tracking_dict[layouts] = record
        # Keep the max_sch_num fastest candidates, ascending by cost.
        sorted_records = sorted(
            layout_tracking_dict.values(), key=lambda item: item[1].costs[0]
        )
        for i in range(min(self._max_sch_num, len(sorted_records))):
            record_candidates.append(sorted_records[i])
        node_entry["record_candidates"] = record_candidates
        cache_dict[workload] = record_candidates
def _iterate_layout_transform(self, callback):
    """Iterate all possible layout transformations and execute callback for each
    iteration. The callback accepts 5 arguments: from_node_idx, to_node_idx,
    from_sch_idx, to_sch_idx, and args, which represent the argument list of the
    layout transformation.
    """
    input_names = self._input_shapes.keys()
    # Avoid visiting the same (input_node, output_node) pair twice.
    pair_tracker = set()
    for key, val in self._in_nodes_dict.items():
        node_entry = self._node_list[key]
        target_input_idx = -1
        target_input_pos = -1
        # For elemwise-like (multi-input) nodes, locate the first non-boundary
        # input; other inputs transform their layout towards it.
        if has_multiple_inputs(self._node_list, key, input_names, self._opt_out_op):
            for i, item in enumerate(val):
                node = self._node_list[item]
                if not is_boundary_node(node, input_names):
                    target_input_idx = item
                    target_input_pos = i
                    break

        for i, item in enumerate(val):
            i_idx = item
            in_node_entry = self._node_list[i_idx]
            if is_boundary_node(in_node_entry, input_names):
                continue

            if node_entry["op"] in self._target_ops:
                # Transformation flows input_node -> this target-op node.
                o_idx = key
                o_infer_layout_func = get_infer_layout(node_entry["topi_op"][0])
                o_wkl = node_entry["workloads"][0]
                i_topi_op = in_node_entry["topi_op"][0]
                i_wkl = in_node_entry["workloads"][0]
                # Skip over empty workload slots on the input node.
                pivot = 0
                while not i_wkl:
                    pivot += 1
                    i_topi_op = in_node_entry["topi_op"][pivot]
                    i_wkl = in_node_entry["workloads"][pivot]
                i_infer_layout_func = get_infer_layout(i_topi_op)
            else:
                # Elemwise-like node: transformation flows from the i-th input
                # towards the leftmost non-boundary input (target_input_idx).
                o_idx = target_input_idx
                if i <= target_input_pos:
                    continue
                o_infer_layout_func = get_infer_layout(node_entry["topi_op"][0])
                o_wkl = node_entry["workloads"][target_input_pos]
                i_infer_layout_func = get_infer_layout(node_entry["topi_op"][i])
                i_wkl = node_entry["workloads"][i]

            if (i_idx, o_idx) in pair_tracker:
                continue
            pair_tracker.add((i_idx, o_idx))

            # Enumerate every (input schedule, output schedule) combination.
            for m, i_record in enumerate(in_node_entry["record_candidates"]):
                for n, o_record in enumerate(node_entry["record_candidates"]):
                    i_cfg, o_cfg = i_record[0].config, o_record[0].config
                    with self._target:
                        i_input_info, i_output_info = i_infer_layout_func(i_wkl, i_cfg)
                        o_input_info, o_output_info = o_infer_layout_func(o_wkl, o_cfg)
                    if (
                        len(i_input_info) > 1
                        or len(i_output_info) > 1
                        or len(o_input_info) > 1
                        or len(o_output_info) > 1
                    ):
                        raise RuntimeError(
                            "Graph tuner only supports target operator "
                            "with single input and single output. "
                            "Please check target_ops argument."
                        )

                    # Transformation goes from the producer's output layout to
                    # the consumer's expected layout.
                    in_shape, in_layout = i_output_info[0]
                    if node_entry["op"] in self._target_ops:
                        _, out_layout = o_input_info[0]
                    else:
                        _, out_layout = o_output_info[0]
                    data_placeholder = te.placeholder(in_shape, name="data", dtype=self._dtype)
                    args = [data_placeholder, in_layout, out_layout]
                    callback(i_idx, o_idx, m, n, args)
def _create_matrix_callback(self, from_node_idx, to_node_idx, from_sch_idx, to_sch_idx, args):
    """Record the layout-transformation cost between a pair of nodes.

    Appends the measured (or zero) transformation time into the nested
    cost matrix ``self._layout_transform_interlayer_cost`` keyed by the
    (from_node_idx, to_node_idx) pair.
    """
    src_layout, dst_layout = args[1], args[2]
    ltf_workload = autotvm.task.args_to_workload(args, "layout_transform")
    pair_key = (from_node_idx, to_node_idx)

    # Identical layouts need no transformation, hence zero cost; otherwise
    # look up the benchmarked cost for this transform workload.
    if src_layout == dst_layout:
        transform_cost = 0
    else:
        transform_cost = self._layout_transform_perf_records[ltf_workload][1].costs[0]

    cost_matrix = self._layout_transform_interlayer_cost.setdefault(pair_key, [])
    # Grow one row per source-schedule index as they are visited in order.
    if len(cost_matrix) <= from_sch_idx:
        cost_matrix.append([])
    cost_matrix[from_sch_idx].append(transform_cost)
def benchmark_layout_transform(
    self,
    min_exec_num=100,
    timeout=10,
    use_rpc=False,
    device_key=None,
    host="127.0.0.1",
    port=9190,
    n_parallel=1,
    build_func="default",
    layout_records=None,
    target_host=None,
    infer_layout=False,
    runner=None,
):
    """Benchmark all possible layout transformation in the graph,
    given a set of schedule candidates for each workload of target operator.

    Parameters
    ----------
    min_exec_num : int, optional
        Minimum number of execution. Final execution time is the average of
        all execution time.

    timeout : int, optional
        Time out for each execution.

    use_rpc : boolean, optional
        Whether to use rpc mode for benchmarking.

    device_key : str, optional
        Remote device key which can be queried by
        python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190

    host : str, optional
        IP address used to create RPC tracker on host machine.

    port : int, optional
        Port number used to create RPC tracker on host machine.

    n_parallel: int, optional
        The number of measurement task that can run in parallel.
        Set this according to the number of cpu cores (for compilation) and
        the number of devices you have (for measuring generate code).

    build_func: str or callable, optional
        'default': call default builder. This works for normal target (llvm, cuda)

        'ndk': use Android NDK to create shared library. Use this for android target.

        callable: customized build function for other backends (e.g. VTA).
                  See autotvm/measure/measure_methods.py::default_build_func for example.

    layout_records : str or iterator of (MeasureInput, MeasureResult). optional
        Collection of layout_transform benchmarking records.
        If is str, then it should be the filename of a records log file.
        Each row of this file is an encoded record pair. Otherwise, it is an iterator.

        If this argument is set, graph tuner will first check whether layout_transform
        workload already exists in records and skip benchmarking if possible.

    target_host : str, optional
        str or :any:`tvm.target.Target` optional
        Host compilation target, if target is device.
        When TVM compiles device specific program such as CUDA,
        we also need host(CPU) side code to interact with the driver
        setup the dimensions and parameters correctly.
        target_host is used to specify the host side codegen target.
        By default, llvm is used if it is enabled,
        otherwise a stackvm intepreter is used.

    infer_layout : bool, optional
        Whether to infer layout transformation time if it doesn't exist in records, instead
        of benchmarking on target device.

        This might bring performance loss comparing to benchmarking layout transformation.

    runner : Runner, optional
        Accept a user-supplied runner
    """
    self._logger.info("Start to benchmark layout transformation...")
    self._target, target_host = Target.canon_target_and_host(self._target, target_host)

    if layout_records is None and infer_layout:
        raise RuntimeError("Requires some records to infer layout transformation time.")

    # FIX: the original code repeated this isinstance/load_from_file pair a
    # second time below; the second check could never fire (after this call
    # `layout_records` is no longer a str), so the duplicate was removed.
    if isinstance(layout_records, str):
        layout_records = load_from_file(layout_records)
        if not layout_records and infer_layout:
            raise RuntimeError("Records must be non-empty to infer layout transformation time.")

    # Pre-load known records and accumulate flops/time to derive an average
    # per-element transformation time, used when infer_layout is enabled.
    num_flops, total_time = 0, 0
    if layout_records is not None:
        for record in layout_records:
            ltf_wkl = record[0].task.workload
            self._layout_transform_perf_records[ltf_wkl] = record
            input_shape = ltf_wkl[1][1]
            flops = np.prod(input_shape)
            num_flops += flops
            total_time += record[1].costs[0]
    avg_time = total_time / num_flops if num_flops > 0 else 0

    args_list = []

    def _fetch_args_callback(from_node_idx, to_node_idx, from_sch_idx, to_sch_idx, args):
        """Callback function to fetch layout transform args"""
        _, in_layout, out_layout = args
        if in_layout != out_layout:
            args_list.append(args)

    self._iterate_layout_transform(_fetch_args_callback)

    def _log_to_list(record_list):
        """Callback to log result to a list."""

        def _callback(_, inputs, results):
            """Callback implementation"""
            record_list.append((inputs[0], results[0]))

        return _callback

    builder = autotvm.LocalBuilder(n_parallel=n_parallel, build_func=build_func)
    if use_rpc:
        if device_key is None:
            raise RuntimeError("device_key need to be set to use rpc tracker mode.")
        runner = autotvm.measure.RPCRunner(
            device_key,
            host,
            port,
            n_parallel=n_parallel,
            number=min_exec_num,
            repeat=1,
            timeout=timeout,
        )
    elif not runner:
        runner = autotvm.LocalRunner(number=min_exec_num, repeat=1, timeout=timeout)
    measure_option = autotvm.measure_option(builder=builder, runner=runner)

    for args in args_list:
        data, in_layout, out_layout = args
        ltf_workload = autotvm.task.args_to_workload(args, "layout_transform")
        if ltf_workload in self._layout_transform_perf_records:
            continue

        if infer_layout:
            input_shape = ltf_workload[1][1]
            flops = 1
            for i in input_shape:
                flops *= i

            # Rule out invalid layout transformations: a valid transform
            # preserves the total number of elements.
            out = topi.layout_transform(data, in_layout, out_layout)
            out_flops = 1
            for i in topi.utils.get_const_tuple(out.shape):
                out_flops *= i

            if flops != out_flops:
                inferred_time = INVALID_LAYOUT_TIME
            else:
                inferred_time = flops * avg_time

            record_input = MeasureInput(target=self._target, task=None, config=None)
            record_output = MeasureResult(
                costs=(inferred_time,), error_no=0, all_cost=-1, timestamp=-1
            )
            self._layout_transform_perf_records[ltf_workload] = (record_input, record_output)
            continue

        # Benchmark on device: a single grid-search trial is enough since a
        # layout_transform task has exactly one configuration.
        records = []
        task = autotvm.task.create("layout_transform", args=args, target=self._target)
        tuner = autotvm.tuner.GridSearchTuner(task)
        tuner.tune(n_trial=1, measure_option=measure_option, callbacks=[_log_to_list(records)])
        if not isinstance(records[0][1].costs[0], float):
            # Measurement failed; mark this transform as effectively unusable.
            records[0] = (records[0][0], records[0][1]._replace(costs=(INVALID_LAYOUT_TIME,)))
        self._layout_transform_perf_records[ltf_workload] = records[0]

    self._iterate_layout_transform(self._create_matrix_callback)
    self._logger.info("Benchmarking layout transformation successful.")
@property
def layout_transform_perf_records(self):
    """Get layout transformation dictionary for input graph.

    Returns
    -------
    layout_transform_perf_records : dict of tuple to (MeasureInput, MeasureResult)
        Layout transformation dictionary for input graph, keyed by the
        layout_transform task workload.
    """
    return self._layout_transform_perf_records
def get_optimal_records(self):
    """Convert optimal record dictionary to a list of records
    with ascending order of node index in graph.

    Returns
    -------
    sch_list : list of tuple
        List of records with ascending order of node index in graph.
    """
    optimal = self._optimal_record_dict
    picked = []
    # Walk node indices in ascending order; boundary / non-target nodes
    # carry no schedule and are skipped.
    for node_idx in sorted(optimal):
        entry = self._node_list[node_idx]
        if entry["op"] in self._target_ops:
            picked.append(entry["record_candidates"][optimal[node_idx]])
    return picked
def write_opt_sch2record_file(self, record_file="graph_opt_schedule.log"):
    """Write graph level optimal schedules into file.

    Parameters
    ----------
    record_file : str, optional
        Output schedule file.
    """
    # Open in append mode so repeated runs accumulate into the same log.
    with open(record_file, "a") as log_file:
        for record in self.get_optimal_records():
            log_file.write(encode(record[0], record[1]) + "\n")
    msg = f"Writing optimal schedules to {record_file} successfully."
    self._logger.info(msg)
@abstractmethod
def run(self, **kwargs):
    """Run graph tuning.

    Subclasses implement the actual search strategy (e.g. dynamic
    programming or PBQP) and populate the optimal record dictionary.
    """
# ==== file: tvm-main/python/tvm/autotvm/graph_tuner/dynamic_programming_tuner.py ====
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-error,too-many-locals,too-many-statements,too-many-branches,unused-variable
"""Dynamic programming tuner."""
import sys
import numpy as np
from ._base import MAX_OUTPUT_NODES
from .base_graph_tuner import BaseGraphTuner
from .dynamic_programming_stage import DPStage
from .utils import has_multiple_inputs, is_boundary_node
if sys.version_info[0] == 3:
import queue
else:
import Queue as queue
class DPTuner(BaseGraphTuner):
    """Tuner which uses dynamic programming to solve MDP problem.

    Note: currently dynamic programming is used to solve this MDP problem. However,
    this problem is intrinsically non-polynomial. DP can't apply for more complicated
    models, such as networks with many element-wise sum operators. In this case, switch
    to heuristic algorithm such as PBQP tuner.
    """

    def __init__(self, *args, **kwargs):
        """Create a dynamic programming tuner."""
        super(DPTuner, self).__init__(*args, **kwargs)
        # _num_states / _max_num_states are (re)initialized in run().
        self._num_states = self._max_num_states = None
        self._stage_dict = {}
        self._dep_dict = {}
        self._counted_nodes_set = set()

        # Shared mutable state handed to every DPStage created in _forward();
        # stages read and update these dictionaries in place.
        self._global_data_dict = {
            "dtype": self._dtype,
            "counted_nodes_set": self._counted_nodes_set,
            "stage_dict": self._stage_dict,
            "in_nodes_dict": self._in_nodes_dict,
            "out_nodes_dict": self._out_nodes_dict,
            "dep_dict": self._dep_dict,
            "node_list": self._node_list,
            "input_shapes": self._input_shapes,
            "layout_transform_interlayer_cost": self._layout_transform_interlayer_cost,
        }

    def _check_num_states(self, num_states):
        """Track the number of states and abort if the user-set cap is exceeded."""
        self._num_states += num_states
        if self._max_num_states is not None:
            if self._num_states > self._max_num_states:
                raise RuntimeError(
                    "Too many states detected while running dynamic "
                    "programming: got %d states but upper limit is %d."
                    % (self._num_states, self._max_num_states)
                )

    def _forward(self):
        """Forward pass in DP to generate states for all stages."""
        self._logger.info("Start forward pass...")
        # Stages are created in ascending node-index order so every input
        # stage exists before its consumers reference it.
        for node_idx in sorted(self._in_nodes_dict.keys()):
            stage = DPStage(idx=node_idx, target_ops=self._target_ops, **self._global_data_dict)
            self._check_num_states(stage.full_states.size)
            self._stage_dict[node_idx] = stage
        self._logger.info("Finished forward pass.")

    def _backward(self):
        """Backward pass in DP to generate optimal solution."""
        self._logger.info("Start backward pass...")
        input_names = self._input_shapes.keys()
        optimal_record_dict = {}
        # Pick optimal schedule for output nodes
        output_idx_list = []
        for key, val in self._out_nodes_dict.items():
            if not val:
                output_idx_list.append(key)

        # Restrict number of output nodes to avoid numpy reshape error
        if len(output_idx_list) > MAX_OUTPUT_NODES:
            msg = (
                "The number of outputs in graph is larger than upper "
                "limit: %s vs %s. Usually this is caused by too many "
                "LAYOUT_FIXED_OP in graph. Switch to greedily select schedule."
                "No action required at this moment. We will continuously improve graph tuner"
                % (len(output_idx_list), MAX_OUTPUT_NODES)
            )
            self._logger.warning(msg)
            # Greedy fallback: pick schedule 0 everywhere.
            self._optimal_record_dict = {key: 0 for key in self._in_nodes_dict}
            return

        states_list, aligned_node_list = DPStage.align_states(
            output_idx_list, self._stage_dict, self._node_list
        )

        num_states = states_list[0][3].size
        self._check_num_states(num_states * len(output_idx_list))
        aligned_node_shape = states_list[0][3].shape
        # Initialize min_time to an upper bound (sum of per-output maxima)
        # so the first real candidate always improves on it.
        min_time = 0
        min_pos = -1
        for states in states_list:
            min_time += np.amax(states[3])
        flatten_states_list = [current_states[3].flatten() for current_states in states_list]
        # Exhaustively scan every aligned state index for the smallest total time.
        for i in range(num_states):
            current_time = 0
            for j, current_states in enumerate(states_list):
                current_time += flatten_states_list[j][i]
            if min_time > current_time:
                min_time = current_time
                min_pos = i

        # Decode the winning flat index back into a schedule index per node.
        for i, states in enumerate(states_list):
            current_major_axis = states[1]
            current_sch_idx = (
                min_pos % (states[2] * aligned_node_shape[current_major_axis])
            ) // states[2]
            optimal_record_dict[aligned_node_list[i]] = current_sch_idx
        # Pick optimal schedule for dependencies of output nodes
        for i in range(len(states_list), len(aligned_node_list)):
            multiplier = 1
            for j in range(i + 1, len(aligned_node_list)):
                multiplier *= aligned_node_shape[j]
            optimal_record_dict[aligned_node_list[i]] = (
                min_pos // multiplier % aligned_node_shape[i]
            )

        # Backward pass to get optimal schedules for other nodes
        bfs_q = queue.Queue()
        visited = set()
        for out_idx in output_idx_list:
            bfs_q.put(out_idx)
        while not bfs_q.empty():
            node_idx = bfs_q.get()
            visited.add(node_idx)
            node = self._node_list[node_idx]
            if is_boundary_node(node, input_names):
                continue
            optimal_sch_idx = optimal_record_dict[node_idx]
            full_states = self._stage_dict[node_idx].full_states
            if not has_multiple_inputs(self._node_list, node_idx, input_names, self._opt_out_op):
                # Single-input node: pick the input schedule minimizing the
                # state value, with the already-decided dependencies fixed.
                input_idx = self._in_nodes_dict[node_idx][0]
                input_node = self._node_list[input_idx]
                if is_boundary_node(input_node, input_names):
                    continue
                if input_idx not in visited:
                    bfs_q.put(input_idx)
                if input_idx not in optimal_record_dict:
                    dep_list = self._stage_dict[node_idx].dep
                    dep_idx = tuple([optimal_record_dict[item] for item in dep_list])
                    tmp = np.argmin(full_states, axis=1)
                    optimal_input_sch_idx = tmp[(optimal_sch_idx,) + dep_idx]
                    optimal_record_dict[input_idx] = optimal_input_sch_idx
            else:
                # Multi-input (elemwise-like) node: the first input shares the
                # node's own schedule index; remaining inputs are decoded from
                # the argmin over the not-yet-decided axes.
                input_idx_list = self._in_nodes_dict[node_idx]
                optimal_record_dict[input_idx_list[0]] = optimal_sch_idx
                full_states_idx = self._stage_dict[node_idx].full_states_idx
                tmp = full_states[optimal_sch_idx]
                new_states_idx, new_states_pos = [], []
                visited_states_idx, visited_states_pos = [], []
                for i in range(1, len(full_states_idx)):
                    if full_states_idx[i] in optimal_record_dict:
                        visited_states_idx.append(full_states_idx[i])
                        visited_states_pos.append(i - 1)
                    else:
                        new_states_idx.append(full_states_idx[i])
                        new_states_pos.append(i - 1)
                if visited_states_idx:
                    # Fix the axes of already-decided nodes by indexing them out.
                    tmp = np.transpose(tmp, tuple(visited_states_pos + new_states_pos))
                    tmp = tmp[tuple([optimal_record_dict[idx] for idx in visited_states_idx])]
                min_pos = np.argmin(tmp)
                multiplier = 1
                for i in range(len(new_states_idx)):
                    multiplier *= full_states.shape[new_states_pos[i] + 1]
                for pos, idx in zip(new_states_pos, new_states_idx):
                    multiplier //= full_states.shape[pos + 1]
                    optimal_record_dict[idx] = min_pos // multiplier
                    min_pos %= multiplier
                for input_idx in input_idx_list:
                    if input_idx not in visited:
                        bfs_q.put(input_idx)

        self._optimal_record_dict = optimal_record_dict
        # NOTE(review): this loop has no effect — its body only `continue`s and
        # nothing follows inside the loop. It looks like leftover code; consider
        # removing it (kept here to preserve behavior byte-for-byte).
        for node_idx, _ in self._in_nodes_dict.items():
            if self._node_list[node_idx]["op"] not in self._target_ops:
                continue
        self._logger.info("Finished backward pass...")

    def run(self, **kwargs):
        """Run dynamic programming solver.

        Accepts an optional ``max_num_states`` keyword to cap the total number
        of DP states; exceeding it raises RuntimeError in _check_num_states.
        """
        max_num_states = None if "max_num_states" not in kwargs else kwargs["max_num_states"]
        self._num_states = 0
        self._max_num_states = max_num_states
        self._logger.info("Start to run dynamic programming algorithm...")
        self._forward()
        self._backward()
        self._logger.info("Finished DPExecutor run.")
# ==== file: tvm-main/python/tvm/autotvm/graph_tuner/_base.py ====
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Helper functions and global data"""


# We set a large time to represent an invalid layout-transformation.
# This number is set to be 10e9 seconds to align with autotvm.
INVALID_LAYOUT_TIME = 10e9

# Upper bound on the number of graph output nodes the DP tuner will handle;
# beyond this the tuner falls back to greedy selection (see DPTuner._backward).
MAX_OUTPUT_NODES = 16

# Operators excluded from multi-input handling in the graph tuner
# (passed around as `_opt_out_op` to has_multiple_inputs).
OPT_OUT_OP = ["layout_transform"]
# ==== file: tvm-main/python/tvm/autotvm/graph_tuner/dynamic_programming_stage.py ====
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-instance-attributes,too-many-branches,too-many-statements,too-many-arguments,too-many-locals,invalid-name
"""Stage class for dynamic programming tuner"""
import numpy as np
from .utils import is_boundary_node
class DPStage(object):
    """Class to represent node in Markov decision process. A stage has states
    to represent different schedules of the current node. Since in this problem
    the action is the schedule selected for current node, action can be fully
    represented by states. No extra attribute needs for action.

    In most cases, instance of this class should be created through DPTuner.
    """

    def __init__(
        self,
        idx,
        input_shapes,
        node_list,
        counted_nodes_set,
        layout_transform_interlayer_cost,
        stage_dict,
        in_nodes_dict,
        out_nodes_dict,
        dep_dict,
        target_ops,
        dtype="float32",
    ):
        """Initialize a stage and create all states.

        Parameters
        ----------
        idx : int
            Index for current node.

        input_shapes : dict of string to tuple of int
            Input shapes for current graph.

        node_list : list of dict
            List of all nodes for current graph.

        counted_nodes_set : set of int
            Global set recording whether the execution time of a node has been counted.

        layout_transform_interlayer_cost : dict of tuple to list
            Dictionary maps node index pair to layout transformation time between them.

        stage_dict : dict of int to Stage
            Global dictionary for all stages mapping node index to stage.

        in_nodes_dict : dict of int to list of int
            Dictionary maps node index to corresponding input node index.

        out_nodes_dict : dict of int to list of int
            Dictionary maps node index to corresponding output node index.

        dep_dict : dict of int to set of int
            Dictionary maps node index to dependent node index.

        target_ops : list of str
            Target operators

        dtype : str, optional
            Data type.
        """
        # "Global" here means shared across all stages of one DPTuner run;
        # these are the same objects held in DPTuner._global_data_dict.
        self._global_input_shapes = input_shapes
        self._global_input_names = input_shapes.keys()
        self._global_node_list = node_list
        self._global_counted_nodes_set = counted_nodes_set
        self._global_layout_transform_interlayer_cost = layout_transform_interlayer_cost
        self._global_stage_dict = stage_dict
        self._global_in_nodes_dict = in_nodes_dict
        self._global_out_nodes_dict = out_nodes_dict
        self._global_dep_dict = dep_dict

        self._idx = idx
        self._node_entry = self._global_node_list[idx]
        self._target_ops = target_ops
        self._wkl = self._node_entry["workloads"][0]
        self._record_list = self._node_entry["record_candidates"]
        self._dep = []
        self._dtype = dtype
        self._states = None
        self._full_states = None
        self._full_states_idx = None
        self._create_states()

    def _create_states(self):
        """Create states."""
        node = self._global_node_list[self._idx]
        if node["op"] in self._target_ops:
            self._create_op_states()
        else:
            self._create_multi_inputs_states()

    def _create_op_states(self):
        """State creation routine for nodes with target_op."""
        input_idx = self._global_in_nodes_dict[self._idx][0]
        input_node_entry = self._global_node_list[input_idx]
        if is_boundary_node(input_node_entry, self._global_input_names):
            # Graph input feeds this node: states are just the candidate costs.
            self._full_states = np.array([record[1].costs[0] for record in self._record_list])
            self._states = self._full_states
        else:
            input_stage = self._global_stage_dict[input_idx]
            input_dep = input_stage.dep
            input_states = input_stage.states
            input_flatten_states = input_states.flatten()
            input_record_list = input_node_entry["record_candidates"]
            num_schedules = len(self._record_list)
            num_input_schedules = len(input_record_list)
            num_input_states = input_flatten_states.shape[0]

            # Axes: (this node's schedules, input node's schedules, *deps' schedules)
            full_states_shape = tuple(
                [num_schedules, num_input_schedules]
                + [
                    len(self._global_node_list[dep_idx]["record_candidates"])
                    for dep_idx in input_dep
                ]
            )
            self._full_states = np.zeros(full_states_shape).flatten().astype("float32")
            self._full_states_idx = [self._idx, input_idx] + input_dep
            dep_multiplier = 1
            for i in range(2, len(full_states_shape)):
                dep_multiplier *= full_states_shape[i]
            input_node_time_counted = input_idx in self._global_counted_nodes_set

            for i in range(num_schedules):
                current_sch_time = float(self._record_list[i][1].costs[0])
                for j in range(num_input_states):
                    input_sch_idx = j // dep_multiplier
                    layout_transform_time = self._global_layout_transform_interlayer_cost[
                        (input_idx, self._idx)
                    ][input_sch_idx][i]

                    # Add the input node's own cost only once globally.
                    if input_node_time_counted:
                        total_time = current_sch_time + layout_transform_time
                    else:
                        total_time = (
                            current_sch_time + layout_transform_time + input_flatten_states[j]
                        )
                    current_state_idx = i * num_input_states + j
                    self._full_states[current_state_idx] = total_time

            if not input_node_time_counted:
                self._global_counted_nodes_set.add(input_idx)
            self._full_states = self._full_states.reshape(full_states_shape)

            # If out degree of input node is 1, we can remove the dimension of input node,
            # since the states of input node will not be needed any more. Otherwise, input
            # node should become a dependency.
            if len(self._global_out_nodes_dict[input_idx]) == 1:
                self._states = np.amin(self._full_states, axis=1)
                self._dep = list(input_dep)
            else:
                self._states = self._full_states
                self._dep = [
                    input_idx,
                ] + input_dep

        # Update global dependency dictionary.
        # This is to monitor the dependency states to decide
        # when a dependency can be eliminated, so that total
        # number of states can be largely reduced.
        for dep_idx in self._dep:
            self._global_dep_dict[dep_idx].remove(self._idx)
            for child in self._global_out_nodes_dict[self._idx]:
                self._global_dep_dict[dep_idx].add(child)
        if len(self._global_out_nodes_dict[self._idx]) > 1:
            self._global_dep_dict[self._idx] = set()
            for child in self._global_out_nodes_dict[self._idx]:
                self._global_dep_dict[self._idx].add(child)

    def _create_multi_inputs_states(self):
        """State creation routine for multi_input operator

        In tvm, layout transformation for an elemwise-like follow the rule which
        all input operators transform their layouts to the leftmost input operator
        layout. For example:
                            elemwise-sum
                            |    |    |
                            |    |    |
                           op0  op1  op2
        In this block, the possible layout transformations are: op1 -> op0 and op2 -> op0.
        In graph tuning, a 3-D array with shape (k0, k1, k2) can represent the layout
        transformations between these three nodes. It is also possible some earlier states
        belong to other nodes(We name them as dependency) are required for dynamic programming.
        The final states array for this elemwise-sum can be with shape (e0, k0, k1, e1, k2).
        To iterate through all states, we first align the shape of op0, op1 and op2 to be
        (e0, k0, k1, e1, k2) by broadcasting the original states. We also record the axis of
        each input node in the states array, together with the multiplier. For example,
        the axis index for op0 is 1, and multiplier is k1 * e1 * k2. If current iterating index
        in the flatten array is i, the index of op0 can be computed as:
        i % (k0 * k1 * e1 * k2) // (k1 * e1 * k2).
        """
        full_input_node_list = list(self._global_in_nodes_dict[self._idx])
        input_index_list = []
        # Remove input and ruled_out nodes
        for input_idx in full_input_node_list:
            input_node = self._global_node_list[input_idx]
            if not is_boundary_node(input_node, self._global_input_names):
                input_index_list.append(input_idx)

        # Generate new states
        states_list, aligned_node_list = DPStage.align_states(
            input_index_list, self._global_stage_dict, self._global_node_list
        )
        target_node_idx, target_major_axis, target_multiplier, target_states = states_list[0]
        aligned_shape = target_states.shape
        self._full_states = np.zeros(aligned_shape).astype("float32").flatten()
        self._full_states_idx = list(aligned_node_list)
        num_states = self._full_states.shape[0]
        node_time_counted = [item[0] in self._global_counted_nodes_set for item in states_list]
        target_states = target_states.flatten()
        src_states_list = [states_list[i][3].flatten() for i in range(1, len(states_list))]

        for i in range(num_states):
            # Decode the target (leftmost) input's schedule index from the
            # flat state index via its axis position and multiplier.
            target_sch_idx = (
                i % (target_multiplier * aligned_shape[target_major_axis])
            ) // target_multiplier
            if node_time_counted[0]:
                new_state = 0
            else:
                new_state = target_states[i]

            # Every other input contributes its layout-transform cost towards
            # the target input, plus its own state cost if not counted yet.
            for j in range(1, len(states_list)):
                src_states = src_states_list[j - 1]
                src_node_idx, src_major_axis, src_multiplier, _ = states_list[j]
                src_sch_idx = (
                    i % (src_multiplier * aligned_shape[src_major_axis])
                ) // src_multiplier
                layout_transform_time = self._global_layout_transform_interlayer_cost[
                    (src_node_idx, target_node_idx)
                ][src_sch_idx][target_sch_idx]

                if node_time_counted[j]:
                    new_state += layout_transform_time
                else:
                    new_state += layout_transform_time + src_states[i]

            self._full_states[i] = new_state

        for i, node_counted in enumerate(node_time_counted):
            if not node_counted:
                self._global_counted_nodes_set.add(states_list[i][0])
        self._full_states = self._full_states.reshape(aligned_shape)

        # Remove dependency to reduce states
        reduced_states = np.array(self._full_states)
        reduced_states_transpose = [states_list[0][1]]
        reduced_states_dep_list = []
        self._dep = []
        # Move the target input's axis to the front; remaining axes are deps.
        for i in range(len(reduced_states.shape)):
            if i != states_list[0][1]:
                reduced_states_transpose.append(i)
                reduced_states_dep_list.append(aligned_node_list[i])
        reduced_states = np.transpose(reduced_states, reduced_states_transpose)
        shift = 0
        # A dep consumed by only this node can be minimized out immediately;
        # `shift` accounts for axes already collapsed by np.amin.
        for i, dep in enumerate(reduced_states_dep_list):
            if dep not in self._global_dep_dict or len(self._global_dep_dict[dep]) == 1:
                self._global_dep_dict.pop(dep, None)
                reduced_states = np.amin(reduced_states, axis=i + 1 - shift)
                shift += 1
            else:
                self._dep.append(dep)
        self._states = reduced_states

        # Update dependency
        for dep in self._dep:
            self._global_dep_dict[dep].remove(self._idx)
            for child in self._global_out_nodes_dict[self._idx]:
                self._global_dep_dict[dep].add(child)
        if len(self._global_out_nodes_dict[self._idx]) > 1:
            self._global_dep_dict[self._idx] = set()
            for child in self._global_out_nodes_dict[self._idx]:
                self._global_dep_dict[self._idx].add(child)

    @property
    def dep(self):
        """Get dependency list."""
        return self._dep

    @property
    def states(self):
        """Get states."""
        return self._states

    @property
    def full_states(self):
        """Get complete states."""
        return self._full_states

    @property
    def full_states_idx(self):
        """Get node index of complete states."""
        return self._full_states_idx

    @staticmethod
    def align_states(input_index_list, stage_dict, node_list):
        """Align all input node states shapes to be the same and transpose/reshape properly.

        This is used in creating multi_input operator states.

        Parameters
        ----------
        input_index_list : list of int
            List of input node index.

        stage_dict : dict of int to Stage
            Global dictionary of node index to stage.

        node_list : list of dict
            List of all nodes for current graph.

        Returns
        -------
        states_list : list of tuple
            List of aligned states.

        aligned_node_list : list in int
            List of node index for aligned states.
        """
        # The aligned axis order is: all inputs first, then every distinct dep.
        aligned_node_list = list(input_index_list)
        states_list = []
        for input_idx in input_index_list:
            input_node_stage = stage_dict[input_idx]
            for dep_idx in input_node_stage.dep:
                if dep_idx not in aligned_node_list:
                    aligned_node_list.append(dep_idx)
        aligned_shape = []
        for idx in aligned_node_list:
            aligned_shape.append(len(node_list[idx]["record_candidates"]))
        for input_idx in input_index_list:
            input_node_stage = stage_dict[input_idx]
            input_node_shape_idx_list = [input_idx] + input_node_stage.dep
            transpose_idx_list = []
            reshape_list = []
            major_axis = -1
            # Axes this input actually owns keep their size; the rest become
            # size-1 so broadcasting can expand them to the aligned shape.
            for i, idx in enumerate(aligned_node_list):
                if input_idx == idx:
                    major_axis = i
                if idx in input_node_shape_idx_list:
                    transpose_idx_list.append(idx)
                    reshape_list.append(aligned_shape[i])
                else:
                    reshape_list.append(1)
            transpose_list = [input_node_shape_idx_list.index(idx) for idx in transpose_idx_list]
            input_node_states = np.transpose(input_node_stage.states, tuple(transpose_list))
            input_node_states = np.reshape(input_node_states, tuple(reshape_list))
            input_node_states = np.broadcast_to(input_node_states, aligned_shape)
            # multiplier = product of axis sizes to the right of this input's
            # major axis; used to decode its schedule index from a flat index.
            multiplier = 1
            for i in range(major_axis + 1, len(aligned_shape)):
                multiplier *= aligned_shape[i]
            states_list.append((input_idx, major_axis, multiplier, input_node_states))
        return states_list, aligned_node_list
| 16,019 | 42.064516 | 132 | py |
tvm | tvm-main/python/tvm/autotvm/graph_tuner/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Autotvm graph tuner API."""
from __future__ import absolute_import as _abs
from . import _base
from . import base_graph_tuner
from .base_graph_tuner import BaseGraphTuner
from .dynamic_programming_tuner import DPTuner
from .pbqp_tuner import PBQPTuner
| 1,042 | 39.115385 | 62 | py |
tvm | tvm-main/python/tvm/autotvm/graph_tuner/pbqp_tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, unnecessary-list-index-lookup
"""Partitioned Boolean Quadratic Programming Tuner"""
from ._base import INVALID_LAYOUT_TIME
from .base_graph_tuner import BaseGraphTuner
from .utils import is_boundary_node, has_multiple_inputs
class PBQPTuner(BaseGraphTuner):
    """An approximation method to deal with intractably
    large size of graph tuning problem.

    This graph coloring algorithm mainly comes from:

    Lang Hames and Bernhard Scholz.
    Nearly optimal register allocation with pbqp.JMLC 2006.
    LNCS, vol.4228,pp. 346-361, 2016
    """

    def __init__(self, *args, **kwargs):
        """Create a partitioned boolean quadratic programming tuner."""
        super(PBQPTuner, self).__init__(*args, **kwargs)

        # Remove input and ruled_out nodes
        input_names = self._input_shapes.keys()
        for node_idx in self._out_nodes_dict:
            node = self._node_list[node_idx]
            if is_boundary_node(node, input_names):
                for out_node_idx in self._out_nodes_dict[node_idx]:
                    self._in_nodes_dict[out_node_idx].remove(node_idx)

        # Undirected adjacency: neighbors are both producers and consumers.
        self._adj_dict = {}
        for node_idx in self._in_nodes_dict:
            self._adj_dict[node_idx] = list(self._in_nodes_dict[node_idx]) + list(
                self._out_nodes_dict[node_idx]
            )

        # Per-node cost vector: one measured cost per record candidate.
        self._record_cost_dict = {}
        for key in self._in_nodes_dict:
            self._record_cost_dict[key] = []
            for record in self._node_list[key]["record_candidates"]:
                self._record_cost_dict[key].append(record[1].costs[0])

        self._max_degree = -1
        self._node_degree_dict = {}
        for node_idx in self._in_nodes_dict:
            node_degree = self._get_degree(node_idx)
            self._node_degree_dict[node_idx] = node_degree
            self._max_degree = max(self._max_degree, node_degree)

        # Bucket i holds the nodes currently of degree i; the forward pass
        # always reduces from the lowest non-empty bucket.
        self._stack = []
        self._buckets = [[] for _ in range(self._max_degree + 2)]
        for node_idx in sorted(self._in_nodes_dict):
            node_degree = self._get_degree(node_idx)
            self._buckets[node_degree].append(node_idx)

        # Cleared as soon as an RN (heuristic) reduction is applied.
        self._is_optimal = True

    def _get_degree(self, node_idx):
        """Get node degree."""
        return len(self._adj_dict[node_idx])

    def _reorder_adj_nodes(self, node_idx):
        """Update buckets list with current adjacency list."""
        for adj_node in self._adj_dict[node_idx]:
            current_degree = self._get_degree(adj_node)
            prev_degree = self._node_degree_dict[adj_node]
            if prev_degree != current_degree:
                self._buckets[prev_degree].remove(adj_node)
                self._buckets[current_degree].insert(0, adj_node)
                self._node_degree_dict[adj_node] = current_degree

    def _remove_node(self, node_idx):
        """Remove node from graph. Update adjacency list accordingly."""
        node_degree = self._get_degree(node_idx)
        self._buckets[node_degree].remove(node_idx)
        for adj_node in self._adj_dict[node_idx]:
            self._adj_dict[adj_node].remove(node_idx)

    def _insert_edge(self, node_x, node_y, adj_cost_matrix):
        """Insert an edge between two nodes.

        The reverse direction gets the transposed cost matrix.
        """
        self._layout_transform_interlayer_cost[(node_x, node_y)] = adj_cost_matrix
        self._layout_transform_interlayer_cost[(node_y, node_x)] = []
        for i in range(len(adj_cost_matrix[0])):
            self._layout_transform_interlayer_cost[(node_y, node_x)].append([])
            for cost_vec in adj_cost_matrix:
                self._layout_transform_interlayer_cost[(node_y, node_x)][i].append(cost_vec[i])
        self._adj_dict[node_x].append(node_y)
        self._adj_dict[node_y].append(node_x)

    def _backward_insert_node(self, node_idx):
        """Reinsert node in backward pass."""
        for adj_node in self._adj_dict[node_idx]:
            self._adj_dict[adj_node].append(node_idx)

    def _RI_reduction(self, node_idx):
        """Reduce nodes with degree 1.

        The node's cost is folded into its single neighbor's cost vector.
        """
        adj_node = self._adj_dict[node_idx][0]
        ltf_matrix = self._layout_transform_interlayer_cost[(adj_node, node_idx)]
        for i, cost_vec in enumerate(ltf_matrix):
            min_cost = INVALID_LAYOUT_TIME
            for j, cost in enumerate(cost_vec):
                min_cost = min(min_cost, cost + self._record_cost_dict[node_idx][j])
            self._record_cost_dict[adj_node][i] += min_cost
        self._remove_node(node_idx)
        self._reorder_adj_nodes(node_idx)
        self._stack.append(node_idx)

    def _RII_reduction(self, node_idx):
        """Reduce nodes with degree 2.

        The node's cost is folded into the cost matrix of the edge between
        its two neighbors (inserting that edge if it does not exist yet).
        """
        adj_node_x, adj_node_y = self._adj_dict[node_idx]
        ltf_matrix_x = self._layout_transform_interlayer_cost[(adj_node_x, node_idx)]
        ltf_matrix_y = self._layout_transform_interlayer_cost[(adj_node_y, node_idx)]
        delta_matrix = [[] for _ in range(len(ltf_matrix_x))]
        for i, cost_vec_x in enumerate(ltf_matrix_x):
            for j, cost_vec_y in enumerate(ltf_matrix_y):
                min_cost = INVALID_LAYOUT_TIME
                for k in range(len(self._record_cost_dict[node_idx])):
                    min_cost = min(
                        min_cost,
                        cost_vec_x[k] + cost_vec_y[k] + self._record_cost_dict[node_idx][k],
                    )
                delta_matrix[i].append(min_cost)
        if adj_node_x == adj_node_y:
            # Both edges go to the same neighbor: fold into its cost vector.
            for i, delta_row in enumerate(delta_matrix):
                self._record_cost_dict[adj_node_x][i] += delta_row[i]
        elif adj_node_x in self._adj_dict[adj_node_y]:
            # Edge already exists: accumulate into both directions.
            for i, _ in enumerate(delta_matrix):
                for j, delta in enumerate(delta_matrix[i]):
                    self._layout_transform_interlayer_cost[(adj_node_x, adj_node_y)][i][j] += delta
                    self._layout_transform_interlayer_cost[(adj_node_y, adj_node_x)][j][i] += delta
        else:
            self._insert_edge(adj_node_x, adj_node_y, delta_matrix)
        self._remove_node(node_idx)
        self._reorder_adj_nodes(node_idx)
        self._stack.append(node_idx)

    def _RN_reduction(self, node_idx):
        """Reduce nodes with degree greater than 2.

        This is a heuristic (greedy local) choice, so the overall solution is
        no longer guaranteed to be optimal afterwards.
        """
        min_cost = INVALID_LAYOUT_TIME
        record_idx = -1
        for i, record_cost in enumerate(self._record_cost_dict[node_idx]):
            current_cost = record_cost
            for adj_node in self._adj_dict[node_idx]:
                ltf_matrix = self._layout_transform_interlayer_cost[(node_idx, adj_node)]
                adj_record_cost = list(self._record_cost_dict[adj_node])
                for j, ltf_cost in enumerate(ltf_matrix[i]):
                    adj_record_cost[j] += ltf_cost
                current_cost += min(adj_record_cost)
            if current_cost < min_cost:
                min_cost = current_cost
                record_idx = i
        if record_idx < 0:
            raise RuntimeError(
                f"Can't find a solution for node {node_idx} when applying RN reduction"
            )
        self._optimal_record_dict[node_idx] = record_idx
        self._is_optimal = False
        for adj_node in self._adj_dict[node_idx]:
            ltf_matrix = self._layout_transform_interlayer_cost[(node_idx, adj_node)]
            for i, ltf_cost in enumerate(ltf_matrix[record_idx]):
                self._record_cost_dict[adj_node][i] += ltf_cost
        self._remove_node(node_idx)
        self._reorder_adj_nodes(node_idx)
        self._stack.append(node_idx)

    def _forward(self):
        """Forward pass in PBQP to reduce nodes."""
        while True:
            if self._buckets[1]:
                node_idx = self._buckets[1][0]
                self._RI_reduction(node_idx)
            elif self._max_degree >= 2 and self._buckets[2]:
                node_idx = self._buckets[2][0]
                self._RII_reduction(node_idx)
            elif self._max_degree >= 3:
                max_degree_node = -1
                for i in range(self._max_degree, 2, -1):
                    if self._buckets[i]:
                        max_degree_node = self._buckets[i][0]
                        self._RN_reduction(max_degree_node)
                        break
                if max_degree_node < 0:
                    break
            else:
                break

    def _backward(self):
        """Backward pass in PBQP to generate optimal solution."""
        # Solve nodes left in the forward graph
        for node_idx in self._buckets[0]:
            record_costs = self._record_cost_dict[node_idx]
            min_cost = min(record_costs)
            self._optimal_record_dict[node_idx] = record_costs.index(min_cost)

        # Solve nodes with one or two degrees by popping the reduction stack
        # in reverse order; RN-reduced nodes were already decided forward.
        for node_idx in reversed(self._stack):
            self._backward_insert_node(node_idx)
            if node_idx not in self._optimal_record_dict:
                record_costs = list(self._record_cost_dict[node_idx])
                for adj_node in self._adj_dict[node_idx]:
                    adj_optimal_idx = self._optimal_record_dict[adj_node]
                    for i, _ in enumerate(record_costs):
                        record_costs[i] += self._layout_transform_interlayer_cost[
                            (node_idx, adj_node)
                        ][i][adj_optimal_idx]
                min_cost = min(record_costs)
                self._optimal_record_dict[node_idx] = record_costs.index(min_cost)

    def run(self, **kwargs):
        """Run partitioned boolean quadratic programming tuner."""
        self._logger.info("Start to run PBQP algorithm...")
        # Define virtual record lists and layout transformation matrices
        # for multi-input nodes.
        input_names = self._input_shapes.keys()
        temp = {}
        for key, val in self._in_nodes_dict.items():
            target_input_idx = -1
            target_input_pos = -1
            if has_multiple_inputs(self._node_list, key, input_names, self._opt_out_op):
                for i, item in enumerate(val):
                    node = self._node_list[item]
                    if not is_boundary_node(node, input_names):
                        target_input_idx = item
                        target_input_pos = i
                        break
                # Skip boundary operator
                if target_input_idx < 0:
                    continue
                # Identity matrix between the multi-input node and its first
                # real input: both must pick the same record candidate.
                temp[(target_input_idx, key)] = []
                record_candidates = self._node_list[target_input_idx]["record_candidates"]
                for j in range(len(record_candidates)):
                    temp[(target_input_idx, key)].append([])
                    for k in range(len(record_candidates)):
                        temp[(target_input_idx, key)][j].append(
                            0 if j == k else INVALID_LAYOUT_TIME
                        )
                for j in range(target_input_pos + 1, len(val)):
                    input_idx = val[j]
                    input_node = self._node_list[input_idx]
                    if is_boundary_node(input_node, input_names):
                        continue
                    temp[(input_idx, key)] = self._layout_transform_interlayer_cost[
                        (input_idx, target_input_idx)
                    ]
        self._layout_transform_interlayer_cost.update(temp)

        # Create reverse layout transformation matrices
        temp = {}
        for idx_pair, ltf_matrix in self._layout_transform_interlayer_cost.items():
            reverse_key = (idx_pair[1], idx_pair[0])
            reverse_matrix = [[] for _ in range(len(ltf_matrix[0]))]
            for i, _ in enumerate(ltf_matrix):
                for j, ltf in enumerate(ltf_matrix[i]):
                    reverse_matrix[j].append(ltf)
            temp[reverse_key] = reverse_matrix
        self._layout_transform_interlayer_cost.update(temp)

        self._forward()
        self._backward()
        is_optimal = "optimal" if self._is_optimal else "sub-optimal"
        msg = f"Finished PBQPExecutor run. Got {is_optimal} solution."
        self._logger.info(msg)
| 12,926 | 43.730104 | 99 | py |
tvm | tvm-main/python/tvm/autotvm/graph_tuner/utils/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=eval-used,invalid-name,too-many-arguments
"""Utility functions"""
import tvm
from tvm import relay
from tvm.relay import transform
def has_multiple_inputs(node_list, node_idx, input_names, opt_out_op):
    """Check whether a node has multiple input nodes
    except variable nodes.

    Parameters
    ----------
    node_list : list of dict of str to object
        List of all nodes in a graph.

    node_idx : int
        Node index to be checked.

    input_names : list of str
        List of input names of graph.

    opt_out_op : list of str
        Operator names that are looked through: their own inputs are
        inspected instead of counting the operator itself.

    Returns
    -------
    out : bool
        Whether the specified node has multiple input nodes
    """
    count = 0
    entry = node_list[node_idx]
    for input_ref in entry["inputs"]:
        parent = node_list[input_ref[0]]
        if parent["op"] is not None and parent["op"].name in opt_out_op:
            # Look through opted-out operators. NOTE(review): only the result
            # for the *last* grandparent survives the loop — preserved from the
            # original implementation.
            propagate = False
            for grand_ref in parent["inputs"]:
                propagate = has_multiple_inputs(node_list, grand_ref[0], input_names, opt_out_op)
            if propagate:
                count += 1
        elif parent["op"] is not None or ("name" in parent and parent["name"] in input_names):
            # Real operator nodes and graph inputs count; parameters do not.
            count += 1
    return count > 1
def is_boundary_node(node_entry, input_names):
    """Whether a node is a boundary node.

    Currently input node and nodes in LAYOUT_FIXED_OP are
    counted as boundary.

    Parameters
    ----------
    node_entry : dict
        Node entry.

    input_names : list of str
        List of input names of graph.

    Returns
    -------
    out : bool
        whether node is a boundary node.
    """
    # Operators dependent on original layouts. Resolving the op objects via
    # relay.op.get is pure lookup of global registry entries, so do it once
    # and cache the list on the function object instead of rebuilding it on
    # every call (this function is invoked per node, repeatedly).
    fixed_ops = getattr(is_boundary_node, "_layout_fixed_ops", None)
    if fixed_ops is None:
        fixed_ops = [
            relay.op.get(name)
            for name in (
                "nn.batch_flatten",
                "transpose",
                "reshape",
                "vision.multibox_prior",
                "vision.multibox_transform_loc",
                "where",
                "vision.non_max_suppression",
                "strided_slice",
            )
        ]
        is_boundary_node._layout_fixed_ops = fixed_ops
    out = node_entry["op"] in fixed_ops or (
        "name" in node_entry and node_entry["name"] in input_names
    )
    return out
def is_skipped_node(node_entry):
    """Whether a node is not counted.

    Parameters
    ----------
    node_entry : dict
        Node entry.

    Returns
    -------
    out : bool
        whether node is skipped.
    """
    # Tuple nodes carry no schedulable computation, so the tuner ignores them.
    skipped = isinstance(node_entry["node"], relay.Tuple)
    return skipped
def bind_inputs(expr, input_shapes=None, input_dtypes="float32"):
    """Bind input variables of a relay function expression
    to new shapes and/or dtypes.

    Parameters
    ----------
    expr : tvm.relay.Expr.Function
        Input relay function expression.

    input_shapes : dict of str to tuple of int, optional
        Input shapes.

    input_dtypes : str or dict of str to str, optional
        Input dtypes.

    Returns
    -------
    out : tvm.relay.Expr.Function
        Bind relay function expression.
    """
    if input_shapes is None:
        return expr
    # A single dtype string applies uniformly to every input.
    if isinstance(input_dtypes, str):
        input_dtypes = {name: input_dtypes for name in input_shapes}
    fresh_vars = {
        name: relay.var(name, shape=input_shapes[name], dtype=input_dtypes[name])
        for name in input_shapes
    }
    rebind_map = {
        param: fresh_vars[param.name_hint]
        for param in expr.params
        if param.name_hint in fresh_vars
    }
    bound_expr = relay.expr.bind(expr, rebind_map)
    mod = transform.InferType()(tvm.IRModule.from_expr(bound_expr))
    main_func = mod["main"]
    return main_func if isinstance(bound_expr, relay.Function) else main_func.body
| 4,682 | 28.45283 | 97 | py |
tvm | tvm-main/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-locals,too-many-statements,too-many-branches,protected-access
"""API for graph traversing."""
import threading
import re
import tvm
from tvm import relay, autotvm
from tvm.relay import transform
from tvm.relay.expr import Call, TupleGetItem, Var, Constant, Tuple
from tvm.relay.function import Function
from tvm.relay.ty import TupleType, TensorType
from tvm.autotvm.task import TaskExtractEnv
from .utils import has_multiple_inputs, is_boundary_node, is_skipped_node
from .._base import OPT_OUT_OP
def expr2graph(expr, target_ops, node_dict, node_list, tvm_target):
    """Convert relay expr to graph data structure
    and fetch workloads of target operators.

    Parameters
    ----------
    expr : tvm.relay.Expr.Function
        Input relay function expression.

    target_ops: List of tvm.ir.Op
        List of target relay ops

    node_dict : dictionary from tvm.relay.Expr to int
        Dictionary to record node index

    node_list : list of dictionary
        List of nodes which contains all expr in the input relay function.
        Each node will be stored as a dictionary in the format of
        {"op": str, "node": tvm.relay.expr, "inputs": [int], "types": [tvm.relay.Type],
        "name": str, "workloads": [tuple], "topi_op": [function]}

    tvm_target : tvm.target
        The TVM target object.
    """
    # TODO(@kevinthesun, @icemelon9): Currently graph tuning pass relies on the fact
    # that # autotvm tasks == # ops. But this won't be true after having relay op
    # strategy. We need to find a solution to fix this.
    env = TaskExtractEnv.get(allow_duplicate=True)
    env.reset(target_ops)
    # pylint: disable=not-context-manager
    with env:
        # The traversal below records one autotvm task per target-op call
        # site into env.task_collection, in the same order node_list is built.
        _expr2graph_impl(expr, target_ops, node_dict, node_list, tvm_target)
        # Pair the i-th target-op node with the i-th collected task (relies
        # on the one-task-per-op assumption noted in the TODO above).
        task_pos = 0
        for node_entry in node_list:
            if node_entry["op"] in target_ops:
                task_name, args = env.task_collection[task_pos]
                task = autotvm.task.create(task_name, args, target=tvm_target)
                node_entry["workloads"] = [task.workload]
                node_entry["topi_op"] = [task_name]
                task_pos += 1
def _infer_type(node):
    """Run Relay type inference over *node* and return the typed expression."""
    typed_mod = transform.InferType()(tvm.IRModule.from_expr(node))
    main_func = typed_mod["main"]
    if isinstance(node, relay.Function):
        return main_func
    return main_func.body
def _replace_device_with_tracing(target):
    """This is to replace -device=XXX with -device=tracing in the tvm_target string.

    It is a stand-along function for testability.
    We need to have device=tracing in order to fetch the workloads, it is not used
    for anything beyond that so it is safe to override the device here only.
    """
    target_str = str(target)
    if "-device" not in target_str:
        # No device option present: append one.
        return target_str + " -device=tracing"
    return re.sub("-device=[^\\-$]+", "-device=tracing ", target_str).strip(" ")
def _expr2graph_impl(expr, target_ops, node_dict, node_list, tvm_target):
    """Implementation to convert relay expr to graph data structure.

    Visits ``expr`` in post-order; every visited expression is appended to
    ``node_list`` as a node-entry dict and indexed in ``node_dict``.
    """
    def _traverse_expr(node):
        # Already recorded (post_order_visit may hand us shared sub-exprs).
        if node in node_dict:
            return
        node_index = len(node_list)
        node_entry = {"node": node, "inputs": [], "types": [], "op": None, "name": None}
        if isinstance(node, Call):
            op = node.op
            node_entry["op"] = node.op
            # Inputs of Tuple/TupleGetItem args are flattened into this node.
            for arg in node.args:
                in_node_idx = node_dict[arg]
                if isinstance(arg, (Tuple, TupleGetItem)):
                    node_entry["inputs"] += node_list[in_node_idx]["inputs"]
                else:
                    node_entry["inputs"].append([in_node_idx, 0, 0])
            # Record one entry in "types" per output tensor.
            infer_out = _infer_type(node)
            out_type = infer_out._checked_type_
            if isinstance(out_type, TensorType):
                node_entry["types"].append(out_type)
            elif isinstance(out_type, TupleType):
                for tupe_type in out_type.fields:
                    node_entry["types"].append(tupe_type)
            else:
                raise RuntimeError(
                    f"Unsupported output type {type(out_type)} in operator {op.name}"
                )
            # Utilize tracing target to fetch workload with topo-order.
            # Since we only need workload, dummy target can be used to
            # create task.
            if op in target_ops:
                params = []
                for i, input_idx in enumerate(node_entry["inputs"]):
                    input_node_entry = node_list[input_idx[0]]
                    input_type = input_node_entry["types"][input_idx[1]]
                    if not isinstance(input_node_entry["node"], (Var, Constant, Call)):
                        raise RuntimeError(
                            "Graph tuner can only tune target "
                            "operators with input node of type "
                            "relay.expr.Var/Constant/Call. Now "
                            "find a target op %s with input type %s"
                            % (op, str(type(input_node_entry["node"])))
                        )
                    # Rebuild the call on free vars so it can be compiled in
                    # isolation from the rest of the graph.
                    free_var = relay.Var(f"var_{i}", input_type)
                    params.append(free_var)
                call = relay.Call(node.op, params, node.attrs)
                mod = tvm.IRModule.from_expr(relay.Function(params, call))
                relay.backend.te_compiler.get().clear()
                tracing_target = _replace_device_with_tracing(tvm_target)
                # Build in a separate thread so the tracing build cannot
                # pollute thread-local compilation state of the caller.
                build_thread = threading.Thread(target=relay.build, args=(mod, tracing_target))
                build_thread.start()
                build_thread.join()
        elif isinstance(node, Var):
            node_entry["name"] = node.name_hint
            node_entry["types"] = [node.type_annotation]
        elif isinstance(node, Function):
            # Ignore root node since it equals to input function expression
            if node != expr:
                _expr2graph_impl(node, target_ops, node_dict, node_list, tvm_target)
            return
        elif isinstance(node, TupleGetItem):
            in_node_idx = node_dict[node.tuple_value]
            node_entry["inputs"].append([in_node_idx, node.index, 0])
        elif isinstance(node, Tuple):
            for tuple_item in node:
                in_node_idx = node_dict[tuple_item]
                if isinstance(tuple_item, TupleGetItem):
                    node_entry["inputs"] += node_list[in_node_idx]["inputs"]
                elif isinstance(tuple_item, Tuple):
                    raise RuntimeError("Graph tuner doesn't support nested tuple.")
                else:
                    node_entry["inputs"].append([in_node_idx, 0, 0])
        elif isinstance(node, Constant):
            node_entry["name"] = "Constant_" + str(node_index)
            node_entry["types"] = [node.checked_type]
        elif isinstance(node, tvm.ir.Op):
            # Bare operator references are not graph nodes.
            return
        else:
            raise RuntimeError(f"Not supported relay node type in graph tuning: {type(node)}")
        node_dict[node] = node_index
        node_list.append(node_entry)
    relay.analysis.post_order_visit(expr, _traverse_expr)
def get_direct_ancestor(node_list, visited_dict, target_ops, node_idx, input_names):
    """Given a node_list in relay function and a node index, return the
    closest ancestor which has op_name as operator name or is multi_input operator.

    If node has multiple inputs, multiple ancestor nodes will be returned.

    Parameters
    ----------
    node_list : list of dict of str to object
        List of all nodes in a graph.

    visited_dict : dict of int to int
        Nodes and corresponding ancestors which have been visited.

    target_ops: List of str
        List of target relay base op name

    node_idx : int
        Input node index.

    input_names : list of str
        Names of graph input nodes.

    Returns
    -------
    out : list of int
        List of ancestor node index.
    """
    # Memoized result from an earlier traversal.
    if node_idx in visited_dict:
        return visited_dict[node_idx]
    current = node_list[node_idx]
    if is_boundary_node(current, input_names):
        return [node_idx]

    ancestors = []
    for input_ref in current["inputs"]:
        parent_idx = input_ref[0]
        parent = node_list[parent_idx]
        multi_input = has_multiple_inputs(node_list, parent_idx, input_names, OPT_OUT_OP)
        if parent["op"] in target_ops or multi_input:
            # Direct hit: the parent itself is an anchor node.
            ancestors.append(parent_idx)
        else:
            # Otherwise recurse past the parent, de-duplicating results.
            for anc_idx in get_direct_ancestor(
                node_list, visited_dict, target_ops, parent_idx, input_names
            ):
                if anc_idx not in ancestors:
                    ancestors.append(anc_idx)
    visited_dict[node_idx] = ancestors
    return ancestors
def get_in_nodes(node_list, target_ops, input_names):
    """Create a dictionary mapping from op_name nodes or multi-input
    nodes to closest input ancestors.

    Parameters
    ----------
    node_list : list of dict of str to object
        List of all nodes in a graph.

    target_ops: List of str
        List of target relay op

    input_names : list of str
        Names of graph input nodes.

    Returns
    -------
    out : dict of int to list of int
        Dictionary maps node index to closest input ancestors.
    """
    visited_dict = {}
    in_node_dict = {}
    for i, node in enumerate(node_list):
        if is_boundary_node(node, input_names) or is_skipped_node(node):
            continue
        get_direct_ancestor(node_list, visited_dict, target_ops, i, input_names)
    # Keep only target-op nodes and multi-input nodes.
    for key, val in visited_dict.items():
        node = node_list[key]
        is_multiple_inputs = has_multiple_inputs(node_list, key, input_names, OPT_OUT_OP)
        if node["op"] in target_ops or is_multiple_inputs:
            in_node_dict[key] = val
    # Reduce boundary nodes
    out_node_dict = get_out_nodes(in_node_dict)
    has_reduced_node = True
    while has_reduced_node:
        boundary_nodes = []
        for key, val in in_node_dict.items():
            node = node_list[key]
            is_boundary = True
            # Target ops can't be boundary nodes
            if node["op"] not in target_ops:
                # Iterate over a snapshot of `val`: boundary inputs are removed
                # from `val` inside this loop, and removing from a list while
                # iterating it directly skips the element after each removal,
                # which could misclassify this node as a boundary node.
                for input_idx in list(val):
                    in_node = node_list[input_idx]
                    if not is_boundary_node(in_node, input_names) and input_idx in in_node_dict:
                        is_boundary = False
                    else:
                        val.remove(input_idx)
                if is_boundary:
                    boundary_nodes.append(key)
        if boundary_nodes:
            for idx in boundary_nodes:
                if idx in in_node_dict:
                    del in_node_dict[idx]
        else:
            has_reduced_node = False
    # Remove empty nodes to ignore pre-computed sub-graph
    has_empty_node = True
    while has_empty_node:
        empty_nodes = []
        for key, val in in_node_dict.items():
            if not val:
                empty_nodes.append(key)
        if empty_nodes:
            has_empty_node = True
            for node in empty_nodes:
                del in_node_dict[node]
                # NOTE(review): out_node_dict was computed before the boundary
                # reduction above; assumes each out_node is still present in
                # in_node_dict here — confirm for graphs with chained deletions.
                if node in out_node_dict:
                    for out_node in out_node_dict[node]:
                        in_node_dict[out_node].remove(node)
        else:
            has_empty_node = False
    return in_node_dict
def get_out_nodes(in_node_dict):
    """Create output dictionary from input dictionary.

    Parameters
    ----------
    in_node_dict : dict of int to list of int
        Dictionary maps node index to closest input ancestors.
        It can be created with get_in_nodes.

    Returns
    -------
    out : dict of int to list of int
        Dictionary maps node index to closest output nodes.
    """
    # Every key gets an entry up front so sink nodes map to an empty list.
    successors = {node_idx: [] for node_idx in in_node_dict}
    for consumer, producers in in_node_dict.items():
        for producer in producers:
            successors.setdefault(producer, []).append(consumer)
    return successors
| 12,994 | 37.791045 | 100 | py |
tvm | tvm-main/python/tvm/autotvm/graph_tuner/utils/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Graph tuner utility functions"""
from __future__ import absolute_import
from . import traverse_graph
from . import utils
from .traverse_graph import expr2graph, get_direct_ancestor, get_in_nodes, get_out_nodes
from .utils import has_multiple_inputs, is_boundary_node, bind_inputs
| 1,104 | 41.5 | 88 | py |
tvm | tvm-main/python/tvm/autotvm/measure/executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Abstraction for asynchronous job execution """
class Executor(object):
    """
    Abstract interface for submitting jobs to run asynchronously.

    Concrete executors accept (function, arguments) pairs via :meth:`submit`
    and hand back a :class:`Future` that can be used to poll for or block on
    the task's result.
    """

    # Upper bound in seconds applied to jobs that may hang.
    DEFAULT_TIMEOUT = 120

    def submit(self, func, *args, **kwargs):
        """
        Pass task (function, arguments) to the Executor.

        Parameters
        ----------
        func : callable
            function to be run by a worker

        args : list or tuple, optional
            arguments passed to the function

        kwargs : dict, optional
            The keyword arguments

        Returns
        -------
        future : Future
            Future object wrapping the task which can be used to
            collect the task's result.
        """
        raise NotImplementedError()
class Future(object):
    """
    Base class of the future object.

    The implementations can return object of subclass of this.
    This objects encapsulates the asynchronous execution of task
    submitted to another thread, or another worker for execution.

    Future objects store the state of tasks--can be polled for
    result or a blocking call to retrieve the result can be used.
    """

    def done(self):
        """
        Return True if job was successfully cancelled or finished running.
        """
        raise NotImplementedError()

    def get(self, timeout=None):
        """
        Get the result. This will block until the result is available.

        Parameters
        ----------
        timeout : int or float, optional
            Maximum number of seconds to wait before it timeouts.
            If not specified, it means we block until the result is available.

        Returns
        -------
        result : Any
            The result returned by the submitted function.

        Raises
        ------
        TimeoutError : if the result call timeouts.
        """
        raise NotImplementedError()
class FutureError(RuntimeError):
    """Common base class of every error raised through a Future."""
# pylint:disable=redefined-builtin
class TimeoutError(FutureError):
    """Raised when waiting for a task's result exceeds the allowed time."""
class ExecutionError(FutureError):
    """
    Error raised when a future's execution crashes or fails.
    """
| 3,113 | 29.23301 | 78 | py |
tvm | tvm-main/python/tvm/autotvm/measure/measure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=pointless-string-statement,consider-using-enumerate,invalid-name
"""User facing API for specifying how to measure the generated code"""
import enum
import logging
import multiprocessing
from collections import namedtuple
class MeasureInput(namedtuple("MeasureInput", ["target", "task", "config"])):
    """
    Stores all the necessary inputs for a measurement.

    Parameters
    ----------
    target : tvm.target.Target
        The target device
    task : task.Task
        Task function
    config : ConfigEntity
        Specific configuration to instantiate the task with.
    """
class MeasureResult(namedtuple("MeasureResult", ["costs", "error_no", "all_cost", "timestamp"])):
    """
    Immutable record of everything produced by one measurement.

    Parameters
    ----------
    costs: Array of float or Array of Exception
        Measured running times when no error occurred; otherwise the
        exception objects describing the failure.
    error_no: int
        Error category; see :class:`MeasureErrorNo`.
    all_cost: float
        Total cost of this measure, including rpc, compilation, test runs.
    timestamp: float
        Absolute time stamp at which the measurement finished.
    """

    def __repr__(self):
        # Render error_no through the MeasureErrorNo enum when possible so
        # the repr shows a symbolic name rather than a bare integer.
        if isinstance(self.error_no, (MeasureErrorNo, int)):
            error_no_str = str(MeasureErrorNo(self.error_no))
        else:
            error_no_str = str(self.error_no)
        return (
            f"{self.__class__.__name__}(costs={self.costs!r}, error_no={error_no_str}, "
            f"all_cost={self.all_cost}, timestamp={self.timestamp!r})"
        )
class MeasureErrorNo(enum.IntEnum):
    """Integer error codes stored in ``MeasureResult.error_no``."""

    NO_ERROR = 0  # no error
    INSTANTIATION_ERROR = 1  # actively detected error in instantiating a template with a config
    COMPILE_HOST = 2  # error when compiling code on host (e.g. tvm.build)
    COMPILE_DEVICE = 3  # error when compiling code on device (e.g. OpenCL JIT on the device)
    RUNTIME_DEVICE = 4  # error when run program on device
    WRONG_ANSWER = 5  # answer is wrong when compared to a golden output
    BUILD_TIMEOUT = 6  # timeout during compilation
    RUN_TIMEOUT = 7  # timeout during run
    UNKNOWN_ERROR = 8  # unknown error
class Builder(object):
    """Builder that builds programs in tuning

    Parameters
    ----------
    timeout: float, optional
        The timeout of a build task
    n_parallel: int, optional
        The number of tasks submitted in parallel.
        By default it will use all cpu cores
    build_kwargs: dict, optional
        Keyword args given to the build function. These take precedence over
        runner-supplied kwargs of the same name (a warning is logged when
        an override happens).
    """

    def __init__(self, timeout=10, n_parallel=None, build_kwargs=None):
        self.timeout = timeout
        self.n_parallel = n_parallel or multiprocessing.cpu_count()
        # kwargs supplied directly by the user; they win over runner-supplied ones
        self.user_build_kwargs = build_kwargs if build_kwargs is not None else {}
        self.runner_build_kwargs = None
        self.task = None

    def set_task(self, task, build_kwargs=None):
        """
        Initialize for a new tuning task

        Parameters
        ----------
        task: Task
            The tuning task
        build_kwargs: dict, optional
            Additional kwargs for the build function, typically supplied by
            the runner (e.g. device limits). User-supplied kwargs from
            ``__init__`` override these.
        """
        self.task = task
        self.build_kwargs = dict(build_kwargs.items()) if build_kwargs is not None else {}

        overlap = sorted(k for k in self.build_kwargs if k in self.user_build_kwargs)
        if overlap:
            # BUGFIX: this previously called `logging.warn`, but `logging` was
            # never imported in this module (NameError on this path) and
            # `warn` is a deprecated alias of `warning`.
            logging.warning(
                "Overriding these runner-supplied kwargs with user-supplied:\n%s",
                "\n".join(
                    f" * {k}: from {build_kwargs[k]!r} to {self.user_build_kwargs[k]!r}"
                    for k in overlap
                ),
            )
        for k, v in self.user_build_kwargs.items():
            self.build_kwargs[k] = v

    def build(self, measure_inputs):
        """Build programs

        Parameters
        ----------
        measure_inputs: List of MeasureInput
            The measure input

        Returns
        -------
        build_results: List of BuildResult
            The build result.
        """
        raise NotImplementedError()
class Runner(object):
    """Runner that runs and measures the time cost of a generated program in tuning

    Parameters
    ----------
    timeout: float, optional
        The timeout of a run task
    n_parallel: int, optional
        The number of tasks submitted in parallel.
        By default it will use all cpu cores
    """

    def __init__(self, timeout=5, n_parallel=None):
        self.timeout = timeout
        # Fall back to one slot per CPU core when n_parallel is not given.
        self.n_parallel = n_parallel or multiprocessing.cpu_count()
        self.task = None

    def set_task(self, task):
        """
        Initialize for a new tuning task

        Parameters
        ----------
        task: Task
            The tuning task
        """
        self.task = task

    def get_build_kwargs(self):
        """
        Get device specific build arguments (e.g. maximum shared memory size)

        Returns
        ----------
        kwargs: dict
            The additional keyword arguments
        """
        raise NotImplementedError()

    def run(self, measure_inputs, build_results):
        """Run and measure built programs

        Parameters
        ----------
        measure_inputs: List of MeasureInput
            The raw measure input
        build_results: List of BuildResults
            The build results

        Returns
        -------
        measure_results: List of MeasureResult
            The final results of measurement
        """
        raise NotImplementedError()
def measure_option(builder, runner):
    """
    Set options for measure. To measure a config, we will build it and run it.
    So we have to set options for these two steps.
    They have their own options on timeout, parallel, etc.

    Parameters
    ----------
    builder: Builder or str
        Specify how to build programs. The string "local" is accepted as
        shorthand for ``LocalBuilder()``.
    runner: Runner or str
        Specify how to run programs. The string "local" is accepted as
        shorthand for ``LocalRunner()``.

    Returns
    -------
    options: dict
        A dict with keys "builder" and "runner", suitable for passing to
        :any:`create_measure_batch`.

    Examples
    --------
    # example setting for using local devices
    >>> measure_option = autotvm.measure_option(
    >>>     builder=autotvm.LocalBuilder(),      # use all local cpu cores for compilation
    >>>     runner=autotvm.LocalRunner(          # measure them sequentially
    >>>         number=10,
    >>>         timeout=5)
    >>> )

    # example setting for using remote devices
    >>> measure_option = autotvm.measure_option(
    >>>     builder=autotvm.LocalBuilder(),  # use all local cpu cores for compilation
    >>>     runner=autotvm.RPCRunner(
    >>>         'rasp3b', 'localhost', 9190, # device key, host and port of the rpc tracker
    >>>         number=4,
    >>>         timeout=4) # timeout of a run on the device. RPC request waiting time is excluded.
    >>> )

    Note
    ----
    To make measurement results accurate, you should pick the correct value for the argument
    `number` and `repeat` in Runner(). Some devices need a certain minimum running time to
    "warm up," such as GPUs that need time to reach a performance power state.
    Using `min_repeat_ms` can dynamically adjust `number`, so it is recommended.
    The typical value for NVIDIA GPU is 150 ms.
    """
    # Imported here to avoid a circular import at module load time.
    # pylint: disable=import-outside-toplevel
    from .measure_methods import LocalBuilder, LocalRunner

    if isinstance(builder, str):
        if builder == "local":
            builder = LocalBuilder()
        else:
            raise ValueError("Invalid builder: " + builder)
    if isinstance(runner, str):
        if runner == "local":
            runner = LocalRunner()
        else:
            raise ValueError("Invalid runner: " + runner)
    opt = {
        "builder": builder,
        "runner": runner,
    }
    return opt
def create_measure_batch(task, option):
    """Get a standard measure_batch function.

    Parameters
    ----------
    task: tvm.autotvm.task.Task
        The tuning task
    option: dict
        The option for measuring generated code.
        You should use the return value of function :any:`measure_option` for this argument.

    Returns
    -------
    measure_batch: callable
        a callback function to measure a batch of configs
    """
    builder, runner = option["builder"], option["runner"]
    attach_objects = runner.set_task(task)

    # Feed device-related information from the runner to the builder
    # (e.g. max shared memory for validity checking).
    builder.set_task(task, runner.get_build_kwargs())

    def measure_batch(measure_inputs):
        # Compile the batch first, then time each compiled artifact.
        return runner.run(measure_inputs, builder.build(measure_inputs))

    measure_batch.n_parallel = builder.n_parallel
    measure_batch.attach_objects = attach_objects
    return measure_batch
| 9,650 | 31.494949 | 97 | py |
tvm | tvm-main/python/tvm/autotvm/measure/measure_methods.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-function-args,too-many-nested-blocks
"""
Functions that run on executor for measurement.
These functions are responsible for building the tvm module, uploading it to
remote devices, recording the running time costs, and checking the correctness of the output.
"""
import contextlib
import logging
import os
import shutil
import tempfile
import threading
import time
import traceback
import typing
import warnings
from collections import namedtuple
from random import getrandbits
import tvm._ffi
import tvm.ir.transform
from tvm import nd
from tvm import rpc as _rpc
from tvm.autotvm.env import AutotvmGlobalScope, reset_global_scope
from tvm.contrib import ndk, stackvm, tar
from tvm.contrib.popen_pool import PopenPoolExecutor
from tvm.driver import build
from tvm.error import TVMError
from tvm.target import Target
from ..env import AutotvmGlobalScope
from ..task.space import InstantiationError
from ..utils import get_const_tuple
from .measure import Builder, MeasureErrorNo, MeasureResult, Runner
logger = logging.getLogger("autotvm")
class BuildResult(namedtuple("BuildResult", ("filename", "arg_info", "error", "time_cost"))):
    """
    Stores all the outputs of a build (the input to the subsequent run step).

    Parameters
    ----------
    filename : str
        The filename of generated library
    arg_info : Tuple
        The shape and dtype information of tvm tensor arguments
    error : Exception
        The error that happened during compilation, or None on success.
    time_cost : float
        The time cost of building
    """
class LocalBuilder(Builder):
    """Run compilation on local machine

    Parameters
    ----------
    timeout: float
        The timeout of a compilation
    n_parallel: int
        The number of tasks run in parallel. "None" will use all cpu cores
    build_kwargs: dict
        If supplied, additional kwargs passed to build_func. Overrides any build_kwargs supplied
        by the Runner.
    build_func: callable or str
        If is 'default', use default build function
        If is 'ndk', use function for android ndk
        If is 'stackvm', use function for stackvm
        If is callable, use it as custom build function, expect lib_format field.
    do_fork: bool
        If False, do not fork when building. Requires n_parallel=1.
    runtime: Optional[Runtime]
        Specify the runtime to generate artifacts for
    """

    def __init__(
        self,
        timeout=10,
        n_parallel=None,
        build_kwargs=None,
        build_func="default",
        do_fork=False,
        runtime=None,
    ):
        super(LocalBuilder, self).__init__(timeout, n_parallel, build_kwargs)
        if isinstance(build_func, str):
            if build_func == "default":
                build_func = tar.tar
            elif build_func == "ndk":
                build_func = ndk.create_shared
            elif build_func == "stackvm":
                build_func = stackvm.build
            else:
                # BUGFIX: the original message lacked a separator ("Invalid
                # build_func<name>"); match the "Invalid builder: " style
                # used by measure_option.
                raise ValueError("Invalid build_func: " + build_func)
        self.build_func = _WrappedBuildFunc(build_func, runtime)
        if not do_fork:
            # Forkless building can only run serially.
            assert n_parallel in (
                None,
                1,
            ), f"if do_fork=False, need n_parallel=None or 1; got {n_parallel}"
        self.executor = PopenPoolExecutor(
            timeout=timeout, initializer=reset_global_scope, initargs=(AutotvmGlobalScope.current,)
        )
        self.tmp_dir = tempfile.mkdtemp()

    def build(self, measure_inputs):
        """Build programs in parallel batches of ``n_parallel``.

        Returns one entry per input, in order: a BuildResult on success, or a
        MeasureResult describing the failure.
        """
        results = []

        # Start every batch from a fresh scratch directory so stale artifacts
        # from a previous batch cannot be picked up.
        shutil.rmtree(self.tmp_dir, ignore_errors=True)
        self.tmp_dir = tempfile.mkdtemp()

        for i in range(0, len(measure_inputs), self.n_parallel):
            futures = []
            for inp in measure_inputs[i : i + self.n_parallel]:
                ret = self.executor.submit(self.build_func, inp, self.tmp_dir, **self.build_kwargs)
                futures.append(ret)

            for future in futures:
                try:
                    res = future.result()
                    if res.error is not None:
                        assert len(res.error) == 2, (
                            f"BuildResult errors should be a 2-tuple, but it is a {len(res.error)}"
                            "-tuple. This should not happen!"
                        )
                        tb, exception = res.error
                        # instantiation error
                        if isinstance(exception, InstantiationError):
                            res = MeasureResult(
                                (tb, exception),
                                MeasureErrorNo.INSTANTIATION_ERROR,
                                res.time_cost,
                                time.time(),
                            )
                        elif "InstantiationError" in str(exception):
                            # An InstantiationError raised inside the worker
                            # subprocess arrives stringified; recover a short
                            # message from the text on a best-effort basis.
                            msg = str(exception)
                            try:
                                msg = msg.split("\n")[-2].split(": ")[1]
                            except Exception:  # pylint: disable=broad-except
                                pass
                            res = MeasureResult(
                                (tb, InstantiationError(msg)),
                                MeasureErrorNo.INSTANTIATION_ERROR,
                                res.time_cost,
                                time.time(),
                            )
                        else:  # tvm error
                            # BUGFIX: previously stored (tb, res.error), which
                            # nested the whole (tb, exception) tuple; store the
                            # exception itself so every branch yields costs of
                            # the shape (traceback, exception).
                            res = MeasureResult(
                                (tb, exception),
                                MeasureErrorNo.COMPILE_HOST,
                                res.time_cost,
                                time.time(),
                            )
                except TimeoutError as ex:
                    tb = traceback.format_exc()
                    res = MeasureResult(
                        (tb, ex), MeasureErrorNo.BUILD_TIMEOUT, self.timeout, time.time()
                    )
                except ChildProcessError as ex:
                    # The worker process died (e.g. killed by the OS).
                    tb = traceback.format_exc()
                    res = MeasureResult(
                        (tb, ex), MeasureErrorNo.RUNTIME_DEVICE, self.timeout, time.time()
                    )

                results.append(res)

        return results
class RPCRunner(Runner):
    """Run generated code on remote devices.
    This class will ask a RPC Tracker to get device for measurement.

    Parameters
    ----------
    key: str
        The key of the device registered in the tracker
    host: str
        The host address of RPC Tracker
    port: int
        The port of RPC Tracker
    priority: int, optional
        The priority of this request, larger is more prior
    timeout: float
        The timeout of a RPCRunner measurement task
    n_parallel: int
        The number of tasks run in parallel. "None" will use all cpu cores
    number: int
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int, optional
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first "1" is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms: int, optional
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval: float, optional
        The cool down interval between two measurements.
    enable_cpu_cache_flush: bool
        Whether to flush cache on CPU between repeated measurements.
        Flushing cache can make the measured latency of one operator closer to
        its actual latency during end-to-end inference.
        To make this option effective, the argument `number` should also be set to 1.
        This only has effect on CPU tasks.
    module_loader : ModuleLoader
        If given, a context manager that loads the module to be timed into the remote runtime.
        If not given, default_module_loader is used.
    """

    def __init__(
        self,
        key,
        host,
        port,
        priority=1,
        timeout=10,
        n_parallel=None,
        number=4,
        repeat=3,
        min_repeat_ms=0,
        cooldown_interval=0.1,
        enable_cpu_cache_flush=False,
        module_loader=None,
    ):
        super(RPCRunner, self).__init__(timeout, n_parallel)
        self.key = key
        self.host = host
        self.port = port
        self.priority = priority
        self.timeout = timeout
        self.number = number
        self.repeat = repeat
        self.min_repeat_ms = min_repeat_ms
        self._ref_input = None
        self.enable_cpu_cache_flush = enable_cpu_cache_flush
        self.cooldown_interval = cooldown_interval
        self.module_loader = module_loader
        # Pool timeout covers a full batch of n_parallel tasks plus headroom;
        # each worker re-seeds the global autotvm scope on startup.
        self.executor = PopenPoolExecutor(
            timeout=timeout * (self.n_parallel + 1),
            initializer=reset_global_scope,
            initargs=(AutotvmGlobalScope.current,),
        )

    @property
    def ref_input(self):
        """
        Fixed input for tuning special operators, e.g., sparse operators
        requiring indices as input.
        """
        return self._ref_input

    @ref_input.setter
    def ref_input(self, val):
        # Warn on any non-None assignment: a fixed input bypasses the random
        # fill path in run_through_rpc and must match the operator layout.
        if val is not None:
            warnings.warn(
                "You are specifying fixed input for tuning the operator. "
                "Be sure your input always fits the operator. Some "
                "operators may conduct layout transformation during tuning, "
                "thus can lead to unexpected behaviors. ",
                RuntimeWarning,
            )
        self._ref_input = val

    def set_task(self, task):
        # Verify up front that the tracker can actually provide a matching
        # device, so tuning fails fast instead of hanging per-measurement.
        self.task = task
        if check_remote(task.target, self.key, self.host, self.port):
            logger.info("Get devices for measurement successfully!")
        else:
            raise RuntimeError(
                "Cannot get remote devices from the tracker. "
                "Please check the status of tracker by "
                "'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' "
                "and make sure you have free devices on the queue status."
            )

    def get_build_kwargs(self):
        """Query device limits from a remote session so the builder can
        reject invalid kernels before uploading them."""
        kwargs = {"checks": {}}
        if (
            "cuda" in self.task.target.keys
            or "opencl" in self.task.target.keys
            or "rocm" in self.task.target.keys
            or "vulkan" in self.task.target.keys
        ):
            remote = request_remote(self.key, self.host, self.port)
            dev = remote.device(str(self.task.target), 0)
            max_dims = dev.max_thread_dimensions
            kwargs["checks"]["gpu"] = {
                "max_shared_memory_per_block": dev.max_shared_memory_per_block,
                "max_threads_per_block": dev.max_threads_per_block,
                "max_thread_x": max_dims[0],
                "max_thread_y": max_dims[1],
                "max_thread_z": max_dims[2],
            }
        if "hexagon" in self.task.target.keys:
            kwargs["checks"]["hexagon"] = {"vtcm_capacity": self.task.target.vtcm_capacity}
        return kwargs

    def run(self, measure_inputs, build_results):
        """Measure built programs on the remote device in batches of
        ``n_parallel``; see Runner.run."""
        results = []
        remote_kwargs = dict(
            device_key=self.key,
            host=self.host,
            port=self.port,
            priority=self.priority,
            timeout=self.timeout,
        )
        for i in range(0, len(measure_inputs), self.n_parallel):
            futures = []
            for measure_inp, build_res in zip(
                measure_inputs[i : i + self.n_parallel], build_results[i : i + self.n_parallel]
            ):
                module_loader = (
                    self.module_loader
                    if self.module_loader is not None
                    else default_module_loader()
                )
                ret = self.executor.submit(
                    run_through_rpc,
                    measure_inp,
                    build_res,
                    self.number,
                    self.repeat,
                    self.min_repeat_ms,
                    self.cooldown_interval,
                    remote_kwargs,
                    self.ref_input,
                    self.enable_cpu_cache_flush,
                    module_loader,
                )
                futures.append(ret)

            for future in futures:
                try:
                    res = future.result()
                    results.append(res)
                except Exception as ex:  # pylint: disable=broad-except
                    # Any failure in the worker (including pool timeout) is
                    # recorded as a RUN_TIMEOUT result rather than aborting.
                    tb = traceback.format_exc()
                    results.append(
                        MeasureResult(
                            (tb, ex), MeasureErrorNo.RUN_TIMEOUT, self.timeout, time.time()
                        )
                    )
        return results
class LocalRunner(RPCRunner):
    """Run generated code on local devices.

    Parameters
    ----------
    timeout: float
        The timeout of a measurement task
    number: int
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int, optional
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first one is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms: int, optional
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval: float, optional
        The cool down interval between two measurements.
    enable_cpu_cache_flush: bool
        Whether to flush cache on CPU between repeated measurements.
        Flushing cache can make the measured latency of one operator closer to
        its actual latency during end-to-end inference.
        To make this option effective, the argument `number` should also be set to 1.
        This only has effect on CPU tasks.

    Note
    ----
    This is a "fake" local mode. We start a silent rpc tracker and rpc server
    for the user. In this way we reuse timeout/isolation mechanism in RPC infrastructure.
    """

    def __init__(
        self,
        timeout=10,
        number=4,
        repeat=3,
        min_repeat_ms=0,
        cooldown_interval=0.1,
        enable_cpu_cache_flush=False,
        module_loader=None,
    ):
        # key/host/port are placeholders here; set_task fills them in once
        # the local tracker and server have been started.
        super(LocalRunner, self).__init__(
            "",
            None,
            None,
            0,
            timeout=timeout,
            n_parallel=1,
            number=number,
            repeat=repeat,
            min_repeat_ms=min_repeat_ms,
            cooldown_interval=cooldown_interval,
            enable_cpu_cache_flush=enable_cpu_cache_flush,
            module_loader=module_loader,
        )
        self.tracker = None
        self.server = None

    def set_task(self, task):
        """Start a silent local tracker/server pair and register the task.

        Returns the (server, tracker) pair; the caller must keep references
        to them alive (create_measure_batch stores them as attach_objects).
        """
        # pylint: disable=import-outside-toplevel
        from ...rpc.server import Server
        from ...rpc.tracker import Tracker

        self.task = task
        # Bind to the first free port in [9000, 10000).
        tracker = Tracker(port=9000, port_end=10000, silent=True)
        # The device key embeds the tracker port, so concurrent LocalRunners
        # on one machine do not collide.
        device_key = f"$local$device${tracker.port}"
        server = Server(
            port=9000,
            port_end=10000,
            key=device_key,
            silent=True,
            tracker_addr=("127.0.0.1", tracker.port),
        )
        self.key = device_key
        self.host = "127.0.0.1"
        self.port = tracker.port

        # Re-use RPCRunner.set_task to verify the local device is reachable.
        super(LocalRunner, self).set_task(task)
        return server, tracker
def _build_func_common(measure_input, runtime=None, checks=None, build_option=None):
    """Common part for building a configuration.

    Instantiates the task with the given config and compiles it, returning
    the built module and the (shape, dtype) info of its tensor arguments.
    `checks` carries device limits (from Runner.get_build_kwargs) enforced
    via verification passes; `build_option` overrides PassContext config.
    """
    target, task, config = measure_input
    target, task.target_host = Target.canon_target_and_host(target, task.target_host)

    checks = checks or {}
    with target:
        s, args = task.instantiate(config)

        # check invalidity of template and code hash consistency
        if not config.valid():
            raise InstantiationError(config.errors)

        # if target is vta, we need to use vta build
        if (
            hasattr(measure_input.target, "device_name")
            and measure_input.target.device_name == "vta"
        ):
            # pylint: disable=import-outside-toplevel
            import vta

            func = vta.build(s, args, target_host=task.target_host)
        else:
            # Rebuild the current PassContext config, layering build_option
            # and the device-limit verification passes on top of it.
            current_pass_context: tvm.ir.transform.PassContext = (
                tvm.ir.transform.PassContext.current()
            )
            current_config = dict(current_pass_context.config)
            if build_option is not None:
                current_config.update(build_option)

            if "tir.add_lower_pass" in current_config:
                current_add_lower_pass = list(current_config["tir.add_lower_pass"])
            else:
                current_add_lower_pass = []
            # Verification passes run at lowering phase 2 and raise
            # InstantiationError for configs that exceed device limits.
            if checks.get("gpu"):
                current_add_lower_pass.append((2, gpu_verify_pass(**checks.get("gpu"))))
            if checks.get("hexagon"):
                current_add_lower_pass.append((2, vtcm_verify_pass(**checks.get("hexagon"))))
            current_config["tir.add_lower_pass"] = current_add_lower_pass

            with tvm.ir.transform.PassContext(
                opt_level=current_pass_context.opt_level,
                required_pass=current_pass_context.required_pass,
                disabled_pass=current_pass_context.disabled_pass,
                instruments=current_pass_context.instruments,
                config=current_config,
            ):
                func = build(s, args, target=target, runtime=runtime)
    return func, tuple((get_const_tuple(x.shape), x.dtype) for x in args)
class _WrappedBuildFunc:
    """
    Wrap build_func to a function that can be used in measure.

    Note: this is a class instead of a closure so that it can be pickled when
    using multiprocessing.

    Parameters
    ----------
    build_func : The compilation function
        We expect fcompile to contain an attr "output_format".
    runtime : Optional[Runtime]
        The runtime to generate artifacts for

    Returns
    -------
    wrapped_build_func : callable
        The wrapped build function
    """

    def __init__(self, build_func, runtime=None):
        if not hasattr(build_func, "output_format"):
            raise AttributeError("Expect build_func to have the attribute output_format.")
        self.build_func = build_func
        self.runtime = runtime

    def __call__(self, measure_input, tmp_dir, **kwargs):
        """
        Wrapped build func.

        Returns a BuildResult; compilation errors are captured into its
        `error` field as a (traceback, exception) tuple instead of raised.

        Parameters
        ----------
        measure_input: MeasureInput
            The input of measurement
        tmp_dir: str
            The path of temporary directory to export generated library
        """
        tic = time.time()
        try:
            # Random filename avoids collisions between parallel workers
            # sharing the same tmp_dir.
            filename = os.path.join(
                tmp_dir, f"tmp_func_{getrandbits(64):0x}.{self.build_func.output_format}"
            )
            # TODO(tvm-team) consider inlining _build_func_common
            func, arg_info = _build_func_common(measure_input, self.runtime, **kwargs)
            if self.build_func.output_format == ".model-library-format":
                # Late import to preserve autoTVM with USE_MICRO OFF
                try:
                    from tvm import micro  # pylint: disable=import-outside-toplevel
                except ImportError:
                    raise ImportError("Requires USE_MICRO")
                micro.export_model_library_format(func, filename)
            else:
                func.export_library(filename, self.build_func)
        except Exception as e:  # pylint: disable=broad-except
            tb = traceback.format_exc()
            return BuildResult(None, None, (tb, e), time.time() - tic)
        return BuildResult(filename, arg_info, None, time.time() - tic)
# Type of a module loader: a callable taking (remote_kwargs, build_result)
# and returning a context manager that yields (RPC session, loaded module).
# NOTE(review): the second parameter is annotated `dict` but callers pass a
# BuildResult namedtuple — confirm the intended annotation.
ModuleLoader = typing.Callable[
    [dict, dict], typing.ContextManager[typing.Tuple[tvm.rpc.RPCSession, tvm.runtime.Module]]
]
def run_through_rpc(
    measure_input,
    build_result,
    number,
    repeat,
    min_repeat_ms,
    cooldown_interval,
    remote_kwargs,
    ref_input,
    enable_cpu_cache_flush=False,
    module_loader=None,
):
    """Run a generated library through rpc.

    Parameters
    ----------
    measure_input: MeasureInput
        The raw measure input
    build_result: BuildResult
        The result returned from Builder. This contains the path to the generated library.
    number: int
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int, optional
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first one is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms: int, optional
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval: float
        The cool down interval between two measurements
    remote_kwargs: dict
        Passed to module_loader(). Ultimately, keyword args to request_remote().
    ref_input: List of np.ndarray
        The reference input used for tuning. Empty for randomly filled input.
    enable_cpu_cache_flush: bool
        Whether to flush cache on CPU between repeated measurements.
        Flushing cache can make the measured latency of one operator closer to
        its actual latency during end-to-end inference.
        To make this option effective, the argument `number` should also be set to 1.
        This only has effect on CPU tasks.
    module_loader: ModuleLoader
        A function that returns a ContextManager used to establish and teardown the remote
        session. NOTE(review): despite the ``=None`` default, a None value would raise
        TypeError below; RPCRunner.run always supplies default_module_loader() — confirm.

    Returns
    -------
    res: MeasureResult
        Measured costs, or the propagated build failure.
    """
    # A build failure is already a MeasureResult; pass it through unchanged.
    if isinstance(build_result, MeasureResult):
        return build_result

    tic = time.time()
    errno = MeasureErrorNo.NO_ERROR
    try:
        # upload built module
        with module_loader(remote_kwargs, build_result) as (remote, mod):
            dev = remote.device(str(measure_input.target), 0)

            # Limitation:
            # We can not get PackFunction directly in the remote mode as it is wrapped
            # under the std::function. We could lift the restriction later once we fold
            # the PackedFunc as an object. Currently, we pass function name to work
            # around it.
            f_prepare = "cache_flush_cpu_non_first_arg" if enable_cpu_cache_flush else ""
            time_f = mod.time_evaluator(
                mod.entry_name,
                dev,
                number=number,
                repeat=repeat,
                min_repeat_ms=min_repeat_ms,
                f_preproc=f_prepare,
            )

            if ref_input:
                # Use the user-provided fixed input.
                args = [nd.array(x, device=dev) for x in ref_input]
            else:
                try:
                    random_fill = remote.get_function("tvm.contrib.random.random_fill")
                except AttributeError:
                    raise AttributeError(
                        "Please make sure USE_RANDOM is ON in the config.cmake "
                        "on the remote devices"
                    )
                args = [nd.empty(x[0], x[1], dev) for x in build_result.arg_info]
                if "scatter" not in measure_input.task.name:
                    # the index tensor of scatter op cannot be randomly initialized
                    for arg in args:
                        random_fill(arg)
                dev.sync()

            costs = time_f(*args).results

        if len(costs) > 2:  # remove largest and smallest value to reduce variance
            costs = list(costs)
            costs.sort()
            costs = tuple(costs[1:-1])
    except TVMError as exc:
        # Trim verbose device-side noise from the message before recording it.
        msg = str(exc)
        if "Stack trace returned" in msg:
            msg = msg[: msg.index("Stack trace returned")]
        if "CUDA Source" in msg:
            msg = msg[: msg.index("CUDA Source")]
        costs = (traceback.format_exc(), RuntimeError(msg[:1024]))
        errno = MeasureErrorNo.RUNTIME_DEVICE
    tstamp = time.time()
    time.sleep(cooldown_interval)
    return MeasureResult(costs, errno, tstamp - tic + build_result.time_cost, tstamp)
class DefaultModuleLoader:
    """See default_module_loader(). A pickleable emulation of the original function closure."""

    def __init__(self, pre_load_function=None) -> None:
        # Optional hook invoked after the RPC session is established and
        # before the built module is uploaded.
        self.pre_load_function = pre_load_function

    @contextlib.contextmanager
    def __call__(self, remote_kwargs, build_result):
        session = request_remote(**remote_kwargs)
        if self.pre_load_function is not None:
            self.pre_load_function(session, build_result)

        session.upload(build_result.filename)
        try:
            yield session, session.load_module(os.path.split(build_result.filename)[1])
        finally:
            # Remove the artifacts pushed to the remote device.
            session.remove(build_result.filename)
            session.remove(os.path.splitext(build_result.filename)[0] + ".so")
            session.remove("")
def default_module_loader(pre_load_function=None):
    """Returns a default function that can be passed as module_loader to run_through_rpc.

    Parameters
    ----------
    pre_load_function : Optional[Function[tvm.rpc.Session, tvm.runtime.Module]]
        Invoked after a session is established and before the default code-loading RPC calls are
        issued. Allows performing pre-upload actions, e.g. resetting the remote runtime environment.

    Returns
    -------
    DefaultModuleLoader :
        A callable that can be passed as module_loader to run_through_rpc.
    """

    # This was a function with a closure before but that couldn't be pickled!
    # We need pickle to work for using python's multiprocessing on some platforms.
    return DefaultModuleLoader(pre_load_function)
def request_remote(device_key, host=None, port=None, priority=1, timeout=60):
    """Request a remote session

    Parameters
    ----------
    device_key: string
        The device key of registered device in tracker
    host: host, optional
        The host address of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_HOST"
    port: int, optional
        The port of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_PORT"
    priority: int, optional
        The priority of this request, larger is more prior
    timeout: float, optional
        The timeout of this session (units: second)

    Returns
    ------
    session: RPCSession
    """
    # Resolve the tracker address, falling back to the environment.
    tracker_host = host or os.environ["TVM_TRACKER_HOST"]
    tracker_port = port or int(os.environ["TVM_TRACKER_PORT"])

    tracker = _rpc.connect_tracker(tracker_host, tracker_port)
    return tracker.request(device_key, priority=priority, session_timeout=timeout)
def check_remote(target, device_key, host=None, port=None, priority=100, timeout=10):
    """
    Check the availability of a remote device

    Parameters
    ----------
    target: Target
        The wanted compilation target
    device_key: string
        device key of registered device in tracker
    host: host, optional
        The host address of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_HOST"
    port: int, optional
        The port address of rpc tracker.
        If is none, will use environment variable "TVM_TRACKER_PORT"
    priority: int, optional
        The priority of this request, larger is more prior
    timeout: float, optional
        The timeout of this check (units: seconds).

    Returns
    -------
    available: bool
        True if can find available device
    """

    def _check():
        # Runs in a background thread: request a session and spin until the
        # device reports as existing. NOTE(review): this busy-waits without
        # sleeping, burning a CPU core while waiting — consider a short
        # time.sleep in the loop; confirm before changing behavior.
        logger.debug("waiting for device...")
        remote = request_remote(device_key, host, port, priority)
        dev = remote.device(str(target))
        while not dev.exist:  # wait until we get an available device
            pass
        logger.debug("device available")

    # Give the tracker up to `timeout` seconds to produce a device.
    # NOTE(review): the thread is not a daemon, so on timeout it may keep
    # spinning in the background — verify this is intended.
    t = threading.Thread(target=_check)
    t.start()
    t.join(timeout)

    # Request a fresh session on the main thread and report whether the
    # device exists now. NOTE(review): this second request can itself block
    # beyond `timeout` — confirm acceptable.
    remote = request_remote(device_key, host, port, priority)
    dev = remote.device(str(target))
    return dev.exist
def set_cuda_target_arch(arch):
    """THIS API IS DEPRECATED.

    Historically set the target architecture of the nvcc compiler; now it
    unconditionally raises with upgrade instructions.

    Parameters
    ----------
    arch: str or list
        The argument of nvcc -arch. (e.g. "sm_51", "sm_62")
        it can also be a count of gencode arguments pass to nvcc command line,
        e.g., ["-gencode", "arch=compute_52,code=sm_52", "-gencode", "arch=compute_70,code=sm_70"]

    Raises
    ------
    ValueError
        Always; the architecture must now be encoded in the target string.
    """
    message = (
        "The API 'autotvm.measure.set_cuda_target_arch' is deprecated."
        "Try specifying it by adding '-arch=sm_xx' to your target, such as 'cuda -arch=sm_86'."
        "See https://github.com/apache/tvm/pull/9544 for the upgrade guide."
    )
    raise ValueError(message)
def gpu_verify_pass(**kwargs):
    """Verify the validity of a gpu kernel.
    This pass will check memory usage and number of threads per block.
    """

    def _verify(func, *_):
        # Raise (rather than return a flag) so the caller records this
        # configuration as an invalid instantiation.
        if not tvm.tir.analysis.verify_gpu_code(func, kwargs):
            raise InstantiationError("Skipped because of invalid gpu kernel")
        return func

    return tvm.tir.transform.prim_func_pass(_verify, opt_level=0)
def vtcm_verify_pass(**kwargs):
    """Verify the validity of a hexagon kernel.
    This pass will check vtcm memory usage.
    """

    def _verify(func, *_):
        allocated = tvm.tir.analysis.calculate_allocated_bytes(func).get("global.vtcm", 0)
        capacity = kwargs.get("vtcm_capacity", 0)
        # A non-positive capacity means "no limit"; only enforce positive limits.
        if 0 < capacity < allocated:
            raise InstantiationError("Skipped because of invalid vtcm memory usage limit")
        return func

    return tvm.tir.transform.prim_func_pass(_verify, opt_level=0)
| 32,003 | 36.041667 | 100 | py |
tvm | tvm-main/python/tvm/autotvm/measure/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Distributed executor infrastructure to scale up the tuning"""
from .measure import (
MeasureInput,
MeasureResult,
MeasureErrorNo,
measure_option,
create_measure_batch,
)
from .measure_methods import (
LocalBuilder,
LocalRunner,
RPCRunner,
default_module_loader,
request_remote,
)
from .executor import Executor
| 1,140 | 32.558824 | 64 | py |
tvm | tvm-main/python/tvm/exec/measure_peak.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""measure bandwidth and compute peak
e.g.
python3 -m tvm.exec.measure_peak --target cuda --rpc-host 127.0.0.1 --rpc-port 9090
python3 -m tvm.exec.measure_peak --target opencl --target-host "llvm -mtriple=aarch64-linux-gnu" \
--rpc-host $TVM_OPENCL_DEVICE_HOST --rpc-port 9090
"""
import argparse
import logging
from tvm.target import Target
from ..contrib.peak import measure_peak_all
def main():
    """Parse command-line options and run the peak bandwidth/compute benchmark."""
    parser = argparse.ArgumentParser()
    options = [
        (("--target",), dict(type=str, default="llvm", help="The build target")),
        (("--target-host",), dict(type=str, default=None, help="The host code compilation target")),
        (("--rpc-host",), dict(type=str, default="127.0.0.1", help="the hostname of the server")),
        (("--rpc-port",), dict(type=int, default=9090, help="The port of the RPC")),
    ]
    for flags, kwargs in options:
        parser.add_argument(*flags, **kwargs)
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)
    # Canonicalize the target pair before handing off to the benchmark driver.
    args.target, args.target_host = Target.canon_target_and_host(args.target, args.target_host)
    measure_peak_all(args.target, args.target_host, args.rpc_host, args.rpc_port)


if __name__ == "__main__":
    main()
| 1,969 | 36.169811 | 98 | py |
tvm | tvm-main/python/tvm/exec/autotvm_log_editor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Pick best log entries from a large file and store them to a small file"""
import argparse
import os
import logging
import warnings
from .. import autotvm
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--act", type=str, choices=["pick-best"], required=True, help="The action")
    parser.add_argument("--i", type=str, help="The input file or directory", required=True)
    parser.add_argument("--o", type=str, help="The output file")
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)

    if args.act == "pick-best":
        if os.path.isfile(args.i):
            args.o = args.o or args.i + ".best.log"
            autotvm.record.pick_best(args.i, args.o)
        elif os.path.isdir(args.i):
            args.o = args.o or "best.log"
            tmp_filename = args.o + ".tmp"

            # First pass: merge the best entries of every *.log file in the
            # input directory into a temporary file; then run a final filter.
            with open(tmp_filename, "w") as tmp_fout:
                for filename in os.listdir(args.i):
                    if filename.endswith(".log"):
                        try:
                            # BUGFIX: os.listdir returns bare file names; join
                            # with the directory so this works from any cwd.
                            autotvm.record.pick_best(os.path.join(args.i, filename), tmp_fout)
                        except Exception:  # pylint: disable=broad-except
                            warnings.warn("Ignore invalid file %s" % filename)

            logging.info("Run final filter...")
            autotvm.record.pick_best(tmp_filename, args.o)
            os.remove(tmp_filename)
            logging.info("Output to %s ...", args.o)
        else:
            raise ValueError("Invalid input file: " + args.i)
    else:
        raise ValueError("Invalid action " + args.act)
| 2,419 | 39.333333 | 99 | py |
tvm | tvm-main/python/tvm/exec/query_rpc_tracker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tool to query RPC tracker status"""
from __future__ import absolute_import
import logging
import argparse
import os
from .. import rpc
def main():
    """Query the configured RPC tracker and print its text summary."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="", help="the hostname of the tracker")
    parser.add_argument("--port", type=int, default=None, help="The port of the RPC")
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)

    # Fall back to environment variables (or localhost defaults) when unset.
    args.host = args.host or os.environ.get("TVM_TRACKER_HOST", "127.0.0.1")
    args.port = args.port or int(os.environ.get("TVM_TRACKER_PORT", "9190"))

    conn = rpc.connect_tracker(args.host, args.port)
    # pylint: disable=superfluous-parens
    print("Tracker address %s:%d\n" % (args.host, args.port))
    print("%s" % conn.text_summary())


if __name__ == "__main__":
    main()
| 1,723 | 34.183673 | 91 | py |
tvm | tvm-main/python/tvm/exec/rpc_tracker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-outer-name, invalid-name
"""Tool to start RPC tracker"""
import logging
import argparse
from ..rpc.tracker import Tracker
def main(args):
    """Launch the RPC tracker and block until its process exits."""
    tracker = Tracker(args.host, port=args.port, port_end=args.port_end, silent=args.silent)
    tracker.proc.join()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--host", type=str, default="0.0.0.0", help="The host IP address the tracker binds to"
    )
    parser.add_argument("--port", type=int, default=9190, help="The port of the RPC")
    parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
    parser.add_argument("--silent", action="store_true", help="Whether run in silent mode.")

    cli_args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)
    main(cli_args)
| 1,658 | 39.463415 | 100 | py |
tvm | tvm-main/python/tvm/exec/rpc_proxy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-outer-name, invalid-name
"""RPC web proxy, allows redirect to websocket based RPC servers(browsers)"""
import logging
import argparse
import os
from tvm.rpc.proxy import Proxy
def find_example_resource():
    """Find resource examples."""
    curr_path = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
    base_path = os.path.abspath(os.path.join(curr_path, "..", "..", ".."))
    web_path = os.path.join(base_path, "web")

    index_page = os.path.join(web_path, "apps", "browser", "rpc_server.html")
    resource_files = [
        os.path.join(web_path, "dist", "tvmjs.bundle.js"),
        os.path.join(web_path, "dist", "wasm", "tvmjs_runtime.wasi.js"),
    ]

    # Also pick up any static assets that were built into dist/www.
    resource_base = os.path.join(web_path, "dist", "www")
    if os.path.isdir(resource_base):
        candidates = (os.path.join(resource_base, fname) for fname in os.listdir(resource_base))
        resource_files.extend(path for path in candidates if os.path.isfile(path))

    # Fail loudly if any required file is missing.
    for fname in [index_page] + resource_files:
        if not os.path.exists(fname):
            raise RuntimeError("Cannot find %s" % fname)
    return index_page, resource_files
def main(args):
    """Start the RPC proxy (optionally serving the browser example) and wait."""
    if args.tracker:
        url, port = args.tracker.split(":")
        tracker_addr = (url, int(port))
    else:
        tracker_addr = None

    proxy_kwargs = dict(
        port=args.port,
        port_end=args.port_end,
        web_port=args.web_port,
        tracker_addr=tracker_addr,
    )
    if args.example_rpc:
        # Serve the example browser RPC page and its JS bundles as well.
        index, js_files = find_example_resource()
        proxy_kwargs["index_page"] = index
        proxy_kwargs["resource_files"] = js_files

    prox = Proxy(args.host, **proxy_kwargs)
    prox.proc.join()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="127.0.0.1", help="the hostname of the server")
    parser.add_argument("--port", type=int, default=9090, help="The port of the RPC")
    parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
    parser.add_argument(
        "--web-port", type=int, default=8888, help="The port of the http/websocket server"
    )
    parser.add_argument(
        "--example-rpc", type=bool, default=False, help="Whether to switch on example rpc mode"
    )
    parser.add_argument("--tracker", type=str, default="", help="Report to RPC tracker")

    cli_args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)
    main(cli_args)
| 3,504 | 37.097826 | 100 | py |
tvm | tvm-main/python/tvm/exec/microtvm_debug_shell.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-outer-name, invalid-name
"""Start an RPC server intended for use as a microTVM debugger.
microTVM aims to be runtime-agnostic, and to that end, frameworks often define command-line tools
used to launch a debug flow. These tools often manage the process of connecting to an attached
device using a hardware debugger, exposing a GDB server, and launching GDB connected to that
server with a source file attached. It's also true that this debugger can typically not be executed
concurrently with any flash tool, so this integration point is provided to allow TVM to launch and
terminate any debuggers integrated with the larger microTVM compilation/autotuning flow.
To use this tool, first launch this script in a separate terminal window. Then, provide the hostport
to your compiler's Flasher instance.
"""
import argparse
import logging
import socket
import struct
import tvm.micro.debugger as _ # NOTE: imported to expose global PackedFuncs over RPC.
from .._ffi.base import py_str
from ..rpc import base
from ..rpc import _ffi_api
_LOG = logging.getLogger(__name__)
def parse_args():
    """Parse command line arguments to this script."""
    parser = argparse.ArgumentParser(description="microTVM debug-tool runner")
    parser.add_argument("--host", default="0.0.0.0", help="hostname to listen on")
    parser.add_argument("--port", type=int, default=9090, help="hostname to listen on")
    impl_help = (
        "If given, name of a module underneath tvm.micro.contrib "
        "which contains the Debugger implementation to use. For example, to enable a "
        "debugger named BarDebugger in python/tvm/micro/contrib/foo.py, specify either "
        "'tvm.micro.contrib.foo' or 'foo' here. To enable a debugger named BazDebugger in "
        "a third-party module ext_package.debugger, specify 'ext_package.debugger' here. "
        "NOTE: the module cannot be in a sub-package of tvm.micro.contrib."
    )
    parser.add_argument("--impl", help=impl_help)
    return parser.parse_args()
class ConnectionClosedError(Exception):
    """Signals that the peer closed the connection before a full read completed."""
def handle_conn(conn, rpc_key):
    """Handle a single connection that has just been accept'd().

    Performs the RPC magic/key handshake, then feeds incoming length-prefixed
    packets into an event-driven RPC server until the session ends.

    Parameters
    ----------
    conn : socket.socket
        The connected socket.
    rpc_key : str
        The key advertised back to the client as "server:<rpc_key>".

    Raises
    ------
    ConnectionClosedError
        When the peer closes the connection in the middle of a read.
    """

    def send(data):
        conn.sendall(data)
        return len(data)

    # Handshake step 1: verify the RPC magic number.
    magic = struct.unpack("<i", base.recvall(conn, 4))[0]
    if magic != base.RPC_MAGIC:
        conn.close()
        return

    # Handshake step 2: read the client's key and check its header.
    keylen = struct.unpack("<i", base.recvall(conn, 4))[0]
    key = py_str(base.recvall(conn, keylen))
    arr = key.split()
    expect_header = "client:"
    server_key = "server:" + rpc_key
    if arr[0] != expect_header:
        conn.sendall(struct.pack("<i", base.RPC_CODE_MISMATCH))
        # BUGFIX: the original referenced an undefined name `addr` here, so a
        # key mismatch raised NameError instead of logging a warning. Use the
        # socket's peer address instead.
        _LOG.warning("mismatch key from %s", conn.getpeername())
        return

    conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
    conn.sendall(struct.pack("<i", len(server_key)))
    conn.sendall(server_key.encode("utf-8"))
    server = _ffi_api.CreateEventDrivenServer(send, "microtvm-rpc-debugger", key)

    def _readall(n):
        # Read exactly n bytes, raising if the connection drops mid-read.
        buf = bytearray()
        while len(buf) < n:
            x = conn.recv(n - len(buf))
            if not x:
                raise ConnectionClosedError()
            buf = buf + x
        return buf

    # Packet loop: each packet is an 8-byte little-endian length header
    # followed by the packet body; a zero length terminates the session.
    while True:
        packet_length_bytes = _readall(8)
        packet_length = struct.unpack("<q", packet_length_bytes)[0]
        if not packet_length:
            break

        status = server(packet_length_bytes, 3)
        if status == 0:
            break

        packet_body = _readall(packet_length)
        status = server(packet_body, 3)
def main():
    """Main entry point for microTVM debug shell.

    Listens for RPC connections on --host/--port and serves each one with
    handle_conn(); runs until interrupted.
    """
    args = parse_args()
    logging.basicConfig(level=logging.INFO)
    if args.impl:
        # BUGFIX: importlib was used here without ever being imported, which
        # raised NameError whenever --impl was given.
        import importlib  # pylint: disable=import-outside-toplevel

        package = None
        if "." not in args.impl:
            package = f"tvm.micro.contrib.{args.impl}"
        # NOTE(review): importlib.import_module consults `package` only for
        # relative names (leading "."); as written a bare --impl is imported
        # as a top-level module — confirm this matches intent.
        importlib.import_module(args.impl, package)

    sock = socket.socket(base.get_addr_family([args.host, args.port]), socket.SOCK_STREAM)
    # Allow quick restarts on the same port.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind((args.host, args.port))
    sock.listen(1)
    bind_addr, bind_port = sock.getsockname()
    _LOG.info("listening for connections on %s:%d", bind_addr, bind_port)
    while True:
        conn, peer = sock.accept()
        _LOG.info("accepted connection from %s", peer)
        try:
            handle_conn(conn, "")
        except ConnectionClosedError:
            # Peer hung up mid-session; not an error for the server loop.
            pass
        finally:
            conn.close()
            _LOG.info("closed connection from %s", peer)


if __name__ == "__main__":
    main()
| 5,414 | 34.392157 | 100 | py |
tvm | tvm-main/python/tvm/exec/popen_worker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Internal PopenWorker for PopenPool."""
import sys
import os
import struct
import threading
import traceback
import pickle
import logging
import cloudpickle
from tvm.contrib.popen_pool import StatusKind
class TimeoutStatus:
    """Mutable status flag shared between the worker loop and its timeout watcher."""

    # BUGFIX: this was misspelled `__slot__`, which is just an ordinary
    # (ignored) class attribute; `__slots__` is required for the slot
    # mechanism (no per-instance __dict__) to actually take effect.
    __slots__ = ["status"]

    def __init__(self):
        # Starts RUNNING; flipped to TIMEOUT or COMPLETE under the lock in main().
        self.status = StatusKind.RUNNING
def main():
    """Main worker function

    Protocol (two inherited pipe fds, passed as argv[1]=read fd and
    argv[2]=write fd):

      parent -> worker: <4-byte LE length><cloudpickle of (fn, args, kwargs, timeout)>
      worker -> parent: <4-byte LE length><cloudpickle of (StatusKind, payload)>

    The worker runs one submitted call at a time; a timer thread reports
    TIMEOUT if the call exceeds its deadline.  The loop exits when the parent
    closes its end of the pipe (short read of the length header).
    """
    if len(sys.argv) != 3:
        print("Usage: <read_fd> <write_fd>")
        return
    if sys.platform == "win32":
        # On Windows the argv values are OS handles, not CRT fds; convert
        # before wrapping them in Python file objects.
        # pylint: disable=import-outside-toplevel
        import msvcrt
        reader = os.fdopen(msvcrt.open_osfhandle(int(sys.argv[1]), os.O_BINARY), "rb")
        writer = os.fdopen(msvcrt.open_osfhandle(int(sys.argv[2]), os.O_BINARY), "wb")
    else:
        reader = os.fdopen(int(sys.argv[1]), "rb")
        writer = os.fdopen(int(sys.argv[2]), "wb")
    logging.basicConfig(level=logging.INFO)
    # Serializes responses so the timeout watcher and the main loop never
    # both reply for the same task.
    lock = threading.Lock()
    def _respond(ret_value):
        """Send data back to the client."""
        data = cloudpickle.dumps(ret_value, protocol=pickle.HIGHEST_PROTOCOL)
        writer.write(struct.pack("<i", len(data)))
        writer.write(data)
        writer.flush()
    def _cancel_run(status):
        # Timer callback: if the task is still RUNNING when the deadline
        # fires, report TIMEOUT and mark it so the main loop skips its reply.
        lock.acquire()
        if status.status == StatusKind.RUNNING:
            _respond((StatusKind.TIMEOUT, TimeoutError()))
            status.status = StatusKind.TIMEOUT
        lock.release()
    while True:
        raw_bytes_size = reader.read(4)
        if len(raw_bytes_size) != 4:
            # the parent exited
            return
        bytes_size = struct.unpack("<i", raw_bytes_size)[0]
        fn, args, kwargs, timeout = cloudpickle.loads(reader.read(bytes_size))
        status = TimeoutStatus()
        if timeout is not None:
            # Daemon timer so a pending watcher never blocks interpreter exit.
            watcher = threading.Timer(timeout, _cancel_run, [status])
            watcher.daemon = True
            watcher.start()
        # pylint: disable=broad-except
        try:
            result = fn(*args, **kwargs)
            ret_value = (StatusKind.COMPLETE, result)
        except Exception as exception:
            # Ship the formatted traceback instead of the (possibly
            # unpicklable) original exception payload.
            msg = traceback.format_exc()
            ret_value = (StatusKind.EXCEPTION, type(exception)(msg))
        if timeout is not None:
            watcher.cancel()
        # Only reply if the watcher has not already reported a TIMEOUT.
        lock.acquire()
        if status.status == StatusKind.RUNNING:
            _respond(ret_value)
            status.status = StatusKind.COMPLETE
        lock.release()
if __name__ == "__main__":
    # Swallow interrupts and pipe errors: the parent pool treats worker
    # termination as the normal shutdown path.
    try:
        main()
    except KeyboardInterrupt:
        pass
    except IOError:
        pass
| 3,340 | 29.935185 | 86 | py |
tvm | tvm-main/python/tvm/exec/rpc_server.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-outer-name, invalid-name
"""Start an RPC server"""
import argparse
import logging
from .. import rpc
def main(args):
    """Main function

    Parameters
    ----------
    args : argparse.Namespace
        parsed args from command-line invocation
    """
    tracker_addr = None
    if args.tracker:
        # rsplit so IPv6-style hosts containing ":" keep the last field as port.
        url, port = args.tracker.rsplit(":", 1)
        tracker_addr = (url, int(port))
        if not args.key:
            raise RuntimeError("Need key to present type of resource when tracker is available")

    server = rpc.Server(
        args.host,
        args.port,
        args.port_end,
        is_proxy=args.through_proxy,
        key=args.key,
        tracker_addr=tracker_addr,
        load_library=args.load_library,
        custom_addr=args.custom_addr,
        silent=args.silent,
        no_fork=not args.fork,
    )
    server.proc.join()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--host", type=str, default="0.0.0.0", help="The host IP address the tracker binds to"
    )
    parser.add_argument("--port", type=int, default=9090, help="The port of the RPC")
    parser.add_argument(
        "--through-proxy",
        dest="through_proxy",
        action="store_true",
        help=(
            "Whether this server provide service through a proxy. If this is true, the host and"
            "port actually is the address of the proxy."
        ),
    )
    parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
    parser.add_argument(
        "--tracker",
        type=str,
        help="The address of RPC tracker in host:port format. " "e.g. (10.77.1.234:9190)",
    )
    parser.add_argument(
        "--key", type=str, default="", help="The key used to identify the device type in tracker."
    )
    parser.add_argument("--silent", action="store_true", help="Whether run in silent mode.")
    parser.add_argument("--load-library", type=str, help="Additional library to load")
    parser.add_argument(
        "--no-fork",
        dest="fork",
        action="store_false",
        help="Use spawn mode to avoid fork. This option \
        is able to avoid potential fork problems with Metal, OpenCL \
        and ROCM compilers.",
    )
    parser.add_argument(
        "--custom-addr", type=str, help="Custom IP Address to Report to RPC Tracker"
    )
    parser.set_defaults(fork=True)

    cmd_args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)
    # Warn about fork-related compiler crashes unless running silently.
    if cmd_args.fork is not False and not cmd_args.silent:
        logging.info(
            "If you are running ROCM/Metal, fork will cause "
            "compiler internal error. Try to launch with arg ```--no-fork```"
        )
    main(cmd_args)
| 3,616 | 34.116505 | 100 | py |
tvm | tvm-main/python/tvm/exec/gpu_memory_bandwidth.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A script to measure GPU memory bandwidth"""
import argparse
import itertools
import numpy as np
import tvm
from tvm import te, tir
from tvm.meta_schedule.runner import EvaluatorConfig
from tvm.testing import local_run
def _parse_args() -> argparse.Namespace:
    """Build and evaluate the command-line interface for the bandwidth test."""

    def _parse_list_int(source: str):
        # Comma-separated ints, e.g. "8,16,32" -> [8, 16, 32].
        return [int(i) for i in source.split(",")]

    parser = argparse.ArgumentParser(
        prog="GPU memory bandwidth testing",
        description="""Example:
    python -m tvm.exec.gpu_memory_bandwidth "nvidia/geforce-rtx-3090-ti" \
        --dtype "float32"
        --bx "8,16,32,64,128,256" \
        --tx "32,64,128,256,512,1024" \
        --vec "1,2,4"
    """,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Positional: compilation target string.
    parser.add_argument(
        "target",
        type=str,
        help="The target to be benchmarked",
    )
    # Workload shape: [XO, K, XI] reduced over K into [XO, XI].
    parser.add_argument(
        "--xo",
        type=int,
        default=1024,
        help="The value of `XO` in [XO, K, XI] => [XO, XI] reduction",
    )
    parser.add_argument(
        "--k",
        type=int,
        default=64,
        help="The value of `K` in [XO, K, XI] => [XO, XI] reduction",
    )
    parser.add_argument(
        "--xi",
        type=int,
        default=4096,
        help="The value of `XI` in [XO, K, XI] -> [XO, XI] reduction",
    )
    parser.add_argument(
        "--dtype",
        type=str,
        default="float32",
        help="The data type to be used in the workload",
    )
    # Schedule search space: candidate split/thread/vectorization factors.
    parser.add_argument(
        "--bx",
        type=_parse_list_int,
        default=[8, 16, 32, 64, 128, 256],
        help="The value to be used to split `XO` into [BX, _]",
    )
    parser.add_argument(
        "--tx",
        type=_parse_list_int,
        default=[32, 64, 128, 256, 512, 1024],
        help="Number of threads to be used",
    )
    parser.add_argument(
        "--vec",
        type=_parse_list_int,
        default=[1, 2, 4],
        help="Vector length to be used in vectorized load",
    )
    return parser.parse_args()
def _workload(
    len_xo: int,
    len_k: int,
    len_xi: int,
    dtype: str,
):
    """Construct the [XO, K, XI] => [XO, XI] sum-over-K workload as a PrimFunc."""
    # pylint: disable=invalid-name
    data = te.placeholder((len_xo, len_k, len_xi), dtype=dtype, name="A")
    red = te.reduce_axis((0, len_k), "k")
    out = te.compute(
        (len_xo, len_xi),
        lambda i, j: te.sum(data[i, red, j], axis=red),
        name="B",
    )
    # pylint: enable=invalid-name
    return te.create_prim_func([data, out])
def _schedule(
    sch: tir.Schedule,
    len_bx: int,
    len_tx: int,
    len_vec: int,
):
    """Apply a GPU schedule to the reduction built by _workload.

    Splits the spatial loops by (len_bx, len_tx, len_vec), binds the fused
    outer loop to blockIdx.x and the thread loop to threadIdx.x, and stages
    the input through a vectorized "local" cache read.
    """
    # pylint: disable=invalid-name
    block = sch.get_block("B")
    xo, xi, k = sch.get_loops(block)
    # Split spatial loops: xo -> [len_bx, _]; xi -> [_, len_tx, len_vec].
    bx, xo = sch.split(xo, factors=[len_bx, None])
    xi, tx, vec = sch.split(xi, factors=[None, len_tx, len_vec])
    sch.reorder(bx, xi, tx, xo, k, vec)
    bx = sch.fuse(bx, xi)
    sch.bind(bx, "blockIdx.x")
    sch.bind(tx, "threadIdx.x")
    # Stage the input (read buffer 0) in "local" scope and load it with a
    # vectorized innermost loop inside the reduction loop.
    ldg = sch.cache_read(block, 0, "local")
    sch.compute_at(ldg, k, preserve_unit_loops=True)
    sch.vectorize(sch.get_loops(ldg)[-1])
    sch.decompose_reduction(block, k)
    # pylint: enable=invalid-name
def main(): # pylint: disable=too-many-locals
    """Entry point: sweep (bx, tx, vec) schedule candidates over the reduction
    workload and report the best observed memory bandwidth."""
    args = _parse_args()
    # pylint: disable=invalid-name
    target = tvm.target.Target(args.target)
    dtype = args.dtype
    # Random input and zeroed output; their combined size is the number of
    # bytes moved per kernel run, used to convert time into bandwidth.
    a = np.random.uniform(-1, 1, (args.xo, args.k, args.xi)).astype(dtype)
    b = np.zeros((args.xo, args.xi), dtype=dtype)
    num_bytes = a.size * a.itemsize + b.size * b.itemsize
    print("###### Bandwidth Test ######")
    print(
        f"Workload [XO, K, XI] => [XO, XI]. "
        f"[{args.xo}, {args.k}, {args.xi}] => [{args.xo}, {args.xi}]"
    )
    print(f"Input size: {num_bytes / 1048576} MB")
    print(f"Target: {target}")
    # pylint: enable=invalid-name
    best_bandwidth = -1
    # Exhaustively try every combination of split/thread/vector factors.
    for len_bx, len_tx, len_vec in itertools.product(
        args.bx,
        args.tx,
        args.vec,
    ):
        func = _workload(
            len_xo=args.xo,
            len_k=args.k,
            len_xi=args.xi,
            dtype=dtype,
        )
        sch = tir.Schedule(func)
        _schedule(sch, len_bx, len_tx, len_vec)
        # Build and time the scheduled kernel on the local device.
        _, profile_result = local_run(
            tvm.build(sch.mod, target=target),
            target.kind.name,
            [a, b],
            evaluator_config=EvaluatorConfig(
                number=10,
                repeat=1,
                min_repeat_ms=100,
                enable_cpu_cache_flush=False,
            ),
        )
        # bytes / seconds, reported in GiB/s.
        bandwidth = num_bytes / profile_result.mean / (1024**3)
        bx = len_bx * args.xi // (len_tx * len_vec)  # pylint: disable=invalid-name
        mbs = num_bytes / 1024 / 1024
        print(
            f"bandwidth = {bandwidth:.3f} GB/s, bx = {bx}, tx = {len_tx}, "
            f"len_vec = {len_vec}, bytes = {mbs} MB"
        )
        if bandwidth > best_bandwidth:
            best_bandwidth = bandwidth
    print(f"peak bandwidth: {best_bandwidth:.3f} GB/s")
if __name__ == "__main__":
    main()
| 5,788 | 28.994819 | 83 | py |
tvm | tvm-main/python/tvm/exec/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace of executables python files that directly run throw cmd"""
| 857 | 46.666667 | 71 | py |
tvm | tvm-main/tests/python/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Configure pytest"""
import sys
import pytest
# Test paths pytest should skip collecting entirely on Windows (frontends and
# individual tests known not to work there).
COLLECT_IGNORE = []
if sys.platform.startswith("win"):
    COLLECT_IGNORE.extend(
        [
            "frontend/caffe",
            "frontend/caffe2",
            "frontend/coreml",
            "frontend/darknet",
            "frontend/keras",
            "frontend/mxnet",
            "frontend/pytorch",
            "frontend/tensorflow",
            "frontend/tflite",
            "frontend/onnx",
            "driver/tvmc/test_autoscheduler.py",
            "unittest/test_auto_scheduler_cost_model.py",  # stack overflow
            # "unittest/test_auto_scheduler_measure.py",  # exception ignored
            "unittest/test_auto_scheduler_search_policy.py",  # stack overflow
            # "unittest/test_auto_scheduler_measure.py",  # exception ignored
            "unittest/test_tir_intrin.py",
        ]
    )
def pytest_addoption(parser):
    """Register the custom command-line flags this test suite understands."""
    corstone_opts = {
        "action": "store_true",
        "default": False,
        "help": "Run Corstone-300 FVP tests",
    }
    parser.addoption("--enable-corstone300-tests", **corstone_opts)
def pytest_collection_modifyitems(config, items):
    """Skip tests marked `corstone300` unless explicitly enabled via CLI flag."""
    if config.getoption("--enable-corstone300-tests"):
        # User opted in; leave all collected items untouched.
        return
    skip_marker = pytest.mark.skip(
        reason="Need --enable-corstone300-tests option to run this test"
    )
    for item in items:
        if "corstone300" in item.keywords:
            item.add_marker(skip_marker)
| 2,404 | 39.083333 | 92 | py |
tvm | tvm-main/tests/python/all-platform-minimal-test/test_minimal_target_codegen_llvm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""LLVM enablement tests."""
import tvm
import tvm.testing
from tvm import te
from tvm import topi
from tvm.contrib import utils
import numpy as np
import ctypes
import math
import re
@tvm.testing.requires_llvm
def test_llvm_add_pipeline():
    """all-platform-minimal-test: Check LLVM enablement.

    Builds an element-wise add pipeline with parallel/vectorize pragmas,
    compiles it for LLVM, and checks the numerical result against numpy.
    """
    nn = 1024
    n = tvm.runtime.convert(nn)
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    AA = te.compute((n,), lambda *i: A(*i), name="A")
    BB = te.compute((n,), lambda *i: B(*i), name="B")
    T = te.compute(A.shape, lambda *i: AA(*i) + BB(*i), name="T")
    C = te.compute(A.shape, lambda *i: T(*i), name="C")
    # Schedule: split the output axis twice, parallelize the outer-inner axis
    # and vectorize the innermost one; pragmas exercise the parallel codegen.
    s = te.create_schedule(C.op)
    xo, xi = s[C].split(C.op.axis[0], factor=4)
    xo1, xo2 = s[C].split(xo, factor=13)
    s[C].parallel(xo2)
    s[C].pragma(xo1, "parallel_launch_point")
    s[C].pragma(xo2, "parallel_stride_pattern")
    s[C].pragma(xo2, "parallel_barrier_when_finish")
    s[C].vectorize(xi)
    def check_llvm():
        # Specifically allow offset to test codepath when offset is available
        Ab = tvm.tir.decl_buffer(
            A.shape, A.dtype, elem_offset=te.size_var("Aoffset"), offset_factor=8, name="A"
        )
        binds = {A: Ab}
        # BUILD and invoke the kernel.
        f = tvm.build(s, [A, B, C], "llvm", binds=binds)
        dev = tvm.cpu(0)
        # launch the kernel.
        n = nn
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
        c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
        f(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
    check_llvm()
| 2,495 | 35.705882 | 91 | py |
tvm | tvm-main/tests/python/all-platform-minimal-test/test_runtime_packed_func.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test packed function FFI."""
import tvm
from tvm import te
import tvm.testing
import numpy as np
def test_get_global():
    """Register a Python callback and fetch it back from the global table."""
    expected_args = (10, 10.0, "hello")

    # Register into the global function table under its own name.
    @tvm.register_func
    def my_packed_func(*args):
        assert tuple(args) == expected_args
        return 10

    # Look the function up again; the round trip must preserve behavior.
    fetched = tvm.get_global_func("my_packed_func")
    assert isinstance(fetched, tvm.runtime.PackedFunc)
    assert fetched(*expected_args) == 10
def test_get_callback_with_node():
    """TVM node objects can be passed through a registered callback."""
    node_val = tvm.runtime.convert(10)

    def passthrough(y):
        # The converted callback argument has its own handle, distinct
        # from the original object's handle.
        assert y.handle != node_val.handle
        return y

    passthrough_packed = tvm.runtime.convert(passthrough)

    # Register a callback that forwards the node through the supplied function.
    @tvm.register_func
    def my_callback_with_node(y, f):
        assert y == node_val
        return f(y)

    # Retrieve the callback from the global table and invoke it.
    fetched = tvm.get_global_func("my_callback_with_node")
    assert isinstance(fetched, tvm.runtime.PackedFunc)
    assert fetched(node_val, passthrough_packed).value == 10
def test_return_func():
    """A packed function may return another packed function (a closure)."""

    def make_adder(y):
        def add(x):
            return tvm.runtime.convert(x + y)

        return add

    adder_factory = tvm.runtime.convert(make_adder)
    add_ten = adder_factory(10)
    assert add_ten(11).value == 21
def test_convert():
    """Converting a Python callable yields a PackedFunc."""
    expected_args = (10, 10.0, "hello", 10)

    def check_args(*args):
        assert tuple(args) == expected_args

    packed = tvm.runtime.convert(check_args)
    assert isinstance(packed, tvm.runtime.PackedFunc)
def test_byte_array():
    """bytearray arguments survive the FFI round trip intact."""
    payload = bytearray("hello", encoding="ascii")

    def check(ss):
        assert ss == payload

    tvm.runtime.convert(check)(payload)
def test_empty_array():
    """An empty tuple converts to an empty array across the FFI boundary."""

    def check(arr):
        assert tuple(arr) == ()

    empty = tvm.runtime.convert(())
    tvm.runtime.convert(check)(empty)
def test_device():
    """Device objects compare equal after passing through helper functions."""

    def swap_device(dev):
        assert tvm.cuda(7) == dev
        return tvm.cpu(0)

    returned = swap_device(tvm.cuda(7))
    assert returned == tvm.cpu(0)

    # Round-trip a device through the testing echo helper.
    dev = tvm.opencl(10)
    dev = tvm.testing.device_test(dev, dev.device_type, dev.device_id)
    assert dev == tvm.opencl(10)
def test_rvalue_ref():
    """Check that `_move()` transfers ownership across the FFI boundary.

    After `obj._move()` is passed to a packed function, the Python-side
    handle must be nulled out (`handle.value is None`), while an object
    passed normally beforehand keeps a live handle.
    """

    def callback(x, expected_count):
        # The callee observes exactly the use count the caller expects.
        assert expected_count == tvm.testing.object_use_count(x)
        return x

    f = tvm.runtime.convert(callback)

    def check0():
        x = tvm.tir.Var("x", "int32")
        assert tvm.testing.object_use_count(x) == 1
        f(x, 2)
        y = f(x._move(), 1)
        # Fixed idiom: compare to None with `is`, not `==` (PEP 8 E711).
        assert x.handle.value is None

    def check1():
        x = tvm.tir.Var("x", "int32")
        assert tvm.testing.object_use_count(x) == 1
        y = f(x, 2)
        z = f(x._move(), 2)
        # The moved-from handle is cleared ...
        assert x.handle.value is None
        # ... but the copy taken before the move is still alive.
        assert y.handle.value is not None

    check0()
    check1()
def test_numpy_scalar():
    """The largest int64 value survives an FFI echo round trip."""
    max_int64 = (1 << 63) - 1
    assert tvm.testing.echo(np.int64(max_int64)) == max_int64
def test_ndarray_args():
    """NDArray arguments are passed by reference, not copied or viewed."""

    def check(arr):
        assert not arr.is_view
        # One reference held by the callee, one by the caller.
        assert tvm.testing.object_use_count(arr) == 2

    packed_check = tvm.runtime.convert(check)
    nd = tvm.nd.array([1, 2, 3])
    packed_check(nd)
    assert tvm.testing.object_use_count(nd) == 1
def test_dict_function_value_type():
    """Functions stored as dict values are converted to packed functions."""
    from tvm import tir  # pylint: disable=import-outside-toplevel

    converted = tvm.runtime.convert({"add": lambda a, b: a + b})
    add_fn = converted["add"]
    lhs = tir.Var("a", "float32")
    rhs = tir.Var("b", "float32")
    tvm.ir.assert_structural_equal(add_fn(lhs, rhs), tir.Add(lhs, rhs))
if __name__ == "__main__":
    # Run each test directly when invoked as a script (without pytest).
    test_ndarray_args()
    test_numpy_scalar()
    test_rvalue_ref()
    test_empty_array()
    test_get_global()
    test_get_callback_with_node()
    test_convert()
    test_return_func()
    test_byte_array()
    test_device()
    test_dict_function_value_type()
| 4,518 | 24.105556 | 66 | py |
tvm | tvm-main/tests/python/all-platform-minimal-test/test_runtime_ndarray.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Basic runtime enablement test."""
import tvm
from tvm import te
import numpy as np
import tvm.testing
@tvm.testing.uses_gpu
def test_nd_create():
    """Round-trip numpy arrays through tvm.nd.array on every enabled target."""
    dtypes = ("uint8", "int8", "uint16", "int16", "uint32", "int32", "float32")
    for target, dev in tvm.testing.enabled_targets():
        for dtype in dtypes:
            np_data = np.array(np.random.randint(0, 10, size=(3, 4)), dtype=dtype)
            nd_arr = tvm.nd.array(np_data, device=dev)
            nd_copy = nd_arr.copyto(dev)
            assert nd_arr.dtype == np_data.dtype
            assert nd_arr.shape == np_data.shape
            assert isinstance(nd_arr, tvm.nd.NDArray)
            np.testing.assert_equal(np_data, nd_arr.numpy())
            np.testing.assert_equal(np_data, nd_copy.numpy())
            # Not required for correctness; just exercises the sync path.
            dev.sync()
def test_fp16_conversion():
    """Cast between float32 and float16 through a compiled TE kernel."""
    num_elems = 100
    for src_dtype, dst_dtype in [("float32", "float16"), ("float16", "float32")]:
        src = te.placeholder((num_elems,), dtype=src_dtype)
        dst = te.compute((num_elems,), lambda i: src[i].astype(dst_dtype))
        sched = te.create_schedule([dst.op])
        func = tvm.build(sched, [src, dst], "llvm")

        x_tvm = tvm.nd.array(100 * np.random.randn(num_elems).astype(src_dtype) - 50)
        y_tvm = tvm.nd.array(100 * np.random.randn(num_elems).astype(dst_dtype) - 50)
        func(x_tvm, y_tvm)

        # The kernel's output must match numpy's own cast.
        tvm.testing.assert_allclose(x_tvm.numpy().astype(dst_dtype), y_tvm.numpy())
def test_dtype():
    """A handle DataType must report the HANDLE type code."""
    handle_dtype = tvm.DataType("handle")
    assert handle_dtype.type_code == tvm.DataTypeCode.HANDLE
if __name__ == "__main__":
    # Run each test directly when invoked as a script (without pytest).
    test_nd_create()
    test_fp16_conversion()
    test_dtype()
| 2,343 | 31.555556 | 88 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_lower_device_storage_access_info.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
@tvm.register_func("tvm.info.mem.global.test_with_head_address")
def mem_info_with_head_address():
    """Memory info for a test scope whose head address is CPU-visible."""
    head = tvm.tir.call_extern("handle", "dummy_head_address")
    return tvm.ir.make_node(
        "MemoryInfo",
        unit_bits=8,
        max_simd_bits=32,
        max_num_bits=128,
        head_address=head,
    )
@tvm.register_func("tvm.info.mem.global.test_without_head_address")
def mem_info_without_head_address():
    """Memory info for a test scope the CPU cannot address directly."""
    info_fields = dict(
        unit_bits=8,
        max_simd_bits=32,
        max_num_bits=128,
        head_address=None,
    )
    return tvm.ir.make_node("MemoryInfo", **info_fields)
class BaseCompare(tvm.testing.CompareBeforeAfter):
    """Shared fixture: run LowerDeviceStorageAccessInfo over `before`
    and compare the result against `expected` in each subclass."""

    transform = tvm.tir.transform.LowerDeviceStorageAccessInfo()
class TestLowerCPUAccessibleScope(BaseCompare):
    """Allocate of CPU-visible buffers are replaced by LetStmt

    For scopes that are accessible by the CPU (e.g. VTCM on hexagon),
    the head address specifies how it should be accessed, and is used
    to replace the AllocateNode.
    """

    def before():
        ptr = T.allocate([16], "float32", scope="global.test_with_head_address")
        T.evaluate(ptr)

    def expected():
        # The allocation becomes a binding to the scope's head address.
        ptr: T.handle("float32", "global.test_with_head_address") = T.call_extern( # noqa: F722
            "handle", "dummy_head_address"
        )
        T.evaluate(ptr)
class TestLowerCPUAccessibleScopeWithDeclBuffer(BaseCompare):
    """Like TestLowerCPUAccessibleScope, but with a DeclBuffer.

    When the Allocate is updated, the DeclBuffer should not contain a
    dangling reference.
    """

    def before():
        buf = T.decl_buffer(16, "float32", scope="global.test_with_head_address")
        T.evaluate(buf.data)

    def expected():
        # The DeclBuffer is re-pointed at the head-address binding.
        ptr: T.handle("float32", "global.test_with_head_address") = T.call_extern( # noqa: F722
            "handle", "dummy_head_address"
        )
        buf = T.decl_buffer(16, "float32", scope="global.test_with_head_address", data=ptr)
        T.evaluate(ptr)
class TestLowerCPUInaccessibleScope(BaseCompare):
    """Allocate of CPU-visible buffers are replaced by LetStmt

    For scopes that are inaccessible by the CPU (e.g. Texture memory
    on GPU), the allocate is removed.  All CPU-side references to the
    buffer should have been lowered by this point.
    """

    def before():
        # `ptr` is deliberately unused: the pass should drop the allocation.
        ptr = T.allocate([16], "float32", scope="global.test_without_head_address")
        T.evaluate(0)

    def expected():
        T.evaluate(0)
class TestLowerCPUInaccessibleScopeWithDeclBuffer(BaseCompare):
    """Like TestLowerCPUInaccessibleScope, but with a DeclBuffer

    When the Allocate is removed, the DeclBuffer should not contain a
    dangling reference.
    """

    def before():
        # Both the buffer declaration and its allocation should be dropped.
        buf = T.decl_buffer(16, "float32", scope="global.test_without_head_address")
        T.evaluate(0)

    def expected():
        T.evaluate(0)
if __name__ == "__main__":
    # Allow running this file directly without invoking pytest explicitly.
    tvm.testing.main()
| 3,715 | 30.226891 | 96 | py |
tvm | tvm-main/tests/python/unittest/test_auto_scheduler_compute_dag.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test ComputeDAG (replay, infer bound)"""
import json
import pickle
import tvm
from tvm import topi
from tvm import auto_scheduler, te
from tvm.testing.auto_scheduler import (
get_tiled_matmul,
invalid_compute_definition,
matmul_auto_scheduler_test,
parallel_matmul_auto_scheduler_test,
)
def test_apply_steps():
    """Replaying recorded transform steps must yield a lowerable schedule."""
    dag, state = get_tiled_matmul()
    dag.print_python_code_from_state(state)
    schedule, tensors = dag.apply_steps_from_state(state)
    tvm.lower(schedule, tensors, simple_mode=True)
def test_infer_bound():
    """Bound inference over a recorded state must complete without error."""
    dag, state = get_tiled_matmul()
    state = dag.infer_bound_from_state(state)
def test_estimate_flop():
    """Check FLOP estimation for matmul DAGs with various trailing ops."""
    N = 512
    A, B, C = matmul_auto_scheduler_test(N, N, N)
    dag = auto_scheduler.ComputeDAG([A, B, C])
    # A dense matmul costs 2*N^3 FLOPs (one multiply + one add per term).
    assert abs(dag.flop_ct - 2 * N**3) < 0.5
    D = topi.nn.relu(C)
    dag = auto_scheduler.ComputeDAG([A, B, D])
    # relu adds one operation per output element: N*N more.
    assert abs(dag.flop_ct - (2 * N**3 + N * N)) < 0.5
    # should not count the comparison operations in padding
    E = topi.nn.pad(C, [1, 1])
    dag = auto_scheduler.ComputeDAG([A, B, E])
    assert abs(dag.flop_ct - 2 * N**3) < 0.5
    # An explicit "FLOP" attr overrides the estimate for that stage.
    F = te.compute((N, N), lambda i, j: E[i, j], name="F", attrs={"FLOP": 1234})
    dag = auto_scheduler.ComputeDAG([A, B, F])
    assert abs(dag.flop_ct - (2 * N**3 + 1234)) < 0.5
    # if_then_else counts as one operation per output element.
    A = te.placeholder((N, N), dtype="float32", name="A")
    F = te.compute((N, N), lambda i, j: te.if_then_else(A[i, j] > 0, A[i, j], 0))
    dag = auto_scheduler.ComputeDAG([A, F])
    assert abs(dag.flop_ct - N**2) < 0.5
def test_stage_order():
    """Test if the stage order is preserved when recovering a DAG."""
    N = 512
    A, B, C, D, E = parallel_matmul_auto_scheduler_test(N)
    # Build a TE schedule with cache_write/cache_read stages.
    sch = te.create_schedule([D.op, E.op])
    (D_local,) = sch.cache_write([D], "local")
    (E_local,) = sch.cache_write([E], "local")
    sch.cache_read(A, "shared", [D_local])
    sch.cache_read(B, "shared", [D_local])
    sch.cache_read(A, "shared", [E_local])
    sch.cache_read(C, "shared", [E_local])
    dag = auto_scheduler.ComputeDAG(sch)
    stage_ops_1 = dag.get_init_state().stage_ops
    # 3 placeholder, 4 x.shared, 2 {D,E}.local, 2 {D,E} compute
    assert len(stage_ops_1) == 11
    # Cache read stage should follow the source stage
    for idx, op in enumerate(stage_ops_1):
        if op.name == "A":
            assert (
                stage_ops_1[idx + 1].name == "A.d.shared"
                and stage_ops_1[idx + 2].name == "A.shared"
            )
        elif op.name in ["B", "C"]:
            assert stage_ops_1[idx + 1].name == "%s.shared" % op.name
    # Apply the same schedule to Ansor state and it should have the same stage order
    dag = auto_scheduler.ComputeDAG([A, B, C, D, E])
    state = dag.get_init_state()
    D_local = state.cache_write(D, "local")
    E_local = state.cache_write(E, "local")
    state.cache_read(A, "shared", [D_local])
    state.cache_read(B, "shared", [D_local])
    state.cache_read(A, "shared", [E_local])
    state.cache_read(C, "shared", [E_local])
    stage_ops_2 = state.stage_ops
    assert len(stage_ops_1) == len(stage_ops_2)
    # Cache read stage should follow the source stage
    for op1, op2 in zip(stage_ops_1, stage_ops_2):
        assert op1.name == op2.name
    # Serialize and deserialize the ComputeDAG constructed by a list of tensor ops.
    loaded_dag = pickle.loads(pickle.dumps(dag))
    assert str(loaded_dag.get_init_state()) == str(dag.get_init_state())
    assert len(loaded_dag.get_init_state().stage_ops) == len(dag.get_init_state().stage_ops)
    # Serialize and deserialize the search task. Note that we intentionally skip hardware_params
    # to test if the default one is serialized along with other attributes as well.
    task = auto_scheduler.SearchTask(
        compute_dag=dag, workload_key=json.dumps(("test-key",)), target=tvm.target.Target("llvm")
    )
    task2 = pickle.loads(pickle.dumps(task))
    assert '["test-key"]' in auto_scheduler.workload_registry.WORKLOAD_FUNC_REGISTRY
    assert str(task.compute_dag.get_init_state()) == str(task2.compute_dag.get_init_state())
    assert len(task.compute_dag.get_init_state().stage_ops) == len(
        task2.compute_dag.get_init_state().stage_ops
    )
    assert task.workload_key == task2.workload_key
    assert str(task.target) == str(task2.target)
    assert task.hardware_params.num_cores == task2.hardware_params.num_cores
    assert task.hardware_params.vector_unit_bytes == task2.hardware_params.vector_unit_bytes
    assert task.hardware_params.cache_line_bytes == task2.hardware_params.cache_line_bytes
def test_invalid_compute_dag():
    """Constructing a ComputeDAG from an invalid definition must raise."""
    try:
        A, B = invalid_compute_definition()
        auto_scheduler.ComputeDAG([A, B])
    except tvm.TVMError:
        pass
    else:
        raise AssertionError("expected tvm.TVMError for an invalid compute definition")
if __name__ == "__main__":
    # Run each test directly when invoked as a script (without pytest).
    test_apply_steps()
    test_infer_bound()
    test_estimate_flop()
    test_stage_order()
    test_invalid_compute_dag()
| 5,706 | 35.120253 | 97 | py |
tvm | tvm-main/tests/python/unittest/test_tvmscript_ir_builder_irmodule.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unittests for tvm.script.ir_builder.ir"""
import pytest
import tvm.testing
from tvm.script.ir_builder import IRBuilder
from tvm.script.ir_builder import ir as I
from tvm import ir
from tvm.ir.base import assert_structural_equal
def test_ir_builder_irmodule():
    """An empty I.ir_module() frame must build into an empty IRModule."""
    with IRBuilder() as builder:
        with I.ir_module():
            pass
    # the ir_module generated by IRBuilder
    built_module = builder.get()
    # the expected (empty) module
    reference_module = ir.IRModule(None, None)
    assert_structural_equal(built_module, reference_module, map_free_vars=True)
if __name__ == "__main__":
    # Allow running this file directly without invoking pytest explicitly.
    tvm.testing.main()
| 1,449 | 33.52381 | 85 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_lift_thread_binding.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import tir
from tvm.script import tir as T
def test_lift_tx_beyond_local():
    """LiftThreadBinding must hoist the threadIdx.x loop out of the block body.

    `before` binds threadIdx.x inside nested scopes; `expected` has the same
    computation with the thread binding lifted next to blockIdx.x.
    """
    # fmt: off
    @T.prim_func
    def before(a: T.handle, b: T.handle, c: T.handle):
        n = T.int32()
        A = T.match_buffer(a, (32, 1, 128))
        B = T.match_buffer(b, (32, n, 128))
        C = T.match_buffer(c, (32, 1, n))
        for ax0_ax1_fused in T.thread_binding(n * 32, thread="blockIdx.x"):
            with T.block(""):
                T.reads(A[ax0_ax1_fused // n, 0, 0:256], B[ax0_ax1_fused // n, ax0_ax1_fused % n, 0:256])
                T.writes(C[ax0_ax1_fused // n, 0, ax0_ax1_fused % n])
                D_local = T.alloc_buffer((32, 1, n), scope="local")
                D_rf_local = T.alloc_buffer((256, 32, 1, n), scope="local")
                for ax2_fused_1 in T.thread_binding(256, thread="threadIdx.x"):
                    with T.block("NT_matmul_rf_init"):
                        T.reads()
                        T.writes(D_rf_local[ax2_fused_1, ax0_ax1_fused // n, 0, ax0_ax1_fused % n])
                        D_rf_local[ax2_fused_1, ax0_ax1_fused // n, 0, ax0_ax1_fused % n] = T.float32(0)
                    for ax2_fused_0 in range(1):
                        with T.block("NT_matmul_rf_update"):
                            T.where(ax2_fused_0 * 256 + ax2_fused_1 < 128)
                            T.reads(D_rf_local[ax2_fused_1, ax0_ax1_fused // n, 0, ax0_ax1_fused % n], A[ax0_ax1_fused // n, 0, ax2_fused_0 * 256 + ax2_fused_1], B[ax0_ax1_fused // n, ax0_ax1_fused % n, ax2_fused_0 * 256 + ax2_fused_1])
                            T.writes(D_rf_local[ax2_fused_1, ax0_ax1_fused // n, 0, ax0_ax1_fused % n])
                            D_rf_local[ax2_fused_1, ax0_ax1_fused // n, 0, ax0_ax1_fused % n] = D_rf_local[ax2_fused_1, ax0_ax1_fused // n, 0, ax0_ax1_fused % n] + A[ax0_ax1_fused // n, 0, ax2_fused_0 * 256 + ax2_fused_1] * B[ax0_ax1_fused // n, ax0_ax1_fused % n, ax2_fused_0 * 256 + ax2_fused_1]
                for ax1_ax2_fused in range(1):
                    for ax0_fused in T.thread_binding(256, thread="threadIdx.x"):
                        with T.block(""):
                            T.reads(D_rf_local[ax0_fused, ax0_ax1_fused // n, 0, ax0_ax1_fused % n])
                            T.writes(D_local[ax0_ax1_fused // n, 0, ax0_ax1_fused % n])
                            cross_thread_D_local = T.alloc_buffer((1,), strides=(1,), scope="local")
                            in_thread_D_local = T.alloc_buffer((1,), strides=(1,), scope="local")
                            with T.block("NT_matmul_in_thread_init"):
                                T.reads()
                                T.writes(in_thread_D_local[0])
                                in_thread_D_local[0] = T.float32(0)
                            with T.block("NT_matmul_in_thread"):
                                T.where(0 <= ax0_ax1_fused // n and ax0_ax1_fused // n < 32 and 0 <= ax0_ax1_fused % n and ax0_ax1_fused % n < n)
                                T.reads(D_rf_local[ax0_fused, ax0_ax1_fused // n, 0, ax0_ax1_fused % n])
                                T.writes(in_thread_D_local[0])
                                in_thread_D_local[0] = in_thread_D_local[0] + D_rf_local[ax0_fused, ax0_ax1_fused // n, 0, ax0_ax1_fused % n]
                            with T.block("NT_matmul_cross_thread"):
                                T.reads(in_thread_D_local[0])
                                T.writes(cross_thread_D_local[0])
                                T.attr(T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]), "reduce_scope", T.reinterpret("handle", T.uint64(0)))
                                T.tvm_thread_allreduce(T.uint32(1), in_thread_D_local[0], T.bool(True), cross_thread_D_local[0], ax0_fused)
                            with T.block("NT_matmul_write_back"):
                                T.where(ax0_fused == 0)
                                T.reads(cross_thread_D_local[0])
                                T.writes(D_local[ax0_ax1_fused // n, 0, ax0_ax1_fused % n])
                                D_local[ax0_ax1_fused // n, 0, ax0_ax1_fused % n] = cross_thread_D_local[0]
                        with T.block("T_divide"):
                            T.where(0 <= ax0_ax1_fused // n and ax0_ax1_fused // n < 32 and 0 <= ax0_ax1_fused % n and ax0_ax1_fused % n < n)
                            T.reads(D_local[ax0_ax1_fused // n, 0, ax0_ax1_fused % n])
                            T.writes(C[ax0_ax1_fused // n, 0, ax0_ax1_fused % n])
                            C[ax0_ax1_fused // n, 0, ax0_ax1_fused % n] = D_local[ax0_ax1_fused // n, 0, ax0_ax1_fused % n] * T.float32(0.088397790055248615)
    @T.prim_func
    def expected(A: T.Buffer((32, 1, 128), "float32"), b: T.handle, c: T.handle):
        n = T.int32()
        B = T.match_buffer(b, (32, n, 128))
        C = T.match_buffer(c, (32, 1, n))
        # with T.block("root"):
        for blockIdx_x in T.thread_binding(n * 32, thread="blockIdx.x"):
            for threadIdx_x in T.thread_binding(256, thread="threadIdx.x"):
                with T.block(""):
                    T.reads(A[blockIdx_x // n, 0, 0:256], B[blockIdx_x // n, blockIdx_x % n, 0:256])
                    T.writes(C[blockIdx_x // n, 0, blockIdx_x % n])
                    D_local = T.alloc_buffer((32, 1, n), scope="local")
                    D_rf_local = T.alloc_buffer((256, 32, 1, n), scope="local")
                    with T.block("NT_matmul_rf_init"):
                        T.reads()
                        T.writes(D_rf_local[threadIdx_x, blockIdx_x // n, 0, blockIdx_x % n])
                        D_rf_local[threadIdx_x, blockIdx_x // n, 0, blockIdx_x % n] = T.float32(0)
                    for ax2_fused_0 in range(1):
                        with T.block("NT_matmul_rf_update"):
                            T.where(ax2_fused_0 * 256 + threadIdx_x < 128)
                            T.reads(D_rf_local[threadIdx_x, blockIdx_x // n, 0, blockIdx_x % n], A[blockIdx_x // n, 0, ax2_fused_0 * 256 + threadIdx_x], B[blockIdx_x // n, blockIdx_x % n, ax2_fused_0 * 256 + threadIdx_x])
                            T.writes(D_rf_local[threadIdx_x, blockIdx_x // n, 0, blockIdx_x % n])
                            D_rf_local[threadIdx_x, blockIdx_x // n, 0, blockIdx_x % n] = D_rf_local[threadIdx_x, blockIdx_x // n, 0, blockIdx_x % n] + A[blockIdx_x // n, 0, ax2_fused_0 * 256 + threadIdx_x] * B[blockIdx_x // n, blockIdx_x % n, ax2_fused_0 * 256 + threadIdx_x]
                    for ax1_ax2_fused in range(1):
                        with T.block(""):
                            T.reads(D_rf_local[threadIdx_x, blockIdx_x // n, 0, blockIdx_x % n])
                            T.writes(D_local[blockIdx_x // n, 0, blockIdx_x % n])
                            cross_thread_D_local = T.alloc_buffer((1,), strides=(1,), scope="local")
                            in_thread_D_local = T.alloc_buffer((1,), strides=(1,), scope="local")
                            with T.block("NT_matmul_in_thread_init"):
                                T.reads()
                                T.writes(in_thread_D_local[0])
                                in_thread_D_local[0] = T.float32(0)
                            with T.block("NT_matmul_in_thread"):
                                T.where(0 <= blockIdx_x // n and blockIdx_x // n < 32 and 0 <= blockIdx_x % n and blockIdx_x % n < n)
                                T.reads(D_rf_local[threadIdx_x, blockIdx_x // n, 0, blockIdx_x % n])
                                T.writes(in_thread_D_local[0])
                                in_thread_D_local[0] = in_thread_D_local[0] + D_rf_local[threadIdx_x, blockIdx_x // n, 0, blockIdx_x % n]
                            with T.block("NT_matmul_cross_thread"):
                                T.reads(in_thread_D_local[0])
                                T.writes(cross_thread_D_local[0])
                                T.attr(T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]), "reduce_scope", T.reinterpret("handle", T.uint64(0)))
                                T.tvm_thread_allreduce(T.uint32(1), in_thread_D_local[0], T.bool(True), cross_thread_D_local[0], threadIdx_x)
                            with T.block("NT_matmul_write_back"):
                                T.where(threadIdx_x == 0)
                                T.reads(cross_thread_D_local[0])
                                T.writes(D_local[blockIdx_x // n, 0, blockIdx_x % n])
                                D_local[blockIdx_x // n, 0, blockIdx_x % n] = cross_thread_D_local[0]
                        with T.block("T_divide"):
                            T.where(0 <= blockIdx_x // n and blockIdx_x // n < 32 and 0 <= blockIdx_x % n and blockIdx_x % n < n)
                            T.reads(D_local[blockIdx_x // n, 0, blockIdx_x % n])
                            T.writes(C[blockIdx_x // n, 0, blockIdx_x % n])
                            C[blockIdx_x // n, 0, blockIdx_x % n] = D_local[blockIdx_x // n, 0, blockIdx_x % n] * T.float32(0.088397790055248615)
    # fmt: on
    # Run the pass and compare against the manually lifted version.
    mod = tvm.IRModule({"main": before})
    after = tir.transform.LiftThreadBinding()(mod)
    tvm.ir.assert_structural_equal(expected, after["main"])
if __name__ == "__main__":
    # Allow running this file directly without pytest.
    test_lift_tx_beyond_local()
| 9,997 | 70.414286 | 297 | py |
tvm | tvm-main/tests/python/unittest/test_tir_index_map.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm.ir import assert_structural_equal
from tvm.runtime import const
from tvm.tir import IndexMap, IntImm, floordiv, floormod
def assert_equal_index_map(map1: IndexMap, map2: IndexMap) -> None:
    """Assert two index maps are provably equivalent, component by component."""
    mapped = map1.map_indices(map2.initial_indices)
    reference = map2.final_indices
    assert len(mapped) == len(reference)

    analyzer = tvm.arith.Analyzer()
    for lhs, rhs in zip(mapped, reference):
        assert analyzer.can_prove_equal(lhs, rhs)
def test_index_mapping():
    """Flat indices map to (i // 4, i % 4) pairs."""
    index_map = IndexMap.from_func(lambda i: [i // 4, i % 4], index_dtype="int32")
    for inputs, expected in [
        ([0], [0, 0]),
        ([3], [0, 3]),
        ([4], [1, 0]),
        ([42], [10, 2]),
    ]:
        assert_structural_equal(index_map.map_indices(inputs), expected)
    # An int64 input must yield int64 outputs.
    assert_structural_equal(
        index_map.map_indices([const(42, "int64")]), [const(10, "int64"), const(2, "int64")]
    )
def test_shape_mapping():
    """Shapes map into the tiled layout, rounding the outer extent up."""
    index_map = IndexMap.from_func(lambda i: [i // 4, i % 4], index_dtype="int32")
    for pre_shape, post_shape in [([4], [1, 4]), ([16], [4, 4]), ([14], [4, 4])]:
        assert_structural_equal(index_map.map_shape(pre_shape), post_shape)
    # int64 shapes keep their dtype through the mapping.
    assert_structural_equal(
        index_map.map_shape([const(16, "int64")]), [const(4, "int64"), const(4, "int64")]
    )
    assert_structural_equal(
        index_map.map_shape([const(14, "int64")]), [const(4, "int64"), const(4, "int64")]
    )
def test_inverse():
    """The inverse of a bijective tiling map is the flattening map."""
    forward = IndexMap.from_func(lambda i: [i // 4, i % 4])
    flatten = IndexMap.from_func(lambda i, j: [4 * i + j])
    assert forward.inverse([16]).is_equivalent_to(flatten)
def test_nonbijective_inverse_gives_error():
    """Inverting over a shape the map does not cover bijectively must fail."""
    index_map = IndexMap.from_func(lambda i: [i // 4, i % 4])
    # 14 is not a multiple of 4, so the map is not bijective on [0, 14).
    with pytest.raises(tvm.TVMError):
        index_map.inverse([14])
# Symbolic extent used by the "dynamic_size" case below.
dynamic_N = tvm.tir.Var("N", "int32")

# Parameterized fixtures for test_nonsurjective_inverse.  Each case supplies:
#   forward:    the index map under test
#   inverse:    the expected non-surjective inverse map
#   pre_shape:  the buffer shape the map is applied to
#   post_shape: the expected transformed shape
#   padding:    predicate over the inverse's indices marking padded elements
padding_test_case = tvm.testing.parameter(
    by_dict={
        "no_padding": dict(
            forward=lambda i: [i // 4, i % 4],
            inverse=lambda i, j: [4 * i + j],
            pre_shape=[16],
            post_shape=[4, 4],
            padding=lambda i, j: tvm.runtime.convert(False),
        ),
        "right_padding": dict(
            forward=lambda i: [i // 4, i % 4],
            inverse=lambda i, j: [4 * i + j],
            pre_shape=[15],
            post_shape=[4, 4],
            padding=lambda i, j: tvm.tir.And(i == 3, tvm.runtime.convert(3) == j),
        ),
        "left_padding": dict(
            forward=lambda i: [(i + 1) // 4, (i + 1) % 4],
            inverse=lambda i, j: [4 * i + j - 1],
            pre_shape=[15],
            post_shape=[4, 4],
            padding=lambda i, j: tvm.tir.And(i == 0, j < 1),
        ),
        "left_and_right_padding": dict(
            forward=lambda i: [(i + 1) // 4, (i + 1) % 4],
            inverse=lambda i, j: [4 * i + j - 1],
            pre_shape=[14],
            post_shape=[4, 4],
            padding=lambda i, j: tvm.tir.Or(
                tvm.tir.And(i == 0, j < 1),
                tvm.tir.And(i == 3, tvm.runtime.convert(3) == j),
            ),
        ),
        "dynamic_size": dict(
            forward=lambda i: [i // 4, i % 4],
            inverse=lambda i, j: [4 * i + j],
            pre_shape=[dynamic_N],
            post_shape=[(dynamic_N - dynamic_N % (-4)) // 4, 4],
            padding=lambda i, j: tvm.tir.And(
                dynamic_N % (-4) != 0,
                tvm.tir.And(i == dynamic_N // 4, j >= dynamic_N % 4),
            ),
        ),
        "2d_padding": dict(
            forward=lambda i, j: [(i + 1) // 4, (j + 5) // 8, (i + 1) % 4, (j + 5) % 8],
            inverse=lambda i_outer, j_outer, i_inner, j_inner: [
                4 * i_outer + i_inner - 1,
                8 * j_outer + j_inner - 5,
            ],
            pre_shape=[14, 31],
            post_shape=[
                4, # ceildiv(left_pad + i.extent, 4) = ceildiv(1 + 14, 4) = 4
                5, # ceildiv(left_pad + j.extent, 8) = ceildiv(5 + 31, 8) = 5
                4, # Range of iter%4
                8, # Range of iter%8
            ],
            padding=lambda i_outer, j_outer, i_inner, j_inner: tvm.tir.Or(
                tvm.tir.Or(
                    tvm.tir.And(i_outer == 0, i_inner < 1),
                    tvm.tir.And(i_outer == 3, tvm.runtime.convert(3) == i_inner),
                ),
                tvm.tir.Or(
                    tvm.tir.And(j_outer == 0, j_inner < 5),
                    tvm.tir.And(j_outer == 4, j_inner >= 4),
                ),
            ),
        ),
        "multiple_right_padding": dict(
            forward=lambda i: [i // 32, (i // 4) % 8, i % 4],
            inverse=lambda i, j, k: [32 * i + 4 * j + k],
            pre_shape=[116],
            post_shape=[4, 8, 4],
            padding=lambda i, j, k: tvm.tir.And(i == 3, 4 * j + k >= 20),
        ),
        "multiple_right_padding_transpose": dict(
            forward=lambda i: [(i // 4) % 8, i // 32, i % 4],
            inverse=lambda j, i, k: [32 * i + 4 * j + k],
            pre_shape=[116],
            post_shape=[8, 4, 4],
            padding=lambda j, i, k: tvm.tir.And(i == 3, 4 * j + k >= 20),
        ),
        "multiple_left_padding": dict(
            forward=lambda i: [(i + 5) // 32, ((i + 5) // 4) % 8, (i + 5) % 4],
            inverse=lambda i, j, k: [32 * i + 4 * j + k - 5],
            pre_shape=[123],
            post_shape=[4, 8, 4],
            padding=lambda i, j, k: tvm.tir.And(i == 0, j * 4 + k < 5),
        ),
        "multiple_left_padding_with_transpose": dict(
            forward=lambda i: [((i + 5) // 4) % 8, (i + 5) // 32, (i + 5) % 4],
            inverse=lambda j, i, k: [32 * i + 4 * j + k - 5],
            pre_shape=[123],
            post_shape=[8, 4, 4],
            padding=lambda j, i, k: tvm.tir.And(i == 0, j * 4 + k < 5),
        ),
        "outer_loop_extent_one": dict(
            forward=lambda i: [i // 4, i % 4],
            inverse=lambda i, j: [i * 4 + j],
            pre_shape=[3],
            post_shape=[1, 4],
            padding=lambda i, j: tvm.runtime.convert(3) == j,
        ),
    }
)
def test_nonsurjective_inverse(padding_test_case):
    """Check non_surjective_inverse against the hand-written inverse and padding predicate."""
    pre_shape = padding_test_case["pre_shape"]
    forward_map = IndexMap.from_func(padding_test_case["forward"], index_dtype="int32")
    actual_inverse, actual_predicate = forward_map.non_surjective_inverse(pre_shape)

    # The computed inverse transformation must match the reference one.
    assert actual_inverse.is_equivalent_to(IndexMap.from_func(padding_test_case["inverse"]))

    # The transformed (padded) shape must match as well.
    post_shape = forward_map.map_shape(pre_shape)
    tvm.ir.assert_structural_equal(post_shape, padding_test_case["post_shape"])

    # Can't use analyzer.can_prove_equal, because it can't simplify
    # expressions like `(4*i+j >= 14) - (4*i+j >= 14)`.
    analyzer = tvm.arith.Analyzer()
    reference_predicate = analyzer.simplify(
        padding_test_case["padding"](*actual_inverse.initial_indices)
    )
    tvm.ir.assert_structural_equal(
        analyzer.simplify(actual_predicate), reference_predicate
    )
def test_index_map_inverse_no_iter():
    """Inverting a map that ignores unit-extent axes yields zero for those indices."""

    def forward(i0, i1, i2, i3):
        # i0 and i1 never appear in the result; their extents below are 1.
        return floordiv(i3, 32), floordiv(i2, 2), floormod(i2, 2), floormod(i3, 32)

    def reference_inverse(i0, i1, i2, i3):
        return IntImm("int32", 0), IntImm("int32", 0), i2 + i1 * 2, i3 + i0 * 32

    computed_inverse = IndexMap.from_func(forward).inverse([1, 1, 64, 64])
    assert IndexMap.from_func(reference_inverse).is_equivalent_to(computed_inverse)
def test_map_ndarray():
    """map_ndarray should physically rearrange array contents per the index map."""
    # Case 1: 1D -> 2D tiling of a small integer range.
    tile_map = IndexMap.from_func(lambda i: [i // 4, i % 4])
    src = np.arange(16).astype("int8")
    result = tile_map.map_ndarray(tvm.nd.array(src)).numpy()
    expected = np.zeros(result.shape).astype("int8")
    for idx in range(16):
        expected[idx // 4, idx % 4] = src[idx]
    np.testing.assert_equal(expected, result)

    # Case 2: a pure axis permutation is equivalent to np.transpose.
    perm_map = IndexMap.from_func(lambda i0, i1, i2, i3: (i3, i0, i1, i2))
    src = np.random.randn(10, 10, 10, 10).astype("float16")
    result = perm_map.map_ndarray(tvm.nd.array(src)).numpy()
    np.testing.assert_equal(np.transpose(src, (3, 0, 1, 2)), result)

    # Case 3: mixed split + permute packing (conv2d-weight style layout).
    pack_map = IndexMap.from_func(
        lambda i0, i1, i2, i3: (
            floordiv(i3, 32),
            i0,
            floordiv(i2, 8),
            floordiv(floormod(i3, 32), 16),
            i1,
            floormod(i2, 8),
            floormod(i3, 16),
        )
    )
    kH = kW = 3
    I = 64
    O = 64
    src = np.random.randn(kH, kW, I, O).astype("float32")
    arr = tvm.nd.array(src)
    result = pack_map.map_ndarray(arr).numpy()
    expected = np.zeros(result.shape).astype("float32")
    for i0, i1, i2, i3 in np.ndindex(kH, kW, I, O):
        expected[i3 // 32, i0, i2 // 8, (i3 % 32) // 16, i1, i2 % 8, i3 % 16] = src[
            i0, i1, i2, i3
        ]
    np.testing.assert_equal(expected, result)

    # Applying the inverse map must round-trip back to the original contents.
    inverse_map = pack_map.inverse(src.shape)
    np.testing.assert_equal(inverse_map.map_ndarray(pack_map.map_ndarray(arr)).numpy(), src)
# Allow running this test file directly (outside of pytest collection).
if __name__ == "__main__":
    tvm.testing.main()
# --- dataset row boundary (extraction artifact removed) ---
# source file: tvm-main/tests/python/unittest/test_meta_schedule_space_cuda_winograd.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for MetaSchedule search space on CUDA"""
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
print_sketches,
)
from tvm.meta_schedule.testing.te_workload import create_te_workload
from tvm.script import tir as T
from tvm.target import Target
def _target():
    """Return the CUDA target shared by these tests (sm75, so async trace is disabled)."""
    return Target("nvidia/geforce-rtx-2080")
def _design_space(mod):
    """Generate the CUDA schedule-rule design space (sketches) for ``mod``."""
    space_args = dict(
        kind="cuda",
        mod=mod,
        target=_target(),
        types=ms.ScheduleRule,
    )
    return generate_design_space(**space_args)
def test_cuda_nhwc():
    """Design-space check for Winograd conv2d (NHWC layout) on CUDA.

    The nested ``cuda_nhwc_0`` prim_func is the auto-generated expected sketch:
    its exact TVMScript text (including the large ``T.Select`` transform-matrix
    chains) is the structural-equality oracle, so it must not be edited by hand.
    """
    # fmt: off
    @T.prim_func
    def cuda_nhwc_0(data: T.Buffer((1, 14, 14, 128), "float32"), weight: T.Buffer((6, 6, 128, 128), "float32"), conv2d_winograd: T.Buffer((1, 12, 12, 128), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "layout_free_buffers": [1], "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 16})
            input_tile_local = T.alloc_buffer((6, 6, 9, 128), scope="local")
            data_pack = T.alloc_buffer((6, 6, 9, 128))
            bgemm = T.alloc_buffer((6, 6, 9, 128))
            inverse = T.alloc_buffer((4, 4, 9, 128))
            data_pack_local = T.alloc_buffer((6, 6, 9, 128), scope="local")
            bgemm_local = T.alloc_buffer((6, 6, 9, 128), scope="local")
            data_pack_shared = T.alloc_buffer((6, 6, 9, 128), scope="shared")
            weight_shared = T.alloc_buffer((6, 6, 128, 128), scope="shared")
            for p_0_ci_0_p_1_ci_1_fused_0 in T.thread_binding(2, thread="blockIdx.x"):
                for p_0_ci_0_p_1_ci_1_fused_1 in T.thread_binding(1024, thread="threadIdx.x"):
                    for ax0, ax1, ax2, ax3 in T.grid(6, 6, 1, 1):
                        with T.block("input_tile"):
                            v_eps, v_nu = T.axis.remap("SS", [ax0, ax1])
                            v_p = T.axis.spatial(9, (p_0_ci_0_p_1_ci_1_fused_0 * 1024 + p_0_ci_0_p_1_ci_1_fused_1) // 384 * 3 + (p_0_ci_0_p_1_ci_1_fused_0 * 1024 + p_0_ci_0_p_1_ci_1_fused_1) % 24 // 8 + ax2)
                            v_ci = T.axis.spatial(128, (p_0_ci_0_p_1_ci_1_fused_0 * 1024 + p_0_ci_0_p_1_ci_1_fused_1) % 384 // 24 * 8 + (p_0_ci_0_p_1_ci_1_fused_0 * 1024 + p_0_ci_0_p_1_ci_1_fused_1) % 8 + ax3)
                            T.where(p_0_ci_0_p_1_ci_1_fused_0 * 1024 + p_0_ci_0_p_1_ci_1_fused_1 < 1152)
                            T.reads(data[v_p // 9, v_p % 9 // 3 * 4 + v_eps, v_p % 3 * 4 + v_nu, v_ci])
                            T.writes(input_tile_local[v_eps, v_nu, v_p, v_ci])
                            T.block_attr({"schedule_rule": "None"})
                            input_tile_local[v_eps, v_nu, v_p, v_ci] = T.if_then_else(0 <= v_p % 9 // 3 * 4 + v_eps and v_p % 9 // 3 * 4 + v_eps < 14 and 0 <= v_p % 3 * 4 + v_nu and v_p % 3 * 4 + v_nu < 14, data[v_p // 9, v_p % 9 // 3 * 4 + v_eps, v_p % 3 * 4 + v_nu, v_ci], T.float32(0))
                    for eps in T.unroll(6):
                        for nu in T.unroll(6):
                            for r_a in T.unroll(6):
                                for r_b in T.unroll(6):
                                    with T.block("data_pack"):
                                        v_eps, v_nu = T.axis.remap("SS", [eps, nu])
                                        v_p = T.axis.spatial(9, (p_0_ci_0_p_1_ci_1_fused_0 * 1024 + p_0_ci_0_p_1_ci_1_fused_1) // 384 * 3 + (p_0_ci_0_p_1_ci_1_fused_0 * 1024 + p_0_ci_0_p_1_ci_1_fused_1) % 24 // 8)
                                        v_ci = T.axis.spatial(128, (p_0_ci_0_p_1_ci_1_fused_0 * 1024 + p_0_ci_0_p_1_ci_1_fused_1) % 384 // 24 * 8 + (p_0_ci_0_p_1_ci_1_fused_0 * 1024 + p_0_ci_0_p_1_ci_1_fused_1) % 8)
                                        v_r_a, v_r_b = T.axis.remap("RR", [r_a, r_b])
                                        T.where(p_0_ci_0_p_1_ci_1_fused_0 * 1024 + p_0_ci_0_p_1_ci_1_fused_1 < 1152)
                                        T.reads(input_tile_local[v_r_a, v_r_b, v_p, v_ci])
                                        T.writes(data_pack_local[v_eps, v_nu, v_p, v_ci])
                                        T.block_attr({"schedule_rule": "conv2d_nhwc_winograd_data_pack"})
                                        with T.init():
                                            data_pack_local[v_eps, v_nu, v_p, v_ci] = T.float32(0)
                                        data_pack_local[v_eps, v_nu, v_p, v_ci] = data_pack_local[v_eps, v_nu, v_p, v_ci] + input_tile_local[v_r_a, v_r_b, v_p, v_ci] * T.Select(v_r_a % 6 == 5 and v_eps % 6 == 5, T.float32(1), T.Select(v_r_a % 6 == 5 and v_eps % 6 == 4, T.float32(0), T.Select(v_r_a % 6 == 5 and v_eps % 6 == 3, T.float32(0), T.Select(v_r_a % 6 == 5 and v_eps % 6 == 2, T.float32(0), T.Select(v_r_a % 6 == 5 and v_eps % 6 == 1, T.float32(0), T.Select(v_r_a % 6 == 5 and v_eps % 6 == 0, T.float32(0), T.Select(v_r_a % 6 == 4 and v_eps % 6 == 5, T.float32(1.5), T.Select(v_r_a % 6 == 4 and v_eps % 6 == 4, T.float32(1), T.Select(v_r_a % 6 == 4 and v_eps % 6 == 3, T.float32(1), T.Select(v_r_a % 6 == 4 and v_eps % 6 == 2, T.float32(1), T.Select(v_r_a % 6 == 4 and v_eps % 6 == 1, T.float32(1), T.Select(v_r_a % 6 == 4 and v_eps % 6 == 0, T.float32(1), T.Select(v_r_a % 6 == 3 and v_eps % 6 == 5, T.float32(-2), T.Select(v_r_a % 6 == 3 and v_eps % 6 == 4, T.float32(-0.5), T.Select(v_r_a % 6 == 3 and v_eps % 6 == 3, T.float32(2), T.Select(v_r_a % 6 == 3 and v_eps % 6 == 2, T.float32(2.5), T.Select(v_r_a % 6 == 3 and v_eps % 6 == 1, T.float32(0.5), T.Select(v_r_a % 6 == 3 and v_eps % 6 == 0, T.float32(1.5), T.Select(v_r_a % 6 == 2 and v_eps % 6 == 5, T.float32(-1.5), T.Select(v_r_a % 6 == 2 and v_eps % 6 == 4, T.float32(-1), T.Select(v_r_a % 6 == 2 and v_eps % 6 == 3, T.float32(-1), T.Select(v_r_a % 6 == 2 and v_eps % 6 == 2, T.float32(0.5), T.Select(v_r_a % 6 == 2 and v_eps % 6 == 1, T.float32(-2.5), T.Select(v_r_a % 6 == 2 and v_eps % 6 == 0, T.float32(-2), T.Select(v_r_a % 6 == 1 and v_eps % 6 == 5, T.float32(1), T.Select(v_r_a % 6 == 1 and v_eps % 6 == 4, T.float32(0.5), T.Select(v_r_a % 6 == 1 and v_eps % 6 == 3, T.float32(-2), T.Select(v_r_a % 6 == 1 and v_eps % 6 == 2, T.float32(-1), T.Select(v_r_a % 6 == 1 and v_eps % 6 == 1, T.float32(1), T.Select(v_r_a % 6 == 1 and v_eps % 6 == 0, T.float32(-1.5), T.Select(v_r_a % 6 == 0 and v_eps % 6 == 5, T.float32(0), T.Select(v_r_a % 6 == 0 and v_eps % 6 == 4, 
T.float32(0), T.Select(v_r_a % 6 == 0 and v_eps % 6 == 3, T.float32(0), T.Select(v_r_a % 6 == 0 and v_eps % 6 == 2, T.float32(0), T.Select(v_r_a % 6 == 0 and v_eps % 6 == 1, T.float32(0), T.Select(v_r_a % 6 == 0 and v_eps % 6 == 0, T.float32(1), T.float32(0))))))))))))))))))))))))))))))))))))) * T.Select(v_r_b % 6 == 5 and v_nu % 6 == 5, T.float32(1), T.Select(v_r_b % 6 == 5 and v_nu % 6 == 4, T.float32(0), T.Select(v_r_b % 6 == 5 and v_nu % 6 == 3, T.float32(0), T.Select(v_r_b % 6 == 5 and v_nu % 6 == 2, T.float32(0), T.Select(v_r_b % 6 == 5 and v_nu % 6 == 1, T.float32(0), T.Select(v_r_b % 6 == 5 and v_nu % 6 == 0, T.float32(0), T.Select(v_r_b % 6 == 4 and v_nu % 6 == 5, T.float32(1.5), T.Select(v_r_b % 6 == 4 and v_nu % 6 == 4, T.float32(1), T.Select(v_r_b % 6 == 4 and v_nu % 6 == 3, T.float32(1), T.Select(v_r_b % 6 == 4 and v_nu % 6 == 2, T.float32(1), T.Select(v_r_b % 6 == 4 and v_nu % 6 == 1, T.float32(1), T.Select(v_r_b % 6 == 4 and v_nu % 6 == 0, T.float32(1), T.Select(v_r_b % 6 == 3 and v_nu % 6 == 5, T.float32(-2), T.Select(v_r_b % 6 == 3 and v_nu % 6 == 4, T.float32(-0.5), T.Select(v_r_b % 6 == 3 and v_nu % 6 == 3, T.float32(2), T.Select(v_r_b % 6 == 3 and v_nu % 6 == 2, T.float32(2.5), T.Select(v_r_b % 6 == 3 and v_nu % 6 == 1, T.float32(0.5), T.Select(v_r_b % 6 == 3 and v_nu % 6 == 0, T.float32(1.5), T.Select(v_r_b % 6 == 2 and v_nu % 6 == 5, T.float32(-1.5), T.Select(v_r_b % 6 == 2 and v_nu % 6 == 4, T.float32(-1), T.Select(v_r_b % 6 == 2 and v_nu % 6 == 3, T.float32(-1), T.Select(v_r_b % 6 == 2 and v_nu % 6 == 2, T.float32(0.5), T.Select(v_r_b % 6 == 2 and v_nu % 6 == 1, T.float32(-2.5), T.Select(v_r_b % 6 == 2 and v_nu % 6 == 0, T.float32(-2), T.Select(v_r_b % 6 == 1 and v_nu % 6 == 5, T.float32(1), T.Select(v_r_b % 6 == 1 and v_nu % 6 == 4, T.float32(0.5), T.Select(v_r_b % 6 == 1 and v_nu % 6 == 3, T.float32(-2), T.Select(v_r_b % 6 == 1 and v_nu % 6 == 2, T.float32(-1), T.Select(v_r_b % 6 == 1 and v_nu % 6 == 1, T.float32(1), T.Select(v_r_b % 6 == 
1 and v_nu % 6 == 0, T.float32(-1.5), T.Select(v_r_b % 6 == 0 and v_nu % 6 == 5, T.float32(0), T.Select(v_r_b % 6 == 0 and v_nu % 6 == 4, T.float32(0), T.Select(v_r_b % 6 == 0 and v_nu % 6 == 3, T.float32(0), T.Select(v_r_b % 6 == 0 and v_nu % 6 == 2, T.float32(0), T.Select(v_r_b % 6 == 0 and v_nu % 6 == 1, T.float32(0), T.Select(v_r_b % 6 == 0 and v_nu % 6 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))))))))))))))
                    for ax0, ax1, ax2, ax3 in T.grid(6, 6, 1, 1):
                        with T.block("data_pack_local"):
                            v0, v1 = T.axis.remap("SS", [ax0, ax1])
                            v2 = T.axis.spatial(9, (p_0_ci_0_p_1_ci_1_fused_0 * 1024 + p_0_ci_0_p_1_ci_1_fused_1) // 384 * 3 + (p_0_ci_0_p_1_ci_1_fused_0 * 1024 + p_0_ci_0_p_1_ci_1_fused_1) % 24 // 8 + ax2)
                            v3 = T.axis.spatial(128, (p_0_ci_0_p_1_ci_1_fused_0 * 1024 + p_0_ci_0_p_1_ci_1_fused_1) % 384 // 24 * 8 + (p_0_ci_0_p_1_ci_1_fused_0 * 1024 + p_0_ci_0_p_1_ci_1_fused_1) % 8 + ax3)
                            T.where(p_0_ci_0_p_1_ci_1_fused_0 * 1024 + p_0_ci_0_p_1_ci_1_fused_1 < 1152)
                            T.reads(data_pack_local[v0, v1, v2, v3])
                            T.writes(data_pack[v0, v1, v2, v3])
                            data_pack[v0, v1, v2, v3] = data_pack_local[v0, v1, v2, v3]
            for eps_0_nu_0_p_0_co_0_fused in T.thread_binding(96, thread="blockIdx.x"):
                for eps_1_nu_1_p_1_co_1_fused in T.thread_binding(4, thread="vthread.x"):
                    for eps_2_nu_2_p_2_co_2_fused in T.thread_binding(27, thread="threadIdx.x"):
                        for ci_0 in range(8):
                            for ax0_ax1_ax2_ax3_fused in range(1728):
                                with T.block("data_pack_shared"):
                                    v0 = T.axis.spatial(6, eps_0_nu_0_p_0_co_0_fused // 32 * 2 + ax0_ax1_ax2_ax3_fused // 864)
                                    v1 = T.axis.spatial(6, ax0_ax1_ax2_ax3_fused % 864 // 144)
                                    v2 = T.axis.spatial(9, ax0_ax1_ax2_ax3_fused % 144 // 16)
                                    v3 = T.axis.spatial(128, ci_0 * 16 + ax0_ax1_ax2_ax3_fused % 16)
                                    T.reads(data_pack[v0, v1, v2, v3])
                                    T.writes(data_pack_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 1})
                                    data_pack_shared[v0, v1, v2, v3] = data_pack[v0, v1, v2, v3]
                            for ax0_ax1_ax2_ax3_fused in range(768):
                                with T.block("weight_shared"):
                                    v0 = T.axis.spatial(6, eps_0_nu_0_p_0_co_0_fused // 32 * 2 + ax0_ax1_ax2_ax3_fused // 384)
                                    v1 = T.axis.spatial(6, ax0_ax1_ax2_ax3_fused % 384 // 64)
                                    v2 = T.axis.spatial(128, eps_0_nu_0_p_0_co_0_fused % 32 * 4 + ax0_ax1_ax2_ax3_fused % 64 // 16)
                                    v3 = T.axis.spatial(128, ci_0 * 16 + ax0_ax1_ax2_ax3_fused % 16)
                                    T.reads(weight[v0, v1, v2, v3])
                                    T.writes(weight_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 3})
                                    weight_shared[v0, v1, v2, v3] = weight[v0, v1, v2, v3]
                            for ci_1, eps_3, nu_3, p_3, co_3, ci_2, eps_4, nu_4, p_4, co_4 in T.grid(1, 2, 1, 1, 2, 16, 1, 1, 1, 1):
                                with T.block("bgemm"):
                                    v_eps = T.axis.spatial(6, eps_0_nu_0_p_0_co_0_fused // 32 * 2 + eps_3 + eps_4)
                                    v_nu = T.axis.spatial(6, eps_1_nu_1_p_1_co_1_fused // 2 * 3 + eps_2_nu_2_p_2_co_2_fused // 9 + nu_3 + nu_4)
                                    v_p = T.axis.spatial(9, eps_2_nu_2_p_2_co_2_fused % 9 + p_3 + p_4)
                                    v_co = T.axis.spatial(128, eps_0_nu_0_p_0_co_0_fused % 32 * 4 + eps_1_nu_1_p_1_co_1_fused % 2 * 2 + co_3 + co_4)
                                    v_ci = T.axis.reduce(128, ci_0 * 16 + ci_1 * 16 + ci_2)
                                    T.reads(data_pack_shared[v_eps, v_nu, v_p, v_ci], weight_shared[v_eps, v_nu, v_co, v_ci])
                                    T.writes(bgemm_local[v_eps, v_nu, v_p, v_co])
                                    T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS", "meta_schedule.write_cache_level": [3]})
                                    with T.init():
                                        bgemm_local[v_eps, v_nu, v_p, v_co] = T.float32(0)
                                    bgemm_local[v_eps, v_nu, v_p, v_co] = bgemm_local[v_eps, v_nu, v_p, v_co] + data_pack_shared[v_eps, v_nu, v_p, v_ci] * weight_shared[v_eps, v_nu, v_co, v_ci]
                        for ax0, ax1, ax2, ax3 in T.grid(2, 1, 1, 2):
                            with T.block("bgemm_local"):
                                v0 = T.axis.spatial(6, eps_0_nu_0_p_0_co_0_fused // 32 * 2 + ax0)
                                v1 = T.axis.spatial(6, eps_1_nu_1_p_1_co_1_fused // 2 * 3 + eps_2_nu_2_p_2_co_2_fused // 9 + ax1)
                                v2 = T.axis.spatial(9, eps_2_nu_2_p_2_co_2_fused % 9 + ax2)
                                v3 = T.axis.spatial(128, eps_0_nu_0_p_0_co_0_fused % 32 * 4 + eps_1_nu_1_p_1_co_1_fused % 2 * 2 + ax3)
                                T.reads(bgemm_local[v0, v1, v2, v3])
                                T.writes(bgemm[v0, v1, v2, v3])
                                bgemm[v0, v1, v2, v3] = bgemm_local[v0, v1, v2, v3]
            for p_0_co_0_p_1_co_1_fused_0 in T.thread_binding(18, thread="blockIdx.x"):
                for p_0_co_0_p_1_co_1_fused_1 in T.thread_binding(64, thread="threadIdx.x"):
                    for vh in T.unroll(4):
                        for vw in T.unroll(4):
                            for r_a in T.unroll(6):
                                for r_b in T.unroll(6):
                                    with T.block("inverse"):
                                        v_vh, v_vw = T.axis.remap("SS", [vh, vw])
                                        v_p = T.axis.spatial(9, (p_0_co_0_p_1_co_1_fused_0 * 64 + p_0_co_0_p_1_co_1_fused_1) // 384 * 3 + (p_0_co_0_p_1_co_1_fused_0 * 64 + p_0_co_0_p_1_co_1_fused_1) % 24 // 8)
                                        v_co = T.axis.spatial(128, (p_0_co_0_p_1_co_1_fused_0 * 64 + p_0_co_0_p_1_co_1_fused_1) % 384 // 24 * 8 + (p_0_co_0_p_1_co_1_fused_0 * 64 + p_0_co_0_p_1_co_1_fused_1) % 8)
                                        v_r_a, v_r_b = T.axis.remap("RR", [r_a, r_b])
                                        T.reads(bgemm[v_r_a, v_r_b, v_p, v_co])
                                        T.writes(inverse[v_vh, v_vw, v_p, v_co])
                                        T.block_attr({"schedule_rule": "conv2d_nhwc_winograd_inverse"})
                                        with T.init():
                                            inverse[v_vh, v_vw, v_p, v_co] = T.float32(0)
                                        inverse[v_vh, v_vw, v_p, v_co] = inverse[v_vh, v_vw, v_p, v_co] + bgemm[v_r_a, v_r_b, v_p, v_co] * T.Select(v_r_a % 6 == 5 and v_vh % 4 == 3, T.float32(1), T.Select(v_r_a % 6 == 5 and v_vh % 4 == 2, T.float32(0), T.Select(v_r_a % 6 == 5 and v_vh % 4 == 1, T.float32(0), T.Select(v_r_a % 6 == 5 and v_vh % 4 == 0, T.float32(0), T.Select(v_r_a % 6 == 4 and v_vh % 4 == 3, T.float32(-8), T.Select(v_r_a % 6 == 4 and v_vh % 4 == 2, T.float32(4), T.Select(v_r_a % 6 == 4 and v_vh % 4 == 1, T.float32(-2), T.Select(v_r_a % 6 == 4 and v_vh % 4 == 0, T.float32(1), T.Select(v_r_a % 6 == 3 and v_vh % 4 == 3, T.float32(0.125), T.Select(v_r_a % 6 == 3 and v_vh % 4 == 2, T.float32(0.25), T.Select(v_r_a % 6 == 3 and v_vh % 4 == 1, T.float32(0.5), T.Select(v_r_a % 6 == 3 and v_vh % 4 == 0, T.float32(1), T.Select(v_r_a % 6 == 2 and v_vh % 4 == 3, T.float32(1), T.Select(v_r_a % 6 == 2 and v_vh % 4 == 2, T.float32(1), T.Select(v_r_a % 6 == 2 and v_vh % 4 == 1, T.float32(1), T.Select(v_r_a % 6 == 2 and v_vh % 4 == 0, T.float32(1), T.Select(v_r_a % 6 == 1 and v_vh % 4 == 3, T.float32(-1), T.Select(v_r_a % 6 == 1 and v_vh % 4 == 2, T.float32(1), T.Select(v_r_a % 6 == 1 and v_vh % 4 == 1, T.float32(-1), T.Select(v_r_a % 6 == 1 and v_vh % 4 == 0, T.float32(1), T.Select(v_r_a % 6 == 0 and v_vh % 4 == 3, T.float32(0), T.Select(v_r_a % 6 == 0 and v_vh % 4 == 2, T.float32(0), T.Select(v_r_a % 6 == 0 and v_vh % 4 == 1, T.float32(0), T.Select(v_r_a % 6 == 0 and v_vh % 4 == 0, T.float32(1), T.float32(0))))))))))))))))))))))))) * T.Select(v_r_b % 6 == 5 and v_vw % 4 == 3, T.float32(1), T.Select(v_r_b % 6 == 5 and v_vw % 4 == 2, T.float32(0), T.Select(v_r_b % 6 == 5 and v_vw % 4 == 1, T.float32(0), T.Select(v_r_b % 6 == 5 and v_vw % 4 == 0, T.float32(0), T.Select(v_r_b % 6 == 4 and v_vw % 4 == 3, T.float32(-8), T.Select(v_r_b % 6 == 4 and v_vw % 4 == 2, T.float32(4), T.Select(v_r_b % 6 == 4 and v_vw % 4 == 1, T.float32(-2), T.Select(v_r_b % 6 == 4 and v_vw % 4 == 0, T.float32(1), T.Select(v_r_b % 6 == 
3 and v_vw % 4 == 3, T.float32(0.125), T.Select(v_r_b % 6 == 3 and v_vw % 4 == 2, T.float32(0.25), T.Select(v_r_b % 6 == 3 and v_vw % 4 == 1, T.float32(0.5), T.Select(v_r_b % 6 == 3 and v_vw % 4 == 0, T.float32(1), T.Select(v_r_b % 6 == 2 and v_vw % 4 == 3, T.float32(1), T.Select(v_r_b % 6 == 2 and v_vw % 4 == 2, T.float32(1), T.Select(v_r_b % 6 == 2 and v_vw % 4 == 1, T.float32(1), T.Select(v_r_b % 6 == 2 and v_vw % 4 == 0, T.float32(1), T.Select(v_r_b % 6 == 1 and v_vw % 4 == 3, T.float32(-1), T.Select(v_r_b % 6 == 1 and v_vw % 4 == 2, T.float32(1), T.Select(v_r_b % 6 == 1 and v_vw % 4 == 1, T.float32(-1), T.Select(v_r_b % 6 == 1 and v_vw % 4 == 0, T.float32(1), T.Select(v_r_b % 6 == 0 and v_vw % 4 == 3, T.float32(0), T.Select(v_r_b % 6 == 0 and v_vw % 4 == 2, T.float32(0), T.Select(v_r_b % 6 == 0 and v_vw % 4 == 1, T.float32(0), T.Select(v_r_b % 6 == 0 and v_vw % 4 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))
            for n_h_w_co_fused_0 in T.thread_binding(144, thread="blockIdx.x"):
                for n_h_w_co_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
                    with T.block("conv2d_winograd"):
                        v_n = T.axis.spatial(1, 0)
                        v_h = T.axis.spatial(12, (n_h_w_co_fused_0 * 128 + n_h_w_co_fused_1) // 1536)
                        v_w = T.axis.spatial(12, (n_h_w_co_fused_0 * 128 + n_h_w_co_fused_1) % 1536 // 128)
                        v_co = T.axis.spatial(128, (n_h_w_co_fused_0 * 128 + n_h_w_co_fused_1) % 128)
                        T.reads(inverse[v_h % 4, v_w % 4, v_n * 9 + v_h // 4 * 3 + v_w // 4, v_co])
                        T.writes(conv2d_winograd[v_n, v_h, v_w, v_co])
                        conv2d_winograd[v_n, v_h, v_w, v_co] = inverse[v_h % 4, v_w % 4, v_n * 9 + v_h // 4 * 3 + v_w // 4, v_co]
    # fmt: on
    # Sampled tiling / categorical decisions that replay to the sketch above.
    decision_0 = [
        ("SamplePerfectTile", [3, 3]),
        ("SamplePerfectTile", [16, 8]),
        ("SampleCategorical", 1),
        ("SamplePerfectTile", [3, 3]),
        ("SamplePerfectTile", [16, 8]),
        ("SampleCategorical", 5),
        ("SamplePerfectTile", [3, 1, 1, 2, 1]),
        ("SamplePerfectTile", [1, 2, 3, 1, 1]),
        ("SamplePerfectTile", [1, 1, 9, 1, 1]),
        ("SamplePerfectTile", [32, 2, 1, 2, 1]),
        ("SamplePerfectTile", [8, 1, 16]),
        ("SampleCategorical", 0),
        ("SampleCategorical", 2),
        ("SampleCategorical", 1),
        ("SampleCategorical", 2),
    ]
    # Generate the design space for the NHWC Winograd workload and compare it
    # structurally against the single expected sketch + decision list.
    with _target():
        mod = create_te_workload("C2D_WIN_NHWC", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[cuda_nhwc_0],
        expected_decisions=[decision_0],
    )
def test_cuda_nchw():
    """Design-space check for Winograd conv2d (NCHW layout) on CUDA.

    ``cuda_nchw_0`` is the auto-generated expected sketch; its exact TVMScript
    text serves as the structural-equality oracle and must not be hand-edited.
    """
    # fmt: off
    @T.prim_func
    def cuda_nchw_0(data: T.Buffer((1, 64, 56, 56), "float32"), weight: T.Buffer((6, 6, 64, 64), "float32"), conv2d_winograd: T.Buffer((1, 64, 56, 56), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "layout_free_buffers": [1], "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 16})
            input_tile_local = T.alloc_buffer((64, 196, 6, 6), scope="local")
            data_pack = T.alloc_buffer((6, 6, 64, 196))
            bgemm = T.alloc_buffer((6, 6, 64, 196))
            inverse_local = T.alloc_buffer((64, 196, 4, 4), scope="local")
            data_pack_local = T.alloc_buffer((6, 6, 64, 196), scope="local")
            bgemm_local = T.alloc_buffer((6, 6, 64, 196), scope="local")
            data_pack_shared = T.alloc_buffer((6, 6, 64, 196), scope="shared")
            weight_shared = T.alloc_buffer((6, 6, 64, 64), scope="shared")
            for ci_p_fused_0 in T.thread_binding(25, thread="blockIdx.x"):
                for ci_p_fused_1 in T.thread_binding(512, thread="threadIdx.x"):
                    for ax0, ax1, ax2, ax3 in T.grid(1, 1, 6, 6):
                        with T.block("input_tile"):
                            v_ci = T.axis.spatial(64, (ci_p_fused_0 * 512 + ci_p_fused_1) // 196 + ax0)
                            v_p = T.axis.spatial(196, (ci_p_fused_0 * 120 + ci_p_fused_1) % 196 + ax1)
                            v_eps, v_nu = T.axis.remap("SS", [ax2, ax3])
                            T.where(ci_p_fused_0 * 512 + ci_p_fused_1 < 12544)
                            T.reads(data[v_p // 196, v_ci, v_p % 196 // 14 * 4 + v_eps - 1, v_p % 14 * 4 + v_nu - 1])
                            T.writes(input_tile_local[v_ci, v_p, v_eps, v_nu])
                            T.block_attr({"schedule_rule": "None"})
                            input_tile_local[v_ci, v_p, v_eps, v_nu] = T.if_then_else(1 <= v_p % 196 // 14 * 4 + v_eps and v_p % 196 // 14 * 4 + v_eps < 57 and 1 <= v_p % 14 * 4 + v_nu and v_p % 14 * 4 + v_nu < 57, data[v_p // 196, v_ci, v_p % 196 // 14 * 4 + v_eps - 1, v_p % 14 * 4 + v_nu - 1], T.float32(0))
                    for eps in T.unroll(6):
                        for nu in T.unroll(6):
                            for r_a in T.unroll(6):
                                for r_b in T.unroll(6):
                                    with T.block("data_pack"):
                                        v_eps, v_nu = T.axis.remap("SS", [eps, nu])
                                        v_ci = T.axis.spatial(64, (ci_p_fused_0 * 512 + ci_p_fused_1) // 196)
                                        v_p = T.axis.spatial(196, (ci_p_fused_0 * 512 + ci_p_fused_1) % 196)
                                        v_r_a, v_r_b = T.axis.remap("RR", [r_a, r_b])
                                        T.where(ci_p_fused_0 * 512 + ci_p_fused_1 < 12544)
                                        T.reads(input_tile_local[v_ci, v_p, v_r_a, v_r_b])
                                        T.writes(data_pack_local[v_eps, v_nu, v_ci, v_p])
                                        T.block_attr({"schedule_rule": "conv2d_nchw_winograd_data_pack"})
                                        with T.init():
                                            data_pack_local[v_eps, v_nu, v_ci, v_p] = T.float32(0)
                                        data_pack_local[v_eps, v_nu, v_ci, v_p] = data_pack_local[v_eps, v_nu, v_ci, v_p] + input_tile_local[v_ci, v_p, v_r_a, v_r_b] * T.Select(v_r_a % 6 == 5 and v_eps % 6 == 5, T.float32(1), T.Select(v_r_a % 6 == 5 and v_eps % 6 == 4, T.float32(0), T.Select(v_r_a % 6 == 5 and v_eps % 6 == 3, T.float32(0), T.Select(v_r_a % 6 == 5 and v_eps % 6 == 2, T.float32(0), T.Select(v_r_a % 6 == 5 and v_eps % 6 == 1, T.float32(0), T.Select(v_r_a % 6 == 5 and v_eps % 6 == 0, T.float32(0), T.Select(v_r_a % 6 == 4 and v_eps % 6 == 5, T.float32(1.5), T.Select(v_r_a % 6 == 4 and v_eps % 6 == 4, T.float32(1), T.Select(v_r_a % 6 == 4 and v_eps % 6 == 3, T.float32(1), T.Select(v_r_a % 6 == 4 and v_eps % 6 == 2, T.float32(1), T.Select(v_r_a % 6 == 4 and v_eps % 6 == 1, T.float32(1), T.Select(v_r_a % 6 == 4 and v_eps % 6 == 0, T.float32(1), T.Select(v_r_a % 6 == 3 and v_eps % 6 == 5, T.float32(-2), T.Select(v_r_a % 6 == 3 and v_eps % 6 == 4, T.float32(-0.5), T.Select(v_r_a % 6 == 3 and v_eps % 6 == 3, T.float32(2), T.Select(v_r_a % 6 == 3 and v_eps % 6 == 2, T.float32(2.5), T.Select(v_r_a % 6 == 3 and v_eps % 6 == 1, T.float32(0.5), T.Select(v_r_a % 6 == 3 and v_eps % 6 == 0, T.float32(1.5), T.Select(v_r_a % 6 == 2 and v_eps % 6 == 5, T.float32(-1.5), T.Select(v_r_a % 6 == 2 and v_eps % 6 == 4, T.float32(-1), T.Select(v_r_a % 6 == 2 and v_eps % 6 == 3, T.float32(-1), T.Select(v_r_a % 6 == 2 and v_eps % 6 == 2, T.float32(0.5), T.Select(v_r_a % 6 == 2 and v_eps % 6 == 1, T.float32(-2.5), T.Select(v_r_a % 6 == 2 and v_eps % 6 == 0, T.float32(-2), T.Select(v_r_a % 6 == 1 and v_eps % 6 == 5, T.float32(1), T.Select(v_r_a % 6 == 1 and v_eps % 6 == 4, T.float32(0.5), T.Select(v_r_a % 6 == 1 and v_eps % 6 == 3, T.float32(-2), T.Select(v_r_a % 6 == 1 and v_eps % 6 == 2, T.float32(-1), T.Select(v_r_a % 6 == 1 and v_eps % 6 == 1, T.float32(1), T.Select(v_r_a % 6 == 1 and v_eps % 6 == 0, T.float32(-1.5), T.Select(v_r_a % 6 == 0 and v_eps % 6 == 5, T.float32(0), T.Select(v_r_a % 6 == 0 and v_eps % 6 == 4, 
T.float32(0), T.Select(v_r_a % 6 == 0 and v_eps % 6 == 3, T.float32(0), T.Select(v_r_a % 6 == 0 and v_eps % 6 == 2, T.float32(0), T.Select(v_r_a % 6 == 0 and v_eps % 6 == 1, T.float32(0), T.Select(v_r_a % 6 == 0 and v_eps % 6 == 0, T.float32(1), T.float32(0))))))))))))))))))))))))))))))))))))) * T.Select(v_r_b % 6 == 5 and v_nu % 6 == 5, T.float32(1), T.Select(v_r_b % 6 == 5 and v_nu % 6 == 4, T.float32(0), T.Select(v_r_b % 6 == 5 and v_nu % 6 == 3, T.float32(0), T.Select(v_r_b % 6 == 5 and v_nu % 6 == 2, T.float32(0), T.Select(v_r_b % 6 == 5 and v_nu % 6 == 1, T.float32(0), T.Select(v_r_b % 6 == 5 and v_nu % 6 == 0, T.float32(0), T.Select(v_r_b % 6 == 4 and v_nu % 6 == 5, T.float32(1.5), T.Select(v_r_b % 6 == 4 and v_nu % 6 == 4, T.float32(1), T.Select(v_r_b % 6 == 4 and v_nu % 6 == 3, T.float32(1), T.Select(v_r_b % 6 == 4 and v_nu % 6 == 2, T.float32(1), T.Select(v_r_b % 6 == 4 and v_nu % 6 == 1, T.float32(1), T.Select(v_r_b % 6 == 4 and v_nu % 6 == 0, T.float32(1), T.Select(v_r_b % 6 == 3 and v_nu % 6 == 5, T.float32(-2), T.Select(v_r_b % 6 == 3 and v_nu % 6 == 4, T.float32(-0.5), T.Select(v_r_b % 6 == 3 and v_nu % 6 == 3, T.float32(2), T.Select(v_r_b % 6 == 3 and v_nu % 6 == 2, T.float32(2.5), T.Select(v_r_b % 6 == 3 and v_nu % 6 == 1, T.float32(0.5), T.Select(v_r_b % 6 == 3 and v_nu % 6 == 0, T.float32(1.5), T.Select(v_r_b % 6 == 2 and v_nu % 6 == 5, T.float32(-1.5), T.Select(v_r_b % 6 == 2 and v_nu % 6 == 4, T.float32(-1), T.Select(v_r_b % 6 == 2 and v_nu % 6 == 3, T.float32(-1), T.Select(v_r_b % 6 == 2 and v_nu % 6 == 2, T.float32(0.5), T.Select(v_r_b % 6 == 2 and v_nu % 6 == 1, T.float32(-2.5), T.Select(v_r_b % 6 == 2 and v_nu % 6 == 0, T.float32(-2), T.Select(v_r_b % 6 == 1 and v_nu % 6 == 5, T.float32(1), T.Select(v_r_b % 6 == 1 and v_nu % 6 == 4, T.float32(0.5), T.Select(v_r_b % 6 == 1 and v_nu % 6 == 3, T.float32(-2), T.Select(v_r_b % 6 == 1 and v_nu % 6 == 2, T.float32(-1), T.Select(v_r_b % 6 == 1 and v_nu % 6 == 1, T.float32(1), T.Select(v_r_b % 6 == 
1 and v_nu % 6 == 0, T.float32(-1.5), T.Select(v_r_b % 6 == 0 and v_nu % 6 == 5, T.float32(0), T.Select(v_r_b % 6 == 0 and v_nu % 6 == 4, T.float32(0), T.Select(v_r_b % 6 == 0 and v_nu % 6 == 3, T.float32(0), T.Select(v_r_b % 6 == 0 and v_nu % 6 == 2, T.float32(0), T.Select(v_r_b % 6 == 0 and v_nu % 6 == 1, T.float32(0), T.Select(v_r_b % 6 == 0 and v_nu % 6 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))))))))))))))
                    for ax0, ax1, ax2, ax3 in T.grid(6, 6, 1, 1):
                        with T.block("data_pack_local"):
                            v0, v1 = T.axis.remap("SS", [ax0, ax1])
                            v2 = T.axis.spatial(64, (ci_p_fused_0 * 512 + ci_p_fused_1) // 196 + ax2)
                            v3 = T.axis.spatial(196, (ci_p_fused_0 * 120 + ci_p_fused_1) % 196 + ax3)
                            T.where(ci_p_fused_0 * 512 + ci_p_fused_1 < 12544)
                            T.reads(data_pack_local[v0, v1, v2, v3])
                            T.writes(data_pack[v0, v1, v2, v3])
                            data_pack[v0, v1, v2, v3] = data_pack_local[v0, v1, v2, v3]
            for eps_0_nu_0_co_0_p_0_fused in T.thread_binding(14, thread="blockIdx.x"):
                for eps_1_nu_1_co_1_p_1_fused in T.thread_binding(224, thread="vthread.x"):
                    for eps_2_nu_2_co_2_p_2_fused in T.thread_binding(2, thread="threadIdx.x"):
                        for ci_0 in range(2):
                            for ax0_ax1_ax2_ax3_fused in range(32256):
                                with T.block("data_pack_shared"):
                                    v0 = T.axis.spatial(6, ax0_ax1_ax2_ax3_fused // 5376)
                                    v1 = T.axis.spatial(6, ax0_ax1_ax2_ax3_fused % 5376 // 896)
                                    v2 = T.axis.spatial(64, ci_0 * 32 + ax0_ax1_ax2_ax3_fused % 896 // 28)
                                    v3 = T.axis.spatial(196, eps_0_nu_0_co_0_p_0_fused % 7 * 28 + ax0_ax1_ax2_ax3_fused % 28)
                                    T.reads(data_pack[v0, v1, v2, v3])
                                    T.writes(data_pack_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 4})
                                    data_pack_shared[v0, v1, v2, v3] = data_pack[v0, v1, v2, v3]
                            for ax0_ax1_ax2_ax3_fused in range(36864):
                                with T.block("weight_shared"):
                                    v0 = T.axis.spatial(6, ax0_ax1_ax2_ax3_fused // 6144)
                                    v1 = T.axis.spatial(6, ax0_ax1_ax2_ax3_fused % 6144 // 1024)
                                    v2 = T.axis.spatial(64, ci_0 * 32 + ax0_ax1_ax2_ax3_fused % 1024 // 32)
                                    v3 = T.axis.spatial(64, eps_0_nu_0_co_0_p_0_fused // 7 * 32 + ax0_ax1_ax2_ax3_fused % 32)
                                    T.reads(weight[v0, v1, v2, v3])
                                    T.writes(weight_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 3})
                                    weight_shared[v0, v1, v2, v3] = weight[v0, v1, v2, v3]
                            for ci_1, eps_3, nu_3, co_3, p_3, ci_2, eps_4, nu_4, co_4, p_4 in T.grid(16, 2, 3, 1, 4, 2, 3, 1, 1, 1):
                                with T.block("bgemm"):
                                    v_eps = T.axis.spatial(6, eps_3 * 3 + eps_4)
                                    v_nu = T.axis.spatial(6, eps_1_nu_1_co_1_p_1_fused // 112 * 3 + nu_3 + nu_4)
                                    v_co = T.axis.spatial(64, eps_0_nu_0_co_0_p_0_fused // 7 * 32 + eps_1_nu_1_co_1_p_1_fused % 112 // 7 * 2 + eps_2_nu_2_co_2_p_2_fused + co_3 + co_4)
                                    v_p = T.axis.spatial(196, eps_0_nu_0_co_0_p_0_fused % 7 * 28 + eps_1_nu_1_co_1_p_1_fused % 7 * 4 + p_3 + p_4)
                                    v_ci = T.axis.reduce(64, ci_0 * 32 + ci_1 * 2 + ci_2)
                                    T.reads(data_pack_shared[v_eps, v_nu, v_ci, v_p], weight_shared[v_eps, v_nu, v_ci, v_co])
                                    T.writes(bgemm_local[v_eps, v_nu, v_co, v_p])
                                    T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"})
                                    with T.init():
                                        bgemm_local[v_eps, v_nu, v_co, v_p] = T.float32(0)
                                    bgemm_local[v_eps, v_nu, v_co, v_p] = bgemm_local[v_eps, v_nu, v_co, v_p] + data_pack_shared[v_eps, v_nu, v_ci, v_p] * weight_shared[v_eps, v_nu, v_ci, v_co]
                        for ax0, ax1, ax2, ax3 in T.grid(6, 3, 1, 4):
                            with T.block("bgemm_local"):
                                v0 = T.axis.spatial(6, ax0)
                                v1 = T.axis.spatial(6, eps_1_nu_1_co_1_p_1_fused // 112 * 3 + ax1)
                                v2 = T.axis.spatial(64, eps_0_nu_0_co_0_p_0_fused // 7 * 32 + eps_1_nu_1_co_1_p_1_fused % 112 // 7 * 2 + eps_2_nu_2_co_2_p_2_fused + ax2)
                                v3 = T.axis.spatial(196, eps_0_nu_0_co_0_p_0_fused % 7 * 28 + eps_1_nu_1_co_1_p_1_fused % 7 * 4 + ax3)
                                T.reads(bgemm_local[v0, v1, v2, v3])
                                T.writes(bgemm[v0, v1, v2, v3])
                                bgemm[v0, v1, v2, v3] = bgemm_local[v0, v1, v2, v3]
            for n_co_h_0_w_0_fused_0 in T.thread_binding(196, thread="blockIdx.x"):
                for n_co_h_0_w_0_fused_1 in T.thread_binding(64, thread="threadIdx.x"):
                    for ax0, ax1 in T.grid(1, 1):
                        for ax2 in T.unroll(4):
                            for ax3 in T.unroll(4):
                                for ax4 in T.unroll(6):
                                    for ax5 in T.unroll(6):
                                        with T.block("inverse"):
                                            v_co = T.axis.spatial(64, (n_co_h_0_w_0_fused_0 * 64 + n_co_h_0_w_0_fused_1) // 196 + ax0)
                                            v_p = T.axis.spatial(196, (n_co_h_0_w_0_fused_0 * 64 + n_co_h_0_w_0_fused_1) % 196 + ax1)
                                            v_vh, v_vw, v_r_a, v_r_b = T.axis.remap("SSRR", [ax2, ax3, ax4, ax5])
                                            T.reads(bgemm[v_r_a, v_r_b, v_co, v_p])
                                            T.writes(inverse_local[v_co, v_p, v_vh, v_vw])
                                            T.block_attr({"schedule_rule": "conv2d_nchw_winograd_inverse"})
                                            with T.init():
                                                inverse_local[v_co, v_p, v_vh, v_vw] = T.float32(0)
                                            inverse_local[v_co, v_p, v_vh, v_vw] = inverse_local[v_co, v_p, v_vh, v_vw] + bgemm[v_r_a, v_r_b, v_co, v_p] * T.Select(v_r_a % 6 == 5 and v_vh % 4 == 3, T.float32(1), T.Select(v_r_a % 6 == 5 and v_vh % 4 == 2, T.float32(0), T.Select(v_r_a % 6 == 5 and v_vh % 4 == 1, T.float32(0), T.Select(v_r_a % 6 == 5 and v_vh % 4 == 0, T.float32(0), T.Select(v_r_a % 6 == 4 and v_vh % 4 == 3, T.float32(-8), T.Select(v_r_a % 6 == 4 and v_vh % 4 == 2, T.float32(4), T.Select(v_r_a % 6 == 4 and v_vh % 4 == 1, T.float32(-2), T.Select(v_r_a % 6 == 4 and v_vh % 4 == 0, T.float32(1), T.Select(v_r_a % 6 == 3 and v_vh % 4 == 3, T.float32(0.125), T.Select(v_r_a % 6 == 3 and v_vh % 4 == 2, T.float32(0.25), T.Select(v_r_a % 6 == 3 and v_vh % 4 == 1, T.float32(0.5), T.Select(v_r_a % 6 == 3 and v_vh % 4 == 0, T.float32(1), T.Select(v_r_a % 6 == 2 and v_vh % 4 == 3, T.float32(1), T.Select(v_r_a % 6 == 2 and v_vh % 4 == 2, T.float32(1), T.Select(v_r_a % 6 == 2 and v_vh % 4 == 1, T.float32(1), T.Select(v_r_a % 6 == 2 and v_vh % 4 == 0, T.float32(1), T.Select(v_r_a % 6 == 1 and v_vh % 4 == 3, T.float32(-1), T.Select(v_r_a % 6 == 1 and v_vh % 4 == 2, T.float32(1), T.Select(v_r_a % 6 == 1 and v_vh % 4 == 1, T.float32(-1), T.Select(v_r_a % 6 == 1 and v_vh % 4 == 0, T.float32(1), T.Select(v_r_a % 6 == 0 and v_vh % 4 == 3, T.float32(0), T.Select(v_r_a % 6 == 0 and v_vh % 4 == 2, T.float32(0), T.Select(v_r_a % 6 == 0 and v_vh % 4 == 1, T.float32(0), T.Select(v_r_a % 6 == 0 and v_vh % 4 == 0, T.float32(1), T.float32(0))))))))))))))))))))))))) * T.Select(v_r_b % 6 == 5 and v_vw % 4 == 3, T.float32(1), T.Select(v_r_b % 6 == 5 and v_vw % 4 == 2, T.float32(0), T.Select(v_r_b % 6 == 5 and v_vw % 4 == 1, T.float32(0), T.Select(v_r_b % 6 == 5 and v_vw % 4 == 0, T.float32(0), T.Select(v_r_b % 6 == 4 and v_vw % 4 == 3, T.float32(-8), T.Select(v_r_b % 6 == 4 and v_vw % 4 == 2, T.float32(4), T.Select(v_r_b % 6 == 4 and v_vw % 4 == 1, T.float32(-2), T.Select(v_r_b % 6 == 4 and v_vw % 4 == 0, T.float32(1), 
T.Select(v_r_b % 6 == 3 and v_vw % 4 == 3, T.float32(0.125), T.Select(v_r_b % 6 == 3 and v_vw % 4 == 2, T.float32(0.25), T.Select(v_r_b % 6 == 3 and v_vw % 4 == 1, T.float32(0.5), T.Select(v_r_b % 6 == 3 and v_vw % 4 == 0, T.float32(1), T.Select(v_r_b % 6 == 2 and v_vw % 4 == 3, T.float32(1), T.Select(v_r_b % 6 == 2 and v_vw % 4 == 2, T.float32(1), T.Select(v_r_b % 6 == 2 and v_vw % 4 == 1, T.float32(1), T.Select(v_r_b % 6 == 2 and v_vw % 4 == 0, T.float32(1), T.Select(v_r_b % 6 == 1 and v_vw % 4 == 3, T.float32(-1), T.Select(v_r_b % 6 == 1 and v_vw % 4 == 2, T.float32(1), T.Select(v_r_b % 6 == 1 and v_vw % 4 == 1, T.float32(-1), T.Select(v_r_b % 6 == 1 and v_vw % 4 == 0, T.float32(1), T.Select(v_r_b % 6 == 0 and v_vw % 4 == 3, T.float32(0), T.Select(v_r_b % 6 == 0 and v_vw % 4 == 2, T.float32(0), T.Select(v_r_b % 6 == 0 and v_vw % 4 == 1, T.float32(0), T.Select(v_r_b % 6 == 0 and v_vw % 4 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))
                    for h_1, w_1 in T.grid(4, 4):
                        with T.block("conv2d_winograd"):
                            v_n = T.axis.spatial(1, 0)
                            v_co = T.axis.spatial(64, (n_co_h_0_w_0_fused_0 * 64 + n_co_h_0_w_0_fused_1) // 196)
                            v_h = T.axis.spatial(56, (n_co_h_0_w_0_fused_0 * 64 + n_co_h_0_w_0_fused_1) % 196 // 14 * 4 + h_1)
                            v_w = T.axis.spatial(56, (n_co_h_0_w_0_fused_0 * 64 + n_co_h_0_w_0_fused_1) % 14 * 4 + w_1)
                            T.reads(inverse_local[v_co, v_n * 196 + v_h // 4 * 14 + v_w // 4, v_h % 4, v_w % 4])
                            T.writes(conv2d_winograd[v_n, v_co, v_h, v_w])
                            conv2d_winograd[v_n, v_co, v_h, v_w] = inverse_local[v_co, v_n * 196 + v_h // 4 * 14 + v_w // 4, v_h % 4, v_w % 4]
    # fmt: on
    # Sampled tiling / categorical decisions that replay to the sketch above.
    decision_0 = [
        ("SampleCategorical", 4),
        ("SamplePerfectTile", [1, 1, 1, 2, 3]),
        ("SamplePerfectTile", [1, 2, 1, 3, 1]),
        ("SamplePerfectTile", [2, 16, 2, 1, 1]),
        ("SamplePerfectTile", [7, 7, 1, 4, 1]),
        ("SamplePerfectTile", [2, 16, 2]),
        ("SampleCategorical", 3),
        ("SampleCategorical", 2),
        ("SampleCategorical", 1),
        ("SampleCategorical", 1),
    ]
    # Generate the design space for the NCHW Winograd workload and compare it
    # structurally against the single expected sketch + decision list.
    with _target():
        mod = create_te_workload("C2D_WIN_NCHW", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[cuda_nchw_0],
        expected_decisions=[decision_0],
        debug_mask=0,
    )
def test_cuda_nchw_add_relu():
# fmt: off
@T.prim_func
def nchw_add_relu(p0: T.Buffer((2, 2048, 50, 75), "float32"), p1: T.Buffer((4, 4, 2048, 2048), "float32"), p2: T.Buffer((1, 2048, 1, 1), "float32"), T_relu: T.Buffer((2, 2048, 50, 75), "float32")):
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True, "layout_free_buffers": [1]})
# body
# with T.block("root")
data_pad = T.alloc_buffer([2, 2048, 52, 77], dtype="float32")
input_tile = T.alloc_buffer([2048, 1900, 4, 4], dtype="float32")
B = T.alloc_buffer([4, 4], dtype="float32")
data_pack = T.alloc_buffer([4, 4, 2048, 1900], dtype="float32")
bgemm = T.alloc_buffer([4, 4, 2048, 1900], dtype="float32")
A = T.alloc_buffer([4, 2], dtype="float32")
inverse = T.alloc_buffer([2048, 1900, 2, 2], dtype="float32")
conv2d_winograd = T.alloc_buffer([2, 2048, 50, 75], dtype="float32")
T_add = T.alloc_buffer([2, 2048, 50, 75], dtype="float32")
for i0, i1, i2, i3 in T.grid(2, 2048, 52, 77):
with T.block("data_pad"):
i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(p0[i0_1, i1_1, i2_1 - 1, i3_1 - 1])
T.writes(data_pad[i0_1, i1_1, i2_1, i3_1])
data_pad[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(1 <= i2_1 and i2_1 < 51 and 1 <= i3_1 and i3_1 < 76, p0[i0_1, i1_1, i2_1 - 1, i3_1 - 1], T.float32(0), dtype="float32")
for i0, i1, i2, i3 in T.grid(2048, 1900, 4, 4):
with T.block("input_tile"):
ci, p, eps, nu = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(data_pad[p // 950, ci, p % 950 // 38 * 2 + eps, p % 38 * 2 + nu])
T.writes(input_tile[ci, p, eps, nu])
T.block_attr({"schedule_rule":"None"})
input_tile[ci, p, eps, nu] = data_pad[p // 950, ci, p % 950 // 38 * 2 + eps, p % 38 * 2 + nu]
for i0, i1 in T.grid(4, 4):
with T.block("B"):
i, j = T.axis.remap("SS", [i0, i1])
T.reads()
T.writes(B[i, j])
T.block_attr({"schedule_rule":"None"})
B[i, j] = T.Select(i % 4 == 3 and j % 4 == 3, T.float32(1), T.Select(i % 4 == 3 and j % 4 == 2, T.float32(0), T.Select(i % 4 == 3 and j % 4 == 1, T.float32(0), T.Select(i % 4 == 3 and j % 4 == 0, T.float32(0), T.Select(i % 4 == 2 and j % 4 == 3, T.float32(0), T.Select(i % 4 == 2 and j % 4 == 2, T.float32(1), T.Select(i % 4 == 2 and j % 4 == 1, T.float32(1), T.Select(i % 4 == 2 and j % 4 == 0, T.float32(-1), T.Select(i % 4 == 1 and j % 4 == 3, T.float32(-1), T.Select(i % 4 == 1 and j % 4 == 2, T.float32(1), T.Select(i % 4 == 1 and j % 4 == 1, T.float32(-1), T.Select(i % 4 == 1 and j % 4 == 0, T.float32(0), T.Select(i % 4 == 0 and j % 4 == 3, T.float32(0), T.Select(i % 4 == 0 and j % 4 == 2, T.float32(0), T.Select(i % 4 == 0 and j % 4 == 1, T.float32(0), T.Select(i % 4 == 0 and j % 4 == 0, T.float32(1), T.float32(0)))))))))))))))))
for i0, i1, i2, i3, i4, i5 in T.grid(4, 4, 2048, 1900, 4, 4):
with T.block("data_pack"):
eps, nu, ci, p, r_a, r_b = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
T.reads(input_tile[ci, p, r_a, r_b], B[T.min(r_a, r_b) : T.max(r_a, r_b) + 1, T.min(eps, nu) : T.max(eps, nu) + 1])
T.writes(data_pack[eps, nu, ci, p])
T.block_attr({"schedule_rule":"conv2d_nchw_winograd_data_pack"})
with T.init():
data_pack[eps, nu, ci, p] = T.float32(0)
data_pack[eps, nu, ci, p] = data_pack[eps, nu, ci, p] + input_tile[ci, p, r_a, r_b] * B[r_a, eps] * B[r_b, nu]
for i0, i1, i2, i3, i4 in T.grid(4, 4, 2048, 1900, 2048):
with T.block("bgemm"):
eps, nu, co, p, ci = T.axis.remap("SSSSR", [i0, i1, i2, i3, i4])
T.reads(data_pack[eps, nu, ci, p], p1[eps, nu, ci, co])
T.writes(bgemm[eps, nu, co, p])
with T.init():
bgemm[eps, nu, co, p] = T.float32(0)
bgemm[eps, nu, co, p] = bgemm[eps, nu, co, p] + data_pack[eps, nu, ci, p] * p1[eps, nu, ci, co]
for i0, i1 in T.grid(4, 2):
with T.block("A"):
i, j = T.axis.remap("SS", [i0, i1])
T.reads()
T.writes(A[i, j])
T.block_attr({"schedule_rule":"None"})
A[i, j] = T.Select(i % 4 == 3 and j % 2 == 1, T.float32(1), T.Select(i % 4 == 3 and j % 2 == 0, T.float32(0), T.Select(i % 4 == 2 and j % 2 == 1, T.float32(1), T.Select(i % 4 == 2 and j % 2 == 0, T.float32(1), T.Select(i % 4 == 1 and j % 2 == 1, T.float32(-1), T.Select(i % 4 == 1 and j % 2 == 0, T.float32(1), T.Select(i % 4 == 0 and j % 2 == 1, T.float32(0), T.Select(i % 4 == 0 and j % 2 == 0, T.float32(1), T.float32(0)))))))))
for i0, i1, i2, i3, i4, i5 in T.grid(2048, 1900, 2, 2, 4, 4):
with T.block("inverse"):
co, p, vh, vw, r_a, r_b = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
T.reads(bgemm[r_a, r_b, co, p], A[T.min(r_a, r_b) : T.max(r_a, r_b) + 1, T.min(vh, vw) : T.max(vh, vw) + 1])
T.writes(inverse[co, p, vh, vw])
T.block_attr({"schedule_rule":"conv2d_nchw_winograd_inverse"})
with T.init():
inverse[co, p, vh, vw] = T.float32(0)
inverse[co, p, vh, vw] = inverse[co, p, vh, vw] + bgemm[r_a, r_b, co, p] * A[r_a, vh] * A[r_b, vw]
for i0, i1, i2, i3 in T.grid(2, 2048, 50, 75):
with T.block("conv2d_winograd"):
n, co, h, w = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(inverse[co, n * 950 + h // 2 * 38 + w // 2, h % 2, w % 2])
T.writes(conv2d_winograd[n, co, h, w])
conv2d_winograd[n, co, h, w] = inverse[co, n * 950 + h // 2 * 38 + w // 2, h % 2, w % 2]
for i0, i1, i2, i3 in T.grid(2, 2048, 50, 75):
with T.block("T_add"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(conv2d_winograd[ax0, ax1, ax2, ax3], p2[0, ax1, 0, 0])
T.writes(T_add[ax0, ax1, ax2, ax3])
T_add[ax0, ax1, ax2, ax3] = conv2d_winograd[ax0, ax1, ax2, ax3] + p2[0, ax1, 0, 0]
for i0, i1, i2, i3 in T.grid(2, 2048, 50, 75):
with T.block("T_relu"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(T_add[ax0, ax1, ax2, ax3])
T.writes(T_relu[ax0, ax1, ax2, ax3])
T_relu[ax0, ax1, ax2, ax3] = T.max(T_add[ax0, ax1, ax2, ax3], T.float32(0))
@T.prim_func
def nchw_add_relu_scheduled(p0: T.Buffer((2, 2048, 50, 75), "float32"), p1: T.Buffer((4, 4, 2048, 2048), "float32"), p2: T.Buffer((1, 2048, 1, 1), "float32"), T_relu: T.Buffer((2, 2048, 50, 75), "float32")):
T.func_attr({"global_symbol": "main", "layout_free_buffers": [1], "tir.noalias": T.bool(True)})
with T.block("root"):
T.reads()
T.writes()
T.block_attr({"meta_schedule.unroll_explicit": 1024})
input_tile_local = T.alloc_buffer((2048, 1900, 4, 4), scope="local")
data_pack = T.alloc_buffer((4, 4, 2048, 1900))
bgemm = T.alloc_buffer((4, 4, 2048, 1900))
inverse_local = T.alloc_buffer((2048, 1900, 2, 2), scope="local")
data_pack_local = T.alloc_buffer((4, 4, 2048, 1900), scope="local")
bgemm_local = T.alloc_buffer((4, 4, 2048, 1900), scope="local")
data_pack_shared = T.alloc_buffer((4, 4, 2048, 1900), scope="shared")
p1_shared = T.alloc_buffer((4, 4, 2048, 2048), scope="shared")
for i2_i3_fused_1 in T.thread_binding(256, thread="blockIdx.x"):
for i2_i3_fused_2 in T.thread_binding(1024, thread="threadIdx.x"):
for i2_i3_fused_0 in range(15):
for ax0, ax1, ax2, ax3 in T.grid(1, 1, 4, 4):
with T.block("input_tile"):
ci = T.axis.spatial(2048, (i2_i3_fused_0 * 262144 + i2_i3_fused_1 * 1024 + i2_i3_fused_2) // 1900 + ax0)
p = T.axis.spatial(1900, (i2_i3_fused_0 * 262144 + i2_i3_fused_1 * 1024 + i2_i3_fused_2) % 1900 + ax1)
eps, nu = T.axis.remap("SS", [ax2, ax3])
T.where(i2_i3_fused_0 * 262144 + i2_i3_fused_1 * 1024 + i2_i3_fused_2 < 3891200)
T.reads(p0[p // 950, ci, p % 950 // 38 * 2 + eps - 1, p % 38 * 2 + nu - 1])
T.writes(input_tile_local[ci, p, eps, nu])
T.block_attr({"schedule_rule": "None"})
input_tile_local[ci, p, eps, nu] = T.if_then_else(1 <= p % 950 // 38 * 2 + eps and p % 950 // 38 * 2 + eps < 51 and 1 <= p % 38 * 2 + nu and p % 38 * 2 + nu < 76, p0[p // 950, ci, p % 950 // 38 * 2 + eps - 1, p % 38 * 2 + nu - 1], T.float32(0))
for i0 in T.unroll(4):
for i1 in T.unroll(4):
for i4 in T.unroll(4):
for i5 in T.unroll(4):
with T.block("data_pack"):
eps, nu = T.axis.remap("SS", [i0, i1])
ci = T.axis.spatial(2048, (i2_i3_fused_0 * 262144 + i2_i3_fused_1 * 1024 + i2_i3_fused_2) // 1900)
p = T.axis.spatial(1900, (i2_i3_fused_0 * 262144 + i2_i3_fused_1 * 1024 + i2_i3_fused_2) % 1900)
r_a, r_b = T.axis.remap("RR", [i4, i5])
T.where((i2_i3_fused_0 * 256 + i2_i3_fused_1) * 1024 + i2_i3_fused_2 < 3891200)
T.reads(input_tile_local[ci, p, r_a, r_b])
T.writes(data_pack_local[eps, nu, ci, p])
T.block_attr({"schedule_rule": "conv2d_nchw_winograd_data_pack"})
with T.init():
data_pack_local[eps, nu, ci, p] = T.float32(0)
data_pack_local[eps, nu, ci, p] = data_pack_local[eps, nu, ci, p] + input_tile_local[ci, p, r_a, r_b] * T.Select(r_a % 4 == 3 and eps % 4 == 3, T.float32(1), T.Select(r_a % 4 == 3 and eps % 4 == 2, T.float32(0), T.Select(r_a % 4 == 3 and eps % 4 == 1, T.float32(0), T.Select(r_a % 4 == 3 and eps % 4 == 0, T.float32(0), T.Select(r_a % 4 == 2 and eps % 4 == 3, T.float32(0), T.Select(r_a % 4 == 2 and eps % 4 == 2, T.float32(1), T.Select(r_a % 4 == 2 and eps % 4 == 1, T.float32(1), T.Select(r_a % 4 == 2 and eps % 4 == 0, T.float32(-1), T.Select(r_a % 4 == 1 and eps % 4 == 3, T.float32(-1), T.Select(r_a % 4 == 1 and eps % 4 == 2, T.float32(1), T.Select(r_a % 4 == 1 and eps % 4 == 1, T.float32(-1), T.Select(r_a % 4 == 1 and eps % 4 == 0, T.float32(0), T.Select(r_a % 4 == 0 and eps % 4 == 3, T.float32(0), T.Select(r_a % 4 == 0 and eps % 4 == 2, T.float32(0), T.Select(r_a % 4 == 0 and eps % 4 == 1, T.float32(0), T.Select(r_a % 4 == 0 and eps % 4 == 0, T.float32(1), T.float32(0))))))))))))))))) * T.Select(r_b % 4 == 3 and nu % 4 == 3, T.float32(1), T.Select(r_b % 4 == 3 and nu % 4 == 2, T.float32(0), T.Select(r_b % 4 == 3 and nu % 4 == 1, T.float32(0), T.Select(r_b % 4 == 3 and nu % 4 == 0, T.float32(0), T.Select(r_b % 4 == 2 and nu % 4 == 3, T.float32(0), T.Select(r_b % 4 == 2 and nu % 4 == 2, T.float32(1), T.Select(r_b % 4 == 2 and nu % 4 == 1, T.float32(1), T.Select(r_b % 4 == 2 and nu % 4 == 0, T.float32(-1), T.Select(r_b % 4 == 1 and nu % 4 == 3, T.float32(-1), T.Select(r_b % 4 == 1 and nu % 4 == 2, T.float32(1), T.Select(r_b % 4 == 1 and nu % 4 == 1, T.float32(-1), T.Select(r_b % 4 == 1 and nu % 4 == 0, T.float32(0), T.Select(r_b % 4 == 0 and nu % 4 == 3, T.float32(0), T.Select(r_b % 4 == 0 and nu % 4 == 2, T.float32(0), T.Select(r_b % 4 == 0 and nu % 4 == 1, T.float32(0), T.Select(r_b % 4 == 0 and nu % 4 == 0, T.float32(1), T.float32(0)))))))))))))))))
for ax0, ax1, ax2, ax3 in T.grid(4, 4, 1, 1):
with T.block("data_pack_local"):
v0, v1 = T.axis.remap("SS", [ax0, ax1])
v2 = T.axis.spatial(2048, (i2_i3_fused_0 * 262144 + i2_i3_fused_1 * 1024 + i2_i3_fused_2) // 1900 + ax2)
v3 = T.axis.spatial(1900, (i2_i3_fused_0 * 262144 + i2_i3_fused_1 * 1024 + i2_i3_fused_2) % 1900 + ax3)
T.where(i2_i3_fused_0 * 262144 + i2_i3_fused_1 * 1024 + i2_i3_fused_2 < 3891200)
T.reads(data_pack_local[v0, v1, v2, v3])
T.writes(data_pack[v0, v1, v2, v3])
data_pack[v0, v1, v2, v3] = data_pack_local[v0, v1, v2, v3]
for i0_0_i1_0_i2_0_i3_0_fused in T.thread_binding(24320, thread="blockIdx.x"):
for i0_1_i1_1_i2_1_i3_1_fused in T.thread_binding(2, thread="vthread.x"):
for i0_2_i1_2_i2_2_i3_2_fused in T.thread_binding(64, thread="threadIdx.x"):
for i4_0 in range(256):
for ax0_ax1_ax2_ax3_fused in range(640):
with T.block("data_pack_shared"):
v0 = T.axis.spatial(4, i0_0_i1_0_i2_0_i3_0_fused // 12160 * 2 + ax0_ax1_ax2_ax3_fused // 320)
v1 = T.axis.spatial(4, i0_0_i1_0_i2_0_i3_0_fused % 12160 // 6080 * 2 + ax0_ax1_ax2_ax3_fused % 320 // 160)
v2 = T.axis.spatial(2048, i4_0 * 8 + ax0_ax1_ax2_ax3_fused % 160 // 20)
v3 = T.axis.spatial(1900, i0_0_i1_0_i2_0_i3_0_fused % 95 * 20 + ax0_ax1_ax2_ax3_fused % 20)
T.reads(data_pack[v0, v1, v2, v3])
T.writes(data_pack_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch": 1})
data_pack_shared[v0, v1, v2, v3] = data_pack[v0, v1, v2, v3]
for ax0_ax1_ax2_ax3_fused in range(1024):
with T.block("p1_shared"):
v0 = T.axis.spatial(4, i0_0_i1_0_i2_0_i3_0_fused // 12160 * 2 + ax0_ax1_ax2_ax3_fused // 512)
v1 = T.axis.spatial(4, i0_0_i1_0_i2_0_i3_0_fused % 12160 // 6080 * 2 + ax0_ax1_ax2_ax3_fused % 512 // 256)
v2 = T.axis.spatial(2048, i4_0 * 8 + ax0_ax1_ax2_ax3_fused % 256 // 32)
v3 = T.axis.spatial(2048, i0_0_i1_0_i2_0_i3_0_fused % 6080 // 95 * 32 + ax0_ax1_ax2_ax3_fused % 32)
T.reads(p1[v0, v1, v2, v3])
T.writes(p1_shared[v0, v1, v2, v3])
T.block_attr({"meta_schedule.cooperative_fetch": 4})
p1_shared[v0, v1, v2, v3] = p1[v0, v1, v2, v3]
for i4_1, i0_3, i1_3, i2_3, i3_3, i4_2, i0_4, i1_4, i2_4, i3_4 in T.grid(1, 1, 2, 1, 1, 8, 1, 1, 2, 5):
with T.block("bgemm"):
eps = T.axis.spatial(4, i0_0_i1_0_i2_0_i3_0_fused // 12160 * 2 + i0_2_i1_2_i2_2_i3_2_fused // 32 + i0_3 + i0_4)
nu = T.axis.spatial(4, i0_0_i1_0_i2_0_i3_0_fused % 12160 // 6080 * 2 + i1_3 + i1_4)
co = T.axis.spatial(2048, i0_0_i1_0_i2_0_i3_0_fused % 6080 // 95 * 32 + i0_1_i1_1_i2_1_i3_1_fused * 16 + i0_2_i1_2_i2_2_i3_2_fused % 32 // 4 * 2 + i2_3 * 2 + i2_4)
p = T.axis.spatial(1900, i0_0_i1_0_i2_0_i3_0_fused % 95 * 20 + i0_2_i1_2_i2_2_i3_2_fused % 4 * 5 + i3_3 * 5 + i3_4)
ci = T.axis.reduce(2048, i4_0 * 8 + i4_1 * 8 + i4_2)
T.reads(data_pack_shared[eps, nu, ci, p], p1_shared[eps, nu, ci, co])
T.writes(bgemm_local[eps, nu, co, p])
T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"})
with T.init():
bgemm_local[eps, nu, co, p] = T.float32(0)
bgemm_local[eps, nu, co, p] = bgemm_local[eps, nu, co, p] + data_pack_shared[eps, nu, ci, p] * p1_shared[eps, nu, ci, co]
for ax0, ax1, ax2, ax3 in T.grid(1, 2, 2, 5):
with T.block("bgemm_local"):
v0 = T.axis.spatial(4, i0_0_i1_0_i2_0_i3_0_fused // 12160 * 2 + i0_2_i1_2_i2_2_i3_2_fused // 32 + ax0)
v1 = T.axis.spatial(4, i0_0_i1_0_i2_0_i3_0_fused % 12160 // 6080 * 2 + ax1)
v2 = T.axis.spatial(2048, i0_0_i1_0_i2_0_i3_0_fused % 6080 // 95 * 32 + i0_1_i1_1_i2_1_i3_1_fused * 16 + i0_2_i1_2_i2_2_i3_2_fused % 32 // 4 * 2 + ax2)
v3 = T.axis.spatial(1900, i0_0_i1_0_i2_0_i3_0_fused % 95 * 20 + i0_2_i1_2_i2_2_i3_2_fused % 4 * 5 + ax3)
T.reads(bgemm_local[v0, v1, v2, v3])
T.writes(bgemm[v0, v1, v2, v3])
bgemm[v0, v1, v2, v3] = bgemm_local[v0, v1, v2, v3]
for i0_i1_i2_0_i3_0_fused_1 in T.thread_binding(256, thread="blockIdx.x"):
for i0_i1_i2_0_i3_0_fused_2 in T.thread_binding(1024, thread="threadIdx.x"):
for i0_i1_i2_0_i3_0_fused_0 in range(15):
for ax0, ax1 in T.grid(1, 1):
for ax2 in T.unroll(2):
for ax3 in T.unroll(2):
for ax4 in T.unroll(4):
for ax5 in T.unroll(4):
with T.block("inverse"):
co = T.axis.spatial(2048, (i0_i1_i2_0_i3_0_fused_0 * 262144 + i0_i1_i2_0_i3_0_fused_1 * 1024 + i0_i1_i2_0_i3_0_fused_2) % 1945600 // 950 + ax0)
p = T.axis.spatial(1900, (i0_i1_i2_0_i3_0_fused_0 * 262144 + i0_i1_i2_0_i3_0_fused_1 * 1024 + i0_i1_i2_0_i3_0_fused_2) // 1945600 * 950 + (i0_i1_i2_0_i3_0_fused_0 * 262144 + i0_i1_i2_0_i3_0_fused_1 * 1024 + i0_i1_i2_0_i3_0_fused_2) % 950 + ax1)
vh, vw, r_a, r_b = T.axis.remap("SSRR", [ax2, ax3, ax4, ax5])
T.where((i0_i1_i2_0_i3_0_fused_0 * 256 + i0_i1_i2_0_i3_0_fused_1) * 1024 + i0_i1_i2_0_i3_0_fused_2 < 3891200)
T.reads(bgemm[r_a, r_b, co, p])
T.writes(inverse_local[co, p, vh, vw])
T.block_attr({"schedule_rule": "conv2d_nchw_winograd_inverse"})
with T.init():
inverse_local[co, p, vh, vw] = T.float32(0)
inverse_local[co, p, vh, vw] = inverse_local[co, p, vh, vw] + bgemm[r_a, r_b, co, p] * T.Select(r_a % 4 == 3 and vh % 2 == 1, T.float32(1), T.Select(r_a % 4 == 3 and vh % 2 == 0, T.float32(0), T.Select(r_a % 4 == 2 and vh % 2 == 1, T.float32(1), T.Select(r_a % 4 == 2 and vh % 2 == 0, T.float32(1), T.Select(r_a % 4 == 1 and vh % 2 == 1, T.float32(-1), T.Select(r_a % 4 == 1 and vh % 2 == 0, T.float32(1), T.Select(r_a % 4 == 0 and vh % 2 == 1, T.float32(0), T.Select(r_a % 4 == 0 and vh % 2 == 0, T.float32(1), T.float32(0))))))))) * T.Select(r_b % 4 == 3 and vw % 2 == 1, T.float32(1), T.Select(r_b % 4 == 3 and vw % 2 == 0, T.float32(0), T.Select(r_b % 4 == 2 and vw % 2 == 1, T.float32(1), T.Select(r_b % 4 == 2 and vw % 2 == 0, T.float32(1), T.Select(r_b % 4 == 1 and vw % 2 == 1, T.float32(-1), T.Select(r_b % 4 == 1 and vw % 2 == 0, T.float32(1), T.Select(r_b % 4 == 0 and vw % 2 == 1, T.float32(0), T.Select(r_b % 4 == 0 and vw % 2 == 0, T.float32(1), T.float32(0)))))))))
for i2_1, i3_1 in T.grid(2, 2):
with T.block("conv2d_winograd"):
n = T.axis.spatial(2, (i0_i1_i2_0_i3_0_fused_0 * 262144 + i0_i1_i2_0_i3_0_fused_1 * 1024 + i0_i1_i2_0_i3_0_fused_2) // 1945600)
co = T.axis.spatial(2048, (i0_i1_i2_0_i3_0_fused_0 * 262144 + i0_i1_i2_0_i3_0_fused_1 * 1024 + i0_i1_i2_0_i3_0_fused_2) % 1945600 // 950)
h = T.axis.spatial(50, (i0_i1_i2_0_i3_0_fused_0 * 262144 + i0_i1_i2_0_i3_0_fused_1 * 1024 + i0_i1_i2_0_i3_0_fused_2) % 950 // 38 * 2 + i2_1)
w = T.axis.spatial(75, (i0_i1_i2_0_i3_0_fused_0 * 262144 + i0_i1_i2_0_i3_0_fused_1 * 1024 + i0_i1_i2_0_i3_0_fused_2) % 38 * 2 + i3_1)
T.where(((i0_i1_i2_0_i3_0_fused_0 * 256 + i0_i1_i2_0_i3_0_fused_1) * 1024 + i0_i1_i2_0_i3_0_fused_2) % 38 * 2 + i3_1 < 75 and (i0_i1_i2_0_i3_0_fused_0 * 256 + i0_i1_i2_0_i3_0_fused_1) * 1024 + i0_i1_i2_0_i3_0_fused_2 < 3891200)
T.reads(inverse_local[co, n * 950 + h // 2 * 38 + w // 2, h % 2, w % 2], p2[0, co, 0, 0])
T.writes(T_relu[n, co, h, w])
T_relu[n, co, h, w] = T.max(inverse_local[co, n * 950 + h // 2 * 38 + w // 2, h % 2, w % 2] + p2[0, co, 0, 0], T.float32(0))
# fmt: on
decision_0 = [
("SamplePerfectTile", [2, 1, 2, 1, 1]),
("SamplePerfectTile", [2, 1, 1, 2, 1]),
("SamplePerfectTile", [64, 2, 8, 1, 2]),
("SamplePerfectTile", [95, 1, 4, 1, 5]),
("SamplePerfectTile", [256, 1, 8]),
("SampleCategorical", 0),
("SampleCategorical", 3),
("SampleCategorical", 4),
]
with _target():
mod = nchw_add_relu
actual = _design_space(mod)
check_sketches(
mod,
sketches=actual,
expected_mods=[nchw_add_relu_scheduled],
expected_decisions=[decision_0],
debug_mask=0,
)
if __name__ == "__main__":
test_cuda_nhwc()
test_cuda_nchw()
test_cuda_nchw_add_relu()
| 63,137 | 105.292929 | 4,467 | py |
tvm | tvm-main/tests/python/unittest/test_autotvm_flop_calculator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test flop calculation"""
import tvm
from tvm import te
import numpy as np
from tvm.autotvm.task.task import compute_flop
def random_dtypes():
"""Return pair of (input, accumulator) dtypes"""
candidates = [("float32", "float32"), ("float16", "float32"), ("int8", "int32")]
return candidates[np.random.choice(len(candidates))]
def test_conv():
for i in range(5):
N, H, W, CO, CI, KH, KW = [np.random.randint(10, 32) for _ in range(7)]
(input_dtype, acc_dtype) = random_dtypes()
D = te.placeholder((N, CI, H, W), dtype=input_dtype)
K = te.placeholder((CO, CI, KH, KW), dtype=input_dtype)
KH = min(H, KH)
KW = min(W, KW)
ci = te.reduce_axis((0, CI))
kh = te.reduce_axis((0, KH))
kw = te.reduce_axis((0, KW))
OH = (H - KH) + 1
OW = (W - KW) + 1
C = te.compute(
(N, CO, OH, OW),
lambda n, co, h, w: te.sum(
D[n][ci][h][w].astype(acc_dtype) * K[co][ci][h][w].astype(acc_dtype),
axis=[ci, kh, kw],
),
)
s = te.create_schedule([C.op])
assert compute_flop(s) == 2 * N * CO * OH * OW * CI * KH * KW
def test_pack_gemm():
for i in range(5):
N, L, M = [np.random.randint(10, 128) * 4 for _ in range(3)]
(input_dtype, acc_dtype) = random_dtypes()
A = te.placeholder((N, L), dtype=input_dtype)
B = te.placeholder((M, L), dtype=input_dtype)
k = te.reduce_axis((0, L))
bn = 4
idxd = tvm.tir.indexdiv
idxm = tvm.tir.indexmod
A_pack = te.compute((N // bn, L, bn), lambda i, j, k: A[i * bn + k][j])
B_pack = te.compute((M // bn, L, bn), lambda i, j, k: B[i * bn + k][j])
C_pack = te.compute(
(N // bn, M // bn, bn, bn),
lambda i, j, ii, jj: te.sum(
A_pack[i, k, ii].astype(acc_dtype) * B_pack[j, k, jj].astype(acc_dtype), axis=[k]
),
)
C = te.compute(
(N, M), lambda i, j: C_pack[idxd(i, bn)][idxd(j, bn)][idxm(i, bn)][idxm(j, bn)]
)
s = te.create_schedule([C.op])
assert compute_flop(s) == 2 * N * L * M
def test_outer_dot():
for i in range(5):
N, M = [np.random.randint(10, 128) * 4 for _ in range(2)]
(input_dtype, acc_dtype) = random_dtypes()
A = te.placeholder((N,), dtype=input_dtype)
B = te.placeholder((M,), dtype=input_dtype)
C = te.compute((N, M), lambda i, j: A[i].astype(acc_dtype) * B[j].astype(acc_dtype))
s = te.create_schedule([C.op])
assert compute_flop(s) == N * M
def test_max_pool():
for i in range(5):
N, H, W, CO, CI, KH, KW = [np.random.randint(10, 32) for _ in range(7)]
(input_dtype, _) = random_dtypes()
D = te.placeholder((N, CI, H, W), dtype=input_dtype)
KH = min(H, KH)
KW = min(W, KW)
kh = te.reduce_axis((0, KH))
kw = te.reduce_axis((0, KW))
OH = (H - KH) + 1
OW = (W - KW) + 1
C = te.compute(
(N, CO, OH, OW), lambda n, co, h, w: tvm.te.max(D[n][co][h + kh][w + kw], axis=[kh, kw])
)
s = te.create_schedule([C.op])
assert compute_flop(s) == N * CO * OH * OW * KH * KW
def test_average_pool():
for i in range(5):
N, H, W, CO, CI, KH, KW = [np.random.randint(10, 32) for _ in range(7)]
(input_dtype, acc_dtype) = random_dtypes()
D = te.placeholder((N, CI, H, W), dtype=input_dtype)
KH = min(H, KH)
KW = min(W, KW)
kh = te.reduce_axis((0, KH))
kw = te.reduce_axis((0, KW))
OH = (H - KH) + 1
OW = (W - KW) + 1
C = te.compute(
(N, CO, OH, OW),
lambda n, co, h, w: te.sum(
te.div(D[n][co][h + kh][w + kw].astype(acc_dtype), (KW * KH)), axis=[kh, kw]
),
)
s = te.create_schedule([C.op])
assert compute_flop(s) == 2 * N * CO * OH * OW * KH * KW
def test_move():
"""No float number operation in simple move. So the estimator should raise an error"""
N = 1024
A = te.placeholder((N,))
C = te.compute((N,), lambda i: A[i])
s = te.create_schedule([C.op])
try:
compute_flop(s)
assert False
except RuntimeError:
pass
if __name__ == "__main__":
test_conv()
test_pack_gemm()
test_outer_dot()
test_move()
| 5,239 | 29.114943 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_auto_scheduler_task_scheduler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test task scheduler """
import tempfile
import multiprocessing
import numpy as np
import tvm
import tvm.testing
from tvm import auto_scheduler
from tvm.testing.auto_scheduler import matmul_auto_scheduler_test
@tvm.testing.requires_llvm
def test_task_scheduler_round_robin():
tasks = []
for n in [2, 4, 8]:
tasks.append(
auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(n, n, n), target="llvm"
)
)
with tempfile.NamedTemporaryFile() as fp:
log_file = fp.name
num_trials_per_task = 2
# Tune all tasks
measure_ctx = auto_scheduler.LocalRPCMeasureContext()
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=num_trials_per_task * len(tasks),
runner=measure_ctx.runner,
num_measures_per_round=1,
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
task_scheduler = auto_scheduler.TaskScheduler(tasks, strategy="round-robin", callbacks=[])
task_scheduler.tune(tune_option, search_policy="sketch.random")
# Check the result of round robin
counters = {}
for task in tasks:
counters[task.workload_key] = 0
for inp, _ in auto_scheduler.load_records(log_file):
counters[inp.task.workload_key] += 1
for task in tasks:
assert counters[task.workload_key] == num_trials_per_task
# test continuous tuning (restoring the status)
task_scheduler = auto_scheduler.TaskScheduler(
tasks, strategy="round-robin", load_log_file=log_file, callbacks=[]
)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=len(tasks),
num_measures_per_round=1,
)
task_scheduler.tune(tune_option, search_policy="sketch.random")
del measure_ctx
@tvm.testing.requires_llvm
def task_scheduler_round_robin_spawn():
assert multiprocessing.get_start_method(False) == "spawn"
test_task_scheduler_round_robin()
@tvm.testing.requires_llvm
def test_task_scheduler_round_robin_spawn():
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(target=task_scheduler_round_robin_spawn)
p.start()
p.join()
@tvm.testing.requires_llvm
def test_task_scheduler_gradient():
tasks = []
for n in [2, 4]:
tasks.append(
auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(n, n, n), target="llvm"
)
)
def objective_func(costs):
return 1e5 * costs[0]
with tempfile.NamedTemporaryFile() as fp:
log_file = fp.name
n_trials = 5
# Tune all tasks
measure_ctx = auto_scheduler.LocalRPCMeasureContext()
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=n_trials,
runner=measure_ctx.runner,
num_measures_per_round=1,
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
)
task_scheduler = auto_scheduler.TaskScheduler(
tasks, objective_func=objective_func, callbacks=[]
)
# Forcely rewrite the initial values.
# This can make this test more stable on the slow CI machines
task_scheduler.best_costs = np.array([1e2, 1e-8])
task_scheduler.tune(tune_option, search_policy="sketch.random")
# Check the allocation results
counters = {}
for task in tasks:
counters[task.workload_key] = 0
for inp, _ in auto_scheduler.load_records(log_file):
counters[inp.task.workload_key] += 1
assert counters[tasks[0].workload_key] == n_trials - 1
assert counters[tasks[1].workload_key] == 1
del measure_ctx
if __name__ == "__main__":
test_task_scheduler_round_robin()
test_task_scheduler_round_robin_spawn()
test_task_scheduler_gradient()
| 4,745 | 31.506849 | 98 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_multi_anchor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import meta_schedule as ms
from tvm import relay
def get_dense_dense(data_shape, weight_shape):
def multi_dense():
p_data = relay.var("p_data", shape=data_shape, dtype="float32")
p_weight1 = relay.var("p_weight1", shape=weight_shape, dtype="float32")
p_weight2 = relay.var("p_weight2", shape=weight_shape, dtype="float32")
dense1 = relay.nn.dense(p_data, p_weight1)
dense2 = relay.nn.dense(dense1, p_weight2)
f = relay.Function([p_data, p_weight1, p_weight2], dense2)
f = f.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
return f
data = relay.var("data", shape=data_shape, dtype="float32")
weight1 = relay.var("weight1", shape=weight_shape, dtype="float32")
weight2 = relay.var("weight2", shape=weight_shape, dtype="float32")
out = relay.Call(multi_dense(), [data, weight1, weight2])
return relay.Function([data, weight1, weight2], out)
def get_ref(data_np, weight1_np, weight2_np):
dense1 = np.dot(data_np, np.transpose(weight1_np))
return np.dot(dense1, np.transpose(weight2_np))
def schedule_dense_dense(sch):
dense1 = sch.get_block("T_matmul_NT")
dense2 = sch.get_block("T_matmul_NT_1")
_y1, _x1, _k1 = sch.get_loops(dense1)
_y2, _x2, _k2 = sch.get_loops(dense2)
def test_dense_dense():
M, N, K = 128, 128, 128
data_shape = (M, K)
weight_shape = (N, K)
relay_mod = tvm.IRModule.from_expr(get_dense_dense(data_shape, weight_shape))
data_np = np.random.randn(*data_shape).astype("float32")
weight1_np = np.random.randn(*weight_shape).astype("float32")
weight2_np = np.random.randn(*weight_shape).astype("float32")
target = "llvm"
params = {"weight1": weight1_np, "weight2": weight2_np}
def schedule_fn(sch):
if "nn_dense_nn_dense" in sch.mod.attrs["task_name"]:
schedule_dense_dense(sch)
return True
return False
with ms.database.ScheduleFnDatabase(schedule_fn):
with tvm.transform.PassContext(
opt_level=3,
config={"relay.backend.use_meta_schedule": True},
):
lib = relay.build(relay_mod, target=target, params=params)
dev = tvm.device(target, 0)
runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
runtime.set_input("data", data_np)
runtime.run()
out = runtime.get_output(0).numpy()
ref = get_ref(data_np, weight1_np, weight2_np)
tvm.testing.assert_allclose(out, ref, atol=1e-4, rtol=1e-4)
if __name__ == "__main__":
test_dense_dense()
| 3,399 | 37.202247 | 81 | py |
tvm | tvm-main/tests/python/unittest/test_target_texture_codegen_opencl.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# 'License'); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import autotvm
from tvm import te
from tvm.topi import testing
from tvm.topi.utils import get_const_tuple, simplify
from tvm.topi import nn
def compute_plus_one_rank3(shape):
X = te.placeholder(shape, name="X", dtype="float32")
Y = te.compute(shape, lambda i, j, k: X[i, j, k] + 1, name="Compute_Y")
return X, Y
def schedule_plus_one_rank3(X, Y):
s = te.create_schedule(Y.op)
# Xt = s.cache_read(X, "texture", [Y])
# Xt = s.cache_read(X, "global", [Y])
Xt = s.cache_read(X, "global.texture", [Y])
# copy to texture stage
x, y, c = s[Xt].op.axis
s[Xt].bind(x, te.thread_axis("blockIdx.x"))
s[Xt].bind(y, te.thread_axis("threadIdx.x"))
s[Xt].vectorize(c)
# the compute stage
x, y, c = s[Y].op.axis
xo, yo, xi, yi = s[Y].tile(x, y, 4, 4)
s[Y].bind(xo, te.thread_axis("blockIdx.x"))
s[Y].bind(yo, te.thread_axis("threadIdx.x"))
s[Y].vectorize(c)
return s
def compute_plus_one_rank5(shape):
X = te.placeholder(shape, name="X", dtype="float32")
Y = te.compute(shape, lambda i, j, k, l, m: X[i, j, k, l, m] + 1, name="Compute_Y")
return X, Y
def schedule_plus_one_rank5(X, Y):
s = te.create_schedule(Y.op)
Xt = s.cache_read(X, "global.texture", [Y])
# copy to texture stage
a, b, c, d, e = s[Xt].op.axis
abc = s[Xt].fuse(a, b, c)
s[Xt].bind(abc, te.thread_axis("blockIdx.x"))
s[Xt].bind(d, te.thread_axis("threadIdx.x"))
s[Xt].vectorize(e)
# the compute stage
a, b, c, d, e = s[Y].op.axis
abc = s[Y].fuse(a, b, c)
xo, yo, xi, yi = s[Y].tile(abc, d, 4, 4)
s[Y].bind(xo, te.thread_axis("blockIdx.x"))
s[Y].bind(yo, te.thread_axis("threadIdx.x"))
s[Y].vectorize(e)
return s
def compute_matmul(shape):
    """Declare a matmul over channel-packed rank-3 operands.

    Both inputs have layout [outer, K, inner]; the logical matrices are
    (outer*inner, K) and the result is square of side shape[0]*shape[2].
    Returns (A, B, C).
    """
    A = te.placeholder(shape, name="A", dtype="float32")
    B = te.placeholder(shape, name="B", dtype="float32")
    red = te.reduce_axis((0, shape[1]), name="k")
    inner = shape[2]
    side = shape[0] * inner

    def _dot(i, j):
        # unpack the flat row/col index into (outer chunk, inner lane)
        lhs = A[i // inner, red, i % inner].astype("float32")
        rhs = B[j // inner, red, j % inner].astype("float32")
        return te.sum(lhs * rhs, axis=[red])

    C = te.compute((side, side), _dot, name="Compute_MatMul")
    return A, B, C
def schedule_matmul(A, B, C, local=False):
    """Schedule the packed matmul with A/B staged through texture memory.

    When ``local`` is True an extra register-level ("local") cache stage
    is inserted after each texture read.  Returns the te.Schedule.
    """
    s = te.create_schedule(C.op)
    At = s.cache_read(A, "global.texture", [C])
    Bt = s.cache_read(B, "global.texture", [C])
    if local:
        Al = s.cache_read(At, "local", [C])
        Bl = s.cache_read(Bt, "local", [C])
    Cl = s.cache_write(C, "local")
    bx = te.thread_axis("blockIdx.x")
    tx = te.thread_axis("threadIdx.x")
    def copy_to_texture(stage):
        # bind the copy-in stage and vectorize its innermost (packed) axis
        _io, _k, _ii = s[stage].op.axis
        s[stage].vectorize(_ii)
        s[stage].bind(_io, bx)
        s[stage].bind(_k, tx)
    copy_to_texture(At)
    copy_to_texture(Bt)
    # copy to global stage
    _i, _j = s[C].op.axis
    xo, yo, xi, yi = s[C].tile(_i, _j, 4, 4)
    s[C].unroll(xi)
    s[C].vectorize(yi)
    s[C].bind(xo, te.thread_axis("blockIdx.x"))
    s[C].bind(yo, te.thread_axis("threadIdx.x"))
    # the compute stage
    s[Cl].compute_at(s[C], yo)
    (_k,) = Cl.op.reduce_axis
    _x, _y = s[Cl].op.axis
    s[Cl].reorder(_k, _x, _y)
    s[Cl].unroll(_x)
    s[Cl].vectorize(_y)
    if local:
        s[Al].compute_at(s[Cl], _k)
        s[Al].vectorize(s[Al].op.axis[-1])
        s[Bl].compute_at(s[Cl], _k)
        s[Bl].vectorize(s[Bl].op.axis[-1])
    return s
def compute_matmul_inner(shape):
    """Declare a matmul whose reduction axis is packed innermost.

    Inputs have layout [M, K//k, k]; e.g. (32, 64, 4) x (32, 64, 4)
    behaves as (32, 256) x (32, 256)^T.  Returns (A, B, C).
    """
    A = te.placeholder(shape, name="A", dtype="float32")
    B = te.placeholder(shape, name="B", dtype="float32")
    red = te.reduce_axis((0, shape[1] * shape[2]), name="k")
    lanes = shape[2]

    def _dot(i, j):
        # both operands split the flat reduction index into (chunk, lane)
        lhs = A[i, red // lanes, red % lanes].astype("float32")
        rhs = B[j, red // lanes, red % lanes].astype("float32")
        return te.sum(lhs * rhs, axis=[red])

    C = te.compute((shape[0], shape[0]), _dot, name="Compute_MatMul")
    return A, B, C
def schedule_matmul_inner(A, B, C, local=False):
    """Schedule the inner-packed matmul with texture-cached inputs.

    Same staging pattern as ``schedule_matmul`` but the reduction is
    ordered innermost; the local-cache compute_at anchors on a spatial
    axis instead of the reduce axis.  Returns the te.Schedule.
    """
    s = te.create_schedule(C.op)
    At = s.cache_read(A, "global.texture", [C])
    Bt = s.cache_read(B, "global.texture", [C])
    if local:
        Al = s.cache_read(At, "local", [C])
        Bl = s.cache_read(Bt, "local", [C])
    Cl = s.cache_write(C, "local")
    bx = te.thread_axis("blockIdx.x")
    tx = te.thread_axis("threadIdx.x")
    def copy_to_texture(stage):
        # bind the copy-in stage; vectorize the packed innermost axis
        _i, _ko, _ki = s[stage].op.axis
        s[stage].vectorize(_ki)
        s[stage].bind(_i, bx)
        s[stage].bind(_ko, tx)
    copy_to_texture(At)
    copy_to_texture(Bt)
    # copy to global stage
    _i, _j = s[C].op.axis
    xo, yo, xi, yi = s[C].tile(_i, _j, 4, 4)
    s[C].unroll(xi)
    s[C].vectorize(yi)
    s[C].bind(xo, te.thread_axis("blockIdx.x"))
    s[C].bind(yo, te.thread_axis("threadIdx.x"))
    # the compute stage
    s[Cl].compute_at(s[C], yo)
    (_k,) = Cl.op.reduce_axis
    _x, _y = s[Cl].op.axis
    s[Cl].reorder(_x, _y, _k)
    s[Cl].unroll(_x)
    # TODO(csullivan): consider whether the below error is worth resolving
    # s[Cl].vectorize(_y) # error
    if local:
        s[Al].compute_at(s[Cl], _x)
        s[Al].vectorize(s[Al].op.axis[-1])
        s[Bl].compute_at(s[Cl], _x)
        s[Bl].vectorize(s[Bl].op.axis[-1])
    return s
def compute_matmul_vector_accumulator(shapeA, shapeB):
    """Declare A x B with vector-friendly packed layouts.

    (K/4, M, K%4) x (K, N/4, N%4) = (M, N), e.g.
    (32, 64, 4) x (128, 16, 4) = (64, 64).  Returns (A, B, C).
    """
    A = te.placeholder(shapeA, name="A", dtype="float32")
    B = te.placeholder(shapeB, name="B", dtype="float32")
    red = te.reduce_axis((0, shapeB[0]), name="k")
    lanes_a = shapeA[-1]
    lanes_b = shapeB[-1]

    def _dot(i, j):
        # A packs K as (chunk, lane); B packs N as (chunk, lane)
        lhs = A[red // lanes_a, i, red % lanes_a].astype("float32")
        rhs = B[red, j // lanes_b, j % lanes_b].astype("float32")
        return te.sum(lhs * rhs, axis=[red])

    C = te.compute((shapeA[1], shapeB[1] * shapeB[2]), _dot, name="Compute_MatMul")
    return A, B, C
def schedule_matmul_vector_accumulator(A, B, C, local=False):
    """Schedule the vector-accumulator matmul with texture-cached inputs.

    The reduction is split by a factor of 4 so a vector of partial sums
    accumulates in registers; ``local=True`` adds register cache stages
    for both inputs.  Returns the te.Schedule.
    """
    s = te.create_schedule(C.op)
    At = s.cache_read(A, "global.texture", [C])
    Bt = s.cache_read(B, "global.texture", [C])
    if local:
        Al = s.cache_read(At, "local", [C])
        Bl = s.cache_read(Bt, "local", [C])
    Cl = s.cache_write(C, "local")
    def copy_to_texture(stage):
        _y, _x, _v = s[stage].op.axis
        # TODO(csullivan): removing this vectorize results in numerical errors, autovectorize
        s[stage].vectorize(_v)
        s[stage].bind(_y, te.thread_axis("blockIdx.x"))
        s[stage].bind(_x, te.thread_axis("threadIdx.x"))
    copy_to_texture(At)
    copy_to_texture(Bt)
    # copy to global stage
    _i, _j = s[C].op.axis
    xo, yo, xi, yi = s[C].tile(_i, _j, 4, 4)
    s[C].unroll(xi)
    s[C].vectorize(yi)
    s[C].bind(xo, te.thread_axis("blockIdx.x"))
    s[C].bind(yo, te.thread_axis("threadIdx.x"))
    # the compute stage
    s[Cl].compute_at(s[C], yo)
    (_k,) = Cl.op.reduce_axis
    _a, _b = s[Cl].op.axis
    # split K by 4 so the inner reduce step is unrolled over a vector
    _ko, _ki = s[Cl].split(_k, factor=4)
    s[Cl].reorder(_ko, _a, _ki, _b)
    s[Cl].unroll(_ki)
    s[Cl].unroll(_a)
    s[Cl].vectorize(_b)
    if local:
        s[Al].compute_at(s[Cl], _a)
        _aa, _ka, _ba = s[Al].op.axis
        # TODO(csullivan)[BEFORE PR]: removing this vectorize command causes a crash. This needs to be autovectorized.
        s[Al].vectorize(_ba)
        s[Bl].compute_at(s[Cl], _ko)
        _ab, _kb, _bb = s[Bl].op.axis
        s[Bl].vectorize(_bb)
        s[Bl].unroll(_ab)
    return s
def compute_conv2d_1x1_NCHWc_RSCKk(input_shape, filter_shape):
    """Declare a 1x1 conv2d over NCHWc activations and an RSCKk filter.

    input_shape:  [N, C//c, H, W, c];  filter_shape: [R, S, C, K//k, k]
    (R and S expected to be 1).  Output layout is [N, K//k, H, W, k].
    Returns (data, filter, conv).
    """
    # conv2d( [N, C, H, W, c] , [1, 1, C, K, k]
    data = te.placeholder(input_shape, name="data", dtype="float32")
    filt = te.placeholder(filter_shape, name="filter", dtype="float32")
    c = te.reduce_axis((0, input_shape[1]), name="C")
    c4 = te.reduce_axis((0, input_shape[-1]), name="c4")
    kh = te.reduce_axis((0, filter_shape[0]), name="kh")
    kw = te.reduce_axis((0, filter_shape[1]), name="kw")
    conv = te.compute(
        (input_shape[0], filter_shape[-2], input_shape[2], input_shape[3], filter_shape[-1]),
        lambda n, ko, i, j, ki: te.sum(
            # data reduces over (channel chunk, channel lane); the filter
            # addresses the unpacked channel c*lanes + c4
            data[n, c, i, j, c4].astype("float32")
            * filt[kh, kw, c * input_shape[-1] + c4, ko, ki].astype("float32"),
            axis=[kh, kw, c, c4],
        ),
        # name="Compute_conv2d_1x1_NCHWc_RSCKk",
        name="conv2d_1x1",
    )
    return data, filt, conv
def schedule_conv2d_1x1_NCHWc_RSCKk(data, filt, conv):
    """Schedule the NCHWc x RSCKk 1x1 conv.

    Both inputs are staged through "global.texture" and then "local"
    caches; the register-blocked compute stage splits the channel
    reduction by 4 and vectorizes the output lane axis.
    Returns the te.Schedule.
    """
    # inputs: (1, 128//4, 56, 56, 4), (1, 1, 128, 128//4, 4)
    # outputs:
    s = te.create_schedule(conv.op)
    A, B, C = data, filt, conv
    At = s.cache_read(A, "global.texture", [C])
    Bt = s.cache_read(B, "global.texture", [C])
    Al = s.cache_read(At, "local", [C])
    Bl = s.cache_read(Bt, "local", [C])
    Cl = s.cache_write(C, "local")
    def copy_to_texture(stage):
        # fuse all but the lane axis, split for block/thread binding,
        # vectorize the lane axis
        axes = s[stage].op.axis
        fused = s[stage].fuse(*axes[:-1])
        block, thread = s[stage].split(fused, factor=32)
        s[stage].vectorize(axes[-1])
        s[stage].bind(block, te.thread_axis("blockIdx.x"))
        s[stage].bind(thread, te.thread_axis("threadIdx.x"))
    copy_to_texture(At)
    copy_to_texture(Bt)
    _n, _ko, _h, _w, _ki = s[C].op.axis
    s[C].vectorize(_ki)
    s[C].bind(_n, te.thread_axis("blockIdx.x"))
    s[C].bind(_ko, te.thread_axis("threadIdx.x"))
    s[Cl].compute_at(s[C], _w)
    _nl, _kol, _hl, _wl, _kil = s[Cl].op.axis
    _khl, _kwl, _cl, _cl4 = s[Cl].op.reduce_axis
    _clo, _cli = s[Cl].split(_cl, factor=4)
    s[Cl].reorder(_clo, _cli, _cl4, _kil)
    s[Cl].unroll(_cli)
    s[Cl].unroll(_cl4)
    s[Cl].vectorize(_kil)
    s[Al].compute_at(s[Cl], _cli)
    s[Al].vectorize(s[Al].op.axis[-1])
    s[Bl].compute_at(s[Cl], _kwl)
    s[Bl].vectorize(s[Bl].op.axis[-1])
    return s
def compute_conv2d_1x1_WCHNc_CRSKk(input_shape, filter_shape):
    """Declare a 1x1 conv2d in WCHNc layout with a CRSKk filter.

    Both operands are first packed into texture-friendly rank-3/4
    layouts; the conv reduces over (r, s, channel chunk, channel lane).
    Returns (data, filter, packed_data, packed_filter, conv).
    """
    # input_shape = [W, C, H, N, c] -> [W, C, H*N, c]
    # filter_shape = [C, R, S, K, k] -> [C, R*S*K, k]
    # output_shape: [WK, HN, k] -> [W, K, H, N, k]
    data = te.placeholder(input_shape, name="data", dtype="float32")
    filt = te.placeholder(filter_shape, name="filter", dtype="float32")
    packed_data = te.compute(
        (input_shape[0], input_shape[1], input_shape[2] * input_shape[3], input_shape[4]),
        lambda i, j, k, l: data[i, j, k // input_shape[3], k % input_shape[3], l],
        name="packed_data",
    )
    # Logical transformation of Nd -> 3d tensor
    # CRSKk -> C|RSK|k
    # r = rsk // SK
    # sk = rsk % SK
    # s = sk // K == (rsk % SK) // K == (rsk // K) % S
    # k = sk % K == (rsk % SK) % K == rsk % K
    packed_filter = te.compute(
        (filter_shape[0], filter_shape[1] * filter_shape[2] * filter_shape[3], filter_shape[4]),
        lambda i, j, k: filt[
            i,
            j // (filter_shape[3] * filter_shape[2]),
            (j // filter_shape[3]) % filter_shape[2],
            j % filter_shape[3],
            k,
        ],
        name="packed_filter",
    )
    c = te.reduce_axis((0, input_shape[1]), name="C")
    c4 = te.reduce_axis((0, input_shape[-1]), name="c4")
    r = te.reduce_axis((0, filter_shape[1]), name="r")
    s = te.reduce_axis((0, filter_shape[2]), name="s")
    conv = te.compute(
        (input_shape[0], filter_shape[3], input_shape[2], input_shape[3], filter_shape[4]),
        lambda w, ko, h, n, ki: te.sum(
            packed_data[w, c, h * input_shape[3] + n, c4].astype("float32")
            * packed_filter[
                c * input_shape[-1] + c4, ((r * filter_shape[2]) + s) * filter_shape[3] + ko, ki
            ].astype("float32"),
            axis=[r, s, c, c4],
        ),
        name="conv2d_1x1",
    )
    return data, filt, packed_data, packed_filter, conv
def schedule_conv2d_1x1_WCHNc_CRSKk(data, filt, packed_data, packed_filter, conv):
    """AutoTVM-templated schedule for the WCHNc x CRSKk 1x1 conv.

    Inlines the packing stages, stages both packed operands through
    texture + local caches, and tiles the output with tunable splits
    (``tile_f``/``tile_w``/``tile_h``) plus tunable reduction splits.
    Returns the te.Schedule.
    """
    # data: [W, C, H*N, c]
    # filter: [C, R*S*K, k]
    # output: [W, K, H, N, k]
    # conv2d( [N, C, H, W, c] , [1, 1, C, K, k]
    # inputs: (1, 128//4, 56, 56, 4), (1, 1, 128, 128//4, 4)
    # data: (56, 128//4, 56*1, 4) = (56, 32, 56, 4)
    # filt: (128, 1*1*128//4, 4) = (128, 32, 4)
    # conv: (56, 32, 56, 1, 4)
    s = te.create_schedule(conv.op)
    cfg = autotvm.get_config()
    s[packed_data].compute_inline()
    s[packed_filter].compute_inline()
    A, B, C = packed_data, packed_filter, conv
    At = s.cache_read(A, "global.texture", [C])
    Bt = s.cache_read(B, "global.texture", [C])
    Al = s.cache_read(At, "local", [C])
    Bl = s.cache_read(Bt, "local", [C])
    Cl = s.cache_write(C, "local")
    def copy_to_texture(stage):
        # fuse all but the lane axis, split for block/thread binding,
        # vectorize the lane axis
        axes = s[stage].op.axis
        fused = s[stage].fuse(*axes[:-1])
        block, thread = s[stage].split(fused, factor=32)
        s[stage].vectorize(axes[-1])
        s[stage].bind(block, te.thread_axis("blockIdx.x"))
        s[stage].bind(thread, te.thread_axis("threadIdx.x"))
    copy_to_texture(At)
    copy_to_texture(Bt)
    _w, _ko, _h, _n, _ki = s[C].op.axis
    # nparts=1 split creates a pragma scope for auto_unroll below
    kernel_scope, _n = s[C].split(_n, nparts=1)
    cfg.define_split("tile_f", _ko, num_outputs=4)
    cfg.define_split("tile_w", _w, num_outputs=4)
    cfg.define_split("tile_h", _h, num_outputs=4)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    bk, vk, tk, ki = cfg["tile_f"].apply(s, C, _ko)
    bw, vw, tw, wi = cfg["tile_w"].apply(s, C, _w)
    bh, vh, th, hi = cfg["tile_h"].apply(s, C, _h)
    s[C].reorder(bh, _n, vh, th, hi)
    bhn = s[C].fuse(bh, _n)
    s[C].bind(bk, te.thread_axis("blockIdx.z"))
    s[C].bind(bhn, te.thread_axis("blockIdx.y"))
    s[C].bind(bw, te.thread_axis("blockIdx.x"))
    s[C].bind(vk, te.thread_axis("vthread"))
    s[C].bind(vh, te.thread_axis("vthread"))
    s[C].bind(vw, te.thread_axis("vthread"))
    s[C].bind(tk, te.thread_axis("threadIdx.z"))
    s[C].bind(th, te.thread_axis("threadIdx.y"))
    s[C].bind(tw, te.thread_axis("threadIdx.x"))
    s[C].reorder(bw, bk, bhn, vw, vk, vh, tw, tk, th, ki, hi, wi, _ki)
    s[C].vectorize(_ki)
    # TODO(csullivan): Try uneven workgroup split
    # _wo, _wi = s[C].split(_w, factor=4)
    # #_hno, _hni = s[C].split(_hn, factor=8)
    # #s[C].reorder(_wo, _wi, _ko, _hno, _hni, _ki)
    # s[C].reorder(_wo, _ko, _hn, _ki, _wi)
    # s[C].unroll(_wi)
    # # mace:
    # # const int out_ch_blk = get_global_id(0);
    # # const int out_w_blk = get_global_id(1);
    # # const int out_hb = get_global_id(2);
    # bx = te.thread_axis("blockIdx.x")
    # by = te.thread_axis("blockIdx.y")
    # bz = te.thread_axis("blockIdx.z")
    # s[C].bind(_ko, bx)
    # s[C].bind(_wo, by)
    # s[C].bind(_hn, bz)
    # s[Cl].compute_at(s[C], _hn)
    s[Cl].compute_at(s[C], th)
    _wl, _kol, _hl, _nl, _kil = s[Cl].op.axis
    _khl, _kwl, _cl, _cl4 = s[Cl].op.reduce_axis
    cfg.define_split("tile_c", _cl, num_outputs=2)
    cfg.define_split("tile_kh", _khl, num_outputs=2)
    cfg.define_split("tile_kw", _kwl, num_outputs=2)
    _clo, _cli = cfg["tile_c"].apply(s, Cl, _cl)
    _khlo, _khli = cfg["tile_kh"].apply(s, Cl, _khl)
    _kwlo, _kwli = cfg["tile_kw"].apply(s, Cl, _kwl)
    # s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x)
    s[Cl].reorder(_clo, _khlo, _kwlo, _cli, _cl4, _khli, _kwli, _kol, _hl, _nl, _kil, _wl)
    # s[Cl].reorder(_clo, _khlo, _kwlo, _cli, _cl4, _khli, _kwli)
    # s[Cl].reorder(_cl, _cl4, _kil, _wl)
    s[Cl].unroll(_cl4)
    s[Cl].unroll(_wl)
    s[Cl].vectorize(_kil)
    _wla, _cla, _hnla, _cl4a = s[Al].op.axis
    s[Al].compute_at(s[Cl], _cli)
    s[Al].vectorize(_cl4a)
    s[Al].unroll(_wla)
    _clb, _rskolb, _kilb = s[Bl].op.axis
    s[Bl].compute_at(s[Cl], _cli)
    s[Bl].vectorize(_kilb)
    s[Bl].unroll(_clb)
    s[C].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    # record the FLOP count so autotvm can report GFLOPS
    WO, K, HO, N, K4 = get_const_tuple(C.shape)
    RSC, _, _ = get_const_tuple(B.shape)
    cfg.add_flop(2 * N * K * K4 * HO * WO * RSC)
    return s
def compute_conv2d_NCHWc_KCRSk(Input, Filter, stride, padding, dilation, out_dtype=None):
    """Convolution operator in NCHWc layout.

    Input:  [N, C//c, H, W, c] activations.
    Filter: [K//k, C, R, S, k] weights, flattened on the fly into the
            texture-friendly [K//k, C*R*S, k] layout ("packed_filter").
    Returns the [N, K//k, OH, OW, k] output tensor tagged
    "conv2d_nchwc_kcrsk_texture".
    """
    if out_dtype is None:
        out_dtype = Input.dtype
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilation, int) or len(dilation) == 2
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    batch, in_channel_chunk, in_height, in_width, in_channel_block = Input.shape
    num_filter_chunk, channel, kernel_h, kernel_w, num_filter_block = Filter.shape
    # compute the output shape
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
    # compute graph
    pad_before = [0, 0, pad_top, pad_left, 0]
    pad_after = [0, 0, pad_down, pad_right, 0]
    temp = nn.pad(Input, pad_before, pad_after, name="pad_temp")
    # give the two channel reduce axes distinct names (chunk vs. block) so
    # they are distinguishable in dumped IR
    rcc = te.reduce_axis((0, in_channel_chunk), name="rcc")
    rcb = te.reduce_axis((0, in_channel_block), name="rcb")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    # NCHWc x KCRSk
    # texture: NCH|W|c
    # texture: K|CRS|k
    # c = crs//RS
    # rs = crs % RS
    # r = rs // W == (crs // S) % R
    # s = rs % W == crs % S
    Filter = te.compute(
        (num_filter_chunk, channel * kernel_h * kernel_w, num_filter_block),
        lambda ffc, crs, ffb: Filter[
            ffc, crs // (kernel_h * kernel_w), (crs // kernel_w) % kernel_h, crs % kernel_w, ffb
        ],
        name="packed_filter",
    )
    # the batch index is named `nb` to avoid shadowing the `tvm.topi.nn`
    # module imported at file scope
    return te.compute(
        (batch, num_filter_chunk, out_height, out_width, num_filter_block),
        lambda nb, ffc, yy, xx, ffb: te.sum(
            temp[
                nb, rcc, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rcb
            ].astype(out_dtype)
            * Filter[
                ffc, ((rcc * in_channel_block + rcb) * kernel_h + ry) * kernel_w + rx, ffb
            ].astype(out_dtype),
            axis=[rcc, rcb, ry, rx],
        ),
        tag="conv2d_nchwc_kcrsk_texture",
    )
def schedule_conv2d_NCHWc_KCRSk(cfg, s, conv):
    """schedule optimized for batch size = 1

    AutoTVM template: texture caches for pad/kernel feed shared-memory
    stages and a local accumulator; output tiled by the tunable
    ``tile_fc``/``tile_y``/``tile_x`` splits with cooperative fetching.
    Mutates ``s`` in place and records the FLOP count on ``cfg``.
    """
    ##### space definition begin #####
    n, fc, y, x, fb = s[conv].op.axis
    rcc, rcb, ry, rx = s[conv].op.reduce_axis
    cfg.define_split("tile_fc", fc, num_outputs=4)
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_rcc", rcc, num_outputs=2)
    cfg.define_split("tile_ry", ry, num_outputs=2)
    cfg.define_split("tile_rx", rx, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    pad_data, flattened_kernel = s[conv].op.input_tensors
    kernel = s[flattened_kernel].op.input_tensors[0]
    s[flattened_kernel].compute_inline()
    s[pad_data].compute_inline()
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    kernel = flattened_kernel
    if conv.op in s.outputs:
        output = conv
        OL = s.cache_write(conv, "local")
    else:
        output = s.outputs[0].output(0)
        s[conv].set_scope("local")
        OL = conv
    # create cache stage
    AT = s.cache_read(pad_data, "global.texture", [OL])
    WT = s.cache_read(kernel, "global.texture", [OL])
    def copy_to_texture(stage):
        # fuse all but the lane axis, split for block/thread binding,
        # vectorize the lane axis
        axes = s[stage].op.axis
        fused = s[stage].fuse(*axes[:-1])
        block, thread = s[stage].split(fused, factor=32)
        s[stage].vectorize(axes[-1])
        s[stage].bind(block, te.thread_axis("blockIdx.x"))
        s[stage].bind(thread, te.thread_axis("threadIdx.x"))
    copy_to_texture(AT)
    copy_to_texture(WT)
    AA = s.cache_read(AT, "shared", [OL])
    WW = s.cache_read(WT, "shared", [OL])
    # tile and bind spatial axes
    n, fc, y, x, fb = s[output].op.axis
    kernel_scope, n = s[output].split(n, nparts=1)
    bf, vf, tf, fi = cfg["tile_fc"].apply(s, output, fc)
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
    bf = s[output].fuse(n, bf)
    s[output].bind(bf, te.thread_axis("blockIdx.z"))
    s[output].bind(by, te.thread_axis("blockIdx.y"))
    s[output].bind(bx, te.thread_axis("blockIdx.x"))
    s[output].bind(vf, te.thread_axis("vthread"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    s[output].bind(tf, te.thread_axis("threadIdx.z"))
    s[output].bind(ty, te.thread_axis("threadIdx.y"))
    s[output].bind(tx, te.thread_axis("threadIdx.x"))
    s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi, fb)
    s[output].vectorize(fb)
    s[OL].compute_at(s[output], tx)
    # tile reduction axes
    n, fc, y, x, fb = s[OL].op.axis
    rcc, rcb, ry, rx = s[OL].op.reduce_axis
    rco, rci = cfg["tile_rcc"].apply(s, OL, rcc)
    ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)
    # TODO(csullivan): check position of rcb
    s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, rcb, n, fc, y, x, fb)
    s[OL].vectorize(fb)
    s[OL].unroll(rcb)
    s[AA].compute_at(s[OL], rxo)
    s[WW].compute_at(s[OL], rxo)
    # cooperative fetching
    for load in [AA, WW]:
        if load == WW:
            n, fyx, v = s[load].op.axis
            fused = s[load].fuse(n, fyx)
        else:
            n, f, y, x, v = s[load].op.axis
            fused = s[load].fuse(n, f, y, x)
        tz, fused = s[load].split(fused, nparts=cfg["tile_fc"].size[2])
        ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
        tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
        s[load].vectorize(v)
    # unroll
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    N, OCC, OH, OW, OCB = get_const_tuple(output.shape)
    _, ICKHKW, _ = get_const_tuple(kernel.shape)
    if isinstance(N, int):
        cfg.add_flop(2 * N * OH * OW * OCC * OCB * ICKHKW)
def compute_conv2d_NCHWc_KCRSk_acc32(Input, Filter, stride, padding, dilation, out_dtype=None):
    """Convolution operator in NCHWc layout.

    Same layouts as ``compute_conv2d_NCHWc_KCRSk`` but the product is
    cast to ``out_dtype`` before the reduction (accumulation in
    ``out_dtype``) and the result is cast back to float32 in a trailing
    elementwise stage.  Returns that float32 output tensor.
    """
    if out_dtype is None:
        out_dtype = Input.dtype
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilation, int) or len(dilation) == 2
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    batch, in_channel_chunk, in_height, in_width, in_channel_block = Input.shape
    num_filter_chunk, channel, kernel_h, kernel_w, num_filter_block = Filter.shape
    # compute the output shape
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
    # compute graph
    pad_before = [0, 0, pad_top, pad_left, 0]
    pad_after = [0, 0, pad_down, pad_right, 0]
    temp = nn.pad(Input, pad_before, pad_after, name="pad_temp")
    # give the two channel reduce axes distinct names (chunk vs. block) so
    # they are distinguishable in dumped IR
    rcc = te.reduce_axis((0, in_channel_chunk), name="rcc")
    rcb = te.reduce_axis((0, in_channel_block), name="rcb")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    # NCHWc x KCRSk
    # texture: NCH|W|c
    # texture: K|CRS|k
    # c = crs//RS
    # rs = crs % RS
    # r = rs // W == (crs // S) % R
    # s = rs % W == crs % S
    Filter = te.compute(
        (num_filter_chunk, channel * kernel_h * kernel_w, num_filter_block),
        lambda ffc, crs, ffb: Filter[
            ffc, crs // (kernel_h * kernel_w), (crs // kernel_w) % kernel_h, crs % kernel_w, ffb
        ],
        name="packed_filter",
    )
    # the batch index is named `nb` to avoid shadowing the `tvm.topi.nn`
    # module imported at file scope
    conv = te.compute(
        (batch, num_filter_chunk, out_height, out_width, num_filter_block),
        lambda nb, ffc, yy, xx, ffb: te.sum(
            (
                temp[nb, rcc, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rcb]
                * Filter[ffc, ((rcc * in_channel_block + rcb) * kernel_h + ry) * kernel_w + rx, ffb]
            ).astype(out_dtype),
            axis=[rcc, rcb, ry, rx],
        ),
        tag="conv2d_nchwc_kcrsk_texture",
    )
    output = te.compute(conv.shape, lambda n, fc, y, x, fb: conv[n, fc, y, x, fb].astype("float32"))
    return output
def schedule_conv2d_NCHWc_KCRSk_acc32(cfg, s, output):
    """schedule optimized for batch size = 1

    Same AutoTVM template as ``schedule_conv2d_NCHWc_KCRSk`` but applied
    to the float32-cast output stage: the conv itself is the local
    accumulator stage feeding ``output``.  Mutates ``s`` in place and
    records the FLOP count on ``cfg``.
    """
    conv = output.op.input_tensors[0]
    ##### space definition begin #####
    n, fc, y, x, fb = s[conv].op.axis
    rcc, rcb, ry, rx = s[conv].op.reduce_axis
    cfg.define_split("tile_fc", fc, num_outputs=4)
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_rcc", rcc, num_outputs=2)
    cfg.define_split("tile_ry", ry, num_outputs=2)
    cfg.define_split("tile_rx", rx, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    pad_data, flattened_kernel = s[conv].op.input_tensors
    kernel = s[flattened_kernel].op.input_tensors[0]
    s[flattened_kernel].compute_inline()
    s[pad_data].compute_inline()
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    kernel = flattened_kernel
    if conv.op in s.outputs:
        output = conv
        OL = s.cache_write(conv, "local")
    else:
        output = s.outputs[0].output(0)
        s[conv].set_scope("local")
        OL = conv
    # create cache stage
    AT = s.cache_read(pad_data, "global.texture", [OL])
    WT = s.cache_read(kernel, "global.texture", [OL])
    def copy_to_texture(stage):
        # fuse all but the lane axis, split for block/thread binding,
        # vectorize the lane axis
        axes = s[stage].op.axis
        fused = s[stage].fuse(*axes[:-1])
        block, thread = s[stage].split(fused, factor=32)
        s[stage].vectorize(axes[-1])
        s[stage].bind(block, te.thread_axis("blockIdx.x"))
        s[stage].bind(thread, te.thread_axis("threadIdx.x"))
    copy_to_texture(AT)
    copy_to_texture(WT)
    AA = s.cache_read(AT, "shared", [OL])
    WW = s.cache_read(WT, "shared", [OL])
    # tile and bind spatial axes
    n, fc, y, x, fb = s[output].op.axis
    kernel_scope, n = s[output].split(n, nparts=1)
    bf, vf, tf, fi = cfg["tile_fc"].apply(s, output, fc)
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
    bf = s[output].fuse(n, bf)
    s[output].bind(bf, te.thread_axis("blockIdx.z"))
    s[output].bind(by, te.thread_axis("blockIdx.y"))
    s[output].bind(bx, te.thread_axis("blockIdx.x"))
    s[output].bind(vf, te.thread_axis("vthread"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    s[output].bind(tf, te.thread_axis("threadIdx.z"))
    s[output].bind(ty, te.thread_axis("threadIdx.y"))
    s[output].bind(tx, te.thread_axis("threadIdx.x"))
    s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi, fb)
    s[output].vectorize(fb)
    s[OL].compute_at(s[output], tx)
    # tile reduction axes
    n, fc, y, x, fb = s[OL].op.axis
    rcc, rcb, ry, rx = s[OL].op.reduce_axis
    rco, rci = cfg["tile_rcc"].apply(s, OL, rcc)
    ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)
    # TODO(csullivan): check position of rcb
    s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, rcb, n, fc, y, x, fb)
    s[OL].vectorize(fb)
    s[OL].unroll(rcb)
    s[AA].compute_at(s[OL], rxo)
    s[WW].compute_at(s[OL], rxo)
    # cooperative fetching
    for load in [AA, WW]:
        if load == WW:
            n, fyx, v = s[load].op.axis
            fused = s[load].fuse(n, fyx)
        else:
            n, f, y, x, v = s[load].op.axis
            fused = s[load].fuse(n, f, y, x)
        tz, fused = s[load].split(fused, nparts=cfg["tile_fc"].size[2])
        ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
        tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
        s[load].vectorize(v)
    # unroll
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    N, OCC, OH, OW, OCB = get_const_tuple(output.shape)
    _, ICKHKW, _ = get_const_tuple(kernel.shape)
    if isinstance(N, int):
        cfg.add_flop(2 * N * OH * OW * OCC * OCB * ICKHKW)
def compute_depthwise_conv2d_NCHWc_KCRSk_acc32(
    Input, Filter, stride, padding, dilation, out_dtype=None
):
    """Depthwise convolution operator in NCHWc layout.

    Input:  [N, C//c, H, W, c] activations.
    Filter: [C//c, M, R, S, c] weights, flattened on the fly into the
            texture-friendly [C//c, M*R*S, c] layout ("packed_filter").
    Accumulates in ``out_dtype`` and casts the result back to float32 in
    a trailing elementwise stage.  Returns that float32 output tensor.
    """
    if out_dtype is None:
        out_dtype = Input.dtype
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilation, int) or len(dilation) == 2
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    batch, channel_chunk, in_height, in_width, channel_block = Input.shape
    _, channel_multiplier, kernel_h, kernel_w, _ = Filter.shape
    # compute the output shape
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    out_channel_chunk = simplify(channel_chunk * channel_multiplier)
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
    # compute graph
    pad_before = [0, 0, pad_top, pad_left, 0]
    pad_after = [0, 0, pad_down, pad_right, 0]
    temp = nn.pad(Input, pad_before, pad_after, name="pad_temp")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    # NCHWc x CMRSc = [N,(C//4)M,OH,OW, 4c]
    # NCHWc x CMRS
    # texture: NCH|W|c
    # texture: C|MRS|c
    # output: N
    # m = mrs//RS
    # rs = mrs % RS
    # r = rs // W == (mrs // S) % R
    # s = rs % W == mrs % S
    Filter = te.compute(
        (channel_chunk, channel_multiplier * kernel_h * kernel_w, channel_block),
        lambda ffc, mrs, ffb: Filter[
            ffc, mrs // (kernel_h * kernel_w), (mrs // kernel_w) % kernel_h, mrs % kernel_w, ffb
        ],
        name="packed_filter",
    )
    # the batch index is named `nb` to avoid shadowing the `tvm.topi.nn`
    # module imported at file scope
    conv = te.compute(
        (batch, out_channel_chunk, out_height, out_width, channel_block),
        lambda nb, ffc, yy, xx, ffb: te.sum(
            (
                temp[
                    nb,
                    ffc // channel_multiplier,
                    yy * stride_h + ry * dilation_h,
                    xx * stride_w + rx * dilation_w,
                    ffb,
                ]
                * Filter[
                    ffc // channel_multiplier,
                    ((ffc % channel_multiplier) * kernel_h + ry) * kernel_w + rx,
                    ffb,
                ]
            ).astype(out_dtype),
            axis=[ry, rx],
        ),
        tag="depthwise_conv2d_nchwc_kcrsk_texture",
    )
    return te.compute(
        conv.shape, lambda n, ffc, y, x, ffb: conv[n, ffc, y, x, ffb].astype("float32")
    )
def schedule_depthwise_conv2d_NCHWc_KCRSk_acc32(cfg, s, output):
    """schedule optimized for batch size = 1

    Depthwise variant of the NCHWc texture template: only the spatial
    kernel axes (ry, rx) are reduced, so the reduction tiling omits the
    channel splits.  Mutates ``s`` in place and records the FLOP count
    on ``cfg``.
    """
    conv = output.op.input_tensors[0]
    ##### space definition begin #####
    n, fc, y, x, fb = s[conv].op.axis
    ry, rx = s[conv].op.reduce_axis
    cfg.define_split("tile_fc", fc, num_outputs=4)
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_ry", ry, num_outputs=2)
    cfg.define_split("tile_rx", rx, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    pad_data, flattened_kernel = s[conv].op.input_tensors
    kernel = s[flattened_kernel].op.input_tensors[0]
    s[flattened_kernel].compute_inline()
    s[pad_data].compute_inline()
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    kernel = flattened_kernel
    if conv.op in s.outputs:
        output = conv
        OL = s.cache_write(conv, "local")
    else:
        output = s.outputs[0].output(0)
        s[conv].set_scope("local")
        OL = conv
    # create cache stage
    AT = s.cache_read(pad_data, "global.texture", [OL])
    WT = s.cache_read(kernel, "global.texture", [OL])
    def copy_to_texture(stage):
        # fuse all but the lane axis, split for block/thread binding,
        # vectorize the lane axis
        axes = s[stage].op.axis
        fused = s[stage].fuse(*axes[:-1])
        block, thread = s[stage].split(fused, factor=32)
        s[stage].vectorize(axes[-1])
        s[stage].bind(block, te.thread_axis("blockIdx.x"))
        s[stage].bind(thread, te.thread_axis("threadIdx.x"))
    copy_to_texture(AT)
    copy_to_texture(WT)
    AA = s.cache_read(AT, "shared", [OL])
    WW = s.cache_read(WT, "shared", [OL])
    # tile and bind spatial axes
    n, fc, y, x, fb = s[output].op.axis
    kernel_scope, n = s[output].split(n, nparts=1)
    bf, vf, tf, fi = cfg["tile_fc"].apply(s, output, fc)
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
    bf = s[output].fuse(n, bf)
    s[output].bind(bf, te.thread_axis("blockIdx.z"))
    s[output].bind(by, te.thread_axis("blockIdx.y"))
    s[output].bind(bx, te.thread_axis("blockIdx.x"))
    s[output].bind(vf, te.thread_axis("vthread"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    s[output].bind(tf, te.thread_axis("threadIdx.z"))
    s[output].bind(ty, te.thread_axis("threadIdx.y"))
    s[output].bind(tx, te.thread_axis("threadIdx.x"))
    s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi, fb)
    s[output].vectorize(fb)
    s[OL].compute_at(s[output], tx)
    # tile reduction axes
    n, fc, y, x, fb = s[OL].op.axis
    ry, rx = s[OL].op.reduce_axis
    ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)
    s[OL].reorder(ryo, rxo, ryi, rxi, n, fc, y, x, fb)
    s[OL].vectorize(fb)
    # s[OL].unroll()
    s[AA].compute_at(s[OL], rxo)
    s[WW].compute_at(s[OL], rxo)
    # cooperative fetching
    for load in [AA, WW]:
        if load == WW:
            n, fyx, v = s[load].op.axis
            fused = s[load].fuse(n, fyx)
        else:
            n, f, y, x, v = s[load].op.axis
            fused = s[load].fuse(n, f, y, x)
        tz, fused = s[load].split(fused, nparts=cfg["tile_fc"].size[2])
        ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
        tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
        s[load].vectorize(v)
    # unroll
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)
    # derive kernel-window FLOPs from the packed filter shape
    N, OCC, OH, OW, OCB = get_const_tuple(output.shape)
    ICC, MKHKW, ICB = get_const_tuple(kernel.shape)
    M = (OCC * OCB) // (ICC * ICB)
    KHKW = MKHKW // M
    if isinstance(N, int):
        cfg.add_flop(2 * N * OH * OW * OCC * OCB * KHKW)
def scheduler(compute, schedule, *args, **kwargs):
    """Build tensors via *compute(*args)* and schedule them via *schedule*.

    Returns (schedule result, tensors).
    """
    tensors = compute(*args)
    return schedule(*tensors, **kwargs), tensors
def conv2d_1x1_NCHWc_RSCKk(input_shape, filter_shape):
    """Build compute + schedule for the NCHWc x RSCKk 1x1 conv workload.

    Returns (schedule, (data, filter, conv)).
    """
    tensors = compute_conv2d_1x1_NCHWc_RSCKk(input_shape, filter_shape)
    return schedule_conv2d_1x1_NCHWc_RSCKk(*tensors), tensors
def conv2d_1x1_WCHNc_CRSKk(input_shape, filter_shape):
    """Build compute + schedule for the WCHNc x CRSKk 1x1 conv workload.

    Returns (schedule, (data, filter, conv)) — the intermediate packing
    stages are dropped from the returned tensors.
    """
    tensors = compute_conv2d_1x1_WCHNc_CRSKk(input_shape, filter_shape)
    sched = schedule_conv2d_1x1_WCHNc_CRSKk(*tensors)
    return sched, (tensors[0], tensors[1], tensors[-1])
def conv2d_NCHWc_KCRSk(input_shape, filter_shape):
    """Build the NCHWc x KCRSk conv2d workload (stride 1, no padding).

    Returns (schedule, (data, filter, conv)).
    """
    data = te.placeholder(input_shape, name="data", dtype="float32")
    filt = te.placeholder(filter_shape, name="filter", dtype="float32")
    conv = compute_conv2d_NCHWc_KCRSk(data, filt, [1, 1], [0, 0], [1, 1], "float32")
    sched = te.create_schedule([conv.op])
    schedule_conv2d_NCHWc_KCRSk(autotvm.get_config(), sched, conv)
    return sched, (data, filt, conv)
def conv2d_NCHWc_KCRSk_fp32_acc(input_shape, filter_shape):
    """Build the fp32-accumulate NCHWc x KCRSk conv2d workload.

    Returns (schedule, (data, filter, output)).
    """
    data = te.placeholder(input_shape, name="data", dtype="float32")
    filt = te.placeholder(filter_shape, name="filter", dtype="float32")
    out = compute_conv2d_NCHWc_KCRSk_acc32(data, filt, [1, 1], [0, 0], [1, 1], "float32")
    sched = te.create_schedule([out.op])
    schedule_conv2d_NCHWc_KCRSk_acc32(autotvm.get_config(), sched, out)
    return sched, (data, filt, out)
def depthwise_conv2d_NCHWc_KCRSk_acc32(input_shape, filter_shape):
    """Build the fp32-accumulate depthwise NCHWc conv2d workload.

    Returns (schedule, (data, filter, output)).
    """
    data = te.placeholder(input_shape, name="data", dtype="float32")
    filt = te.placeholder(filter_shape, name="filter", dtype="float32")
    out = compute_depthwise_conv2d_NCHWc_KCRSk_acc32(
        data, filt, [1, 1], [0, 0], [1, 1], "float32"
    )
    sched = te.create_schedule([out.op])
    schedule_depthwise_conv2d_NCHWc_KCRSk_acc32(autotvm.get_config(), sched, out)
    return sched, (data, filt, out)
def ref_convolution(data, kernel, stride, pad):
    """Reference conv2d via MXNet (single group, no bias).

    kernel layout is [K, C, KH, KW]; returns the result as numpy.
    """
    import mxnet as mx

    result = mx.nd.Convolution(
        data=mx.nd.array(data),
        weight=mx.nd.array(kernel),
        bias=None,
        no_bias=True,
        kernel=(kernel.shape[2], kernel.shape[3]),
        stride=stride,
        pad=pad,
        num_filter=kernel.shape[0],
        num_group=1,
    )
    return result.asnumpy()
def ref_depthwise_convolution(data, kernel, stride, pad):
    """Reference depthwise conv2d via MXNet's grouped Convolution.

    The number of groups equals the number of output channels
    (kernel.shape[0]), which makes the convolution depthwise.
    """
    import mxnet as mx

    groups = kernel.shape[0]
    kernel_size = (kernel.shape[2], kernel.shape[3])
    num_filter = kernel.shape[0]
    # NOTE: the channel multiplier (kernel.shape[1]) is implied by the kernel
    # layout and is not needed by mx.nd.Convolution; the previous unused
    # local that read it has been removed.
    ref_res = mx.nd.Convolution(
        data=mx.nd.array(data),
        weight=mx.nd.array(kernel),
        bias=None,
        no_bias=True,
        kernel=kernel_size,
        stride=stride,
        pad=pad,
        num_filter=num_filter,
        num_group=groups,
    )
    return ref_res.asnumpy()
def validate(workload, target, dev, input_shapes, *args, **kwargs):
    """Build and run `workload` on `dev`, then compare the device output
    against a NumPy/MXNet reference selected from the workload's __name__.

    The last placeholder returned by `workload` is treated as the output
    tensor; every other placeholder is filled with random data.  The
    hard-coded reshape constants below match the parameterized shapes
    declared in the test classes that use this helper.
    """
    s, placeholders = workload(*input_shapes, *args, **kwargs)
    func = tvm.driver.build(s, [*placeholders], target=target, name="TestFunction")
    args_tvm = []
    args_np = []
    # Random inputs for every placeholder except the last (the output).
    for var in placeholders[:-1]:
        var_np = np.random.uniform(size=[i.value for i in var.shape]).astype(var.dtype)
        args_np.append(var_np)
        args_tvm.append(tvm.nd.array(var_np, dev))
    # Zero-initialized output buffer appended last.
    args_tvm.append(
        tvm.nd.array(
            np.zeros([i.value for i in placeholders[-1].shape], dtype=placeholders[-1].dtype), dev
        )
    )
    func(*args_tvm)
    # Dispatch to the matching reference computation, keyed on workload name.
    # NOTE: the branches below mutate args_np in place when relayouting.
    if "plus_one" in workload.__name__:
        np_result = args_np[0] + 1.0
    elif "matmul" in workload.__name__:
        if "inner" in workload.__name__:
            np_result = np.matmul(
                args_np[0].reshape(32, 256), args_np[1].reshape(32, 256).transpose(1, 0)
            )
        elif "accum" in workload.__name__:
            np_result = np.matmul(
                args_np[0].transpose((1, 0, 2)).reshape(64, 128), args_np[1].reshape(128, 64)
            )
        else:
            np_result = np.matmul(
                args_np[0].transpose((0, 2, 1)).reshape(128, 64),
                args_np[1].transpose(1, 0, 2).reshape(64, 128),
            )
    elif "conv2d_1x1_NCHWc_RSCKk" in workload.__name__:
        vec_length = args_np[1].shape[-1]
        # nchwc -> nchw
        args_np[0] = (
            args_np[0]
            .transpose((0, 1, 4, 2, 3))
            .reshape(
                args_np[0].shape[0],
                args_np[0].shape[1] * args_np[0].shape[-1],
                args_np[0].shape[2],
                args_np[0].shape[3],
            )
        )
        # rsckk -> rsck -> kcrs
        args_np[1] = (
            args_np[1]
            .reshape(
                args_np[1].shape[0],
                args_np[1].shape[1],
                args_np[1].shape[2],
                args_np[1].shape[3] * args_np[1].shape[4],
            )
            .transpose((3, 2, 0, 1))
        )
        np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)
        # nkhw -> nkhwk
        np_result = np_result.reshape(
            np_result.shape[0],
            np_result.shape[1] // vec_length,
            vec_length,
            np_result.shape[2],
            np_result.shape[3],
        ).transpose(0, 1, 3, 4, 2)
    elif "conv2d_1x1_WCHNc_CRSKk" in workload.__name__:
        vec_length = args_np[1].shape[-1]
        # wchnc -> nchw
        args_np[0] = (
            args_np[0]
            .transpose((3, 1, 4, 2, 0))
            .reshape(
                args_np[0].shape[3],
                args_np[0].shape[1] * args_np[0].shape[-1],
                args_np[0].shape[2],
                args_np[0].shape[0],
            )
        )
        # crskk -> crsk -> kcrs
        args_np[1] = (
            args_np[1]
            .reshape(
                args_np[1].shape[0],
                args_np[1].shape[1],
                args_np[1].shape[2],
                args_np[1].shape[3] * args_np[1].shape[4],
            )
            .transpose((3, 0, 1, 2))
        )
        np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)
        # nkhw -> nkkhw -> wkhnk
        np_result = np_result.reshape(
            np_result.shape[0],
            np_result.shape[1] // vec_length,
            vec_length,
            np_result.shape[2],
            np_result.shape[3],
        ).transpose(4, 1, 3, 0, 2)
    elif "NCHW_KCRS" in workload.__name__:
        np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)
    elif "NCHWc_KCRSk" in workload.__name__:
        vec_length = args_np[1].shape[-1]
        # nchwc -> nchw
        args_np[0] = (
            args_np[0]
            .transpose((0, 1, 4, 2, 3))
            .reshape(
                args_np[0].shape[0],
                args_np[0].shape[1] * args_np[0].shape[-1],
                args_np[0].shape[2],
                args_np[0].shape[3],
            )
        )
        # kcrsk/cmrsc -> kcrs/cmrs
        args_np[1] = (
            args_np[1]
            .transpose((0, 4, 1, 2, 3))
            .reshape(
                args_np[1].shape[0] * args_np[1].shape[4],
                args_np[1].shape[1],
                args_np[1].shape[2],
                args_np[1].shape[3],
            )
        )
        if "depthwise" in workload.__name__:
            # np_result = testing.depthwise_conv2d_python_nchw(args_np[0], args_np[1], 1, "VALID")
            np_result = ref_depthwise_convolution(args_np[0], args_np[1], [], [])
        else:
            # np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)
            np_result = ref_convolution(args_np[0], args_np[1], [], [])
        # nkhw -> nkhwk
        np_result = np_result.reshape(
            np_result.shape[0],
            np_result.shape[1] // vec_length,
            vec_length,
            np_result.shape[2],
            np_result.shape[3],
        ).transpose(0, 1, 3, 4, 2)
    np.testing.assert_allclose(args_tvm[-1].asnumpy(), np_result, rtol=1e-2, atol=1e-2)
class BaseSingleShapeValidator:
    """Base for tests validating a unary workload over one parameterized shape."""
    @tvm.testing.parametrize_targets("opencl")
    def test_unary(self, test_func, input_shape, target, dev):
        validate(test_func, target, dev, [input_shape])
class TestPlusOneRank3(BaseSingleShapeValidator):
    """Elementwise +1 on a rank-3 tensor (texture-friendly trailing dim of 4)."""
    input_shape = tvm.testing.parameter((32, 32, 4))
    def plus_one(input_shape):
        return scheduler(compute_plus_one_rank3, schedule_plus_one_rank3, input_shape)
    test_func = tvm.testing.parameter(plus_one)
class TestPlusOneRank5(BaseSingleShapeValidator):
    """Elementwise +1 on a rank-5 tensor (texture-friendly trailing dim of 4)."""
    input_shape = tvm.testing.parameter((32, 2, 4, 4, 4))
    def plus_one(input_shape):
        return scheduler(compute_plus_one_rank5, schedule_plus_one_rank5, input_shape)
    test_func = tvm.testing.parameter(plus_one)
class TestMatmul:
    """Matmul workloads (standard and inner-dim variants), with and without
    caching through local memory."""
    input_shape = tvm.testing.parameter((32, 64, 4))
    local = tvm.testing.parameter(False, True)
    def matmul(input_shape, local):
        return scheduler(compute_matmul, schedule_matmul, input_shape, local=local)
    def matmul_inner(input_shape, local):
        return scheduler(compute_matmul_inner, schedule_matmul_inner, input_shape, local=local)
    test_func = tvm.testing.parameter(matmul, matmul_inner)
    @tvm.testing.parametrize_targets("opencl")
    def test_matmul(self, test_func, input_shape, local, target, dev):
        validate(test_func, target, dev, [input_shape], local=local)
class TestMatmulVectorAccumulator:
    """Matmul with a vectorized accumulator, with and without local memory."""
    shapeA = tvm.testing.parameter((32, 64, 4))
    shapeB = tvm.testing.parameter((128, 16, 4))
    local = tvm.testing.parameter(False, True)
    def matmul_vector_accumulator(shapeA, shapeB, local):
        return scheduler(
            compute_matmul_vector_accumulator,
            schedule_matmul_vector_accumulator,
            shapeA,
            shapeB,
            local=local,
        )
    test_func = tvm.testing.parameter(matmul_vector_accumulator)
    @tvm.testing.parametrize_targets("opencl")
    def test_matmul_vec_acc(self, test_func, shapeA, shapeB, local, target, dev):
        validate(test_func, target, dev, [shapeA, shapeB], local=local)
class BaseConv2DValidator:
    """Base for conv2d tests: runs `validate` over parameterized input shapes."""
    @tvm.testing.parametrize_targets("opencl")
    def test_conv2d(self, test_func, input_shapes, target, dev):
        validate(test_func, target, dev, input_shapes)
class TestConv2dNCHWcRSCKk(BaseConv2DValidator):
    """1x1 conv2d with NCHW4c activations and RSCK4k weights."""
    input_shapes = tvm.testing.parameter([(1, 32, 56, 56, 4), (1, 1, 128, 32, 4)])
    test_func = tvm.testing.parameter(conv2d_1x1_NCHWc_RSCKk)
class TestConv2dWCHNcCRSKk(BaseConv2DValidator):
    """1x1 conv2d with WCHN4c activations and CRSK4k weights."""
    input_shapes = tvm.testing.parameter([(56, 32, 56, 1, 4), (128, 1, 1, 32, 4)])
    test_func = tvm.testing.parameter(conv2d_1x1_WCHNc_CRSKk)
class TestConv2dNCHWcKCRSk(BaseConv2DValidator):
    """NCHW4c/KCRS4k conv2d, 1x1 and 3x3 kernels, default and FP32-accum kernels."""
    input_shapes = tvm.testing.parameter(
        [(1, 32, 56, 56, 4), (32, 128, 1, 1, 4)], [(1, 32, 112, 112, 4), (32, 128, 3, 3, 4)]
    )
    test_func = tvm.testing.parameter(conv2d_NCHWc_KCRSk, conv2d_NCHWc_KCRSk_fp32_acc)
class TestDepthwiseConv2dNCHWcKCRSk(BaseConv2DValidator):
    """Depthwise NCHW4c conv2d with FP32 accumulation, 3x3 kernel."""
    input_shapes = tvm.testing.parameter([(1, 24, 257, 257, 4), (24, 1, 3, 3, 4)])
    test_func = tvm.testing.parameter(depthwise_conv2d_NCHWc_KCRSk_acc32)
def simple_texture_to_scalar_common(
    target, input_info, output_info, find_patterns, dtype, cast_type
):
    """Compile a texture/buffer copy-with-cast workload and verify the generated
    OpenCL source against expected code patterns.

    Parameters
    ----------
    input_info, output_info : list
        ``[storage_scope, shape]`` pairs; scope "" is a plain buffer and
        "global.texture" an image-backed texture.
    find_patterns : list of str
        Substrings that must occur, in order, in the generated OpenCL source.
    cast_type : str
        dtype the output is cast to.  FP16 kernels are only compiled, not run,
        because CI GPUs may not support FP16 inference.
    """

    def _compute():
        # Separate copy stage so it can be given its own storage scope.
        p0 = te.placeholder(input_info[1], name="p0", dtype=dtype)
        p0_comp = te.compute(input_info[1], lambda *i: p0(*i), name="p0_comp")
        if len(output_info[1]) == 4 and len(input_info[1]) == 5:
            # NCHW4c -> NCHW: fold the trailing vector lanes back into channels.
            out = te.compute(
                output_info[1],
                lambda n, c, h, w: p0_comp[n][c // 4][h][w][c % 4].astype(cast_type),
                name="out",
            )
        elif len(output_info[1]) == 5 and len(input_info[1]) == 5:
            out = te.compute(
                output_info[1],
                lambda n, c, h, w, cb: p0_comp[n][c][h][w][cb].astype(cast_type),
                name="out",
            )
        else:
            raise Exception("Impossible case")
        dummy_out = te.compute(output_info[1], lambda *i: out(*i), name="dummy_out")
        return p0, dummy_out

    def _schedule(dummy_out):
        from tvm.topi.adreno.utils import bind_data_copy

        s = te.create_schedule(dummy_out.op)
        out = s[dummy_out].op.input_tensors[0]
        p0_comp = s[out].op.input_tensors[0]
        s[p0_comp].set_scope(input_info[0])
        bind_data_copy(s[p0_comp])
        s[out].set_scope(output_info[0])
        bind_data_copy(s[out])
        bind_data_copy(s[dummy_out])
        return s

    p0, dummy_out = _compute()
    s = _schedule(dummy_out)
    fun = tvm.build(s, [p0, dummy_out], target)
    dev = tvm.device(target, 0)
    opencl_source = fun.imported_modules[0].get_source()
    # Patterns must appear in order: resume each search from the last match.
    start_idx = 0
    for pattern in find_patterns:
        start_idx = opencl_source.find(pattern, start_idx)
        assert start_idx > -1
    # `size` accepts the shape directly; no need for a copying comprehension.
    input_np = np.random.uniform(size=list(input_info[1])).astype(dtype)
    input_tvm = tvm.nd.array(input_np, dev)
    c = tvm.nd.empty(output_info[1], dtype, dev)
    # Doesn't run OpenCL code for FP16 because GPUs in CI don't support FP16 inference
    if cast_type == "float32":
        fun(input_tvm, c)
    # For output len == 5 it makes no sense to check the accuracy
    if cast_type == "float32" and len(output_info[1]) == 4:
        np_result = input_np.transpose(0, 2, 3, 1, 4)  # NCHW4c -> NHWC4c
        np_result = np.squeeze(np_result, axis=3)
        np_result = np_result.transpose(0, 3, 1, 2)  # NHWC -> NCHW
        np.testing.assert_allclose(c.asnumpy(), np_result, rtol=1e-2, atol=1e-2)
class TestSimpleTextureToScalarFP16:
    """NCHW4c copies with a cast to FP16: checks the generated OpenCL for the
    expected image reads and half-precision conversions."""
    # (input [scope, shape], output [scope, shape], [find_patterns])
    input_info, output_info, find_patterns = tvm.testing.parameters(
        # 1. Texture (NCHW4c) -> Cast(FP16) -> Buffer (NCHW)
        (
            ["global.texture", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                "float4 v_ = READ_IMAGEF(p0_comp, image_sampler, ((int2)(((convert_int(get_local_id(0))) % 40), ((((convert_int(get_group_id(0))) & 1) * 20) + ((convert_int(get_local_id(0))) / 40)))));",
                "out[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = (convert_half(((float*)&v_)[((convert_int(get_group_id(0))) >> 1)]));",
            ],
        ),
        # 2. Buffer (NCHW4c) -> Cast(FP16) -> Buffer (NCHW)
        (
            ["", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                "out[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = (convert_half(p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))]));"
            ],
        ),
        # 3. Texture (NCHW4c) -> Cast(FP16) -> Texture (NCHW4c)
        (
            ["global.texture", (1, 1, 40, 40, 4)],
            ["global.texture", (1, 1, 40, 40, 4)],
            [
                "float4 v_ = READ_IMAGEF(p0_comp, image_sampler, ((int2)(((((convert_int(get_group_id(0))) * 24) + (convert_int(get_local_id(0)))) % 40), ((((convert_int(get_group_id(0))) * 8) + ((convert_int(get_local_id(0))) >> 3)) / 5))));",
                "write_imageh(out, (int2)(((((convert_int(get_group_id(0))) * 24) + (convert_int(get_local_id(0)))) % 40), ((((convert_int(get_group_id(0))) * 8) + ((convert_int(get_local_id(0))) >> 3)) / 5)), (convert_half4(v_)));",
            ],
        ),
    )
    dtype = tvm.testing.parameter("float32")
    @tvm.testing.parametrize_targets("opencl")
    def test_simple_texture_to_scalar_fp16(
        self, input_info, output_info, find_patterns, dtype, target
    ):
        simple_texture_to_scalar_common(
            target, input_info, output_info, find_patterns, dtype, "float16"
        )
class TestSimpleTextureToScalarFP32:
    """NCHW4c copies kept in FP32: checks the generated OpenCL for the
    expected texture/buffer reads and stores (also runs the kernel)."""
    # (input [scope, shape], output [scope, shape], [find_patterns])
    input_info, output_info, find_patterns = tvm.testing.parameters(
        # 1. Texture (NCHW4c) -> Buffer (NCHW)
        (
            ["global.texture", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                "float4 v_ = READ_IMAGEF(p0_comp, image_sampler, ((int2)(((convert_int(get_local_id(0))) % 40), ((((convert_int(get_group_id(0))) & 1) * 20) + ((convert_int(get_local_id(0))) / 40)))));",
                "out[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = ((float*)&v_)[((convert_int(get_group_id(0))) >> 1)];",
            ],
        ),
        # 2. Buffer (NCHW4c) -> Buffer (NCHW)
        (
            ["", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                "out[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))];"
            ],
        ),
    )
    dtype = tvm.testing.parameter("float32")
    @tvm.testing.parametrize_targets("opencl")
    def test_simple_texture_to_scalar_fp32(
        self, input_info, output_info, find_patterns, dtype, target
    ):
        simple_texture_to_scalar_common(
            target, input_info, output_info, find_patterns, dtype, "float32"
        )
def texture_to_scalar_reuse_ssa_common(
    target, input_info, output_info, find_patterns, dtype, cast_type
):
    """Compile a workload where the same texture/buffer element is read by
    several inlined consumers, and verify the generated OpenCL reuses one SSA
    value for the repeated read (patterns must appear in order).

    `cast_type` selects the cast applied by the consumers; FP16 kernels are
    compiled but not executed because CI GPUs may not support FP16 inference.
    """

    def _compute():
        p0 = te.placeholder(input_info[1], name="p0", dtype=dtype)
        p0_comp = te.compute(input_info[1], lambda *i: p0(*i), name="p0_comp")
        if len(output_info[1]) == 4 and len(input_info[1]) == 5:
            # NCHW4c -> NCHW with cast; `out2` re-reads the same element so
            # codegen can share a single load via SSA.
            out = te.compute(
                output_info[1],
                lambda n, c, h, w: p0_comp[n][c // 4][h][w][c % 4].astype(cast_type),
                name="out",
            )
            out2 = te.compute(
                output_info[1],
                lambda n, c, h, w: out[n][c][h][w]
                + p0_comp[n][c // 4][h][w][c % 4].astype(cast_type),
                name="out",
            )
        elif len(output_info[1]) == 5 and len(input_info[1]) == 5:
            out = te.compute(
                output_info[1],
                lambda n, c, h, w, cb: p0_comp[n][c][h][w][cb].astype(cast_type),
                name="out",
            )
            out2 = te.compute(
                output_info[1],
                lambda n, c, h, w, cb: out[n][c][h][w][cb]
                + p0_comp[n][c][h][w][cb].astype(cast_type),
                name="out",
            )
        else:
            raise Exception("Impossible case")
        out_sum = te.compute(output_info[1], lambda *i: out(*i) + out2(*i), name="out_sum")
        dummy_out = te.compute(output_info[1], lambda *i: out_sum(*i), name="dummy_out")
        return p0, dummy_out

    def _schedule(dummy_out):
        from tvm.topi.adreno.utils import bind_data_copy

        s = te.create_schedule(dummy_out.op)
        out_sum = s[dummy_out].op.input_tensors[0]
        out, out2 = s[out_sum].op.input_tensors
        p0_comp = s[out].op.input_tensors[0]
        s[p0_comp].set_scope(input_info[0])
        bind_data_copy(s[p0_comp])
        s[out].set_scope(output_info[0])
        s[out2].set_scope(output_info[0])
        # Inline both consumers so all reads land in one statement.
        s[out2].compute_inline()
        s[out].compute_inline()
        s[out_sum].set_scope(output_info[0])
        bind_data_copy(s[out_sum])
        bind_data_copy(s[dummy_out])
        return s

    p0, dummy_out = _compute()
    s = _schedule(dummy_out)
    fun = tvm.build(s, [p0, dummy_out], target)
    dev = tvm.device(target, 0)
    opencl_source = fun.imported_modules[0].get_source()
    # Patterns must appear in order: resume each search from the last match.
    start_idx = 0
    for pattern in find_patterns:
        start_idx = opencl_source.find(pattern, start_idx)
        assert start_idx > -1
    # `size` accepts the shape directly; no need for a copying comprehension.
    input_np = np.random.uniform(size=list(input_info[1])).astype(dtype)
    input_tvm = tvm.nd.array(input_np, dev)
    c = tvm.nd.empty(output_info[1], dtype, dev)
    # Doesn't run OpenCL code for FP16 because GPUs in CI don't support FP16 inference
    if cast_type == "float32":
        fun(input_tvm, c)
    # For output len == 5 it makes no sense to check the accuracy
    if cast_type == "float32" and len(output_info[1]) == 4:
        # out + (out + out) == 3x the input element.
        np_result = input_np * 3
        np_result = np_result.transpose(0, 2, 3, 1, 4)  # NCHW4c -> NHWC4c
        np_result = np.squeeze(np_result, axis=3)
        np_result = np_result.transpose(0, 3, 1, 2)  # NHWC -> NCHW
        np.testing.assert_allclose(c.asnumpy(), np_result, rtol=1e-2, atol=1e-2)
class TestTextureToScalarReuseSSAFP16:
    """SSA-reuse of a repeated texture/buffer read with a cast to FP16:
    the generated OpenCL must reuse one loaded value for all three uses."""
    # (input [scope, shape], output [scope, shape], [find_patterns])
    input_info, output_info, find_patterns = tvm.testing.parameters(
        # 1. Texture (NCHW4c) -> Cast(FP16) -> Buffer (NCHW)
        (
            ["global.texture", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                "float4 v_ = READ_IMAGEF(p0_comp, image_sampler, ((int2)(((convert_int(get_local_id(0))) % 40), ((((convert_int(get_group_id(0))) & 1) * 20) + ((convert_int(get_local_id(0))) / 40)))));",
                "out_sum[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = ((convert_half(((float*)&v_)[((convert_int(get_group_id(0))) >> 1)])) + ((convert_half(((float*)&v_)[((convert_int(get_group_id(0))) >> 1)])) + (convert_half(((float*)&v_)[((convert_int(get_group_id(0))) >> 1)]))));",
            ],
        ),
        # 2. Buffer (NCHW4c) -> Cast(FP16) -> Buffer (NCHW)
        (
            ["", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                " out_sum[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = ((convert_half(p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))])) + ((convert_half(p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))])) + (convert_half(p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))]))));"
            ],
        ),
        # 3. Texture (NCHW4c) -> Cast(FP16) -> Texture (NCHW4c)
        (
            ["global.texture", (1, 1, 40, 40, 4)],
            ["global.texture", (1, 1, 40, 40, 4)],
            [
                "float4 v_ = READ_IMAGEF(p0_comp, image_sampler, ((int2)(((((convert_int(get_group_id(0))) * 24) + (convert_int(get_local_id(0)))) % 40), ((((convert_int(get_group_id(0))) * 8) + ((convert_int(get_local_id(0))) >> 3)) / 5))));",
                "write_imageh(out_sum, (int2)(((((convert_int(get_group_id(0))) * 24) + (convert_int(get_local_id(0)))) % 40), ((((convert_int(get_group_id(0))) * 8) + ((convert_int(get_local_id(0))) >> 3)) / 5)), ((convert_half4(v_)) + ((convert_half4(v_)) + (convert_half4(v_)))));",
            ],
        ),
    )
    dtype = tvm.testing.parameter("float32")
    @tvm.testing.parametrize_targets("opencl")
    def test_texture_to_scalar_reuse_ssa_fp16(
        self, input_info, output_info, find_patterns, dtype, target
    ):
        texture_to_scalar_reuse_ssa_common(
            target, input_info, output_info, find_patterns, dtype, "float16"
        )
class TestTextureToScalarReuseSSAFP32:
    """SSA-reuse of a repeated texture/buffer read kept in FP32:
    the generated OpenCL must reuse one loaded value for all three uses."""
    # (input [scope, shape], output [scope, shape], [find_patterns])
    input_info, output_info, find_patterns = tvm.testing.parameters(
        # 1. Texture (NCHW4c) -> Buffer (NCHW)
        (
            ["global.texture", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                "float4 v_ = READ_IMAGEF(p0_comp, image_sampler, ((int2)(((convert_int(get_local_id(0))) % 40), ((((convert_int(get_group_id(0))) & 1) * 20) + ((convert_int(get_local_id(0))) / 40)))));",
                "out_sum[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = (((float*)&v_)[((convert_int(get_group_id(0))) >> 1)] + (((float*)&v_)[((convert_int(get_group_id(0))) >> 1)] + ((float*)&v_)[((convert_int(get_group_id(0))) >> 1)]));",
            ],
        ),
        # 2. Buffer (NCHW4c) -> Buffer (NCHW)
        (
            ["", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                "out_sum[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = (p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))] + (p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))] + p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))]));"
            ],
        ),
    )
    dtype = tvm.testing.parameter("float32")
    @tvm.testing.parametrize_targets("opencl")
    def test_texture_to_scalar_reuse_ssa_fp32(
        self, input_info, output_info, find_patterns, dtype, target
    ):
        texture_to_scalar_reuse_ssa_common(
            target, input_info, output_info, find_patterns, dtype, "float32"
        )
class TestLocalArrayToTexture:
    """Conv2d over textures accumulating into a private local array that is
    then vector-stored to a texture; verifies the generated OpenCL."""
    # 1. conv2d(Texture(NCHW4c), Texture(OIHW4o)) -> local_array[4] -> Texture (NCHW4c)
    input_shape1, input_shape2, output_shape, find_patterns = tvm.testing.parameters(
        (
            (1, 1, 40, 40, 4),
            (2, 4, 3, 3, 4),
            (1, 2, 38, 38, 4),
            [
                "float out_local[4];",
                "float4 v_ = READ_IMAGEF(p1_comp, image_sampler, ((int2)(((((convert_int(get_group_id(0))) * 14) + (convert_int(get_local_id(0)))) % 38), (((((convert_int(get_group_id(0))) * 64) + ((convert_int(get_local_id(0))) >> 1)) % 722) / 19))));",
                "float4 v__1 = READ_IMAGEF(p2_comp, image_sampler, ((int2)(rw, (((((((convert_int(get_group_id(0))) * 32) + ((convert_int(get_local_id(0))) >> 2)) / 361) * 12) + (rcb * 3)) + rh))));",
                "out_local[cb_c] = (out_local[cb_c] + (((float*)&v_)[rcb] * ((float*)&v__1)[cb_c]));",
                "write_imagef(out, (int2)(((((convert_int(get_group_id(0))) * 14) + (convert_int(get_local_id(0)))) % 38), ((((convert_int(get_group_id(0))) * 64) + ((convert_int(get_local_id(0))) >> 1)) / 19)), vload4(0, out_local + 0));",
            ],
        ),
    )
    dtype = tvm.testing.parameter("float32")
    @tvm.testing.parametrize_targets("opencl")
    def test_local_array_to_texture(
        self, input_shape1, input_shape2, output_shape, find_patterns, dtype, target
    ):
        def _compute():
            # Copy stages give the inputs their own (texture) scopes.
            p1 = te.placeholder(input_shape1, name="p1", dtype=dtype)
            p1_comp = te.compute(input_shape1, lambda *i: p1(*i), name="p1_comp")
            p2 = te.placeholder(input_shape2, name="p2", dtype=dtype)
            p2_comp = te.compute(input_shape2, lambda *i: p2(*i), name="p2_comp")
            KH, KW = input_shape2[2], input_shape2[3]
            IC, ICB = input_shape1[1], input_shape1[4]
            rh = te.reduce_axis((0, KH), name="rh")
            rw = te.reduce_axis((0, KW), name="rw")
            rc = te.reduce_axis((0, IC), name="rc")
            rcb = te.reduce_axis((0, ICB), name="rcb")
            # Direct conv2d reduction over kernel window and (blocked) channels.
            out = te.compute(
                output_shape,
                lambda n, c, h, w, cb: te.sum(
                    (p1_comp[n, rc, h, w, rcb] * p2_comp[c, rc * ICB + rcb, rh, rw, cb]).astype(
                        dtype
                    ),
                    axis=[rh, rw, rc, rcb],
                ),
                name="out",
            )
            dummy_out = te.compute(output_shape, lambda *i: out(*i), name="dummy_out")
            return p1, p2, dummy_out

        def _schedule(dummy_out):
            from tvm.topi.adreno.utils import bind_data_copy

            s = te.create_schedule(dummy_out.op)
            out = s[dummy_out].op.input_tensors[0]
            p1_comp, p2_comp = s[out].op.input_tensors
            bind_data_copy(s[p1_comp])
            s[p1_comp].set_scope("global.texture")
            bind_data_copy(s[p2_comp])
            s[p2_comp].set_scope("global.texture")
            # Accumulate in a local (private) cache before the texture store.
            OL = s.cache_write(out, "local")
            n, c, h, w, cb = s[out].op.axis
            fused = s[out].fuse(n, c, h, w)
            bx, tx = s[out].split(fused, 128)
            s[out].reorder(bx, tx, cb)
            s[out].vectorize(cb)
            s[out].set_scope("global.texture")
            s[out].bind(bx, te.thread_axis("blockIdx.x"))
            s[out].bind(tx, te.thread_axis("threadIdx.x"))
            s[OL].compute_at(s[out], tx)
            bind_data_copy(s[dummy_out])
            return s

        p1, p2, dummy_out = _compute()
        s = _schedule(dummy_out)
        fun = tvm.build(s, [p1, p2, dummy_out], target)
        dev = tvm.device(target, 0)
        opencl_source = fun.imported_modules[0].get_source()
        # Patterns must appear in order in the generated source.
        start_idx = 0
        for pattern in find_patterns:
            start_idx = opencl_source.find(pattern, start_idx)
            assert start_idx > -1
        input_np1 = np.random.uniform(size=[i for i in input_shape1]).astype(dtype)
        input_np2 = np.random.uniform(size=[i for i in input_shape2]).astype(dtype)
        input_tvm1 = tvm.nd.array(input_np1, dev)
        input_tvm2 = tvm.nd.array(input_np2, dev)
        c = tvm.nd.empty(output_shape, dtype, dev)
        # Execution only (no numeric reference check for this workload).
        fun(input_tvm1, input_tvm2, c)
# Allow invoking this test module directly, outside of pytest collection.
if __name__ == "__main__":
    tvm.testing.main()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
# TODO(csullivan): Additional tests cases needed:
# - PrimFunc with 1 arg, inplace update
# - PrimFunc with buffer that uses custom storage_scope
# func_1: accumulates C[0] += sum_i(3*A[i] + 1) and updates A in place to
# A[i] <- 2*A[i] + 1 (B holds the doubled value per iteration).
@T.prim_func
def func_1(A: T.Buffer((16,), "float32"), C: T.Buffer((1,), "float32")):
    for i in T.serial(
        0,
        16,
    ):
        with T.block():
            # Per-iteration scratch buffer holding 2 * A[i].
            B = T.alloc_buffer((1,), dtype="float32")
            with T.block():
                B[0] = A[i] * T.float32(2)
            with T.block():
                C[0] = C[0] + A[i] + B[0] + T.float32(1)
                A[i] = B[0] + T.float32(1)
def verify_func_1(module):
    """Check func_1: C accumulates sum(3*A + 1); A is rewritten to 2*A + 1."""
    a_in = np.random.randint(low=-128, high=127, size=(16,)).astype(np.float32)
    c_init = np.zeros((1,), dtype=np.float32)
    dev = tvm.cpu(0)
    a = tvm.nd.array(a_in, device=dev)
    c = tvm.nd.array(c_init, device=dev)
    module(a, c)
    tvm.testing.assert_allclose(c_init + np.sum(3 * a_in + 1), c.numpy(), rtol=1e-4)
    # also test in place update
    tvm.testing.assert_allclose(a_in * 2 + 1, a.numpy(), rtol=1e-4)
# func_2: like func_1 but with a read-only buffer D mixed into both the
# accumulation (D[0]) and the in-place update (D[1]); note C is first param.
@T.prim_func
def func_2(
    C: T.Buffer((1,), "float32"), A: T.Buffer((16,), "float32"), D: T.Buffer((2,), "float32")
):
    for i in T.serial(
        0,
        16,
    ):
        with T.block():
            # Per-iteration scratch buffer holding 2 * A[i].
            B = T.alloc_buffer((1,), dtype="float32")
            with T.block():
                B[0] = A[i] * T.float32(2)
            with T.block():
                C[0] = C[0] + A[i] + B[0] + T.float32(1) + D[0]
                A[i] = B[0] + T.float32(1) + D[1]
def verify_func_2(module):
    """Check func_2: C accumulates sum(3*A + 1 + D[0]); A becomes 2*A + 1 + D[1]."""
    a_in = np.random.randint(low=-128, high=127, size=(16,)).astype(np.float32)
    d_in = np.random.randint(low=-128, high=127, size=(2,)).astype(np.float32)
    c_init = np.zeros((1,), dtype=np.float32)
    dev = tvm.cpu(0)
    a = tvm.nd.array(a_in, device=dev)
    d = tvm.nd.array(d_in, device=dev)
    c = tvm.nd.array(c_init, device=dev)
    module(c, a, d)
    tvm.testing.assert_allclose(c_init + np.sum(3 * a_in + 1 + d_in[0]), c.numpy(), rtol=1e-4)
    tvm.testing.assert_allclose(a_in * 2 + 1 + d_in[1], a.numpy(), rtol=1e-4)
# func_3: extends func_2 with two pure output buffers:
# E[i] = A[i] (pre-update) and F[i] = A[i] + 1.
@T.prim_func
def func_3(
    C: T.Buffer((1,), "float32"),
    A: T.Buffer((16,), "float32"),
    D: T.Buffer((2,), "float32"),
    E: T.Buffer((16,), "float32"),
    F: T.Buffer((16,), "float32"),
):
    for i in T.serial(
        0,
        16,
    ):
        with T.block():
            # Per-iteration scratch buffer holding 2 * A[i].
            B = T.alloc_buffer((1,), dtype="float32")
            with T.block():
                B[0] = A[i] * T.float32(2)
            with T.block():
                E[i] = A[i]
                F[i] = E[i] + 1.0
                C[0] = C[0] + A[i] + B[0] + T.float32(1) + D[0]
                A[i] = B[0] + T.float32(1) + D[1]
def verify_func_3(module):
    """Check func_3: same contract as func_2 plus outputs E == A and F == A + 1."""
    a_in = np.random.randint(low=-128, high=127, size=(16,)).astype(np.float32)
    d_in = np.random.randint(low=-128, high=127, size=(2,)).astype(np.float32)
    c_init = np.zeros((1,), dtype=np.float32)
    dev = tvm.cpu(0)
    a = tvm.nd.array(a_in, device=dev)
    d = tvm.nd.array(d_in, device=dev)
    c = tvm.nd.array(c_init, device=dev)
    e = tvm.nd.array(np.zeros((16,), dtype=np.float32), device=dev)
    f = tvm.nd.array(np.zeros((16,), dtype=np.float32), device=dev)
    module(c, a, d, e, f)
    tvm.testing.assert_allclose(c_init + np.sum(3 * a_in + 1 + d_in[0]), c.numpy(), rtol=1e-4)
    tvm.testing.assert_allclose(a_in * 2 + 1 + d_in[1], a.numpy(), rtol=1e-4)
    # E and F capture A's pre-update value and its increment.
    tvm.testing.assert_allclose(a_in, e.numpy(), rtol=1e-4)
    tvm.testing.assert_allclose(a_in + 1, f.numpy(), rtol=1e-4)
# func_4: identical body to func_3, but with a different parameter order
# (F before D and E) to exercise parameter-order handling in the round-trip.
@T.prim_func
def func_4(
    C: T.Buffer((1,), "float32"),
    A: T.Buffer((16,), "float32"),
    F: T.Buffer((16,), "float32"),
    D: T.Buffer((2,), "float32"),
    E: T.Buffer((16,), "float32"),
):
    for i in T.serial(
        0,
        16,
    ):
        with T.block():
            # Per-iteration scratch buffer holding 2 * A[i].
            B = T.alloc_buffer((1,), dtype="float32")
            with T.block():
                B[0] = A[i] * T.float32(2)
            with T.block():
                E[i] = A[i]
                F[i] = E[i] + 1.0
                C[0] = C[0] + A[i] + B[0] + T.float32(1) + D[0]
                A[i] = B[0] + T.float32(1) + D[1]
def verify_func_4(module):
    """Check func_4: same contract as func_3, called with its (C, A, F, D, E) order."""
    a_in = np.random.randint(low=-128, high=127, size=(16,)).astype(np.float32)
    d_in = np.random.randint(low=-128, high=127, size=(2,)).astype(np.float32)
    c_init = np.zeros((1,), dtype=np.float32)
    dev = tvm.cpu(0)
    a = tvm.nd.array(a_in, device=dev)
    d = tvm.nd.array(d_in, device=dev)
    c = tvm.nd.array(c_init, device=dev)
    e = tvm.nd.array(np.zeros((16,), dtype=np.float32), device=dev)
    f = tvm.nd.array(np.zeros((16,), dtype=np.float32), device=dev)
    module(c, a, f, d, e)
    tvm.testing.assert_allclose(c_init + np.sum(3 * a_in + 1 + d_in[0]), c.numpy(), rtol=1e-4)
    tvm.testing.assert_allclose(a_in * 2 + 1 + d_in[1], a.numpy(), rtol=1e-4)
    # E and F capture A's pre-update value and its increment.
    tvm.testing.assert_allclose(a_in, e.numpy(), rtol=1e-4)
    tvm.testing.assert_allclose(a_in + 1, f.numpy(), rtol=1e-4)
class TestPrimFuncs:
    """Verify that building a PrimFunc directly and round-tripping it through
    te.extern_primfunc + te.create_prim_func yield structurally equal lowered
    IR and numerically identical results."""

    # (prim_func, names of the extern op's input tensors, result verifier).
    # FIX: `("A")` was a plain string, not a tuple; it was iterated
    # per-character and only worked because the name is one character long.
    func, params, verify = tvm.testing.parameters(
        [func_1, ("A",), verify_func_1],
        [func_2, ("C", "D"), verify_func_2],
        [func_3, ("C", "A", "D", "E"), verify_func_3],
        [func_4, ("C", "A", "D", "E"), verify_func_4],
    )

    def test_primfunc_call(self, func, verify):
        """Direct build and execution of the PrimFunc."""
        target = tvm.target.Target("llvm")
        func = tvm.build(func, target=target)
        verify(func)

    def test_te_extern_call(self, func, params, verify):
        """Round-trip through te.extern_primfunc; lowered IR must match."""
        ir_mod = tvm.IRModule.from_expr(func.with_attr("global_symbol", "main"))
        prim_func = ir_mod["main"]
        buf_name_map = {buf.name: buf for buf in func.buffer_map.values()}
        input_tensors = [te.placeholder(buf_name_map[name].shape) for name in params]
        output = te.extern_primfunc(input_tensors, prim_func)
        rt_prim_func = te.create_prim_func(tensors_from_extern_op(output, prim_func))
        tvm.ir.assert_structural_equal(tvm.lower(prim_func), tvm.lower(rt_prim_func))
        target = tvm.target.Target("llvm")
        func = tvm.build(rt_prim_func, target=target)
        verify(func)
def tensors_from_extern_op(extern, func):
    """Recover te tensors in the order of `func`'s parameters.

    Input placeholder buffers map to the extern op's original input tensors;
    output placeholder buffers map (positionally) to the extern output
    tensors themselves.
    """
    output_tensors = extern if isinstance(extern, list) else [extern]
    input_buffers, input_tensors, output_buffers = [], [], []
    for ext in output_tensors:
        output_buffers.extend(ext.op.output_placeholders)
        input_buffers.extend(ext.op.input_placeholders)
        input_tensors.extend(ext.op.input_tensors)
    # Merge buffer->tensor maps; output bindings take precedence on collision.
    buffer_to_tensor = dict(zip(input_buffers, input_tensors))
    buffer_to_tensor.update(zip(output_buffers, output_tensors))
    # Re-emit tensors following the PrimFunc's parameter order.
    return [buffer_to_tensor[func.buffer_map[param]] for param in func.params]
# Allow invoking this test module directly, outside of pytest collection.
if __name__ == "__main__":
    tvm.testing.main()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import sys
import pytest
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.tir.stmt_functor import post_order_visit
from tvm.driver.build_module import schedule_to_module
dtype = tvm.testing.parameter("int32")
def flatten_all_indices(preflatten_shape):
    """Build an index map that collapses an N-d index into its row-major
    flat offset over `preflatten_shape`."""

    def mapping(*indices):
        # Horner-style accumulation: offset = ((i0*s1 + i1)*s2 + i2) ...
        flat = functools.reduce(
            lambda acc, pair: acc * pair[1] + pair[0],
            zip(indices, preflatten_shape),
            0,
        )
        return [flat]

    return mapping
def unpack_flattened_indices(preflatten_shape):
    """Build an index map that expands a row-major flat offset back into an
    N-d index over `preflatten_shape`."""

    def mapping(i):
        coords = []
        remaining = i
        # Peel dimensions from the innermost (fastest-varying) outwards.
        for extent in reversed(preflatten_shape):
            remaining, coord = divmod(remaining, extent)
            coords.append(coord)
        coords.reverse()
        return coords

    return mapping
def traverse(s, op, callback):
    """Post-order DFS over `op` and its producers, calling `callback` once per
    distinct op (producers before consumers). `s` is unused but kept for
    call-site compatibility."""
    seen = set()

    def _visit(current):
        if current in seen:
            return
        seen.add(current)
        for tensor in current.input_tensors:
            _visit(tensor.op)
        callback(current)

    _visit(op)
class TestCompareAgainstExplicitReshape:
    """transform_layout should be equivalent to an explicit reshape stage.

    A and B can each be reshaped either with an explicit te.compute stage or
    with s[tensor].transform_layout; all four combinations must produce the
    same numerical result.
    """

    A_definition_style = tvm.testing.parameter(
        "explicit_reshape",
        "transform_layout",
    )
    B_definition_style = tvm.testing.parameter(
        "explicit_reshape",
        "transform_layout",
    )
    reordered_shape = tvm.testing.parameter((2, 3, 4))
    @tvm.testing.fixture
    def n_items(self, reordered_shape):
        # Total element count of the reordered 3-d shape.
        return functools.reduce(lambda x, y: x * y, reordered_shape, 1)
    @tvm.testing.fixture
    def fphysical_layout(self, reordered_shape):
        # Flat-index -> 3-d-index mapping, handed to transform_layout.
        return unpack_flattened_indices(reordered_shape)
    @tvm.testing.fixture
    def fcompute(self, A_definition_style, B_definition_style, reordered_shape, n_items, dtype):
        """Return a factory that builds the (A_input, B_output) tensor pair."""
        assert A_definition_style in ["explicit_reshape", "transform_layout"]
        assert B_definition_style in ["explicit_reshape", "transform_layout"]
        def func():
            if A_definition_style == "explicit_reshape":
                # Flatten the 3-d input into the 1-d working buffer explicitly.
                A_input = te.placeholder(shape=reordered_shape, name="A_input", dtype=dtype)
                A = te.compute(
                    shape=(n_items,),
                    fcompute=lambda i: A_input[
                        i // (reordered_shape[1] * reordered_shape[2]),
                        (i // reordered_shape[2]) % reordered_shape[1],
                        i % reordered_shape[2],
                    ],
                    name="A",
                )
            elif A_definition_style == "transform_layout":
                # Flat placeholder; fschedule applies the layout transform instead.
                A = te.placeholder(shape=(n_items,), name="A", dtype=dtype)
                A_input = A
            B = te.compute(shape=A.shape, fcompute=lambda i: A[i], name="B")
            if B_definition_style == "explicit_reshape":
                # Unflatten the 1-d result back to the 3-d output explicitly.
                B_output = te.compute(
                    shape=reordered_shape,
                    fcompute=lambda i, j, k: B[
                        i * reordered_shape[1] * reordered_shape[2] + j * reordered_shape[2] + k
                    ],
                    name="B_output",
                )
            elif B_definition_style == "transform_layout":
                B_output = B
            return A_input, B_output
        return func
    @tvm.testing.fixture
    def fschedule(self, A_definition_style, B_definition_style, fphysical_layout):
        """Return a schedule factory applying transform_layout where selected."""
        def func(outs):
            outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
            s = te.create_schedule([x.op for x in outs])
            def callback(op):
                if (op.name == "A" and A_definition_style == "transform_layout") or (
                    op.name == "B" and B_definition_style == "transform_layout"
                ):
                    s[op].transform_layout(fphysical_layout)
            traverse(s, outs[0].op, callback)
            return s
        return func
    @tvm.testing.parametrize_targets("llvm")
    def test_external_reshape(
        self, target, dev, fcompute, fschedule, n_items, reordered_shape, dtype
    ):
        """Build and run: the output must equal the (reshaped) input."""
        A, B = fcompute()
        s = fschedule(B)
        func = tvm.build(s, [A, B], target=target, name="copy_reshape")
        a_np = np.arange(n_items).reshape(reordered_shape).astype(dtype)
        b_np = np.arange(n_items).reshape(reordered_shape).astype(dtype)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.empty(b_np.shape, dtype=dtype, device=dev)
        func(a, b)
        tvm.testing.assert_allclose(b.numpy(), b_np)
    @tvm.testing.parametrize_targets("llvm")
    def test_internal_reshape(self, target, dev, n_items, reordered_shape, dtype, fphysical_layout):
        # The reshaping of the buffer gets flattened away in
        # StorageFlatten. Therefore, testing the behavior by running only
        # ApplyLayoutTransforms.
        logical_shape = (n_items,)
        A = te.placeholder(logical_shape, name="A", dtype=dtype)
        B = te.compute(shape=logical_shape, fcompute=lambda i: A[i], name="B")
        C = te.compute(shape=logical_shape, fcompute=lambda i: B[i], name="C")
        s = te.create_schedule(C.op)
        s[B].transform_layout(fphysical_layout)
        mod = schedule_to_module(s, [A, C])
        body = mod["main"].body
        def walk_buffer_interactions(stmt, callback):
            # Invoke *callback* on every load/store/realize of buffer "B".
            buffer_classes = [
                tvm.tir.BufferLoad,
                tvm.tir.BufferStore,
                tvm.tir.BufferRealize,
            ]
            def inner(node):
                if (type(node) in buffer_classes) and node.buffer.name == "B":
                    callback(node)
            post_order_visit(stmt, inner)
        # All references to the buffer are the same object
        def check_references():
            buffer_object = None
            def inner(node):
                nonlocal buffer_object
                if buffer_object is None:
                    buffer_object = node.buffer
                else:
                    assert node.buffer.same_as(buffer_object)
            return inner
        # The buffer has the expected shape.
        def check_shape(expected_shape):
            def inner(node):
                assert tuple(node.buffer.shape) == expected_shape
            return inner
        # Before the transform, the buffer should be in the logical shape.
        walk_buffer_interactions(body, check_references())
        walk_buffer_interactions(body, check_shape(logical_shape))
        mod = tvm.tir.transform.ApplyLayoutTransforms()(mod)
        body = mod["main"].body
        # After the transform, the buffer should be in the physical shape.
        walk_buffer_interactions(body, check_references())
        walk_buffer_interactions(body, check_shape(reordered_shape))
class Test2DPhysicalLayout:
    """Layout transforms with te.AXIS_SEPARATOR produce N-d physical buffers.

    Each of A and B may stay 1-d (flattened), or be split into 2-d / reversed
    2-d / 3-d physical layouts; the generated buffer accesses must use the
    correspondingly transformed indices.
    """

    transform_A = tvm.testing.parameter(
        "1d_A",
        "2d_A",
        "2d_rev_A",
        "3d_A",
    )
    transform_B = tvm.testing.parameter(
        "1d_B",
        "2d_B",
        "2d_rev_B",
        "3d_B",
    )
    @staticmethod
    def extract_logical_indices(stmt):
        """Return the loop vars of *stmt*, ordered by ascending loop extent."""
        output = {}
        # Since the for loops can be reordered by the layout
        # transformation, identify the loop corresponding to each
        # pre-transformation axis based on the iteration extent.
        def callback(node):
            if isinstance(node, tvm.tir.For):
                output[node.loop_var] = node.extent.value
        post_order_visit(stmt, callback)
        return sorted(output, key=output.get)
    def get_transform(self, name):
        """Return the index map for *name* ("1d"/"2d"/"2d_rev"/"3d"), or None."""
        name = name[:-2]
        if name == "1d":
            return None
        elif name == "2d":
            return lambda i, j, k: [i, j, te.AXIS_SEPARATOR, k]
        elif name == "2d_rev":
            return lambda i, j, k: [k, j, te.AXIS_SEPARATOR, i]
        elif name == "3d":
            return lambda i, j, k: [i, te.AXIS_SEPARATOR, j, te.AXIS_SEPARATOR, k]
        else:
            raise ValueError(f"Unknown transformation: {name}")
    def transform_indices(self, name, logical_shape, logical_index_vars):
        """Return the physical indices expected for the named transformation."""
        name = name[:-2]
        i, j, k = logical_index_vars
        if name == "1d":
            return [i * (logical_shape[1] * logical_shape[2]) + j * logical_shape[2] + k]
        elif name == "2d":
            return [i * logical_shape[1] + j, k]
        elif name == "2d_rev":
            return [k * logical_shape[1] + j, i]
        elif name == "3d":
            return [i, j, k]
        else:
            raise ValueError(f"Unknown transformation: {name}")
    def test_2d_physical(self, dtype, transform_A, transform_B):
        """Every load/store of A and B must use the transformed indices."""
        logical_shape = (2, 3, 4)
        A = te.placeholder(shape=logical_shape, dtype=dtype, name="A")
        B = te.compute(shape=A.shape, fcompute=lambda i, j, k: A[i, j, k], name="B")
        s = te.create_schedule(B.op)
        func = self.get_transform(transform_A)
        if func:
            s[A].transform_layout(func)
        func = self.get_transform(transform_B)
        if func:
            s[B].transform_layout(func)
        # If the two buffers are accessed with the same indices, CSE
        # will replace them with a Let binding. Since this makes it
        # harder to test what the transformed indices are, disabling
        # the CSE pass for this test.
        with tvm.transform.PassContext(disabled_pass=["tir.CommonSubexprElimTIR"]):
            mod = tvm.lower(s, [A, B])
        logical_index_vars = self.extract_logical_indices(mod["main"].body)
        expected_indices_A = self.transform_indices(transform_A, logical_shape, logical_index_vars)
        expected_indices_B = self.transform_indices(transform_B, logical_shape, logical_index_vars)
        def callback(node):
            if type(node) in [tvm.tir.BufferLoad, tvm.tir.BufferStore]:
                name = node.buffer.name
                if name == "A":
                    expected_indices = expected_indices_A
                elif name == "B":
                    expected_indices = expected_indices_B
                else:
                    raise RuntimeError(f"Unexpected buffer: {name}")
                tvm.ir.assert_structural_equal(expected_indices, node.indices)
        post_order_visit(mod["main"].body, callback)
class TestTransformedSchedules:
    """Schedule primitives must compose with transform_layout.

    The iteration variables returned by transform_layout have to be usable by
    later primitives (reorder/split/fuse), and the resulting loop nest must
    match the transformed buffer shape.
    """

    logical_shape = tvm.testing.parameter((4, 6, 40))
    transform_names = [
        None,
        "reverse",
        "flatten_all",
        "factor_last_by_4",
    ]
    transform_A = tvm.testing.parameter(by_dict={f"A_{t}": t for t in transform_names})
    transform_B = tvm.testing.parameter(
        by_dict={f"B_{t}": t for t in transform_names if t is not None}
    )
    # Overridden per-test with @pytest.mark.parametrize where needed.
    after_transform = tvm.testing.parameter(None)

    def make_transform(self, logical_shape, transform_name):
        """Return the index-mapping callable for the named transformation."""
        if transform_name is None:
            return lambda *indices: indices
        elif transform_name == "reverse":
            return lambda *indices: indices[::-1]
        elif transform_name == "flatten_all":
            return flatten_all_indices(logical_shape)
        elif transform_name == "factor_last_by_4":
            return lambda *indices, n: [*indices, n // 4, n % 4]
        else:
            raise NotImplementedError(f"Unknown transformation {transform_name}")

    def make_transformed_shape(self, logical_shape, transform_name):
        """Return the buffer shape produced by the named transformation."""
        if transform_name is None:
            return logical_shape
        elif transform_name == "reverse":
            return logical_shape[::-1]
        elif transform_name == "flatten_all":
            num_elements = functools.reduce(lambda x, y: x * y, logical_shape, 1)
            return [num_elements]
        elif transform_name == "factor_last_by_4":
            *indices, n = logical_shape
            return [*indices, n // 4, 4]
        else:
            raise NotImplementedError(f"Unknown transformation {transform_name}")

    @tvm.testing.fixture
    def expected_loop_order(self, logical_shape, transform_B, after_transform):
        """Loop extents expected after transform_B plus the follow-up primitive."""
        shape = self.make_transformed_shape(logical_shape, transform_B)
        if after_transform == "reorder":
            shape = shape[::-1]
        elif after_transform == "split":
            shape = [
                *shape[:-1],
                2,
                shape[-1] // 2,
            ]
        elif after_transform == "fuse":
            # "flatten_all" leaves a single axis, so nothing is merged.
            fused_size = shape[0] if transform_B == "flatten_all" else shape[0] * shape[1]
            shape = [fused_size, *shape[2:]]
        return shape

    @tvm.testing.fixture
    def schedule(self, logical_shape, dtype, transform_A, transform_B, after_transform):
        """Build the schedule: transform layouts, then apply after_transform."""
        A = te.placeholder(shape=logical_shape, dtype=dtype, name="A")
        B = te.compute(shape=A.shape, fcompute=lambda i, j, k: A[i, j, k], name="B")
        s = te.create_schedule(B.op)
        if transform_A:
            s[A].transform_layout(self.make_transform(logical_shape, transform_A))
        iter_vars = s[B].transform_layout(self.make_transform(logical_shape, transform_B))
        iter_vars = list(iter_vars)
        if after_transform == "reorder":
            s[B].reorder(*iter_vars[::-1])
        elif after_transform == "split":
            s[B].split(iter_vars[-1], nparts=2)
        elif after_transform == "fuse":
            # Fix: use the bound variable instead of re-slicing iter_vars.
            to_fuse = iter_vars[:2]
            s[B].fuse(*to_fuse)
        return {
            "schedule": s,
            "tensors": [A, B],
            "iter_vars": iter_vars,
        }

    def compare_tir_loop_order(self, stmt, expected_loop_order):
        """Assert that the For extents in *stmt* match, outermost first."""

        def collect_loops(node):
            output = []

            def callback(node):
                if isinstance(node, tvm.tir.For):
                    output.append(node)

            post_order_visit(node, callback)
            # post_order_visit reaches innermost loops first; reverse the
            # result so the list runs outer-to-inner.
            return output[::-1]

        loops = collect_loops(stmt)
        loop_order = [loop.extent for loop in loops]
        np.testing.assert_array_equal(loop_order, expected_loop_order)

    def test_tir_loop_order(self, schedule, expected_loop_order):
        """The lowered TIR loop nest follows the transformed layout."""
        func = tvm.lower(schedule["schedule"], schedule["tensors"])["main"]
        self.compare_tir_loop_order(func.body, expected_loop_order)

    def test_te_loop_order(self, schedule, expected_loop_order):
        """The TE stage's leaf iter vars reflect the transformed layout."""
        s = schedule["schedule"]
        A, B = schedule["tensors"]
        iter_vars = schedule["iter_vars"]
        # No reduction axis, so all leaf_iter_vars are over the data
        # array, and should have the new iteration variables.
        extents = [int(iter_var.dom.extent) for iter_var in s[B].leaf_iter_vars]
        np.testing.assert_array_equal(extents, expected_loop_order)
        # layout_transform should return the new iteration variables.
        extents = [int(iter_var.dom.extent) for iter_var in iter_vars]
        np.testing.assert_array_equal(extents, expected_loop_order)

    @pytest.mark.parametrize("after_transform", ["reorder", "split", "fuse"])
    def test_use_transformed_axes(
        self, schedule, expected_loop_order, transform_A, transform_B, after_transform
    ):
        """Transformed axes must be consumable by later schedule primitives."""
        s = schedule["schedule"]
        A, B = schedule["tensors"]
        func = tvm.lower(s, [A, B])["main"]
        self.compare_tir_loop_order(func.body, expected_loop_order)
class TestTransformCache:
    """transform_layout composes with cache_read / cache_write stages."""

    A_size = tvm.testing.parameter(16)
    transform_A = tvm.testing.parameter(by_dict={"transformA": True, "": False})
    transform_B = tvm.testing.parameter(by_dict={"transformB": True, "": False})
    cache_A = tvm.testing.parameter(by_dict={"cacheA": True, "": False})
    cache_B = tvm.testing.parameter(by_dict={"cacheB": True, "": False})

    @tvm.testing.fixture
    def schedule_args(self, target, A_size, transform_A, transform_B, cache_A, cache_B, dtype):
        """Build (schedule, tensors) with optional caching and layout transforms."""
        A = te.placeholder(shape=[A_size], dtype=dtype, name="A")
        B = te.compute(A.shape, lambda i: A[i], name="B")
        s = te.create_schedule(B.op)
        # GPU targets require every stage to be bound to a thread axis.
        requires_thread_bind = "gpu" in tvm.target.Target(target).keys
        thread_x = te.thread_axis("threadIdx.x")
        thread_y = te.thread_axis("threadIdx.y")
        thread_z = te.thread_axis("threadIdx.z")
        if cache_A:
            AA = s.cache_read(A, "shared", [B])
            if requires_thread_bind:
                s[AA].bind(AA.op.axis[0], thread_x)
        if cache_B:
            BB = s.cache_write(B, "shared")
            if requires_thread_bind:
                s[BB].bind(BB.op.axis[0], thread_y)
        if transform_A:
            # Fix: the returned axes were bound to an unused local; A is an
            # input placeholder, so the new axes are not needed here.
            s[A].transform_layout(lambda i: [i // 4, i % 4])
        if transform_B:
            B_axis = s[B].transform_layout(lambda i: [i // 4, i % 4])
        else:
            B_axis = B.op.axis
        if requires_thread_bind:
            s[B].bind(B_axis[0], thread_z)
        return [s, [A, B]]

    @tvm.testing.fixture
    def ref_data(self, A_size, dtype, transform_A, transform_B):
        """Reference input/output arrays, reshaped to match the transforms."""
        a_np = (100 * np.random.uniform(size=A_size)).astype(dtype)
        b_np = a_np
        if transform_A:
            a_np = a_np.reshape((-1, 4))
        if transform_B:
            b_np = b_np.reshape((-1, 4))
        return a_np, b_np

    def test_lower(self, schedule_args):
        """Lowering succeeds for every caching/transform combination."""
        tvm.lower(*schedule_args)

    def test_execute(self, target, dev, schedule_args, ref_data, dtype):
        """The built kernel copies A into B through any caches/transforms."""
        func = tvm.build(*schedule_args, target=target)
        a_np, b_np = ref_data
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.empty(b_np.shape, dtype=dtype, device=dev)
        func(a, b)
        if "int" in dtype:
            np.testing.assert_equal(b.numpy(), b_np)
        else:
            tvm.testing.assert_allclose(b.numpy(), b_np)
def test_transform_with_reduction():
    """Regression test: lowering a reduction whose output has a layout transform."""
    # The failure mode requires a reduction axis in the computation ...
    inp = te.placeholder([16, 32, 64], dtype="float32", name="A")
    red = te.reduce_axis((0, inp.shape[-1]), name="k")
    out = te.compute(inp.shape[:-1], lambda i, j: te.sum(inp[i, j, red], axis=[red]))
    sched = te.create_schedule(out.op)
    # ... combined with a layout transformation applied to its output.
    sched[out].transform_layout(lambda i, j: [j, i])
    # Prior to the fix, this crashed inside tvm::te::PassDownBitMaskOr
    # during tvm.lower; succeeding without an exception is the assertion.
    tvm.lower(sched, [inp, out])
# (shape, transform) pairs for test_size_one_buffer below: every shape has a
# size-1 dimension, and every transform moves that dimension.
shape, transform = tvm.testing.parameters(
    ([1, 8], lambda n, i: [i, n]),
    ([1, 1, 8], lambda i, j, k: [j, te.AXIS_SEPARATOR, i, k]),
    ([1, 1, 8], lambda i, j, k: [i, te.AXIS_SEPARATOR, j, k]),
)
def test_size_one_buffer(shape, transform):
    """Regression test: layout transforms on buffers with size-1 dimensions.

    Previously arith::DetectIterMap folded a size-1 iteration variable of a
    te.compute buffer into a constant, which then broke
    arith::InverseAffineIterMap when solving for that variable, and
    CheckFusePattern failed when the transformed output had a unit dimension.
    """
    buffer_dtype = "int8"
    placeholder = te.placeholder(shape, buffer_dtype, name="A")
    identity = te.compute(
        shape=placeholder.shape,
        fcompute=lambda *coords: placeholder[coords].astype(buffer_dtype),
        name="B",
    )
    sched = te.create_schedule(identity.op)
    # Applying the transform is itself the assertion: it must not raise.
    sched[identity].transform_layout(transform)
def test_non_divisible_transform_raises_error():
    """A transform that would require implicit padding must be rejected.

    Splitting the channel axis (extent 3) by a factor of 4 would introduce
    padding, which the TE schedule API does not support (padded transforms
    are supported only by TIR-based schedules).
    """
    data = te.placeholder([1, 3, 8, 8])
    ident = te.compute(data.shape, lambda *coords: data[coords])
    sched = te.create_schedule(ident.op)
    split_channels = lambda n, c, h, w: [n, c // 4, h, w, c % 4]
    with pytest.raises(tvm.TVMError):
        sched[ident].transform_layout(split_channels)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 20,387 | 33.381113 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_profiling_instr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.ir.module import IRModule
from tvm.script import tir as T
import numpy
# Baseline pass-context configuration shared by the lightweight-profiling
# (LWP) instrumentation tests below; individual tests copy and extend it.
default_lwp_test_config = {
    "tir.instrument_lwp": True,
    "tir.lwp_disable_func_prof": True,
    "tir.reset_start_id": True,
}
# Input fixture: an outer i, j grid containing two sibling inner loop nests
# (blocks "B" and "C") — the sibling loops are what default instrumentation
# targets.
@T.prim_func
def input1(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    for i, j in T.grid(8, 8):
        for k, l in T.grid(8, 16):
            with T.block("B"):
                vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
        for k, l in T.grid(8, 16):
            with T.block("C"):
                vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * 2
# Input fixture: sibling j-loops under i, each containing sibling k,l nests —
# gives instrumentation candidates at two nesting depths.
@T.prim_func
def input2(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    D = T.match_buffer(d, (8, 8, 128), dtype="int32")
    for i in T.serial(0, 8):
        for j in T.serial(0, 8):
            for k, l in T.grid(8, 16):
                with T.block("B"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
            for k, l in T.grid(8, 16):
                with T.block("B"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    B[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
        for j in T.serial(0, 8):
            for k, l in T.grid(8, 16):
                with T.block("C"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] + 2
            for k, l in T.grid(8, 16):
                with T.block("B"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    C[vi, vj, vk * 16 + vl] = C[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
# Input fixture: same structure as input2 but with some loops marked
# T.parallel, used to check instrumentation around parallel loops.
@T.prim_func
def input3(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    D = T.match_buffer(d, (8, 8, 128), dtype="int32")
    for i in T.serial(0, 8):
        for j in T.parallel(0, 8):
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        B[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
        for j in T.serial(0, 8):
            for k in T.parallel(0, 8):
                for l in T.serial(0, 16):
                    with T.block("C"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] + 2
            for k in T.parallel(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        C[vi, vj, vk * 16 + vl] = C[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
# Expected result for input1 under the default config: only the two sibling
# k,l nests are wrapped in start/end_profile_intrinsic pairs (IDs 3 and 5).
@T.prim_func
def test1_expected_output(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    for i, j in T.grid(8, 8):
        T.evaluate(T.start_profile_intrinsic(3, dtype="handle"))
        for k, l in T.grid(8, 16):
            with T.block("B"):
                vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
        T.evaluate(T.end_profile_intrinsic(3, dtype="handle"))
        T.evaluate(T.start_profile_intrinsic(5, dtype="handle"))
        for k, l in T.grid(8, 16):
            with T.block("C"):
                vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * 2
        T.evaluate(T.end_profile_intrinsic(5, dtype="handle"))
# Expected result when lwp_max_depth=3 with instr_siblings=False: only the
# two outermost loops (IDs 1 and 2) are instrumented; innermost loops are
# always excluded unless lwp_min_height overrides that.
@T.prim_func
def test2_expected_output(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    T.evaluate(T.start_profile_intrinsic(1, dtype="handle"))
    for i in T.serial(0, 8):
        T.evaluate(T.start_profile_intrinsic(2, dtype="handle"))
        for j in T.serial(0, 8):
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("C"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * 2
        T.evaluate(T.end_profile_intrinsic(2, dtype="handle"))
    T.evaluate(T.end_profile_intrinsic(1, dtype="handle"))
# Expected result for input1 with lwp_max_depth=3 and instr_siblings=False:
# loops are instrumented down to depth 3 (IDs 1, 2, 3, 5).
@T.prim_func
def test3_expected_output(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    T.evaluate(T.start_profile_intrinsic(1, dtype="handle"))
    for i in T.serial(0, 8):
        T.evaluate(T.start_profile_intrinsic(2, dtype="handle"))
        for j in T.serial(0, 8):
            T.evaluate(T.start_profile_intrinsic(3, dtype="handle"))
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
            T.evaluate(T.end_profile_intrinsic(3, dtype="handle"))
            T.evaluate(T.start_profile_intrinsic(5, dtype="handle"))
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("C"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * 2
            T.evaluate(T.end_profile_intrinsic(5, dtype="handle"))
        T.evaluate(T.end_profile_intrinsic(2, dtype="handle"))
    T.evaluate(T.end_profile_intrinsic(1, dtype="handle"))
# Expected result for input2 under the default config: every loop that has a
# sibling is instrumented (IDs 2, 3, 5, 7, 8, 10).
@T.prim_func
def test4_expected_output(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    D = T.match_buffer(d, (8, 8, 128), dtype="int32")
    for i in T.serial(0, 8):
        T.evaluate(T.start_profile_intrinsic(2, dtype="handle"))
        for j in T.serial(0, 8):
            T.evaluate(T.start_profile_intrinsic(3, dtype="handle"))
            for k, l in T.grid(8, 16):
                with T.block("B"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
            T.evaluate(T.end_profile_intrinsic(3, dtype="handle"))
            T.evaluate(T.start_profile_intrinsic(5, dtype="handle"))
            for k, l in T.grid(8, 16):
                with T.block("B"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    B[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
            T.evaluate(T.end_profile_intrinsic(5, dtype="handle"))
        T.evaluate(T.end_profile_intrinsic(2, dtype="handle"))
        T.evaluate(T.start_profile_intrinsic(7, dtype="handle"))
        for j in T.serial(0, 8):
            T.evaluate(T.start_profile_intrinsic(8, dtype="handle"))
            for k, l in T.grid(8, 16):
                with T.block("C"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] + 2
            T.evaluate(T.end_profile_intrinsic(8, dtype="handle"))
            T.evaluate(T.start_profile_intrinsic(10, dtype="handle"))
            for k, l in T.grid(8, 16):
                with T.block("B"):
                    vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                    C[vi, vj, vk * 16 + vl] = C[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
            T.evaluate(T.end_profile_intrinsic(10, dtype="handle"))
        T.evaluate(T.end_profile_intrinsic(7, dtype="handle"))
# Expected result with lwp_max_depth=3, instr_siblings=False and
# lwp_min_height=2: the min-height cutoff excludes the depth-3 loops, so
# only IDs 1 and 2 remain instrumented.
@T.prim_func
def test5_expected_output(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    T.evaluate(T.start_profile_intrinsic(1, dtype="handle"))
    for i in T.serial(0, 8):
        T.evaluate(T.start_profile_intrinsic(2, dtype="handle"))
        for j in T.serial(0, 8):
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("C"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * 2
        T.evaluate(T.end_profile_intrinsic(2, dtype="handle"))
    T.evaluate(T.end_profile_intrinsic(1, dtype="handle"))
# Expected result for input3: instrumentation is placed around, not inside,
# the T.parallel loops (IDs 2, 7, 8, 10).
@T.prim_func
def test6_expected_output(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (8, 8, 128), dtype="int32")
    B = T.match_buffer(b, (8, 8, 128), dtype="int32")
    C = T.match_buffer(c, (8, 8, 128), dtype="int32")
    D = T.match_buffer(d, (8, 8, 128), dtype="int32")
    for i in T.serial(0, 8):
        T.evaluate(T.start_profile_intrinsic(2, dtype="handle"))
        for j in T.parallel(0, 8):
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        B[vi, vj, vk * 16 + vl] = A[vi, vj, vk * 16 + vl] * 2
            for k in T.serial(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        B[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
        T.evaluate(T.end_profile_intrinsic(2, dtype="handle"))
        T.evaluate(T.start_profile_intrinsic(7, dtype="handle"))
        for j in T.serial(0, 8):
            T.evaluate(T.start_profile_intrinsic(8, dtype="handle"))
            for k in T.parallel(0, 8):
                for l in T.serial(0, 16):
                    with T.block("C"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        C[vi, vj, vk * 16 + vl] = B[vi, vj, vk * 16 + vl] + 2
            T.evaluate(T.end_profile_intrinsic(8, dtype="handle"))
            T.evaluate(T.start_profile_intrinsic(10, dtype="handle"))
            for k in T.parallel(0, 8):
                for l in T.serial(0, 16):
                    with T.block("B"):
                        vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                        C[vi, vj, vk * 16 + vl] = C[vi, vj, vk * 16 + vl] * D[vi, vj, vk * 16 + vl]
            T.evaluate(T.end_profile_intrinsic(10, dtype="handle"))
        T.evaluate(T.end_profile_intrinsic(7, dtype="handle"))
def test1():
    """By default, only loops that have sibling loops are instrumented."""
    with tvm.transform.PassContext(config=default_lwp_test_config):
        instrumented = tvm.tir.transform.InstrumentProfileIntrinsics()(
            tvm.IRModule.from_expr(input1)
        )
        tvm.ir.assert_structural_equal(instrumented["main"], test1_expected_output)
def test2():
    """'lwp_max_depth' has no effect while 'instr_siblings' is enabled.

    With sibling instrumentation still on (the default), the result is
    identical to test1 despite the depth limit (contrast with test3).
    """
    config = {**default_lwp_test_config, "tir.lwp_max_depth": 3}
    with tvm.transform.PassContext(config=config):
        instrumented = tvm.tir.transform.InstrumentProfileIntrinsics()(
            tvm.IRModule.from_expr(input1)
        )
        tvm.ir.assert_structural_equal(instrumented["main"], test1_expected_output)
def test3():
    """'lwp_max_depth' instruments loops up to a depth once siblings-only is off.

    Inner-most loops stay excluded unless overridden via 'lwp_min_height'
    (see test5).
    """
    config = {
        **default_lwp_test_config,
        "tir.lwp_max_depth": 3,
        "tir.instr_siblings": False,
    }
    with tvm.transform.PassContext(config=config):
        instrumented = tvm.tir.transform.InstrumentProfileIntrinsics()(
            tvm.IRModule.from_expr(input1)
        )
        tvm.ir.assert_structural_equal(instrumented["main"], test3_expected_output)
def test4():
    """Default config on input2: inner loops below the height cutoff are skipped."""
    with tvm.transform.PassContext(config=default_lwp_test_config):
        instrumented = tvm.tir.transform.InstrumentProfileIntrinsics()(
            tvm.IRModule.from_expr(input2)
        )
        tvm.ir.assert_structural_equal(instrumented["main"], test4_expected_output)
def test5():
    """'lwp_min_height' and 'lwp_max_depth' combined bound instrumentation on both ends."""
    config = {
        **default_lwp_test_config,
        "tir.lwp_max_depth": 3,
        "tir.instr_siblings": False,
        "tir.lwp_min_height": 2,
    }
    with tvm.transform.PassContext(config=config):
        instrumented = tvm.tir.transform.InstrumentProfileIntrinsics()(
            tvm.IRModule.from_expr(input1)
        )
        tvm.ir.assert_structural_equal(instrumented["main"], test5_expected_output)
def test6():
    """Instrumentation is placed correctly around parallel loops."""
    with tvm.transform.PassContext(config=default_lwp_test_config):
        instrumented = tvm.tir.transform.InstrumentProfileIntrinsics()(
            tvm.IRModule.from_expr(input3)
        )
        tvm.ir.assert_structural_equal(instrumented["main"], test6_expected_output)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 16,056 | 46.087977 | 99 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_compute_at.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import te, tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
# Fixture: two chained elementwise stages over a 128x128 buffer; the
# starting point for the compute_at tests.
@T.prim_func
def two_elementwise(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    C = T.match_buffer(c, (128, 128), "float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
# Fixture: two_elementwise after compute_at has moved block "B" under the
# outer i loop, so each row of B is produced just before it is consumed.
@T.prim_func
def two_elementwise_after_compute_at(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    C = T.match_buffer(c, (128, 128), "float32")
    for i in range(0, 128):
        for ax0, ax1 in T.grid(1, 128):
            with T.block("B"):
                vi = T.axis.S(128, i + ax0)
                vj = T.axis.S(128, ax1)
                B[vi, vj] = A[vi, vj] * 2.0
        for j in range(0, 128):
            # NOTE(review): this block is labelled "B" but computes C from B;
            # the sibling fixtures name the consumer block "C" — confirm the
            # expected block name against the actual compute_at output.
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = B[vi, vj] + 1.0
# Fixture: elementwise producer "B" feeding a blockized consumer, where "C"
# is split into a 8x8 outer block over 16x16 inner tiles.
@T.prim_func
def blockized_1(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], "float32")
    B = T.alloc_buffer([128, 128], "float32")
    C = T.match_buffer(c, [128, 128], "float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(8, 8):
        with T.block("C_outer"):
            vi_o, vj_o = T.axis.remap("SS", [i, j])
            T.reads([B[
                vi_o * 16 : vi_o * 16 + 16,
                vj_o * 16 : vj_o * 16 + 16,
            ]])
            T.writes([C[
                vi_o * 16 : vi_o * 16 + 16,
                vj_o * 16 : vj_o * 16 + 16
            ]])
            for i_i, j_i in T.grid(16, 16):
                with T.block("C_inner"):
                    vi = T.axis.S(128, vi_o * 16 + i_i)
                    vj = T.axis.S(128, vj_o * 16 + j_i)
                    C[vi, vj] = B[vi, vj] + 1.0
# Fixture: blockized_1 after compute_at — the "B" producer is moved inside
# the outer 8x8 tile loops so each 16x16 tile of B precedes its consumer.
@T.prim_func
def blockized_after_compute_at(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], "float32")
    B = T.alloc_buffer([128, 128], "float32")
    C = T.match_buffer(c, [128, 128], "float32")
    for i0_0, i1_0 in T.grid(8, 8):
        for ax0, ax1 in T.grid(16, 16):
            with T.block("B"):
                vi = T.axis.S(128, i0_0 * 16 + ax0)
                vj = T.axis.S(128, i1_0 * 16 + ax1)
                B[vi, vj] = A[vi, vj] * 2.0
        with T.block("C_outer"):
            vi_o, vj_o = T.axis.remap("SS", [i0_0, i1_0])
            T.reads([B[
                vi_o * 16 : vi_o * 16 + 16,
                vj_o * 16 : vj_o * 16 + 16,
            ]])
            T.writes([C[
                vi_o * 16 : vi_o * 16 + 16,
                vj_o * 16 : vj_o * 16 + 16
            ]])
            for i0_1, i1_1 in T.grid(16, 16):
                with T.block("C_inner"):
                    vi = T.axis.S(128, vi_o * 16 + i0_1)
                    vj = T.axis.S(128, vj_o * 16 + i1_1)
                    C[vi, vj] = B[vi, vj] + 1.0
# Fixture: blockized producer (16x16 tiles) feeding a plain consumer whose
# loops use a different (32x32) tiling.
@T.prim_func
def blockized_2(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], "float32")
    B = T.alloc_buffer([128, 128], "float32")
    C = T.match_buffer(c, [128, 128], "float32")
    for i_o, j_o in T.grid(8, 8):
        with T.block("B_outer"):
            vio, vjo = T.axis.remap("SS", [i_o, j_o])
            T.reads([A[
                vio * 16 : vio * 16 + 16,
                vjo * 16 : vjo * 16 + 16,
            ]])
            T.writes([B[
                vio * 16 : vio * 16 + 16,
                vjo * 16 : vjo * 16 + 16
            ]])
            for i_i, j_i in T.grid(16, 16):
                with T.block("B_inner"):
                    vi = T.axis.S(128, vio * 16 + i_i)
                    vj = T.axis.S(128, vjo * 16 + j_i)
                    B[vi, vj] = A[vi, vj] * 2.0
    for i_o, j_o, i_i, j_i in T.grid(4, 4, 32, 32):
        with T.block("C"):
            vi = T.axis.S(128, i_o * 32 + i_i)
            vj = T.axis.S(128, j_o * 32 + j_i)
            C[vi, vj] = B[vi, vj] + 1.0
# Fixture: blockized_2 after reverse_compute_at — the consumer "C" is moved
# under the producer's 8x8 tile loops, iterating 16x16 per tile.
@T.prim_func
def blockized_2_after_reverse_compute_at(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], "float32")
    B = T.alloc_buffer([128, 128], "float32")
    C = T.match_buffer(c, [128, 128], "float32")
    for i_o, j_o in T.grid(8, 8):
        with T.block("B_outer"):
            vio, vjo = T.axis.remap("SS", [i_o, j_o])
            T.reads([A[
                vio * 16 : vio * 16 + 16,
                vjo * 16 : vjo * 16 + 16,
            ]])
            T.writes([B[
                vio * 16 : vio * 16 + 16,
                vjo * 16 : vjo * 16 + 16
            ]])
            for i_i, j_i in T.grid(16, 16):
                with T.block("B_inner"):
                    vi = T.axis.S(128, vio * 16 + i_i)
                    vj = T.axis.S(128, vjo * 16 + j_i)
                    B[vi, vj] = A[vi, vj] * 2.0
        for ax0, ax1 in T.grid(16, 16):
            with T.block("C"):
                vi = T.axis.S(128, i_o * 16 + ax0)
                vj = T.axis.S(128, j_o * 16 + ax1)
                T.reads([B[vi, vj]])
                T.writes([C[vi, vj]])
                C[vi, vj] = B[vi, vj] + 1.0
# Fixture: blockized_2 after compute_at — the blockized producer is moved
# under the consumer's 4x4 outer loops, emitting 2x2 of its 16x16 tiles per
# 32x32 consumer tile.
@T.prim_func
def blockized_2_after_compute_at(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], "float32")
    B = T.alloc_buffer([128, 128], "float32")
    C = T.match_buffer(c, [128, 128], "float32")
    for i_o, j_o in T.grid(4, 4):
        for ax0, ax1 in T.grid(2, 2):
            with T.block("blockized_B"):
                vio = T.axis.S(8, i_o * 2 + ax0)
                vjo = T.axis.S(8, j_o * 2 + ax1)
                T.reads([A[
                    vio * 16 : vio * 16 + 16,
                    vjo * 16 : vjo * 16 + 16,
                ]])
                T.writes([B[
                    vio * 16 : vio * 16 + 16,
                    vjo * 16 : vjo * 16 + 16,
                ]])
                for i_i, j_i in T.grid(16, 16):
                    with T.block("B"):
                        vi = T.axis.S(128, vio * 16 + i_i)
                        vj = T.axis.S(128, vjo * 16 + j_i)
                        B[vi, vj] = A[vi, vj] * 2.0
        for i_i, j_i in T.grid(32, 32):
            with T.block("C"):
                vi = T.axis.S(128, i_o * 32 + i_i)
                vj = T.axis.S(128, j_o * 32 + j_i)
                C[vi, vj] = B[vi, vj] + 1.0
# Input fixture for the GPU matmul compute_at chain: shared/local cache-read
# stages are still standalone 2048x2048 loops; only the final write-back
# ("C_local") is already bound to the CUDA thread hierarchy.
@T.prim_func
def cuda_matmul_0(a: T.handle, b: T.handle, c: T.handle) -> None:  # pylint: disable=undefined-loop-variable
    A = T.match_buffer(a, [2048, 2048], "float32")
    B = T.match_buffer(b, [2048, 2048], "float32")
    C = T.match_buffer(c, [2048, 2048], "float32")
    A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
    B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
    A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    for i, j in T.grid(2048, 2048):
        with T.block("A_shared"):
            v0, v1 = T.axis.remap("SS", [i, j])
            A_shared[v0, v1] = A[v0, v1]
    for i, j in T.grid(2048, 2048):
        with T.block("B_shared"):
            v0, v1 = T.axis.remap("SS", [i, j])
            B_shared[v0, v1] = B[v0, v1]
    for i, j in T.grid(2048, 2048):
        with T.block("A_shared_local"):
            v0, v1 = T.axis.remap("SS", [i, j])
            A_shared_local[v0, v1] = A_shared[v0, v1]
    for i, j in T.grid(2048, 2048):
        with T.block("B_shared_local"):
            v0, v1 = T.axis.remap("SS", [i, j])
            B_shared_local[v0, v1] = B_shared[v0, v1]
    for i, j, k in T.grid(2048, 2048, 2048):
        with T.block("C"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C_local[vi, vj] = 0.0
            # NOTE: operands are indexed [vk, vi] / [vk, vj] (k-major layout).
            C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
    for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
        for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
            for vy in T.thread_binding(0, 2, thread = "vthread.y"):
                for vx in T.thread_binding(0, 2, thread = "vthread.x"):
                    for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
                        for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
                            for i, j in T.grid(4, 4):
                                with T.block("C_local"):
                                    v0_4 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                    v1_4 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                    C[v0_4, v1_4] = C_local[v0_4, v1_4]
# Expected IR after compute_at of "C" under the innermost thread loop: the
# reduction block now lives inside the threadIdx.x loop with decomposed
# spatial bindings; the cache-read stages are unchanged.
@T.prim_func
def cuda_matmul_0_after_compute_at(a: T.handle, b: T.handle, c: T.handle) -> None:  # pylint: disable=undefined-loop-variable
    A = T.match_buffer(a, [2048, 2048], "float32")
    B = T.match_buffer(b, [2048, 2048], "float32")
    C = T.match_buffer(c, [2048, 2048], "float32")
    A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
    B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
    A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    for i, j in T.grid(2048, 2048):
        with T.block("A_shared"):
            v0, v1 = T.axis.remap("SS", [i, j])
            A_shared[v0, v1] = A[v0, v1]
    for i, j in T.grid(2048, 2048):
        with T.block("B_shared"):
            v0, v1 = T.axis.remap("SS", [i, j])
            B_shared[v0, v1] = B[v0, v1]
    for i, j in T.grid(2048, 2048):
        with T.block("A_shared_local"):
            v0, v1 = T.axis.remap("SS", [i, j])
            A_shared_local[v0, v1] = A_shared[v0, v1]
    for i, j in T.grid(2048, 2048):
        with T.block("B_shared_local"):
            v0, v1 = T.axis.remap("SS", [i, j])
            B_shared_local[v0, v1] = B_shared[v0, v1]
    for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
        for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
            for vy in T.thread_binding(0, 2, thread = "vthread.y"):
                for vx in T.thread_binding(0, 2, thread = "vthread.x"):
                    for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
                        for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
                            # Reduction recomputed per-thread over its 4x4 output tile.
                            for i, j, k in T.grid(4, 4, 2048):
                                with T.block("C"):
                                    vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                    vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                    vk = T.axis.R(2048, k)
                                    with T.init():
                                        C_local[vi, vj] = 0.0
                                    C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
                            for i, j in T.grid(4, 4):
                                with T.block("C_local"):
                                    vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                    vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                    C[vi, vj] = C_local[vi, vj]
# Step 1 of the chain: same as cuda_matmul_0_after_compute_at, but the
# reduction axis is additionally split into k_0 (serial, 256) x k_1 (unroll, 8).
@T.prim_func
def cuda_matmul_1(a: T.handle, b: T.handle, c: T.handle) -> None:  # pylint: disable=undefined-loop-variable
    A = T.match_buffer(a, [2048, 2048], "float32")
    B = T.match_buffer(b, [2048, 2048], "float32")
    C = T.match_buffer(c, [2048, 2048], "float32")
    A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
    B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
    A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    for i, j in T.grid(2048, 2048):
        with T.block("A_shared"):
            v0, v1 = T.axis.remap("SS", [i, j])
            A_shared[v0, v1] = A[v0, v1]
    for i, j in T.grid(2048, 2048):
        with T.block("B_shared"):
            v0, v1 = T.axis.remap("SS", [i, j])
            B_shared[v0, v1] = B[v0, v1]
    for i, j in T.grid(2048, 2048):
        with T.block("A_shared_local"):
            v0, v1 = T.axis.remap("SS", [i, j])
            A_shared_local[v0, v1] = A_shared[v0, v1]
    for i, j in T.grid(2048, 2048):
        with T.block("B_shared_local"):
            v0, v1 = T.axis.remap("SS", [i, j])
            B_shared_local[v0, v1] = B_shared[v0, v1]
    for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
        for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
            for vy in T.thread_binding(0, 2, thread = "vthread.y"):
                for vx in T.thread_binding(0, 2, thread = "vthread.x"):
                    for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
                        for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
                            for k_0 in T.serial(0, 256):
                                for k_1 in T.unroll(0, 8):
                                    # Leading unit loop left over from the split.
                                    for _, i, j in T.grid(1, 4, 4):
                                        with T.block("C"):
                                            vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                            vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                            vk = T.axis.R(2048, k_0 * 8 + k_1)
                                            with T.init():
                                                C_local[vi, vj] = 0.0
                                            C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
                            for i, j in T.grid(4, 4):
                                with T.block("C_local"):
                                    vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                    vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                    C[vi, vj] = C_local[vi, vj]
# Step 2 of the chain: "A_shared_local" has been computed-at under the k_1
# unroll loop (one row slice per k step); "B_shared_local" is still standalone.
@T.prim_func
def cuda_matmul_2(a: T.handle, b: T.handle, c: T.handle) -> None:  # pylint: disable=undefined-loop-variable
    A = T.match_buffer(a, [2048, 2048], "float32")
    B = T.match_buffer(b, [2048, 2048], "float32")
    C = T.match_buffer(c, [2048, 2048], "float32")
    A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
    B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
    A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    for i, j in T.grid(2048, 2048):
        with T.block("A_shared"):
            v0, v1 = T.axis.remap("SS", [i, j])
            A_shared[v0, v1] = A[v0, v1]
    for i, j in T.grid(2048, 2048):
        with T.block("B_shared"):
            v0, v1 = T.axis.remap("SS", [i, j])
            B_shared[v0, v1] = B[v0, v1]
    for i, j in T.grid(2048, 2048):
        with T.block("B_shared_local"):
            v0, v1 = T.axis.remap("SS", [i, j])
            B_shared_local[v0, v1] = B_shared[v0, v1]
    for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
        for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
            for vy in T.thread_binding(0, 2, thread = "vthread.y"):
                for vx in T.thread_binding(0, 2, thread = "vthread.x"):
                    for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
                        for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
                            for k_0 in T.serial(0, 256):
                                for k_1 in T.unroll(0, 8):
                                    for i, j in T.grid(1, 4):
                                        with T.block("A_shared_local"):
                                            v0 = T.axis.S(2048, k_0 * 8 + k_1 + i)
                                            v1 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + j)
                                            A_shared_local[v0, v1] = A_shared[v0, v1]
                                    for _, i, j in T.grid(1, 4, 4):
                                        with T.block("C"):
                                            vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                            vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                            vk = T.axis.R(2048, k_0 * 8 + k_1)
                                            with T.init():
                                                C_local[vi, vj] = T.float32(0)
                                            C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
                            for i, j in T.grid(4, 4):
                                with T.block("C_local"):
                                    v0 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                    v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                    C[v0, v1] = C_local[v0, v1]
# Step 3 of the chain: both local cache-read stages ("A_shared_local" and
# "B_shared_local") are now computed-at under the k1 unroll loop.
@T.prim_func
def cuda_matmul_3(a: T.handle, b: T.handle, c: T.handle) -> None:  # pylint: disable=undefined-loop-variable
    A = T.match_buffer(a, [2048, 2048], "float32")
    B = T.match_buffer(b, [2048, 2048], "float32")
    C = T.match_buffer(c, [2048, 2048], "float32")
    A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
    B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
    A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    for i, j in T.grid(2048, 2048):
        with T.block("A_shared"):
            v0, v1 = T.axis.remap("SS", [i, j])
            A_shared[v0, v1] = A[v0, v1]
    for i, j in T.grid(2048, 2048):
        with T.block("B_shared"):
            v0, v1 = T.axis.remap("SS", [i, j])
            B_shared[v0, v1] = B[v0, v1]
    for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
        for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
            for vy in T.thread_binding(0, 2, thread = "vthread.y"):
                for vx in T.thread_binding(0, 2, thread = "vthread.x"):
                    for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
                        for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
                            for k0 in T.serial(0, 256):
                                for k1 in T.unroll(0, 8):
                                    for i, j in T.grid(1, 4):
                                        with T.block("A_shared_local"):
                                            v0 = T.axis.S(2048, k0 * 8 + k1 + i)
                                            v1 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + j)
                                            A_shared_local[v0, v1] = A_shared[v0, v1]
                                    for i, j in T.grid(1, 4):
                                        with T.block("B_shared_local"):
                                            v0 = T.axis.S(2048, k0 * 8 + k1 + i)
                                            v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                            B_shared_local[v0, v1] = B_shared[v0, v1]
                                    for _, i, j in T.grid(1, 4, 4):
                                        with T.block("C"):
                                            vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                            vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                            vk = T.axis.R(2048, k0 * 8 + k1)
                                            with T.init():
                                                C_local[vi, vj] = T.float32(0)
                                            C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
                            for i, j in T.grid(4, 4):
                                with T.block("C_local"):
                                    v0 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                    v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                    C[v0, v1] = C_local[v0, v1]
# Step 4 of the chain: "A_shared" is computed-at under the k0 loop (an
# 8x64 panel per k0 step); "B_shared" remains a standalone global stage.
@T.prim_func
def cuda_matmul_4(a: T.handle, b: T.handle, c: T.handle) -> None:  # pylint: disable=undefined-loop-variable
    A = T.match_buffer(a, [2048, 2048], "float32")
    B = T.match_buffer(b, [2048, 2048], "float32")
    C = T.match_buffer(c, [2048, 2048], "float32")
    A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
    B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
    A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    for i, j in T.grid(2048, 2048):
        with T.block("B_shared"):
            v0, v1 = T.axis.remap("SS", [i, j])
            B_shared[v0, v1] = B[v0, v1]
    for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
        for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
            for vy in T.thread_binding(0, 2, thread = "vthread.y"):
                for vx in T.thread_binding(0, 2, thread = "vthread.x"):
                    for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
                        for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
                            for k0 in T.serial(0, 256):
                                for i, j in T.grid(8, 64):
                                    with T.block("A_shared"):
                                        v0 = T.axis.S(2048, k0 * 8 + i)
                                        v1 = T.axis.S(2048, by * 64 + j)
                                        A_shared[v0, v1] = A[v0, v1]
                                for k1 in T.unroll(0, 8):
                                    for i, j in T.grid(1, 4):
                                        with T.block("A_shared_local"):
                                            v0 = T.axis.S(2048, k0 * 8 + k1 + i)
                                            v1 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + j)
                                            A_shared_local[v0, v1] = A_shared[v0, v1]
                                    for i, j in T.grid(1, 4):
                                        with T.block("B_shared_local"):
                                            v0 = T.axis.S(2048, k0 * 8 + k1 + i)
                                            v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                            B_shared_local[v0, v1] = B_shared[v0, v1]
                                    for _, i, j in T.grid(1, 4, 4):
                                        with T.block("C"):
                                            vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                            vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                            vk = T.axis.R(2048, k0 * 8 + k1)
                                            with T.init():
                                                C_local[vi, vj] = 0.0
                                            C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
                            for i, j in T.grid(4, 4):
                                with T.block("C_local"):
                                    v0 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                    v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                    C[v0, v1] = C_local[v0, v1]
# Final step of the chain: both shared-memory stages are computed-at under k0,
# so every stage now sits inside the CUDA thread hierarchy.
@T.prim_func
def cuda_matmul_5(a: T.handle, b: T.handle, c: T.handle) -> None:  # pylint: disable=undefined-loop-variable
    A = T.match_buffer(a, [2048, 2048], "float32")
    B = T.match_buffer(b, [2048, 2048], "float32")
    C = T.match_buffer(c, [2048, 2048], "float32")
    A_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
    B_shared = T.alloc_buffer([2048, 2048], "float32", scope="shared")
    A_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    B_shared_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    C_local = T.alloc_buffer([2048, 2048], "float32", scope="local")
    for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
        for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
            for vy in T.thread_binding(0, 2, thread = "vthread.y"):
                for vx in T.thread_binding(0, 2, thread = "vthread.x"):
                    for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
                        for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
                            for k0 in T.serial(0, 256):
                                for i, j in T.grid(8, 64):
                                    with T.block("A_shared"):
                                        v0 = T.axis.S(2048, k0 * 8 + i)
                                        v1 = T.axis.S(2048, by * 64 + j)
                                        A_shared[v0, v1] = A[v0, v1]
                                for i, j in T.grid(8, 64):
                                    with T.block("B_shared"):
                                        v0 = T.axis.S(2048, k0 * 8 + i)
                                        v1 = T.axis.S(2048, bx * 64 + j)
                                        B_shared[v0, v1] = B[v0, v1]
                                for k1 in T.unroll(0, 8):
                                    for i, j in T.grid(1, 4):
                                        with T.block("A_shared_local"):
                                            v0 = T.axis.S(2048, k0 * 8 + k1 + i)
                                            v1 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + j)
                                            A_shared_local[v0, v1] = A_shared[v0, v1]
                                    for i, j in T.grid(1, 4):
                                        with T.block("B_shared_local"):
                                            v0 = T.axis.S(2048, k0 * 8 + k1 + i)
                                            v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                            B_shared_local[v0, v1] = B_shared[v0, v1]
                                    for _, i, j in T.grid(1, 4, 4):
                                        with T.block("C"):
                                            vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                            vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                            vk = T.axis.R(2048, k0 * 8 + k1)
                                            with T.init():
                                                C_local[vi, vj] = 0.0
                                            C_local[vi, vj] = C_local[vi, vj] + A_shared_local[vk, vi] * B_shared_local[vk, vj]
                            for i, j in T.grid(4, 4):
                                with T.block("C_local"):
                                    v0 = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                    v1 = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                    C[v0, v1] = C_local[v0, v1]
# Input fixture: producer "B" has a 16x16-tiled loop nest; consumer "C" is flat.
@T.prim_func
def tiled(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], "float32")
    B = T.alloc_buffer([128, 128], "float32")
    C = T.match_buffer(c, [128, 128], "float32")
    for i_0, j_0, i_1, j_1 in T.grid(8, 8, 16, 16):
        with T.block("B"):
            vi = T.axis.S(128, i_0 * 16 + i_1)
            vj = T.axis.S(128, j_0 * 16 + j_1)
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
# Expected IR: "C" reverse-computed-at under loop i_1 of the tiled producer,
# so each i_1 iteration produces then consumes one row of the 16x16 tile.
@T.prim_func
def tiled_after_reverse_compute_at(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], "float32")
    B = T.alloc_buffer([128, 128], "float32")
    C = T.match_buffer(c, [128, 128], "float32")
    for i_0, j_0, i_1 in T.grid(8, 8, 16):
        for j_1 in T.serial(0, 16):
            with T.block("B"):
                vi = T.axis.S(128, i_0 * 16 + i_1)
                vj = T.axis.S(128, j_0 * 16 + j_1)
                B[vi, vj] = A[vi, vj] * 2.0
        for j_1 in T.serial(0, 16):
            with T.block("C"):
                vi = T.axis.S(128, i_0 * 16 + i_1)
                vj = T.axis.S(128, j_0 * 16 + j_1)
                C[vi, vj] = B[vi, vj] + 1.0
# Variant of `tiled` with a leading unit dimension; the first buffer index is
# the trivial constant 0 (no block var bound to it).
@T.prim_func
def tiled_trivial_binding(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [1, 128, 128], "float32")
    B = T.alloc_buffer([1, 128, 128], "float32")
    C = T.match_buffer(c, [1, 128, 128], "float32")
    for i_0, j_0, i_1, j_1 in T.grid(8, 8, 16, 16):
        with T.block("B"):
            vi = T.axis.S(128, i_0 * 16 + i_1)
            vj = T.axis.S(128, j_0 * 16 + j_1)
            B[0, vi, vj] = A[0, vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[0, vi, vj] = B[0, vi, vj] + 1.0
# Expected IR for the trivial-binding variant after reverse_compute_at of "C"
# under loop i_1; mirrors tiled_after_reverse_compute_at with the unit axis.
@T.prim_func
def tiled_trivial_binding_after_reverse_compute_at(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [1, 128, 128], "float32")
    B = T.alloc_buffer([1, 128, 128], "float32")
    C = T.match_buffer(c, [1, 128, 128], "float32")
    for i_0, j_0, i_1 in T.grid(8, 8, 16):
        for j_1 in T.serial(0, 16):
            with T.block("B"):
                vi = T.axis.S(128, i_0 * 16 + i_1)
                vj = T.axis.S(128, j_0 * 16 + j_1)
                B[0, vi, vj] = A[0, vi, vj] * 2.0
        for j_1 in T.serial(0, 16):
            with T.block("C"):
                vi = T.axis.S(128, i_0 * 16 + i_1)
                vj = T.axis.S(128, j_0 * 16 + j_1)
                C[0, vi, vj] = B[0, vi, vj] + 1.0
# Input fixture: rfactor-style reduction. "B_rf" accumulates partial sums into
# a thread-bound local buffer; the final cross-thread reduction "B" is a
# separate standalone loop nest.
@T.prim_func
def factorized(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [16, 16, 16], "float32")
    B = T.match_buffer(b, [16], "float32")
    B_rf_local = T.alloc_buffer([16, 16], "float32", scope="local")
    for j in T.thread_binding(0, 16, thread = "blockIdx.x"):
        for i_o in T.thread_binding(0, 4, thread = "threadIdx.x"):
            for i_i, k in T.grid(4, 16):
                with T.block("B_rf"):
                    vi = T.axis.S(16, i_o * 4 + i_i)
                    vj, vk = T.axis.remap("SR", [j, k])
                    with T.init():
                        B_rf_local[vi, vj] = 0.0
                    B_rf_local[vi, vj] = B_rf_local[vi, vj] + A[vj, vi, vk]
    for i, k in T.grid(16, 16):
        with T.block("B"):
            vi, vk = T.axis.remap("SR", [i, k])
            with T.init():
                B[vi] = 0.0
            B[vi] = B[vi] + B_rf_local[vk, vi]
# Expected IR: the final reduction "B" is reverse-computed-at under the
# threadIdx.x loop, each thread reducing its own 4-element slice of B_rf_local.
@T.prim_func
def factorized_after_reverse_compute_at(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [16, 16, 16], "float32")
    B = T.match_buffer(b, [16], "float32")
    B_rf_local = T.alloc_buffer([16, 16], "float32", scope="local")
    for j in T.thread_binding(0, 16, thread = "blockIdx.x"):
        for i_o in T.thread_binding(0, 4, thread = "threadIdx.x"):
            for i_i, k in T.grid(4, 16):
                with T.block("B_rf"):
                    vi = T.axis.S(16, i_o * 4 + i_i)
                    vj = T.axis.S(16, j)
                    vk = T.axis.R(16, k)
                    with T.init():
                        B_rf_local[vi, vj] = 0.0
                    B_rf_local[vi, vj] = B_rf_local[vi, vj] + A[vj, vi, vk]
            for k in T.serial(0, 4):
                with T.block("B"):
                    vi = T.axis.S(16, j)
                    vk = T.axis.R(16, i_o * 4 + k)
                    with T.init():
                        B[vi] = 0.0
                    B[vi] = B[vi] + B_rf_local[vk, vi]
# Input fixture: two consumers each read a strided half (even/odd columns) of
# B, so the producer->consumer data flow is not compact.
@T.prim_func
def not_all_compact_data_flow(a: T.handle, c: T.handle):
    A = T.match_buffer(a, (128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    C = T.match_buffer(c, (128, 128), "float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj]
    for i, j in T.grid(128, 64):
        with T.block("C_1"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj * 2] = B[vi, vj * 2] + 1.0
        with T.block("C_2"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj * 2 + 1] = B[vi, vj * 2 + 1] * 2.0
# Expected IR: "B" computed-at under loop j; a small inner loop t covers the
# two columns (even and odd) both consumers need in that iteration.
@T.prim_func
def not_all_compact_data_flow_after_compute_at(a: T.handle, c: T.handle):
    A = T.match_buffer(a, (128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    C = T.match_buffer(c, (128, 128), "float32")
    for i, j in T.grid(128, 64):
        for t in range(2):
            with T.block("B"):
                vi = T.axis.S(128, i)
                vj = T.axis.S(128, j * 2 + t)
                B[vi, vj] = A[vi, vj]
        with T.block("C_1"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj * 2] = B[vi, vj * 2] + 1.0
        with T.block("C_2"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj * 2 + 1] = B[vi, vj * 2 + 1] * 2.0
# Negative fixture: B is written by two sibling blocks ("B_0"/"B_1") under the
# same outer loop, so the loop subtree is not a compact dataflow region and
# compute_at is expected to fail on it.
@T.prim_func
def fail_subtree_compact_dataflow(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    C = T.match_buffer(c, (128, 128), "float32")
    for i in range(0, 128):
        for j in range(0, 64):
            with T.block("B_0"):
                vi = T.axis.S(128, i)
                vj = T.axis.S(128, j)
                B[vi, vj] = A[vi, vj] * 2.0
        for j in range(0, 64):
            with T.block("B_1"):
                vi = T.axis.S(128, i)
                vj = T.axis.S(128, j + 64)
                B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
# Negative fixture: B has two consumers ("C" and "D") in separate loop nests,
# so no single target loop covers all consumers for compute_at.
@T.prim_func
def fail_all_consumers_under_loop(a: T.handle, c: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    C = T.match_buffer(c, (128, 128), "float32")
    D = T.match_buffer(d, (128, 128), "float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
    for i, j in T.grid(128, 128):
        with T.block("D"):
            vi, vj = T.axis.remap("SS", [i, j])
            D[vi, vj] = B[vi, vj] + 1.0
# Negative fixture: "D" consumes both B and C, which are produced in separate
# loop nests — no single loop covers all producers for reverse_compute_at.
@T.prim_func
def fail_all_producers_under_loop(a: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    C = T.alloc_buffer((128, 128), "float32")
    D = T.match_buffer(d, (128, 128), "float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = A[vi, vj] + 1.0
    for i, j in T.grid(128, 128):
        with T.block("D"):
            vi, vj = T.axis.remap("SS", [i, j])
            D[vi, vj] = B[vi, vj] + C[vi, vj]
# Input fixture: "C" declares a 2-wide read window B[v : v + 2] that runs past
# the end of B at v == 15 (guarded at runtime by if_then_else).
@T.prim_func
def read_out_of_bound(a: T.handle, c:T.handle) -> None:
    A = T.match_buffer(a, [16], "float32")
    B = T.alloc_buffer([16], "float32")
    C = T.match_buffer(c, [16], "float32")
    for i in T.serial(0, 16):
        with T.block("B"):
            v = T.axis.S(16, i)
            B[v] = A[v]
    for j in T.serial(0, 16):
        with T.block("C"):
            v = T.axis.S(16, j)
            T.reads(B[v : v + 2])
            C[v] = T.if_then_else(v < 15, T.max(B[v], B[v + 1]), B[v], dtype="float32")
# Expected IR: "B" computed-at under loop j; the 2-element producer loop is
# guarded with T.where so the out-of-range element at the boundary is skipped.
@T.prim_func
def read_out_of_bound_after_compute_at(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [16], "float32")
    B = T.alloc_buffer([16], "float32")
    C = T.match_buffer(c, [16], "float32")
    for j in T.serial(0, 16):
        for i in T.serial(0, 2):
            with T.block("B"):
                v = T.axis.S(16, j + i)
                T.where(j + i < 16)
                B[v] = A[v]
        with T.block("C"):
            v = T.axis.S(16, j)
            T.reads([B[v : v + 2]])
            C[v] = T.if_then_else(v < 15, T.max(B[v], B[v + 1]), B[v], dtype="float32")
# Input fixture: two chained reductions — "B" row-reduces A, then "C" reduces B
# into a scalar (0-d) buffer.
@T.prim_func
def multi_reduction(A: T.Buffer((16, 16), "float32"), C: T.Buffer((), "float32")):
    B = T.alloc_buffer((16, ), dtype="float32")
    for i, k in T.grid(16, 16):
        with T.block("B"):
            vi, vk = T.axis.remap("SR", [i, k])
            with T.init():
                B[vi] = 0.0
            B[vi] += A[vi, vk]
    for k in T.grid(16):
        with T.block("C"):
            vk = T.axis.remap("R", [k])
            with T.init():
                C[()] = 0.0
            C[()] += B[vk]
# Expected IR: "B" computed-at under the k loop of "C", so B[k] is produced
# just before it is accumulated into the scalar C.
@T.prim_func
def multi_reduction_after_compute_at(
    A: T.Buffer((16, 16), "float32"),
    C:T.Buffer((), "float32"),
):
    B = T.alloc_buffer((16, ), dtype="float32")
    for k in T.grid(16):
        for kk in T.grid(16):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [k, kk])
                with T.init():
                    B[vi] = 0.0
                B[vi] += A[vi, vk]
        with T.block("C"):
            vk = T.axis.remap("R", [k])
            with T.init():
                C[()] = 0.0
            C[()] += B[vk]
# Input fixture: 3x3 max pooling over a cached copy of X, with boundary checks
# done via T.likely guards inside if_then_else (halo of 1 on each side).
@T.prim_func
def tiled_pooling_read_cache(a: T.handle, b: T.handle) -> None:
    X = T.match_buffer(a, [224, 224], dtype="float32")
    Y = T.match_buffer(b, [224, 224], dtype="float32")
    cache = T.alloc_buffer([224, 224], dtype="float32")
    for hh, ww in T.grid(224, 224):
        with T.block("cache"):
            h, w = T.axis.remap("SS", [hh, ww])
            cache[h, w] = X[h, w]
    for hh_0, ww_0, hh_1, ww_1, khh, kww in T.grid(28, 28, 8, 8, 3, 3):
        with T.block("compute"):
            h = T.axis.spatial(224, hh_0 * 8 + hh_1)
            w = T.axis.spatial(224, ww_0 * 8 + ww_1)
            kh, kw = T.axis.remap("RR", [khh, kww])
            with T.init():
                Y[h, w] = 0.0
            Y[h, w] = T.max(Y[h, w], T.if_then_else(
                T.likely(1 <= h + kh, dtype="bool") and \
                T.likely(h + kh < 225, dtype="bool") and \
                T.likely(1 <= w + kw, dtype="bool") and \
                T.likely(w + kw < 225, dtype="bool"),
                cache[h + kh - 1, w + kw - 1], 0.0, dtype="float32"))
# Expected IR: "cache" computed-at under the tile loops; each 8x8 output tile
# caches a 10x10 input patch (8 + 2 halo), with T.where guarding the borders.
@T.prim_func
def tiled_pooling_read_cache_after_compute_at(a: T.handle, b: T.handle) -> None:
    X = T.match_buffer(a, [224, 224], dtype="float32")
    Y = T.match_buffer(b, [224, 224], dtype="float32")
    cache = T.alloc_buffer([224, 224], dtype="float32")
    for hh_0, ww_0 in T.grid(28, 28):
        for ax0, ax1 in T.grid(10, 10):
            with T.block("cache"):
                h = T.axis.spatial(224, hh_0 * 8 - 1 + ax0)
                w = T.axis.spatial(224, ww_0 * 8 - 1 + ax1)
                T.where(1 <= hh_0 * 8 + ax0 and hh_0 * 8 + ax0 < 225 and 1 <= ww_0 * 8 + ax1 and ww_0 * 8 + ax1 < 225)
                cache[h, w] = X[h, w]
        for hh_1, ww_1, khh, kww in T.grid(8, 8, 3, 3):
            with T.block("compute"):
                h = T.axis.spatial(224, hh_0 * 8 + hh_1)
                w = T.axis.spatial(224, ww_0 * 8 + ww_1)
                kh, kw = T.axis.remap("RR", [khh, kww])
                with T.init():
                    Y[h, w] = 0.0
                Y[h, w] = T.max(Y[h, w], T.if_then_else(
                    T.likely(1 <= h + kh, dtype="bool") and \
                    T.likely(h + kh < 225, dtype="bool") and \
                    T.likely(1 <= w + kw, dtype="bool") and \
                    T.likely(w + kw < 225, dtype="bool"),
                    cache[h + kh - 1, w + kw - 1], 0.0, dtype="float32"))
# Input fixture: 3x3 conv whose 98-wide output is tiled by 15 (non-uniform —
# the last tile is partial, guarded by T.where); x is staged through x_global.
@T.prim_func
def non_uniform_tiled_conv(x: T.Buffer((1, 3, 100, 100), "float32"),
                           w: T.Buffer((16, 3, 3, 3), "float32"),
                           y: T.Buffer((1, 16, 98, 98), "float32")) -> None:
    x_global = T.alloc_buffer([1, 3, 100, 100], dtype="float32")
    for ax0, ax1, ax2, ax3 in T.grid(1, 3, 100, 100):
        with T.block("cache"):
            v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
            x_global[v0, v1, v2, v3] = x[v0, v1, v2, v3]
    for h_o, w_o, n, c_o, h_i, w_i, c_i, kh, kw in T.grid(7, 7, 1, 16, 15, 15, 3, 3, 3):
        with T.block("compute"):
            nn = T.axis.spatial(1, 0)
            cc = T.axis.spatial(16, c_o)
            hh = T.axis.spatial(98, h_o * 15 + h_i)
            ww = T.axis.spatial(98, w_o * 15 + w_i)
            rc, rh, rw = T.axis.remap("RRR", [c_i, kh, kw])
            T.where(h_o * 15 + h_i < 98 and w_o * 15 + w_i < 98)
            with T.init():
                y[nn, cc, hh, ww] = T.float32(0)
            y[nn, cc, hh, ww] = y[nn, cc, hh, ww] + \
                x_global[nn, cc // 16 * 3 + rc, hh + rh, ww + rw] * w[cc, rc, rh, rw]
# Expected IR: "cache" computed-at under the (h_o, w_o) tile loops; each tile
# stages a 17x17 input patch (15 + 2 halo), clipped at the borders by T.where.
@T.prim_func
def non_uniform_tiled_conv_after_compute_at(x: T.Buffer((1, 3, 100, 100), "float32"),
                                            w: T.Buffer((16, 3, 3, 3), "float32"),
                                            y: T.Buffer((1, 16, 98, 98), "float32")) -> None:
    x_global = T.alloc_buffer([1, 3, 100, 100], dtype="float32")
    for h_o, w_o in T.grid(7, 7):
        for ax0, ax1, ax2 in T.grid(3, 17, 17):
            with T.block("cache"):
                v0 = T.axis.spatial(1, 0)
                v1 = T.axis.spatial(3, ax0)
                v2 = T.axis.spatial(100, h_o * 15 + ax1)
                v3 = T.axis.spatial(100, w_o * 15 + ax2)
                T.where(h_o * 15 + ax1 < 100 and w_o * 15 + ax2 < 100)
                x_global[v0, v1, v2, v3] = x[v0, v1, v2, v3]
        for n, c_o, h_i, w_i, c_i, kh, kw in T.grid(1, 16, 15, 15, 3, 3, 3):
            with T.block("compute"):
                nn = T.axis.spatial(1, 0)
                cc = T.axis.spatial(16, c_o)
                hh = T.axis.spatial(98, h_o * 15 + h_i)
                ww = T.axis.spatial(98, w_o * 15 + w_i)
                rc, rh, rw = T.axis.remap("RRR", [c_i, kh, kw])
                T.where(h_o * 15 + h_i < 98 and w_o * 15 + w_i < 98)
                with T.init():
                    y[nn, cc, hh, ww] = T.float32(0)
                y[nn, cc, hh, ww] = y[nn, cc, hh, ww] + \
                    x_global[nn, cc // 16 * 3 + rc, hh + rh, ww + rw] * w[cc, rc, rh, rw]
# Input fixture: two elementwise producers of different lengths (16 and 8)
# feeding a concat-style consumer over the combined 24-element range.
@T.prim_func
def concat_two_elemwise(x: T.Buffer((16,), "float32"),
                        y: T.Buffer((8,), "float32"),
                        T_concat: T.Buffer((24,), "float32")) -> None:
    T_add_1 = T.alloc_buffer([16], dtype="float32")
    T_add_2 = T.alloc_buffer([8], dtype="float32")
    for i in T.serial(16):
        with T.block("T_add_1"):
            ax = T.axis.spatial(16, i)
            T_add_1[ax] = x[ax] + T.float32(1)
    for i in T.serial(8):
        with T.block("T_add_2"):
            ax = T.axis.spatial(8, i)
            T_add_2[ax] = y[ax] + T.float32(2)
    for i in T.serial(24):
        with T.block("T_concat"):
            ax = T.axis.spatial(24, i)
            T_concat[ax] = T.if_then_else(16 <= ax, T_add_2[ax - 16], T_add_1[ax], dtype="float32")
# Expected IR: both producers computed-at under the concat loop; comple­mentary
# T.where predicates select which producer runs for each output index.
@T.prim_func
def concat_two_elemwise_after_compute_at(x: T.Buffer((16,), "float32"),
                                         y: T.Buffer((8,), "float32"),
                                         T_concat: T.Buffer((24,), "float32")) -> None:
    T_add_1 = T.alloc_buffer([16], dtype="float32")
    T_add_2 = T.alloc_buffer([8], dtype="float32")
    for i in T.serial(24):
        with T.block("T_add_1"):
            ax = T.axis.spatial(16, i)
            T.where(i < 16)
            T_add_1[ax] = x[ax] + T.float32(1)
        with T.block("T_add_2"):
            ax = T.axis.spatial(8, i - 16)
            T.where(16 <= i)
            T_add_2[ax] = y[ax] + T.float32(2)
        with T.block("T_concat"):
            ax = T.axis.spatial(24, i)
            T_concat[ax] = T.if_then_else(16 <= ax, T_add_2[ax - 16], T_add_1[ax], dtype="float32")
# Input fixture: consumer "B" flattens the 16x16 temp buffer using
# floordiv/floormod indexing (v_i // 16, v_i % 16).
@T.prim_func
def floordiv_and_floormod_indices(a: T.handle, b: T.handle) -> None:
    X = T.match_buffer(a, [16, 16])
    Y = T.match_buffer(b, [256])
    temp = T.alloc_buffer([16, 16])
    for i, j in T.grid(16, 16):
        with T.block("A"):
            v_i, v_j = T.axis.remap("SS", [i, j])
            temp[v_i, v_j] = X[v_j, v_i] + 1.0
    for i in T.serial(0, 256):
        with T.block("B"):
            v_i = T.axis.remap("S", [i])
            Y[v_i] = temp[v_i // 16, v_i % 16]
# Expected IR: "B" reverse-computed-at under loop i of "A"; the flat index is
# reconstructed as i * 16 + ax0 despite the floordiv/floormod access pattern.
@T.prim_func
def floordiv_and_floormod_indices_after_reverse_compute_at(a: T.handle, b: T.handle) -> None:
    X = T.match_buffer(a, [16, 16], dtype="float32")
    Y = T.match_buffer(b, [256], dtype="float32")
    temp = T.alloc_buffer([16, 16], dtype="float32")
    for i in T.serial(0, 16):
        for j in T.serial(0, 16):
            with T.block("A"):
                v_i, v_j = T.axis.remap("SS", [i, j])
                temp[v_i, v_j] = X[v_j, v_i] + T.float32(1)
        for ax0 in T.serial(0, 16):
            with T.block("B"):
                v_i = T.axis.spatial(256, i * 16 + ax0)
                Y[v_i] = temp[v_i // 16, v_i % 16]
# Input fixture: layout-transform-style producer "In" with deeply nested
# floordiv/floormod index expressions; consumer "Out" reads B through another
# layer of // and % indexing (stress test for recursive index analysis).
@T.prim_func
def recursive_floordiv_floormod(A: T.Buffer((16, 64, 1, 8, 8, 32), "float32"),
                                C: T.Buffer((3, 512, 512), "float32")) -> None:
    T.func_attr({"tir.noalias": True})
    # with T.block("root"):
    B = T.alloc_buffer((1, 128, 16, 8, 2, 32, 2), "float32")
    for axis1, axis2, axis3, axis4, axis5, axis6, axis7 in T.grid(1, 128, 16, 8, 2, 32, 2):
        with T.block("In"):
            v_axis1, v_axis2, v_axis3, v_axis4, v_axis5, v_axis6, v_axis7 = T.axis.remap("SSSSSSS", [axis1, axis2, axis3, axis4, axis5, axis6, axis7])
            T.reads(A[(v_axis2 * 4 + v_axis5 * 2 + v_axis7) // 32, (v_axis3 * 32 + v_axis6) // 8, (v_axis1 * 8 + v_axis4) // 8, (v_axis3 * 32 + v_axis6) % 8, v_axis1 * 8 + v_axis4, (v_axis2 * 4 + v_axis5 * 2 + v_axis7) % 32])
            T.writes(B[v_axis1, v_axis2, v_axis3, v_axis4, v_axis5, v_axis6, v_axis7])
            B[v_axis1, v_axis2, v_axis3, v_axis4, v_axis5, v_axis6, v_axis7] = A[(v_axis2 * 4 + v_axis5 * 2 + v_axis7) // 32, (v_axis3 * 32 + v_axis6) // 8, (v_axis1 * 8 + v_axis4) // 8, (v_axis3 * 32 + v_axis6) % 8, v_axis1 * 8 + v_axis4, (v_axis2 * 4 + v_axis5 * 2 + v_axis7) % 32] + 3
    for ax1, ax2, ax3 in T.grid(3, 512, 512):
        with T.block("Out"):
            v1, v2, v3 = T.axis.remap("SSS", [ax1, ax2, ax3])
            T.reads(B[v1 // 8, v2 // 4, v3 // 32, v1, v2 % 4 // 2, v3 % 32, v2 % 2])
            T.writes(C[v1, v2, v3])
            C[v1, v2, v3] = B[v1 // 8, v2 // 4, v3 // 32, v1, v2 % 4 // 2, v3 % 32, v2 % 2] * 2
# Expected IR: "Out" reverse-computed-at under loop axis3 of "In"; its indices
# are rebased onto axis2/axis3 despite the recursive floordiv/floormod pattern.
@T.prim_func
def recursive_floordiv_floormod_after_reverse_compute_at(A: T.Buffer((16, 64, 1, 8, 8, 32), "float32"), C: T.Buffer((3, 512, 512), "float32")) -> None:
    T.func_attr({"tir.noalias": T.bool(True)})
    # with T.block("root"):
    B = T.alloc_buffer((1, 128, 16, 8, 2, 32, 2))
    for axis1, axis2, axis3 in T.grid(1, 128, 16):
        for axis4, axis5, axis6, axis7 in T.grid(8, 2, 32, 2):
            with T.block("In"):
                v_axis1, v_axis2, v_axis3, v_axis4, v_axis5, v_axis6, v_axis7 = T.axis.remap("SSSSSSS", [axis1, axis2, axis3, axis4, axis5, axis6, axis7])
                T.reads(A[(v_axis2 * 4 + v_axis5 * 2 + v_axis7) // 32, (v_axis3 * 32 + v_axis6) // 8, (v_axis1 * 8 + v_axis4) // 8, (v_axis3 * 32 + v_axis6) % 8, v_axis1 * 8 + v_axis4, (v_axis2 * 4 + v_axis5 * 2 + v_axis7) % 32])
                T.writes(B[v_axis1, v_axis2, v_axis3, v_axis4, v_axis5, v_axis6, v_axis7])
                B[v_axis1, v_axis2, v_axis3, v_axis4, v_axis5, v_axis6, v_axis7] = A[(v_axis2 * 4 + v_axis5 * 2 + v_axis7) // 32, (v_axis3 * 32 + v_axis6) // 8, (v_axis1 * 8 + v_axis4) // 8, (v_axis3 * 32 + v_axis6) % 8, v_axis1 * 8 + v_axis4, (v_axis2 * 4 + v_axis5 * 2 + v_axis7) % 32] + T.float32(3)
        for ax0, ax1, ax2 in T.grid(3, 4, 32):
            with T.block("Out"):
                v1 = T.axis.spatial(3, ax0)
                v2 = T.axis.spatial(512, axis2 * 4 + ax1)
                v3 = T.axis.spatial(512, axis3 * 32 + ax2)
                T.reads(B[v1 // 8, v2 // 4, v3 // 32, v1, v2 % 4 // 2, v3 % 32, v2 % 2])
                T.writes(C[v1, v2, v3])
                C[v1, v2, v3] = B[v1 // 8, v2 // 4, v3 // 32, v1, v2 % 4 // 2, v3 % 32, v2 % 2] * T.float32(2)
# Workload: T_add (x + 1 over 4 elems) feeding T_repeat, which repeats each
# element 16 times over a tiled (8, 8) loop nest.
@T.prim_func
def tiled_repeat_op(x: T.Buffer((4,), "float32"), T_repeat: T.Buffer((64,), "float32")) -> None:
    T_add = T.alloc_buffer([4], dtype="float32")
    for i0 in T.serial(4):
        with T.block("T_add"):
            ax0 = T.axis.spatial(4, i0)
            T_add[ax0] = x[ax0] + 1.0
    for i0_0, i0_1 in T.grid(8, 8):
        with T.block("T_repeat"):
            ax0 = T.axis.spatial(64, i0_0 * 8 + i0_1)
            T_repeat[ax0] = T_add[ax0 // 16]
# Expected IR after compute_at(T_add, i0_0) on `tiled_repeat_op`:
# T_add is recomputed under the outer repeat loop at index i0_0 // 2.
@T.prim_func
def tiled_repeat_op_after_compute_at(x: T.Buffer((4,), "float32"), T_repeat: T.Buffer((64,), "float32")) -> None:
    T_add = T.alloc_buffer([4], dtype="float32")
    for i0_0 in T.serial(8):
        with T.block("T_add"):
            ax0 = T.axis.spatial(4, i0_0 // 2)
            T_add[ax0] = x[ax0] + T.float32(1)
        for i0_1 in T.serial(8):
            with T.block("T_repeat"):
                ax0 = T.axis.spatial(64, i0_0 * 8 + i0_1)
                T_repeat[ax0] = T_add[ax0 // 16]
# Workload whose block C iterates j over 32 but is statically bounded to j < 1
# by T.where; used to check that compute_at simplifies static predicates.
@T.prim_func
def static_bound(A: T.Buffer((32, 1), "float32"), C: T.Buffer((32, 1), "float32")) -> None:
    B = T.alloc_buffer((32, 1), "float32")
    for i, j in T.grid(32, 1):
        with T.block("B"):
            vi = T.axis.spatial(32, i)
            vj = T.axis.spatial(1, j)
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(32, 32):
        with T.block("C"):
            vi = T.axis.spatial(32, i)
            vj = T.axis.spatial(1, j)
            T.where(j < 1)
            C[vi, vj] = B[vi, vj] + 1.0
# Expected IR after compute_at(B, i-loop of C) on `static_bound` with
# preserve_unit_loops=True; B gains unit loops ax0/ax1, C's predicate remains.
@T.prim_func
def static_bound_after_compute_at(A: T.Buffer((32, 1), "float32"), C: T.Buffer((32, 1), "float32")) -> None:
    B = T.alloc_buffer((32, 1), "float32")
    for i in range(32):
        for ax0, ax1 in T.grid(1, 1):
            with T.block("B"):
                vi = T.axis.spatial(32, i + ax0)
                vj = T.axis.spatial(1, ax1)
                B[vi, vj] = A[vi, vj] * 2.0
        for j in range(32):
            with T.block("C"):
                vi = T.axis.spatial(32, i)
                vj = T.axis.spatial(1, j)
                T.where(j < 1)
                C[vi, vj] = B[vi, vj] + 1.0
# pylint: enable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
# fmt: on
# Parametrized fixture: every test taking `use_block_name` runs twice, once
# passing block objects to the schedule API and once passing block names.
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_compute_at_two_elementwise(use_block_name):
    """compute_at of block B under the first loop of C, keeping unit loops."""
    schedule = tir.Schedule(two_elementwise, debug_mask="all")
    producer = "B" if use_block_name else schedule.get_block("B")
    target_loop, _ = schedule.get_loops("C" if use_block_name else schedule.get_block("C"))
    schedule.compute_at(producer, target_loop, preserve_unit_loops=True)
    tvm.ir.assert_structural_equal(two_elementwise_after_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=two_elementwise)
def test_compute_at_blockized_1(use_block_name):
    """compute_at of B under the second loop of the blockized C_outer."""
    schedule = tir.Schedule(blockized_1, debug_mask="all")
    producer = schedule.get_block("B")
    _, target_loop = schedule.get_loops(schedule.get_block("C_outer"))
    schedule.compute_at(producer, target_loop, preserve_unit_loops=True)
    tvm.ir.assert_structural_equal(blockized_after_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=blockized_1)
def test_compute_at_blockized_2(use_block_name):
    """compute_at of the blockized B_outer under the second loop of C."""
    schedule = tir.Schedule(blockized_2, debug_mask="all")
    producer = schedule.get_block("B_outer")
    _, target_loop, _, _ = schedule.get_loops(schedule.get_block("C"))
    schedule.compute_at(producer, target_loop, preserve_unit_loops=True)
    tvm.ir.assert_structural_equal(blockized_2_after_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=blockized_2)
def test_compute_at_cuda_matmul_0(use_block_name):
    """compute_at of C under the sixth loop of C_local."""
    schedule = tir.Schedule(cuda_matmul_0, debug_mask="all")
    producer = schedule.get_block("C")
    _, _, _, _, _, target_loop, _, _ = schedule.get_loops(schedule.get_block("C_local"))
    schedule.compute_at(producer, target_loop, preserve_unit_loops=True)
    tvm.ir.assert_structural_equal(cuda_matmul_0_after_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=cuda_matmul_0)
def test_compute_at_cuda_matmul_1(use_block_name):
    """compute_at of A_shared_local under the eighth loop of C; expects cuda_matmul_2."""
    schedule = tir.Schedule(cuda_matmul_1, debug_mask="all")
    producer = schedule.get_block("A_shared_local")
    _, _, _, _, _, _, _, target_loop, _, _, _ = schedule.get_loops(schedule.get_block("C"))
    schedule.compute_at(producer, target_loop, preserve_unit_loops=True)
    tvm.ir.assert_structural_equal(cuda_matmul_2, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=cuda_matmul_1)
def test_compute_at_cuda_matmul_2(use_block_name):
    """compute_at of B_shared_local under the eighth loop of C; expects cuda_matmul_3."""
    schedule = tir.Schedule(cuda_matmul_2, debug_mask="all")
    producer = schedule.get_block("B_shared_local")
    _, _, _, _, _, _, _, target_loop, _, _, _ = schedule.get_loops(schedule.get_block("C"))
    schedule.compute_at(producer, target_loop, preserve_unit_loops=True)
    tvm.ir.assert_structural_equal(cuda_matmul_3, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=cuda_matmul_2)
def test_compute_at_cuda_matmul_3(use_block_name):
    """compute_at of A_shared under the seventh loop of C; expects cuda_matmul_4."""
    schedule = tir.Schedule(cuda_matmul_3, debug_mask="all")
    producer = schedule.get_block("A_shared")
    _, _, _, _, _, _, target_loop, _, _, _, _ = schedule.get_loops(schedule.get_block("C"))
    schedule.compute_at(producer, target_loop, preserve_unit_loops=True)
    tvm.ir.assert_structural_equal(cuda_matmul_4, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=cuda_matmul_3)
def test_compute_at_cuda_matmul_4(use_block_name):
    """compute_at of B_shared under the seventh loop of C; expects cuda_matmul_5."""
    schedule = tir.Schedule(cuda_matmul_4, debug_mask="all")
    producer = schedule.get_block("B_shared")
    _, _, _, _, _, _, target_loop, _, _, _, _ = schedule.get_loops(schedule.get_block("C"))
    schedule.compute_at(producer, target_loop, preserve_unit_loops=True)
    tvm.ir.assert_structural_equal(cuda_matmul_5, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=cuda_matmul_4)
def test_compute_at_reduction_block(use_block_name):
    """compute_at of reduction block B under the single loop of C."""
    schedule = tir.Schedule(multi_reduction, debug_mask="all")
    producer = schedule.get_block("B")
    (target_loop,) = schedule.get_loops(schedule.get_block("C"))
    schedule.compute_at(producer, target_loop, preserve_unit_loops=False)
    tvm.ir.assert_structural_equal(multi_reduction_after_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=multi_reduction)
def test_compute_at_tiled_pooling_read_cache(use_block_name):
    """compute_at of the read cache under the outer width loop of the pooling compute."""
    schedule = tir.Schedule(tiled_pooling_read_cache, debug_mask="all")
    compute_blk = schedule.get_block("compute")
    _, w_outer, _, _, _, _ = schedule.get_loops(compute_blk)
    cache_blk = schedule.get_block("cache")
    schedule.compute_at(cache_blk, w_outer)
    tvm.ir.assert_structural_equal(tiled_pooling_read_cache_after_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=tiled_pooling_read_cache)
def test_compute_at_non_uniform_tiled_conv(use_block_name):
    """compute_at of the cache block into a non-uniformly tiled convolution."""
    schedule = tir.Schedule(non_uniform_tiled_conv, debug_mask="all")
    compute_blk = schedule.get_block("compute")
    target_loop = schedule.get_loops(compute_blk)[1]
    schedule.compute_at(schedule.get_block("cache"), target_loop)
    tvm.ir.assert_structural_equal(non_uniform_tiled_conv_after_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=non_uniform_tiled_conv)
def test_compute_at_concat(use_block_name):
    """compute_at of both addend blocks under the first loop of the concat block."""
    schedule = tir.Schedule(concat_two_elemwise, debug_mask="all")
    concat_blk = schedule.get_block("T_concat")
    target_axis = schedule.get_loops(concat_blk)[0]
    # Same order as the expected trace: T_add_1 first, then T_add_2.
    for producer_name in ("T_add_1", "T_add_2"):
        schedule.compute_at(schedule.get_block(producer_name), target_axis)
    tvm.ir.assert_structural_equal(concat_two_elemwise_after_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=concat_two_elemwise)
def test_compute_at_tiled_repeat_op(use_block_name):
    """compute_at of T_add under the outer loop of T_repeat."""
    schedule = tir.Schedule(tiled_repeat_op, debug_mask="all")
    outer_axis, _ = schedule.get_loops(schedule.get_block("T_repeat"))
    schedule.compute_at(schedule.get_block("T_add"), outer_axis)
    tvm.ir.assert_structural_equal(tiled_repeat_op_after_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=tiled_repeat_op)
def test_compute_at_rev_iter():
    """compute_at of a producer whose write indices reverse the iteration order.

    The producer b0 writes Y[9 - vi, 9 - vj]; after compute_at under the first
    loop of b1 the recomputed region requires the reversed bindings shown in
    `after`.
    """
    # NOTE: use the call form T.Buffer((...), "...") for consistency with the
    # rest of this file; the subscript form T.Buffer[...] is deprecated.
    @T.prim_func
    def before(X: T.Buffer((10, 10), "float32"), Z: T.Buffer((10, 10), "float32")):
        Y = T.alloc_buffer([10, 10], "float32")
        for i, j in T.grid(10, 10):
            with T.block("b0"):
                vi, vj = T.axis.remap("SS", [i, j])
                Y[9 - vi, 9 - vj] = X[vi, vj] + 1.0
        for i, j in T.grid(10, 10):
            with T.block("b1"):
                vi, vj = T.axis.remap("SS", [i, j])
                Z[vi, vj] = Y[vj, vi] + 2.0

    @T.prim_func
    def after(X: T.Buffer((10, 10), "float32"), Z: T.Buffer((10, 10), "float32")):
        Y = T.alloc_buffer([10, 10], "float32")
        for i in range(10):
            for j in range(10):
                with T.block("b0"):
                    vi = T.axis.spatial(10, j)
                    vj = T.axis.spatial(10, 9 - i)
                    Y[9 - vi, 9 - vj] = X[vi, vj] + 1.0
            for j in range(10):
                with T.block("b1"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    Z[vi, vj] = Y[vj, vi] + 2.0

    sch = tir.Schedule(before, debug_mask="all")
    axis = sch.get_loops(sch.get_block("b1"))[0]
    sch.compute_at(sch.get_block("b0"), axis)
    tvm.ir.assert_structural_equal(after, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=before)
def test_reverse_compute_at_tiled(use_block_name):
    """reverse_compute_at of C under the third loop of the tiled B."""
    schedule = tir.Schedule(tiled, debug_mask="all")
    consumer = schedule.get_block("C")
    _, _, target_loop, _ = schedule.get_loops(schedule.get_block("B"))
    schedule.reverse_compute_at(consumer, target_loop, preserve_unit_loops=False)
    tvm.ir.assert_structural_equal(tiled_after_reverse_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=tiled)
def test_reverse_compute_at_tiled_trivial_binding(use_block_name):
    """reverse_compute_at of C under the third loop of B with trivial bindings."""
    schedule = tir.Schedule(tiled_trivial_binding, debug_mask="all")
    consumer = schedule.get_block("C")
    _, _, target_loop, _ = schedule.get_loops(schedule.get_block("B"))
    schedule.reverse_compute_at(consumer, target_loop, preserve_unit_loops=False)
    tvm.ir.assert_structural_equal(tiled_trivial_binding_after_reverse_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=tiled_trivial_binding)
def test_reverse_compute_at_blockized_2(use_block_name):
    """reverse_compute_at of C under the second loop of the blockized B_outer."""
    schedule = tir.Schedule(blockized_2, debug_mask="all")
    consumer = schedule.get_block("C")
    _, target_loop = schedule.get_loops(schedule.get_block("B_outer"))
    schedule.reverse_compute_at(consumer, target_loop, preserve_unit_loops=True)
    tvm.ir.assert_structural_equal(blockized_2_after_reverse_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=blockized_2)
def test_reverse_compute_at_factorized(use_block_name):
    """reverse_compute_at of B under the second loop of the rfactor block B_rf."""
    schedule = tir.Schedule(factorized, debug_mask="all")
    consumer = schedule.get_block("B")
    _, target_loop, _, _ = schedule.get_loops(schedule.get_block("B_rf"))
    schedule.reverse_compute_at(consumer, target_loop, preserve_unit_loops=False)
    tvm.ir.assert_structural_equal(factorized_after_reverse_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=factorized)
def test_reverse_compute_at_floordiv_and_floormod_indices(use_block_name):
    """reverse_compute_at of B under A's first loop with floordiv/floormod indices."""
    schedule = tir.Schedule(floordiv_and_floormod_indices, debug_mask="all")
    block_a = schedule.get_block("A")
    block_b = schedule.get_block("B")
    schedule.reverse_compute_at(block_b, schedule.get_loops(block_a)[0])
    tvm.ir.assert_structural_equal(
        floordiv_and_floormod_indices_after_reverse_compute_at, schedule.mod["main"]
    )
    verify_trace_roundtrip(sch=schedule, mod=floordiv_and_floormod_indices)
def test_reverse_compute_at_floordiv_and_floormod_recursive(use_block_name):
    """reverse_compute_at of "Out" under the third loop of "In" (recursive div/mod)."""
    schedule = tir.Schedule(recursive_floordiv_floormod, debug_mask="all")
    writer = schedule.get_block("Out")
    schedule.reverse_compute_at(writer, schedule.get_loops("In")[2])
    tvm.ir.assert_structural_equal(
        recursive_floordiv_floormod_after_reverse_compute_at, schedule.mod["main"]
    )
    verify_trace_roundtrip(sch=schedule, mod=recursive_floordiv_floormod)
def test_read_out_of_bound(use_block_name):
    """compute_at of B where the consumer C reads beyond B's iteration extent."""
    schedule = tir.Schedule(read_out_of_bound, debug_mask="all")
    producer = schedule.get_block("B")
    (target_loop,) = schedule.get_loops(schedule.get_block("C"))
    schedule.compute_at(producer, target_loop)
    tvm.ir.assert_structural_equal(read_out_of_bound_after_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=read_out_of_bound)
def test_compact_dataflow(use_block_name):
    """compute_at of B under the second loop of C_1 in a not-fully-compact region."""
    schedule = tir.Schedule(not_all_compact_data_flow, debug_mask="all")
    producer = schedule.get_block("B")
    _, target_loop = schedule.get_loops(schedule.get_block("C_1"))
    schedule.compute_at(producer, target_loop)
    tvm.ir.assert_structural_equal(not_all_compact_data_flow_after_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=not_all_compact_data_flow)
def test_compute_at_simplify_static_bound(use_block_name):
    """compute_at where a static T.where bound lets the region simplify."""
    schedule = tir.Schedule(static_bound, debug_mask="all")
    producer = schedule.get_block("B")
    target_loop, _ = schedule.get_loops(schedule.get_block("C"))
    schedule.compute_at(producer, target_loop, preserve_unit_loops=True)
    tvm.ir.assert_structural_equal(static_bound_after_compute_at, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=static_bound)
def test_compute_at_simplify_symbolic_predicate():
    """cache_read + split + compute_at on a symbolically-shaped buffer (n * 32);
    the generated block predicate must simplify away entirely."""
    @tvm.script.ir_module
    class Before:
        @T.prim_func
        def main(x: T.handle, y: T.handle, n: T.int64):
            X = T.match_buffer(x, (T.int64(8), n * 32), "float32")
            Y = T.match_buffer(y, (T.int64(8), n * 32), "float32")
            for i, k in T.grid(T.int64(8), n * 32):
                with T.block("Y"):
                    vi, vk = T.axis.remap("SS", [i, k])
                    Y[vi, vk] = X[vi, vk]
    @tvm.script.ir_module
    class After:
        @T.prim_func
        def main(x: T.handle, y: T.handle, n: T.int64):
            X = T.match_buffer(x, (T.int64(8), n * T.int64(32)))
            Y = T.match_buffer(y, (T.int64(8), n * T.int64(32)))
            X_global = T.alloc_buffer((T.int64(8), n * T.int64(32)))
            for i, k_0 in T.grid(T.int64(8), n):
                # Cache stage and compute stage are nested under k_0 with no
                # T.where predicate -- this is what the test verifies.
                for ax0 in range(T.int64(32)):
                    with T.block("X_global"):
                        v0 = T.axis.spatial(T.int64(8), i)
                        v1 = T.axis.spatial(n * T.int64(32), k_0 * T.int64(32) + ax0)
                        X_global[v0, v1] = X[v0, v1]
                for k_1 in range(T.int64(32)):
                    with T.block("Y"):
                        vi = T.axis.spatial(T.int64(8), i)
                        vk = T.axis.spatial(n * T.int64(32), k_0 * T.int64(32) + k_1)
                        Y[vi, vk] = X_global[vi, vk]
    sch = tir.Schedule(Before, debug_mask="all")
    block = sch.get_block("Y")
    i, k = sch.get_loops(sch.get_block("Y"))
    ko, ki = sch.split(k, [None, 32])
    XX = sch.cache_read(block, 0, "global")
    sch.compute_at(XX, ko)
    tvm.ir.assert_structural_equal(sch.mod, After)
def test_compute_at_non_perfect_channel_group(use_block_name):
    """compute_at where 45 bias groups over 720 channels do not tile evenly;
    the moved init block gets a non-perfect extent (23 = ceil(45 / 2) + overlap)."""
    @T.prim_func
    def grouped_channel_bias(
        X: T.Buffer((720, 8, 8), "float32"), Y: T.Buffer((720, 8, 8), "float32")
    ):
        # NOTE(review): scope="" differs from the plain alloc_buffer in the
        # expected func below -- presumably equivalent to the default scope;
        # confirm against the TVMScript parser.
        B = T.alloc_buffer([45], dtype="float32", scope="")
        for i in T.grid(45):
            with T.block("init"):
                vi = T.axis.remap("S", [i])
                B[vi] = vi
        for c_o, h, w, c_i in T.grid(2, 8, 8, 360):
            with T.block("compute"):
                hh, ww = T.axis.remap("SS", [h, w])
                cc = T.axis.spatial(720, c_o * 360 + c_i)
                Y[cc, hh, ww] = X[cc, hh, ww] + B[cc // 16]
    @T.prim_func
    def grouped_channel_bias_non_perfect_tiled(
        X: T.Buffer((720, 8, 8), "float32"), Y: T.Buffer((720, 8, 8), "float32")
    ):
        B = T.alloc_buffer([45], dtype="float32")
        for c_o in range(2):
            for ax0 in range(23):
                with T.block("init"):
                    vi = T.axis.spatial(45, c_o * 22 + ax0)
                    B[vi] = vi
            for h, w, c_i in T.grid(8, 8, 360):
                with T.block("compute"):
                    hh, ww = T.axis.remap("SS", [h, w])
                    cc = T.axis.spatial(720, c_o * 360 + c_i)
                    Y[cc, hh, ww] = X[cc, hh, ww] + B[cc // 16]
    sch = tir.Schedule(grouped_channel_bias, debug_mask="all")
    loop = sch.get_loops(sch.get_block("compute"))[0]
    sch.compute_at(sch.get_block("init"), loop)
    tvm.ir.assert_structural_equal(sch.mod["main"], grouped_channel_bias_non_perfect_tiled)
def test_fail_subtree_complete_block(use_block_name):
    """compute_at must reject a subtree that is not a complete block."""
    schedule = tir.Schedule(fail_subtree_compact_dataflow, debug_mask="all")
    producer = schedule.get_block("B_0")
    target_loop, _ = schedule.get_loops(schedule.get_block("C"))
    with pytest.raises(tvm.tir.ScheduleError, match="complete block"):
        schedule.compute_at(producer, target_loop)
def test_fail_not_in_same_scope(use_block_name):
    """compute_at must reject a target loop outside the block's scope."""
    schedule = tir.Schedule(blockized_1, debug_mask="all")
    producer = "B" if use_block_name else schedule.get_block("B")
    target_loop, _ = schedule.get_loops(schedule.get_block("C_inner"))
    with pytest.raises(tvm.tir.ScheduleError, match="same block scope"):
        schedule.compute_at(producer, target_loop)
def test_fail_loop_is_ancestor_of_block(use_block_name):
    """compute_at must reject a target loop that is an ancestor of the block."""
    schedule = tir.Schedule(two_elementwise, debug_mask="all")
    producer = "B" if use_block_name else schedule.get_block("B")
    target_loop, _ = schedule.get_loops(schedule.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError, match="ancestor of block"):
        schedule.compute_at(producer, target_loop)
def test_fail_output_block(use_block_name):
    """compute_at must reject moving an output block."""
    schedule = tir.Schedule(tiled, debug_mask="all")
    out_block = "C" if use_block_name else schedule.get_block("C")
    target_loop, _, _, _ = schedule.get_loops(schedule.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError, match="output block"):
        schedule.compute_at(out_block, target_loop)
def test_fail_all_consumers_under_loop(use_block_name):
    """compute_at must reject when some consumer is outside the target loop."""
    schedule = tir.Schedule(fail_all_consumers_under_loop, debug_mask="all")
    producer = "B" if use_block_name else schedule.get_block("B")
    target_loop, _ = schedule.get_loops(schedule.get_block("C"))
    with pytest.raises(tvm.tir.ScheduleError, match="requires all the consumer"):
        schedule.compute_at(producer, target_loop)
def test_fail_all_producers_under_loop(use_block_name):
    """reverse_compute_at must reject when some producer is outside the loop."""
    schedule = tir.Schedule(fail_all_producers_under_loop, debug_mask="all")
    consumer = "D" if use_block_name else schedule.get_block("D")
    target_loop, _ = schedule.get_loops(schedule.get_block("C"))
    with pytest.raises(tvm.tir.ScheduleError, match="requires all the producer"):
        schedule.reverse_compute_at(consumer, target_loop)
def test_compute_at_int64_loop(use_block_name):
    """compute_at with int64 shape variables as loop extents."""
    def _make_workload():
        # Two chained elementwise computes over symbolic int64 shapes (n, m).
        n = te.var("n", dtype="int64")
        m = te.var("m", dtype="int64")
        A = te.placeholder((n, m), name="A", dtype="float32")
        B = te.placeholder((n, m), name="B", dtype="float32")
        C = te.compute((n, m), lambda i, j: A[i, j] + B[i, j], name="C")
        D = te.compute((n, m), lambda i, j: C[i, j] + 1.0, name="D")
        return te.create_prim_func([A, B, D])
    workload = _make_workload()
    schedule = tir.Schedule(workload, debug_mask="all")
    block_c = "C" if use_block_name else schedule.get_block("C")
    block_d = "D" if use_block_name else schedule.get_block("D")
    outer_loop, _ = schedule.get_loops(block_d)
    schedule.compute_at(block_c, outer_loop)
    verify_trace_roundtrip(sch=schedule, mod=workload)
def test_compute_at_to_index():
    """compute_at with index=-2: insert the pad block at the second-to-last
    position among the children of the target loop (before conv, after nothing,
    while wbuf stays in place)."""
    @T.prim_func
    def multi_producers_conv(
        data: T.Buffer((1, 3, 224, 224), "int8"),
        w: T.Buffer((16, 3, 7, 7), "int8"),
        conv: T.Buffer((1, 16, 112, 112), "int32"),
    ) -> None:
        pad = T.alloc_buffer([1, 3, 230, 230], dtype="int8")
        wbuf = T.alloc_buffer([16, 3, 7, 7], dtype="int8")
        for i0, i1, i2, i3 in T.grid(1, 3, 230, 230):
            with T.block("pad"):
                i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(data[i0_1, i1_1, i2_1 - 3, i3_1 - 3])
                T.writes(pad[i0_1, i1_1, i2_1, i3_1])
                pad[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
                    3 <= i2_1 and i2_1 < 227 and 3 <= i3_1 and i3_1 < 227,
                    data[i0_1, i1_1, i2_1 - 3, i3_1 - 3],
                    T.int8(0),
                    dtype="int8",
                )
        for i0 in T.serial(1):
            for ax0, ax1, ax2, ax3 in T.grid(16, 3, 7, 7):
                with T.block("wbuf"):
                    v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
                    T.reads(w[v0, v1, v2, v3])
                    T.writes(wbuf[v0, v1, v2, v3])
                    wbuf[v0, v1, v2, v3] = w[v0, v1, v2, v3]
            for i1, i2, i3, i4, i5, i6 in T.grid(16, 112, 112, 3, 7, 7):
                with T.block("conv"):
                    nn, ff, yy, xx, rc, ry, rx = T.axis.remap(
                        "SSSSRRR", [i0, i1, i2, i3, i4, i5, i6]
                    )
                    T.reads(pad[nn, rc, yy * 2 + ry, xx * 2 + rx], wbuf[ff, rc, ry, rx])
                    T.writes(conv[nn, ff, yy, xx])
                    with T.init():
                        conv[nn, ff, yy, xx] = 0
                    conv[nn, ff, yy, xx] = conv[nn, ff, yy, xx] + T.cast(
                        pad[nn, rc, yy * 2 + ry, xx * 2 + rx], "int32"
                    ) * T.cast(wbuf[ff, rc, ry, rx], "int32")
    @T.prim_func
    def multi_producers_after_compute_at(
        data: T.Buffer((1, 3, 224, 224), "int8"),
        w: T.Buffer((16, 3, 7, 7), "int8"),
        conv: T.Buffer((1, 16, 112, 112), "int32"),
    ) -> None:
        pad = T.alloc_buffer([1, 3, 230, 230], dtype="int8")
        wbuf = T.alloc_buffer([16, 3, 7, 7], dtype="int8")
        for i0 in T.serial(1):
            # pad inserted at index -2 of the i0 loop: before wbuf and conv.
            for ax0, ax1, ax2 in T.grid(3, 229, 229):
                with T.block("pad"):
                    i0_1 = T.axis.spatial(1, 0)
                    i1_1 = T.axis.spatial(3, ax0)
                    i2_1 = T.axis.spatial(230, ax1)
                    i3_1 = T.axis.spatial(230, ax2)
                    T.reads(data[i0_1, i1_1, i2_1 - 3, i3_1 - 3])
                    T.writes(pad[i0_1, i1_1, i2_1, i3_1])
                    pad[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
                        3 <= i2_1 and i2_1 < 227 and 3 <= i3_1 and i3_1 < 227,
                        data[i0_1, i1_1, i2_1 - 3, i3_1 - 3],
                        T.int8(0),
                        dtype="int8",
                    )
            for ax0, ax1, ax2, ax3 in T.grid(16, 3, 7, 7):
                with T.block("wbuf"):
                    v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
                    T.reads(w[v0, v1, v2, v3])
                    T.writes(wbuf[v0, v1, v2, v3])
                    wbuf[v0, v1, v2, v3] = w[v0, v1, v2, v3]
            for i1, i2, i3, i4, i5, i6 in T.grid(16, 112, 112, 3, 7, 7):
                with T.block("conv"):
                    nn, ff, yy, xx, rc, ry, rx = T.axis.remap(
                        "SSSSRRR", [i0, i1, i2, i3, i4, i5, i6]
                    )
                    T.reads(pad[nn, rc, yy * 2 + ry, xx * 2 + rx], wbuf[ff, rc, ry, rx])
                    T.writes(conv[nn, ff, yy, xx])
                    with T.init():
                        conv[nn, ff, yy, xx] = 0
                    conv[nn, ff, yy, xx] = conv[nn, ff, yy, xx] + T.cast(
                        pad[nn, rc, yy * 2 + ry, xx * 2 + rx], "int32"
                    ) * T.cast(wbuf[ff, rc, ry, rx], "int32")
    sch = tir.Schedule(multi_producers_conv, debug_mask="all")
    block_c = sch.get_block("pad")
    axis = sch.get_loops("conv")[0]
    sch.compute_at(block_c, axis, index=-2)
    tvm.ir.assert_structural_equal(multi_producers_after_compute_at, sch.mod["main"])
def test_reverse_compute_at_to_index():
    """reverse_compute_at with index=1: block D is inserted as the second child
    of the i_1 loop, between the B and C stages."""
    @T.prim_func
    def main(A: T.Buffer((128, 128), "float32"), D: T.Buffer((128, 128), "float32")) -> None:
        B = T.alloc_buffer([128, 128], dtype="float32")
        C = T.alloc_buffer([128, 128], dtype="float32")
        for i_0, j_0, i_1 in T.grid(8, 8, 16):
            for j_1 in T.serial(16):
                with T.block("B"):
                    vi = T.axis.spatial(128, i_0 * 16 + i_1)
                    vj = T.axis.spatial(128, j_0 * 16 + j_1)
                    T.reads(A[vi, vj])
                    T.writes(B[vi, vj])
                    B[vi, vj] = A[vi, vj] * T.float32(2)
            for ax0 in T.serial(16):
                with T.block("C"):
                    vi = T.axis.spatial(128, i_0 * 16 + i_1)
                    vj = T.axis.spatial(128, j_0 * 16 + ax0)
                    T.reads(B[vi, vj])
                    T.writes(C[vi, vj])
                    C[vi, vj] = B[vi, vj] + T.float32(1)
        for i, j in T.grid(128, 128):
            with T.block("D"):
                vi, vj = T.axis.remap("SS", [i, j])
                T.reads(B[vi, vj])
                T.writes(D[vi, vj])
                D[vi, vj] = B[vi, vj] + T.float32(1)
    @T.prim_func
    def main_reverse_compute_at(
        A: T.Buffer((128, 128), "float32"), D: T.Buffer((128, 128), "float32")
    ) -> None:
        B = T.alloc_buffer([128, 128], dtype="float32")
        C = T.alloc_buffer([128, 128], dtype="float32")
        for i_0, j_0, i_1 in T.grid(8, 8, 16):
            for j_1 in T.serial(16):
                with T.block("B"):
                    vi = T.axis.spatial(128, i_0 * 16 + i_1)
                    vj = T.axis.spatial(128, j_0 * 16 + j_1)
                    T.reads(A[vi, vj])
                    T.writes(B[vi, vj])
                    B[vi, vj] = A[vi, vj] * T.float32(2)
            # D lands at child index 1 (between B and C) because of index=1.
            for ax0 in T.serial(16):
                with T.block("D"):
                    vi = T.axis.spatial(128, i_0 * 16 + i_1)
                    vj = T.axis.spatial(128, j_0 * 16 + ax0)
                    T.reads(B[vi, vj])
                    T.writes(D[vi, vj])
                    D[vi, vj] = B[vi, vj] + T.float32(1)
            for ax0 in T.serial(16):
                with T.block("C"):
                    vi = T.axis.spatial(128, i_0 * 16 + i_1)
                    vj = T.axis.spatial(128, j_0 * 16 + ax0)
                    T.reads(B[vi, vj])
                    T.writes(C[vi, vj])
                    C[vi, vj] = B[vi, vj] + T.float32(1)
    sch = tir.Schedule(main, debug_mask="all")
    block_c = sch.get_block("D")
    axis = sch.get_loops("B")[2]
    sch.reverse_compute_at(block_c, axis, index=1)
    tvm.ir.assert_structural_equal(main_reverse_compute_at, sch.mod["main"])
def test_reverse_compute_at_with_unit_loop():
    """reverse_compute_at with preserve_unit_loops=True where the consumer D is
    much smaller than the producer tile; a T.where predicate guards the copy."""
    @T.prim_func
    def main(A: T.Buffer((128, 128), "float32"), D: T.Buffer((1, 2, 1), "float32")) -> None:
        B = T.alloc_buffer([128, 128], dtype="float32")
        for i_0, j_0, i_1 in T.grid(T.int64(8), T.int64(8), T.int64(16)):
            for j_1 in T.serial(T.int64(16)):
                with T.block("B"):
                    vi = T.axis.spatial(T.int64(128), i_0 * T.int64(16) + i_1)
                    vj = T.axis.spatial(T.int64(128), j_0 * T.int64(16) + j_1)
                    T.reads(A[vi, vj])
                    T.writes(B[vi, vj])
                    B[vi, vj] = A[vi, vj] * T.float32(2)
        for ax0, ax1, ax2 in T.grid(T.int64(1), T.int64(2), T.int64(1)):
            with T.block("D"):
                v0, v1, v2 = T.axis.remap("SSS", [ax0, ax1, ax2])
                T.reads(B[v0, v1])
                T.writes(D[v0, v1, v2])
                D[v0, v1, v2] = B[v0, v1] + T.float32(1)
    @T.prim_func
    def main_reverse_compute_at(
        A: T.Buffer((128, 128), "float32"), D: T.Buffer((1, 2, 1), "float32")
    ):
        B = T.alloc_buffer([128, 128], dtype="float32")
        for i_0, j_0, i_1 in T.grid(T.int64(8), T.int64(8), T.int64(16)):
            for j_1 in T.serial(T.int64(16)):
                with T.block("B"):
                    vi = T.axis.spatial(T.int64(128), i_0 * T.int64(16) + i_1)
                    vj = T.axis.spatial(T.int64(128), j_0 * T.int64(16) + j_1)
                    T.reads(A[vi, vj])
                    T.writes(B[vi, vj])
                    B[vi, vj] = A[vi, vj] * T.float32(2)
            for ax0, ax1, ax2 in T.grid(T.int64(1), T.int64(16), T.int64(1)):
                with T.block("D"):
                    # Predicate restricts the copy to D's tiny (1, 2, 1) extent.
                    T.where(
                        i_0 * T.int64(16) + i_1 < T.int64(1)
                        and j_0 * T.int64(16) + ax1 < T.int64(2)
                    )
                    v0 = T.axis.spatial(T.int64(1), i_0 * T.int64(16) + i_1 + ax0)
                    v1 = T.axis.spatial(T.int64(2), j_0 * T.int64(16) + ax1)
                    v2 = T.axis.spatial(T.int64(1), ax2)
                    T.reads(B[v0, v1])
                    T.writes(D[v0, v1, v2])
                    D[v0, v1, v2] = B[v0, v1] + T.float32(1)
    sch = tir.Schedule(main, debug_mask="all")
    block_d = sch.get_block("D")
    axis = sch.get_loops("B")[2]
    sch.reverse_compute_at(block_d, axis, preserve_unit_loops=True, index=1)
    tvm.ir.assert_structural_equal(main_reverse_compute_at, sch.mod["main"])
def test_reverse_compute_at_layout_trans():
    """reverse_compute_at of a layout-transform consumer whose indices use
    floordiv/floormod of the producer axes."""
    @T.prim_func
    def before(A: T.Buffer((1, 3, 5, 5, 16), "float32"), C: T.Buffer((1, 6, 5, 5, 8), "float32")):
        B = T.alloc_buffer((1, 3, 5, 5, 16))
        for i0, i1, i2, i3, i4 in T.grid(1, 3, 5, 5, 16):
            with T.block("compute"):
                v_i0, v_i1, v_i2, v_i3, v_i4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                B[v_i0, v_i1, v_i2, v_i3, v_i4] = A[v_i0, v_i1, v_i2, v_i3, v_i4] + T.float32(1)
        for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 6, 5, 5, 8):
            with T.block("T_layout_trans"):
                v_ax0, v_ax1, v_ax2, v_ax3, v_ax4 = T.axis.remap("SSSSS", [ax0, ax1, ax2, ax3, ax4])
                C[v_ax0, v_ax1, v_ax2, v_ax3, v_ax4] = B[
                    v_ax0, (v_ax1 * 8 + v_ax4) // 16, v_ax2, v_ax3, (v_ax1 * 8 + v_ax4) % 16
                ]
    @T.prim_func
    def after(A: T.Buffer((1, 3, 5, 5, 16), "float32"), C: T.Buffer((1, 6, 5, 5, 8), "float32")):
        B = T.alloc_buffer((1, 3, 5, 5, 16))
        for i0, i1 in T.grid(1, 3):
            for i2, i3, i4 in T.grid(5, 5, 16):
                with T.block("compute"):
                    v_i0, v_i1, v_i2, v_i3, v_i4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                    B[v_i0, v_i1, v_i2, v_i3, v_i4] = A[v_i0, v_i1, v_i2, v_i3, v_i4] + T.float32(1)
            # Each i1 covers 2 output channel groups (16 input ch -> 2 x 8).
            for ax0, ax1, ax2, ax3 in T.grid(2, 5, 5, 8):
                with T.block("T_layout_trans"):
                    v_ax0 = T.axis.spatial(1, 0)
                    v_ax1 = T.axis.spatial(6, i1 * 2 + ax0)
                    v_ax2, v_ax3, v_ax4 = T.axis.remap("SSS", [ax1, ax2, ax3])
                    C[v_ax0, v_ax1, v_ax2, v_ax3, v_ax4] = B[
                        v_ax0, (v_ax1 * 8 + v_ax4) // 16, v_ax2, v_ax3, (v_ax1 * 8 + v_ax4) % 16
                    ]
    sch = tir.Schedule(before, debug_mask="all")
    trans = sch.get_block("T_layout_trans")
    axis = sch.get_loops("compute")[1]
    sch.reverse_compute_at(trans, axis)
    tvm.ir.assert_structural_equal(after, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=before)
@pytest.mark.parametrize("use_decl_buffer", [True, False])
@pytest.mark.parametrize("use_reverse_compute_at", [True, False])
def test_compute_at_allocate_const(use_decl_buffer, use_reverse_compute_at):
    """compute_at / reverse_compute_at across a T.allocate_const buffer."""
    def apply_decl_buffer(*args, **kwargs):
        # Build the const view either as decl_buffer or a plain Buffer.
        if use_decl_buffer:
            return T.decl_buffer(*args, **kwargs)
        else:
            return T.Buffer(*args, **kwargs)
    @T.prim_func
    def before(A: T.Buffer([4, 256], "float32"), C: T.Buffer([4, 256], "float32")):
        B = T.alloc_buffer([4])
        offset_ptr = T.allocate_const([1.0, 2.0, 3.0, 4.0], dtype="float32", extents=[4])
        offset = apply_decl_buffer([4], data=offset_ptr)
        for i in range(4):
            with T.block("compute_B"):
                vi = T.axis.remap("S", [i])
                B[vi] = 10.0 * vi + offset[vi]
        for i, j in T.grid(4, 256):
            with T.block("compute_C"):
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = B[vi] + 100.0 * vj
    @T.prim_func
    def expected(A: T.Buffer([4, 256], "float32"), C: T.Buffer([4, 256], "float32")):
        B = T.alloc_buffer([4])
        offset_ptr = T.allocate_const([1.0, 2.0, 3.0, 4.0], dtype="float32", extents=[4])
        offset = apply_decl_buffer([4], data=offset_ptr)
        for i in range(4):
            with T.block("compute_B"):
                vi = T.axis.remap("S", [i])
                B[vi] = 10.0 * vi + offset[vi]
            for j in range(256):
                with T.block("compute_C"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    C[vi, vj] = B[vi] + 100.0 * vj
    sch = tir.Schedule(before, debug_mask="all")
    # Both directions produce the same fused loop nest in `expected`.
    if use_reverse_compute_at:
        block = sch.get_block("compute_C")
        axis = sch.get_loops("compute_B")[0]
        sch.reverse_compute_at(block, axis)
    else:
        block = sch.get_block("compute_B")
        axis = sch.get_loops("compute_C")[0]
        sch.compute_at(block, axis)
    after = sch.mod["main"]
    tvm.ir.assert_structural_equal(expected, after)
    verify_trace_roundtrip(sch=sch, mod=before)
@pytest.mark.parametrize("use_decl_buffer", [True, False])
def test_compute_inline_allocate_const(use_decl_buffer):
    """compute_inline of a block reading a T.allocate_const buffer."""
    def apply_decl_buffer(*args, **kwargs):
        # Build the const view either as decl_buffer or a plain Buffer.
        if use_decl_buffer:
            return T.decl_buffer(*args, **kwargs)
        else:
            return T.Buffer(*args, **kwargs)
    @T.prim_func
    def before(A: T.Buffer([4, 256], "float32"), C: T.Buffer([4, 256], "float32")):
        B = T.alloc_buffer([4])
        offset_ptr = T.allocate_const([1.0, 2.0, 3.0, 4.0], dtype="float32", extents=[4])
        offset = apply_decl_buffer([4], data=offset_ptr)
        for i in range(4):
            with T.block("compute_B"):
                vi = T.axis.remap("S", [i])
                B[vi] = 10.0 * vi + offset[vi]
        for i, j in T.grid(4, 256):
            with T.block("compute_C"):
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = B[vi] + 100.0 * vj
    @T.prim_func
    def expected(A: T.Buffer([4, 256], "float32"), C: T.Buffer([4, 256], "float32")):
        # B is gone after inlining; its expression is folded into compute_C.
        offset_ptr = T.allocate_const([1.0, 2.0, 3.0, 4.0], dtype="float32", extents=[4])
        offset = apply_decl_buffer([4], data=offset_ptr)
        for i, j in T.grid(4, 256):
            with T.block("compute_C"):
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = (10.0 * vi + offset[vi]) + 100.0 * vj
    sch = tir.Schedule(before, debug_mask="all")
    block = sch.get_block("compute_B")
    sch.compute_inline(block)
    after = sch.mod["main"]
    tvm.ir.assert_structural_equal(expected, after)
    verify_trace_roundtrip(sch=sch, mod=before)
def test_shape_var_as_bound():
    """reverse_compute_at where a symbolic shape var `n` bounds the fused loops."""
    # fmt: off
    @T.prim_func
    def before(a: T.handle, b: T.handle, c: T.handle):
        n = T.int32()
        A = T.match_buffer(a, (32, 1, 128))
        B = T.match_buffer(b, (32, n, 128))
        C = T.match_buffer(c, (32, 1, n))
        # with T.block("root"):
        C_rf = T.alloc_buffer((128, 32, 1, n))
        for ax0_ax1_fused, ax2_fused_1, ax2_fused_0 in T.grid(n * 32, 128, 1):
            with T.block("NT_matmul_rf"):
                vax2_fused_1 = T.axis.spatial(128, ax2_fused_1)
                v0 = T.axis.spatial(32, ax0_ax1_fused // n)
                v1 = T.axis.spatial(n, ax0_ax1_fused % n)
                vax2_fused_0 = T.axis.reduce(1, ax2_fused_0)
                T.reads(A[v0, 0, vax2_fused_0 * 128 + vax2_fused_1], B[v0, v1, vax2_fused_0 * 128 + vax2_fused_1])
                T.writes(C_rf[vax2_fused_1, v0, 0, v1])
                with T.init():
                    C_rf[vax2_fused_1, v0, 0, v1] = T.float32(0)
                C_rf[vax2_fused_1, v0, 0, v1] = C_rf[vax2_fused_1, v0, 0, v1] + A[v0, 0, vax2_fused_0 * 128 + vax2_fused_1] * B[v0, v1, vax2_fused_0 * 128 + vax2_fused_1]
        for ax0_ax1_fused, ax2_fused_1 in T.grid(n * 32, 128):
            with T.block("NT_matmul"):
                vax2_fused_1 = T.axis.reduce(128, ax2_fused_1)
                v0 = T.axis.spatial(32, ax0_ax1_fused // n)
                v1 = T.axis.spatial(n, ax0_ax1_fused % n)
                T.reads(C_rf[vax2_fused_1, v0, 0, v1])
                T.writes(C[v0, 0, v1])
                with T.init():
                    C[v0, 0, v1] = T.float32(0)
                C[v0, 0, v1] = C[v0, 0, v1] + C_rf[vax2_fused_1, v0, 0, v1]
    @T.prim_func
    def expected(A: T.Buffer((32, 1, 128), "float32"), b: T.handle, c: T.handle):
        n = T.int32()
        B = T.match_buffer(b, (32, n, 128))
        C = T.match_buffer(c, (32, 1, n))
        # with T.block("root"):
        C_rf = T.alloc_buffer((128, 32, 1, n))
        for ax0_ax1_fused in range(n * 32):
            for ax2_fused_1, ax2_fused_0 in T.grid(128, 1):
                with T.block("NT_matmul_rf"):
                    vax2_fused_1 = T.axis.spatial(128, ax2_fused_1)
                    v0 = T.axis.spatial(32, ax0_ax1_fused // n)
                    v1 = T.axis.spatial(n, ax0_ax1_fused % n)
                    vax2_fused_0 = T.axis.reduce(1, ax2_fused_0)
                    T.reads(A[v0, 0, vax2_fused_0 * 128 + vax2_fused_1], B[v0, v1, vax2_fused_0 * 128 + vax2_fused_1])
                    T.writes(C_rf[vax2_fused_1, v0, 0, v1])
                    with T.init():
                        C_rf[vax2_fused_1, v0, 0, v1] = T.float32(0)
                    C_rf[vax2_fused_1, v0, 0, v1] = C_rf[vax2_fused_1, v0, 0, v1] + A[v0, 0, vax2_fused_0 * 128 + vax2_fused_1] * B[v0, v1, vax2_fused_0 * 128 + vax2_fused_1]
            # Unit loops ax1/ax2 preserved because preserve_unit_loops=True.
            for ax0, ax1, ax2 in T.grid(128, 1, 1):
                with T.block("NT_matmul"):
                    vax2_fused_1 = T.axis.reduce(128, ax0)
                    v0 = T.axis.spatial(32, ax0_ax1_fused // n + ax1)
                    v1 = T.axis.spatial(n, ax0_ax1_fused % n + ax2)
                    T.reads(C_rf[vax2_fused_1, v0, 0, v1])
                    T.writes(C[v0, 0, v1])
                    with T.init():
                        C[v0, 0, v1] = T.float32(0)
                    C[v0, 0, v1] = C[v0, 0, v1] + C_rf[vax2_fused_1, v0, 0, v1]
    # fmt: on
    sch = tir.Schedule(before, debug_mask="all")
    block = sch.get_block("NT_matmul")
    loop, _, _ = sch.get_loops(sch.get_block("NT_matmul_rf"))
    sch.reverse_compute_at(block, loop, preserve_unit_loops=True)
    tvm.ir.assert_structural_equal(sch.mod["main"], expected, True)
if __name__ == "__main__":
    # Dispatch all tests in this file through TVM's pytest-based entry point.
    tvm.testing.main()
| 88,534 | 45.67106 | 302 | py |
tvm | tvm-main/tests/python/unittest/test_runtime_module_export.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm import relay
from tvm.relay import testing
import tvm
from tvm import te
import tvm.testing
from tvm.contrib import utils
import os
header_file_dir_path = utils.tempdir()
def gen_engine_header():
    """Write a minimal gcc_engine.h (declaring an empty class Engine) into the
    shared temp directory so the generated C source module can include it."""
    code = r"""
        #ifndef _ENGINE_H_
        #define _ENGINE_H_
        #include <cstdint>
        #include <string>
        #include <sstream>
        #include <vector>
        class Engine {
        };

        #endif
        """
    header_file = header_file_dir_path.relpath("gcc_engine.h")
    with open(header_file, "w") as f:
        f.write(code)
def generate_engine_module():
    """Build a CSourceModule whose code includes the dummy gcc_engine.h header.

    Writes the header first (via ``gen_engine_header``), then wraps the C++
    snippet below in a TVM C-source runtime module and returns it.
    """
    import tvm.runtime._ffi_api

    source = r"""
        #include <tvm/runtime/c_runtime_api.h>
        #include <dlpack/dlpack.h>
        #include "gcc_engine.h"
        extern "C" void gcc_1_(float* gcc_input4, float* gcc_input5,
                float* gcc_input6, float* gcc_input7, float* out) {
            Engine engine;
        }
        """
    gen_engine_header()
    return tvm.runtime._ffi_api.CSourceModuleCreate(source, "cc", [], None)
@tvm.testing.uses_gpu
def test_mod_export():
    """Round-trip modules through export_library()/load_module() in several
    configurations: GPU+CPU libs, multiple DSO libs, a JSON runtime importing
    a DSO, and a relay lib importing C-source modules.
    """
    # Case 1: a CUDA relay lib importing an LLVM relay lib; after reload the
    # DSO parts are merged into one "library" module with a single cuda import.
    def verify_gpu_mod_export(obj_format):
        for device in ["llvm", "cuda"]:
            if not tvm.testing.device_enabled(device):
                print("skip because %s is not enabled..." % device)
                return
        synthetic_mod, synthetic_params = relay.testing.synthetic.get_workload()
        synthetic_llvm_mod, synthetic_llvm_params = relay.testing.synthetic.get_workload()
        with tvm.transform.PassContext(opt_level=3):
            _, synthetic_gpu_lib, _ = relay.build_module.build(
                synthetic_mod, "cuda", params=synthetic_params, mod_name="cudalib"
            )
            _, synthetic_llvm_cpu_lib, _ = relay.build_module.build(
                synthetic_llvm_mod, "llvm", params=synthetic_llvm_params, mod_name="llvmlib"
            )
        temp = utils.tempdir()
        if obj_format == ".so":
            file_name = "deploy_lib.so"
        else:
            assert obj_format == ".tar"
            file_name = "deploy_lib.tar"
        path_lib = temp.relpath(file_name)
        synthetic_gpu_lib.import_module(synthetic_llvm_cpu_lib)
        synthetic_gpu_lib.export_library(path_lib)
        loaded_lib = tvm.runtime.load_module(path_lib)
        assert loaded_lib.type_key == "library"
        assert loaded_lib.imported_modules[0].type_key == "cuda"
        # dso modules are merged together
        assert len(loaded_lib.imported_modules) == 1
    # Case 2: two plain LLVM modules; both merge into a single DSO with no
    # remaining imported modules after reload.
    def verify_multi_dso_mod_export(obj_format):
        for device in ["llvm"]:
            if not tvm.testing.device_enabled(device):
                print("skip because %s is not enabled..." % device)
                return
        A = te.placeholder((1024,), name="A")
        B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
        s = te.create_schedule(B.op)
        mod0 = tvm.build(s, [A, B], "llvm", name="myadd0")
        mod1 = tvm.build(s, [A, B], "llvm", name="myadd1")
        temp = utils.tempdir()
        if obj_format == ".so":
            file_name = "deploy_lib.so"
        else:
            assert obj_format == ".tar"
            file_name = "deploy_lib.tar"
        path_lib = temp.relpath(file_name)
        mod0.import_module(mod1)
        mod0.export_library(path_lib)
        loaded_lib = tvm.runtime.load_module(path_lib)
        assert loaded_lib.type_key == "library"
        # dso modules are merged
        assert len(loaded_lib.imported_modules) == 0
    # Case 3: an "examplejson" runtime module importing a DSO module; after
    # reload the JSON module is the root and the DSO is its import.
    def verify_json_import_dso(obj_format):
        for device in ["llvm"]:
            if not tvm.testing.device_enabled(device):
                print("skip because %s is not enabled..." % device)
                return
        # Get subgraph Json.
        subgraph_json = (
            "json_rt_0\n"
            + "input 0 10 10\n"
            + "input 1 10 10\n"
            + "input 2 10 10\n"
            + "input 3 10 10\n"
            + "add 4 inputs: 0 1 shape: 10 10\n"
            + "sub 5 inputs: 4 2 shape: 10 10\n"
            + "mul 6 inputs: 5 3 shape: 10 10\n"
            + "json_rt_1\n"
            + "input 0 10 10\n"
            + "input 1 10 10\n"
            + "input 2 10 10\n"
            + "input 3 10 10\n"
            + "add 4 inputs: 0 1 shape: 10 10\n"
            + "sub 5 inputs: 4 2 shape: 10 10\n"
            + "mul 6 inputs: 5 3 shape: 10 10"
        )
        temp = utils.tempdir()
        subgraph_path = temp.relpath("subgraph.examplejson")
        with open(subgraph_path, "w") as f:
            f.write(subgraph_json)
        # Get Json and module.
        A = te.placeholder((1024,), name="A")
        B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
        s = te.create_schedule(B.op)
        f = tvm.build(s, [A, B], "llvm", name="myadd")
        # NOTE(review): bare `except:` swallows every exception here; it is
        # intended as "skip if the examplejson loader isn't registered" but
        # would also hide unrelated failures.
        try:
            ext_lib = tvm.runtime.load_module(subgraph_path, "examplejson")
        except:
            print("skip because Loader of examplejson is not presented")
            return
        ext_lib.import_module(f)
        if obj_format == ".so":
            file_name = "deploy_lib.so"
        else:
            assert obj_format == ".tar"
            file_name = "deploy_lib.tar"
        path_lib = temp.relpath(file_name)
        ext_lib.export_library(path_lib)
        lib = tvm.runtime.load_module(path_lib)
        assert lib.type_key == "examplejson"
        assert lib.imported_modules[0].type_key == "library"
    # Case 4: a relay lib importing a C-source module and a custom C-source
    # engine module; compiled with external gcc options and a workspace dir.
    def verify_multi_c_mod_export():
        from shutil import which
        if which("gcc") is None:
            print("Skip test because gcc is not available.")
        for device in ["llvm"]:
            if not tvm.testing.device_enabled(device):
                print("skip because %s is not enabled..." % device)
                return
        synthetic_mod, synthetic_params = relay.testing.synthetic.get_workload()
        with tvm.transform.PassContext(opt_level=3):
            _, synthetic_cpu_lib, _ = relay.build_module.build(
                synthetic_mod, "llvm", params=synthetic_params
            )
        A = te.placeholder((1024,), name="A")
        B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
        s = te.create_schedule(B.op)
        f = tvm.build(s, [A, B], "c", name="myadd")
        engine_module = generate_engine_module()
        temp = utils.tempdir()
        file_name = "deploy_lib.so"
        path_lib = temp.relpath(file_name)
        synthetic_cpu_lib.import_module(f)
        synthetic_cpu_lib.import_module(engine_module)
        kwargs = {"options": ["-O2", "-std=c++17", "-I" + header_file_dir_path.relpath("")]}
        work_dir = temp.relpath("work_dir")
        os.mkdir(work_dir)
        synthetic_cpu_lib.export_library(path_lib, fcompile=False, workspace_dir=work_dir, **kwargs)
        # Intermediate objects must land in the requested workspace directory.
        assert os.path.exists(os.path.join(work_dir, "devc.o"))
        loaded_lib = tvm.runtime.load_module(path_lib)
        assert loaded_lib.type_key == "library"
        # dso modules are merged
        assert len(loaded_lib.imported_modules) == 0
    for obj_format in [".so", ".tar"]:
        verify_gpu_mod_export(obj_format)
        verify_multi_dso_mod_export(obj_format)
        verify_json_import_dso(obj_format)
    verify_multi_c_mod_export()
@tvm.testing.requires_llvm
def test_import_static_library():
    """Save an LLVM module as a .o, reload it as a static_library module,
    import it into another module, and verify both functions survive in the
    final exported DSO (statically linked, no remaining imports).
    """
    # Generate two LLVM modules.
    A = te.placeholder((1024,), name="A")
    B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
    s = te.create_schedule(B.op)
    mod0 = tvm.build(s, [A, B], "llvm", name="myadd0")
    mod1 = tvm.build(s, [A, B], "llvm", name="myadd1")
    assert mod0.implements_function("myadd0")
    assert mod1.implements_function("myadd1")
    assert mod1.is_dso_exportable
    # mod1 is currently an 'llvm' module.
    # Save and reload it as a vanilla 'static_library'.
    temp = utils.tempdir()
    mod1_o_path = temp.relpath("mod1.o")
    mod1.save(mod1_o_path)
    mod1_o = tvm.runtime.load_static_library(mod1_o_path, ["myadd1"])
    assert mod1_o.implements_function("myadd1")
    assert mod1_o.is_dso_exportable
    # Import mod1 as a static library into mod0 and compile to its own DSO.
    mod0.import_module(mod1_o)
    mod0_dso_path = temp.relpath("mod0.so")
    mod0.export_library(mod0_dso_path)
    # The imported mod1 is statically linked into mod0.
    loaded_lib = tvm.runtime.load_module(mod0_dso_path)
    assert loaded_lib.type_key == "library"
    assert len(loaded_lib.imported_modules) == 0
    assert loaded_lib.implements_function("myadd0")
    assert loaded_lib.get_function("myadd0")
    assert loaded_lib.implements_function("myadd1")
    assert loaded_lib.get_function("myadd1")
    # A loaded DSO cannot itself be re-exported as a static object.
    assert not loaded_lib.is_dso_exportable
if __name__ == "__main__":
tvm.testing.main()
| 9,574 | 35.268939 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_set_scope.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
@T.prim_func
def element_wise(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")) -> None:
    # Two-stage elementwise workload: B = A * 2, then C = B + 1.
    # B is an intermediate buffer, so set_scope may legally retarget it.
    B = T.alloc_buffer((128, 128), dtype="float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def element_wise_set_scope(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")) -> None:
    # Expected result of set_scope(B, "shared") on `element_wise`:
    # the intermediate buffer is renamed B_shared and allocated in shared memory.
    B_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_shared[vi, vj] = A[vi, vj] * T.float32(2)
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B_shared[vi, vj] + T.float32(1)
@T.prim_func
def element_wise_subregion_match(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")) -> None:
    # Same workload as `element_wise`, but each block accesses B through a
    # 0-d match_buffer subregion — exercises set_scope's subregion rewriting.
    B = T.alloc_buffer((128, 128), dtype="float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion0 = T.match_buffer(B[vi, vj], [], offset_factor=1)
            B_subregion0[()] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion1 = T.match_buffer(B[vi, vj], [], offset_factor=1)
            C[vi, vj] = B_subregion1[()] + 1.0
@T.prim_func
def element_wise_subregion_match_set_scope(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")) -> None:
    # Expected result of set_scope on `element_wise_subregion_match`:
    # both the allocation and every match_buffer subregion move to shared scope.
    B_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion0_shared = T.match_buffer(B_shared[vi, vj], [], dtype="float32", scope="shared", offset_factor=1)
            B_subregion0_shared[()] = A[vi, vj] * T.float32(2)
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion1_shared = T.match_buffer(B_shared[vi, vj], [], dtype="float32", scope="shared", offset_factor=1)
            C[vi, vj] = B_subregion1_shared[()] + T.float32(1)
# pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
# Parametrize each test over addressing the block/buffer by object vs. by name.
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
use_buffer_name = tvm.testing.parameter(by_dict={"buffer_index": False, "buffer_name": True})
def test_set_scope(use_block_name, use_buffer_name):
    """set_scope moves the intermediate buffer B to shared memory."""
    mod = element_wise
    sch = tir.Schedule(mod, debug_mask='all')
    if use_block_name:
        block = 'B'
    else:
        block = sch.get_block("B")
    buf = 'B' if use_buffer_name else 0
    sch.set_scope(block, buf, "shared")
    tvm.ir.assert_structural_equal(element_wise_set_scope, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=mod)
def test_set_scope_fail_on_output_buffer(use_block_name, use_buffer_name):
    """Retargeting an output buffer (C) must raise a ScheduleError."""
    sch = tir.Schedule(element_wise, debug_mask='all')
    block = 'C' if use_block_name else sch.get_block("C")
    buf = 'C' if use_buffer_name else 0
    with pytest.raises(tvm.tir.ScheduleError):
        sch.set_scope(block, buf, "shared")
def test_set_scope_fail_on_index_out_of_bound():
    """Write-buffer indices outside the block's range must raise."""
    sch = tir.Schedule(element_wise, debug_mask='all')
    for bad_index in (1, -1):
        with pytest.raises(tvm.tir.ScheduleError):
            sch.set_scope(sch.get_block("B"), bad_index, "shared")
def test_set_scope_fail_on_invalid_scope():
    """An unrecognized storage scope name must raise a ScheduleError."""
    sch = tir.Schedule(element_wise, debug_mask='all')
    block = sch.get_block("B")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.set_scope(block, 0, "test_scope")
def test_set_scope_subregion():
    """set_scope also rewrites match_buffer subregions of the target buffer."""
    mod = element_wise_subregion_match
    sch = tir.Schedule(mod, debug_mask='all')
    sch.set_scope(sch.get_block("B"), 0, "shared")
    tvm.ir.assert_structural_equal(element_wise_subregion_match_set_scope, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=mod)
if __name__ == "__main__":
tvm.testing.main()
| 5,331 | 38.791045 | 123 | py |
tvm | tvm-main/tests/python/unittest/test_tir_reorder_block_iter_var.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
@T.prim_func
def matmul(
    A: T.Buffer((128, 128), "float32"),
    B: T.Buffer((128, 128), "float32"),
    C: T.Buffer((128, 128), "float32"),
) -> None:
    # NT matmul: C[i, j] = sum_k A[i, k] * B[j, k], with iter vars in SSR order.
    for i, j, k in T.grid(128, 128, 128):
        with T.block("C"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def matmul_after_reorder_block_iter_var(
    A: T.Buffer((128, 128), "float32"),
    B: T.Buffer((128, 128), "float32"),
    C: T.Buffer((128, 128), "float32"),
):
    # Expected IR after reorder_block_iter_var(C, [2, 1, 0]): the block's
    # iter vars are declared in reversed (RSS) order; loops are unchanged.
    for i, j, k in T.grid(128, 128, 128):
        with T.block("C"):
            vk, vj, vi = T.axis.remap("RSS", [k, j, i])
            T.reads(A[vi, vk], B[vj, vk])
            T.writes(C[vi, vj])
            with T.init():
                C[vi, vj] = T.float32(0)
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
def test_reorder_block_iter_var():
    """Reversing block C's iter-var order produces the expected module."""
    sch = tir.Schedule(matmul, debug_mask="all")
    block = sch.get_block("C")
    sch.reorder_block_iter_var(block, [2, 1, 0])
    tvm.ir.assert_structural_equal(matmul_after_reorder_block_iter_var, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=matmul)
def test_reorder_block_iter_var_fail_not_full():
    """The new order must mention every iter var of the block."""
    sch = tir.Schedule(matmul, debug_mask="all")
    block = sch.get_block("C")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder_block_iter_var(block, [2, 1])
def test_reorder_block_iter_var_fail_not_within_bound():
    """Indices outside [0, num_iter_vars) must be rejected."""
    sch = tir.Schedule(matmul, debug_mask="all")
    block = sch.get_block("C")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder_block_iter_var(block, [-1, 3, 2])
def test_reorder_block_iter_var_fail_not_unique():
    """A permutation with repeated indices must be rejected."""
    sch = tir.Schedule(matmul, debug_mask="all")
    block = sch.get_block("C")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder_block_iter_var(block, [0, 0, 2])
if __name__ == "__main__":
tvm.testing.main()
| 2,870 | 32 | 88 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_arg_info.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
from tvm.meta_schedule.arg_info import ArgInfo, TensorInfo
from tvm.script import tir as T
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
# fmt: off
@T.prim_func
def Matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    # Workload used only as an ArgInfo source: the tests read the three
    # buffer shapes/dtypes, not the compute body.
    T.func_attr({"global_symbol": "main"})
    A = T.match_buffer(a, (128, 256), "float32")
    B = T.match_buffer(b, (256, 512), "float32")
    C = T.match_buffer(c, (128, 512), "float32")
    # NOTE(review): loop extents (128, 256, 512) pair vj with extent 256 and
    # vk with 512, which doesn't match C's (128, 512) shape — harmless for
    # these tests (only buffer signatures are inspected), but confirm upstream.
    for i, j, k in T.grid(128, 256, 512):
        with T.block("matmul"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# fmt: on
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
def test_meta_schedule_tensor_info_creation():
    """str() of a TensorInfo shows its dtype and shape."""
    info = TensorInfo("float32", [1, 224, 224, 3])
    assert str(info) == 'TensorInfo("float32", [1, 224, 224, 3])'
def test_meta_schedule_tensor_info_as_json():
    """as_json() serializes a TensorInfo to the ["TENSOR", dtype, shape] form."""
    json_repr = TensorInfo("float32", [1, 224, 224, 3]).as_json()
    assert json_repr == ["TENSOR", "float32", [1, 224, 224, 3]]
def test_meta_schedule_tensor_info_from_json():
    """from_json() round-trips the ["TENSOR", dtype, shape] form back."""
    parsed = TensorInfo.from_json(["TENSOR", "float32", [1, 224, 224, 3]])
    assert str(parsed) == 'TensorInfo("float32", [1, 224, 224, 3])'
def test_meta_schedule_arg_info_from_prim_func():
    """ArgInfo.from_prim_func extracts one TensorInfo per buffer argument."""
    expected = [
        'TensorInfo("float32", [128, 256])',
        'TensorInfo("float32", [256, 512])',
        'TensorInfo("float32", [128, 512])',
    ]
    actual = [str(info) for info in ArgInfo.from_prim_func(Matmul)]
    assert actual == expected
if __name__ == "__main__":
test_meta_schedule_tensor_info_creation()
test_meta_schedule_tensor_info_as_json()
test_meta_schedule_tensor_info_from_json()
test_meta_schedule_arg_info_from_prim_func()
| 2,778 | 37.597222 | 94 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_mutator_mutate_tile_size.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import operator
from functools import reduce
from typing import List
from tvm import meta_schedule as ms
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir import Schedule
# pylint: disable=invalid-name, no-member
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    # 512x512 NT matmul (B indexed as B[vj, vk]) used by the mutator tests;
    # the reduction extent 512 is what every mutated tile product must equal.
    A = T.match_buffer(a, [512, 512])
    B = T.match_buffer(b, [512, 512])
    C = T.match_buffer(c, [512, 512])
    for i, j, k in T.grid(512, 512, 512):  # type: ignore
        with T.block("C"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])  # type: ignore
            with T.init():
                C[vi, vj] = 0.0  # type: ignore
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# pylint: enable=invalid-name, no-member
def _sch(decisions: List[List[int]]) -> Schedule:
    """Build a tiled schedule of `matmul` whose trace contains exactly one
    SamplePerfectTile instruction (seeded with `decisions[0]`) for the
    MutateTileSize mutator to act on.
    """
    sch = Schedule(matmul, debug_mask="all")
    # pylint: disable=invalid-name
    (d0,) = decisions
    b0 = sch.get_block(name="C", func_name="main")
    sch.get_consumers(block=b0)
    b1 = sch.cache_write(block=b0, write_buffer_index=0, storage_scope="global")
    l2, l3, l4 = sch.get_loops(block=b0)
    # The only sampled decision in the trace — index 4 in sch.trace.insts.
    v5, v6, v7, v8 = sch.sample_perfect_tile(
        loop=l2,
        n=4,
        max_innermost_factor=64,
        decision=d0,
    )
    l9, l10, l11, l12 = sch.split(loop=l2, factors=[v5, v6, v7, v8])
    l17, l18, l19, l20 = sch.split(loop=l3, factors=[8, 4, 8, 2])
    l23, l24 = sch.split(loop=l4, factors=[512, 1])
    sch.reorder(l9, l17, l10, l18, l23, l11, l19, l24, l12, l20)
    sch.reverse_compute_at(block=b1, loop=l18, preserve_unit_loops=True)
    # pylint: enable=invalid-name
    return sch
def _make_mutator(target: Target) -> ms.Mutator:
    """Create a TuneContext whose only mutator is MutateTileSize, and return
    that mutator instance."""
    context = ms.TuneContext(
        mod=matmul,
        target=target,
        space_generator=ms.space_generator.PostOrderApply(
            sch_rules=[],
            postprocs=[],
            mutator_probs={ms.mutator.MutateTileSize(): 1.0},
        ),
    )
    return next(iter(context.space_generator.mutator_probs))
def test_mutate_tile_size_matmul():
    """Repeated MutateTileSize applications keep the tile product equal to the
    loop extent (512) and explore many distinct decompositions."""
    mutator = _make_mutator(
        target=Target("llvm --num-cores=16"),
    )
    results = {}
    sch = _sch(decisions=[[4, 32, 4, 1]])
    for _ in range(1000):
        trace = mutator.apply(sch.trace)
        # Instruction 4 is the SamplePerfectTile planted by _sch.
        assert trace.insts[4].kind.name == "SamplePerfectTile"
        decision = trace.decisions[trace.insts[4]]
        decision = [int(x) for x in decision]
        results[str(decision)] = decision
        # Every mutated tiling must still perfectly factor the 512-extent loop.
        assert reduce(operator.mul, decision, 1) == 512
    assert len(results) > 15
def test_mutate_sample_categorical_single_candidate():
    """A SampleCategorical with one candidate cannot be mutated, so the
    mutator returns None instead of a new trace."""
    mutator = _make_mutator(
        target=Target("llvm --num-cores=16"),
    )
    sch = Schedule(matmul, debug_mask="all")
    sch.sample_categorical(candidates=[1], probs=[1.0], decision=0)
    # The mutator finds the SampleCategorical has only one candidate, and thus skips it.
    assert mutator.apply(sch.trace) is None
if __name__ == "__main__":
test_mutate_tile_size_matmul()
test_mutate_sample_categorical_single_candidate()
| 3,933 | 33.814159 | 93 | py |
tvm | tvm-main/tests/python/unittest/test_auto_scheduler_loop_state.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test loop state and schedule primitives"""
import numpy as np
import tvm
from tvm import auto_scheduler, te
from tvm import topi
from tvm.testing.auto_scheduler import (
matmul_auto_scheduler_test,
conv2d_nchw_bn_relu_auto_scheduler_test,
)
def test_split_fuse_reorder_annotation():
    """Exercise split/fuse/reorder and the iterator annotations (bind,
    parallel, unroll, vectorize) on a 512^3 matmul loop state."""
    A, B, C = matmul_auto_scheduler_test(N=512, M=512, K=512)
    dag = auto_scheduler.ComputeDAG([A, B, C])
    s0 = dag.get_init_state()
    i, j, k = s0[C].iters
    assert i.range.extent == 512
    # split with inner factor 16 -> outer 32, inner 16
    io, ii = s0.split(C, i, [16])
    assert s0[C].iters[0] == io
    assert s0[C].iters[1] == ii
    assert io.range.extent == 32
    assert ii.range.extent == 16
    jo, ji = s0.split(C, j, [8])
    assert jo.range.extent == 64
    assert ji.range.extent == 8
    s0.reorder(C, [io, jo, k, ji, ii])
    assert s0[C].iters[2].range.extent == 512
    # fusing the two outer iterators multiplies their extents: 32 * 64
    fused_it = s0.fuse(C, [io, jo])
    assert fused_it.range.extent == 2048
    s1 = dag.get_init_state()
    i, j, _ = s1[C].iters
    # inner_to_outer=True (default) vs. False changes factor interpretation
    i1, i2, i3 = s1.split(C, i, [8, 2])
    j1, j2, j3 = s1.split(C, j, [32, 8], False)
    assert s1[C].iters[0].range.extent == 32
    assert s1[C].iters[1].range.extent == 8
    assert s1[C].iters[2].range.extent == 2
    assert s1[C].iters[3].range.extent == 32
    assert s1[C].iters[4].range.extent == 8
    assert s1[C].iters[5].range.extent == 2
    res = s1.bind(C, i1, "blockIdx.x")
    assert res == s1[C].iters[0]
    assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["blockIdx.x"]
    res = s1.bind(C, i2, "vthread")
    assert res == s1[C].iters[1]
    assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["vthread"]
    res = s1.bind(C, i3, "threadIdx.y")
    assert res == s1[C].iters[2]
    assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["threadIdx.y"]
    res = s1.parallel(C, j1)
    assert res == s1[C].iters[3]
    assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["parallel"]
    res = s1.unroll(C, j2)
    assert res == s1[C].iters[4]
    assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["unroll"]
    res = s1.vectorize(C, j3)
    assert res == s1[C].iters[5]
    assert res.annotation == auto_scheduler.loop_state.State.ANNOTATION_TRANS_TABLE["vectorize"]
def test_compute_at_root_inline():
    """Exercise compute_inline / compute_at / compute_root on a
    conv2d+bn+relu DAG and check the inferred iteration bounds."""
    dag = auto_scheduler.ComputeDAG(
        conv2d_nchw_bn_relu_auto_scheduler_test(
            N=1, H=224, W=224, CI=3, CO=64, kernel_size=7, strides=2, padding=3
        )
    )
    s0 = dag.get_init_state()
    # data, padding, kernel = 0, 1, 2
    conv = s0.stage_ops[3]
    # bias = 4
    bias_add = s0.stage_ops[5]
    # bn_scale = 6
    bn_mul = s0.stage_ops[7]
    # bn_offset = 8
    bn_add = s0.stage_ops[9]
    relu = s0.stage_ops[10]
    # compute_at == 1 marks a stage as inlined in the loop-state encoding.
    s0.compute_inline(bn_add)
    assert s0[bn_add].compute_at == 1
    s0.compute_inline(bn_mul)
    assert s0[bn_mul].compute_at == 1
    s0.compute_inline(bias_add)
    assert s0[bias_add].compute_at == 1
    assert s0[conv].iters[0].range.extent == 1
    assert s0[conv].iters[1].range.extent == 64
    assert s0[conv].iters[2].range.extent == 112
    assert s0[conv].iters[3].range.extent == 112
    assert s0[conv].iters[4].range.extent == 3
    assert s0[conv].iters[5].range.extent == 7
    assert s0[conv].iters[6].range.extent == 7
    # Attaching conv under relu shrinks the outer spatial extents to 1
    # once bounds are re-inferred.
    s0.compute_at(conv, relu, s0[relu].iters[2])
    assert s0[conv].compute_at == 2
    s0 = dag.infer_bound_from_state(s0)
    assert s0[conv].iters[0].range.extent == 1
    assert s0[conv].iters[1].range.extent == 1
    assert s0[conv].iters[2].range.extent == 1
    assert s0[conv].iters[3].range.extent == 112
    assert s0[conv].iters[4].range.extent == 3
    assert s0[conv].iters[5].range.extent == 7
    assert s0[conv].iters[6].range.extent == 7
    # compute_root restores the stage to the root scope (compute_at == 0).
    s0.compute_root(bn_mul)
    assert s0[bn_mul].compute_at == 0
    s0.compute_root(conv)
    assert s0[conv].compute_at == 0
    s0 = dag.infer_bound_from_state(s0)
    assert s0[conv].iters[0].range.extent == 1
    assert s0[conv].iters[1].range.extent == 64
    assert s0[conv].iters[2].range.extent == 112
    assert s0[conv].iters[3].range.extent == 112
    assert s0[conv].iters[4].range.extent == 3
    assert s0[conv].iters[5].range.extent == 7
    assert s0[conv].iters[6].range.extent == 7
def test_cache_read_write():
    """Exercise cache_read/cache_write on a conv2d DAG: simple caches with
    compute_at, two-level cache_read, cache_read with multiple readers, and
    cache_write on a multi-output stage (known buggy — see note below).
    The triple-quoted strings are no-op expression statements that sketch the
    expected loop structure at each step.
    """
    N, H, W, CO, CI, KH, KW, strides, padding = 4, 7, 7, 512, 512, 3, 3, (1, 1), (1, 1)
    data = te.placeholder((N, CI, H, W), name="Data")
    kernel_data = te.placeholder((CO, CI, KH, KW), name="Kernel_data")
    k0, k1 = te.compute(
        kernel_data.shape,
        lambda *i: (kernel_data(*i) + 1, kernel_data(*i) / 2),
        name="Kernel_split",
    )
    kernel = te.compute(kernel_data.shape, lambda *i: k0(*i) + k1(*i), name="Kernel")
    conv = topi.nn.conv2d_nchw(data, kernel, strides, padding, dilation=1)
    relu = topi.nn.relu(conv)
    add = topi.add(data, relu)
    dag = auto_scheduler.ComputeDAG([data, kernel_data, add])
    s0 = dag.get_init_state()
    pad_temp = s0.stage_ops[1]
    kernel_split = s0.stage_ops[3]
    # 0: init state
    ori_its = s0[add].iters
    its = s0.split(add, s0[add].iters[0], [2])
    s0.reorder(add, [its[0], ori_its[1], its[1], ori_its[2], ori_its[3]])
    s0.compute_inline(relu)
    # 1: simple cache_write with compute_at
    conv_global = s0.cache_write(conv, "global")
    s0.compute_at(conv_global, conv, s0[conv].iters[3])
    # 2: simple cache_read with compute_at
    kernel_global = s0.cache_read(kernel, "global", [conv_global])
    s0.compute_at(kernel_global, conv_global, s0[conv_global].iters[4])
    """
    Placeholder: Data, Kernel_data
    for i0 (0,4)
      for i1 (0,512)
        for i2 (0,9)
          for i3 (0,9)
            pad_temp = ...
    for i0 (0,512)
      for i1 (0,512)
        for i2 (0,3)
          for i3 (0,3)
            Kernel_split = ...
    for i0 (0,512)
      for i1 (0,512)
        for i2 (0,3)
          for i3 (0,3)
            Kernel = ...
    for nn (0,4)
      for ff (0,512)
        for yy (0,7)
          for xx (0,7)
            for nn_c (None)
              for ff_c (None)
                for yy_c (None)
                  for xx_c (None)
                    for rc (None)
                      for ax0 (None)
                        for ax1 (None)
                          for ax2 (None)
                            for ax3 (None)
                              Kernel.global = ...
                      for ry (None)
                        for rx (None)
                          compute.global = ...
            compute = ...
    for ax0.0 (0,2)
      for ax1 (0,512)
        for ax0.1 (0,2)
          for ax2 (0,7)
            for ax3 (0,7)
              T_add = ...
    """
    s1 = dag.infer_bound_from_state(s0)
    assert s1[conv].iters[0].range.extent == 4
    assert s1[conv].iters[1].range.extent == 512
    assert s1[conv].iters[2].range.extent == 7
    assert s1[conv].iters[3].range.extent == 7
    assert s1[kernel_global].iters[0].range.extent == 1
    assert s1[kernel_global].iters[1].range.extent == 1
    assert s1[kernel_global].iters[2].range.extent == 3
    assert s1[kernel_global].iters[3].range.extent == 3
    assert s1[conv_global].iters[0].range.extent == 1
    assert s1[conv_global].iters[1].range.extent == 1
    assert s1[conv_global].iters[2].range.extent == 1
    assert s1[conv_global].iters[3].range.extent == 1
    assert s1[conv_global].iters[4].range.extent == 512
    assert s1[conv_global].iters[5].range.extent == 3
    assert s1[conv_global].iters[6].range.extent == 3
    # 3: two level cache_read with compute_at
    # preparing for GPU's shared memory & local memory
    pad_temp_global = s0.cache_read(pad_temp, "global", [conv_global])
    pad_temp_shared = s0.cache_read(pad_temp_global, "shared", [conv_global])
    s0.compute_at(pad_temp_global, conv_global, s0[conv_global].iters[2])
    s0.compute_at(pad_temp_shared, conv_global, s0[conv_global].iters[4])
    # 4: cache_read with multi readers
    # This stage cannot be compute at to its consumer
    s0.cache_read(data, "global", [pad_temp, add])
    """
    Placeholder: Data, Kernel_data
    for ax0 (0,4)
      for ax1 (0,512)
        for ax2 (0,7)
          for ax3 (0,7)
            Data.global = ...
    for i0 (0,4)
      for i1 (0,512)
        for i2 (0,9)
          for i3 (0,9)
            pad_temp = ...
    for i0 (0,512)
      for i1 (0,512)
        for i2 (0,3)
          for i3 (0,3)
            Kernel_split = ...
    for i0 (0,512)
      for i1 (0,512)
        for i2 (0,3)
          for i3 (0,3)
            Kernel = ...
    for nn (0,4)
      for ff (0,512)
        for yy (0,7)
          for xx (0,7)
            for nn_c (None)
              for ff_c (None)
                for yy_c (None)
                  for ax0 (None)
                    for ax1 (None)
                      for ax2 (None)
                        for ax3 (None)
                          pad_temp.global = ...
                  for xx_c (None)
                    for rc (None)
                      for ax0 (None)
                        for ax1 (None)
                          for ax2 (None)
                            for ax3 (None)
                              Kernel.global = ...
                      for ax0 (None)
                        for ax1 (None)
                          for ax2 (None)
                            for ax3 (None)
                              pad_temp.global.shared = ...
                      for ry (None)
                        for rx (None)
                          compute.global = ...
            compute = ...
    for ax0.0 (0,2)
      for ax1 (0,512)
        for ax0.1 (0,2)
          for ax2 (0,7)
            for ax3 (0,7)
              T_add = ...
    """
    s1 = dag.infer_bound_from_state(s0)
    assert s1[conv].iters[0].range.extent == 4
    assert s1[conv].iters[1].range.extent == 512
    assert s1[conv].iters[2].range.extent == 7
    assert s1[conv].iters[3].range.extent == 7
    assert s1[kernel_global].iters[0].range.extent == 1
    assert s1[kernel_global].iters[1].range.extent == 1
    assert s1[kernel_global].iters[2].range.extent == 3
    assert s1[kernel_global].iters[3].range.extent == 3
    assert s1[conv_global].iters[0].range.extent == 1
    assert s1[conv_global].iters[1].range.extent == 1
    assert s1[conv_global].iters[2].range.extent == 1
    assert s1[conv_global].iters[3].range.extent == 1
    assert s1[conv_global].iters[4].range.extent == 512
    assert s1[conv_global].iters[5].range.extent == 3
    assert s1[conv_global].iters[6].range.extent == 3
    assert s1[pad_temp_global].iters[0].range.extent == 1
    assert s1[pad_temp_global].iters[1].range.extent == 512
    assert s1[pad_temp_global].iters[2].range.extent == 3
    assert s1[pad_temp_global].iters[3].range.extent == 3
    assert s1[pad_temp_shared].iters[0].range.extent == 1
    assert s1[pad_temp_shared].iters[1].range.extent == 1
    assert s1[pad_temp_shared].iters[2].range.extent == 3
    assert s1[pad_temp_shared].iters[3].range.extent == 3
    # 5: cache_write with multi outputs
    # TVM's cache_write actually has a bug with this case:
    #
    # After schedule.cache_write, TVM generate one new stage:
    # From: kernel_data -> kernel_split -> kernel
    # To: kernel_data -> kernel_split_global -> kernel_split -> kernel
    #
    # But with topo sort analyse, we get:
    # // kernel_data -> kernel_split_global -> kernel_split -> kernel
    # \                                                     /
    #  ----------------> kernel_split ---------------->
    #
    # TODO(jcf94): Seems there's bug with the input/output tensor. Such multi outputs case
    # should be unusual, so we make some hack on DoCacheWrite. This should be fixed later.
    kernel_split_global = s0.cache_write(kernel_split, "global")
    """
    Placeholder: Data, Kernel_data
    for ax0 (0,4)
      for ax1 (0,512)
        for ax2 (0,7)
          for ax3 (0,7)
            Data.global = ...
    for i0 (0,4)
      for i1 (0,512)
        for i2 (0,9)
          for i3 (0,9)
            pad_temp = ...
    for i0_c (0,512)
      for i1_c (0,512)
        for i2_c (0,3)
          for i3_c (0,3)
            Kernel_split.global = ...
    for i0 (0,512)
      for i1 (0,512)
        for i2 (0,3)
          for i3 (0,3)
            Kernel_split = ...
            (******* Bug here, there should not be two kernel_split stage *******)
    for i0 (0,512)
      for i1 (0,512)
        for i2 (0,3)
          for i3 (0,3)
            Kernel_split = ...
            (******* Bug here, there should not be two kernel_split stage *******)
    for i0 (0,512)
      for i1 (0,512)
        for i2 (0,3)
          for i3 (0,3)
            Kernel = ...
    for nn (0,4)
      for ff (0,512)
        for yy (0,7)
          for xx (0,7)
            for nn_c (None)
              for ff_c (None)
                for yy_c (None)
                  for ax0 (None)
                    for ax1 (None)
                      for ax2 (None)
                        for ax3 (None)
                          pad_temp.global = ...
                  for xx_c (None)
                    for rc (None)
                      for ax0 (None)
                        for ax1 (None)
                          for ax2 (None)
                            for ax3 (None)
                              Kernel.global = ...
                      for ax0 (None)
                        for ax1 (None)
                          for ax2 (None)
                            for ax3 (None)
                              pad_temp.global.shared = ...
                      for ry (None)
                        for rx (None)
                          compute.global = ...
            compute = ...
    for ax0.0 (0,2)
      for ax1 (0,512)
        for ax0.1 (0,2)
          for ax2 (0,7)
            for ax3 (0,7)
              T_add = ...
    """
    assert len(s0[kernel_split].iters) == len(s0[kernel_split_global].iters)
    for it0, it1 in zip(s0[kernel_split].iters, s0[kernel_split_global].iters):
        assert it0.range == it1.range
def test_follow_split_follow_fused_split():
    """follow_split / follow_fused_split mirror earlier split steps onto a cache stage."""
    A, B, C = matmul_auto_scheduler_test(512, 512, 512)
    dag = auto_scheduler.ComputeDAG([A, B, C])
    s0 = dag.get_init_state()
    C_global = s0.cache_write(C, "global")

    its0 = s0.split(C, s0[C].iters[0], [4, 2, 8, 4], True)
    split_step0 = len(s0.transform_steps) - 1
    # follow_split must reproduce the first `level` extents of split_step0.
    for level in range(1, 6):
        state = s0.copy()
        state.follow_split(C_global, state[C_global].iters[0], split_step0, level)
        for idx in range(level):
            assert (
                state[C].iters[idx].range.extent
                == state[C_global].iters[idx].range.extent
            )

    its1 = s0.split(C, s0[C].iters[5], [2, 2, 4, 8])
    split_step1 = len(s0.transform_steps) - 1
    # Interleave the two split results, then fuse pairwise from the front.
    interleaved = []
    for outer_it, inner_it in zip(its0, its1):
        interleaved.extend((outer_it, inner_it))
    s0.reorder(C, interleaved)
    for idx in range(5):
        s0.fuse(C, [s0[C].iters[idx], s0[C].iters[idx + 1]])

    # follow_fused_split: the matched extent comes from iterator 0 when
    # factor_or_nparts is False, and from iterator 1 when it is True.
    for use_nparts, matched_index in ((False, 0), (True, 1)):
        for level in range(4):
            state = s0.copy()
            state.follow_fused_split(
                C_global,
                state[C_global].iters[0],
                [split_step0, split_step1],
                level,
                use_nparts,
            )
            assert (
                state[C].iters[level + 1].range.extent
                == state[C_global].iters[matched_index].range.extent
            )
def test_rfactor():
    """rfactor materializes the reduction over the chosen split iterator."""
    A, B, C = matmul_auto_scheduler_test(8, 8, 512)
    dag = auto_scheduler.ComputeDAG([A, B, C])
    s0 = dag.get_init_state()
    ko, ki = s0.split(C, s0[C].iters[2], [16])

    # rfactor over the outer reduction iterator.
    s1 = s0.copy()
    C_r = s1.rfactor(C, ko, 2)
    """
    Placeholder: A, B
    for i (0,8)
      for j (0,8)
        for k_o (0,32)
          for k_i (0,16)
            C.rf = ...
    for ax0 (0,8)
      for ax1 (0,8)
        for k_o_v (0,32)
          C.repl = ...
    """
    for it, extent in zip(s1[C_r].iters, (8, 8, 32, 16)):
        assert it.range.extent == extent
    for it, extent in zip(s1[C].iters, (8, 8, 32)):
        assert it.range.extent == extent

    # rfactor over the inner reduction iterator.
    s2 = s0.copy()
    C_r = s2.rfactor(C, ki, 2)
    """
    Placeholder: A, B
    for i (0,8)
      for j (0,8)
        for k_i (0,16)
          for k_o (0,32)
            C.rf = ...
    for ax0 (0,8)
      for ax1 (0,8)
        for k_i_v (0,16)
          C.repl = ...
    """
    for it, extent in zip(s2[C_r].iters, (8, 8, 16, 32)):
        assert it.range.extent == extent
    for it, extent in zip(s2[C].iters, (8, 8, 16)):
        assert it.range.extent == extent
if __name__ == "__main__":
    # Run every loop-state test directly when invoked as a script.
    test_split_fuse_reorder_annotation()
    test_compute_at_root_inline()
    test_cache_read_write()
    test_follow_split_follow_fused_split()
    test_rfactor()
| 18,652 | 34.665392 | 98 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_storage_flatten.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.driver.build_module import schedule_to_module
from tvm.script import tir as T
from tvm.relay import GlobalVar
def test_flatten2():
    """StorageFlatten handles compute_at together with explicit buffer binds."""
    rows = te.size_var("m")
    cols = te.size_var("l")
    A = te.placeholder((rows, cols), name="A")
    A1 = te.compute((rows, cols), lambda i, j: A[i, j], name="A1")
    A2 = te.compute((rows, cols), lambda i, j: A1[i, j] + 3, name="A2")

    sch = te.create_schedule(A2.op)
    outer, _inner = sch[A2].split(A2.op.axis[0], 8)
    sch[A1].compute_at(sch[A2], outer)

    buf_a = tvm.tir.decl_buffer(A.shape, A.dtype, name="A")
    buf_a2 = tvm.tir.decl_buffer(A2.shape, A2.dtype, name="A2")
    mod = schedule_to_module(sch, [buf_a, buf_a2], binds={A: buf_a, A2: buf_a2})
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
def test_flatten_prefetch():
    """Prefetch regions are flattened into single-index buffer loads."""
    A = te.placeholder((25, 100, 4), name="A")
    A_buf = tvm.tir.decl_buffer(A.shape, A.dtype, name="A")
    i = te.size_var("i")
    j = te.size_var("j")
    bounds = [(i, 2), (j, 8), (0, 4)]
    region = [tvm.ir.Range.from_min_extent(lo, extent) for lo, extent in bounds]
    stmt = tvm.tir.Prefetch(A_buf, region)
    func = tvm.te.schedule.SchedulePostProcToPrimFunc([A_buf], stmt, {A: A_buf})
    mod = tvm.IRModule.from_expr(func)
    mod = tvm.transform.Sequential(
        [tvm.tir.transform.StorageFlatten(64), tvm.tir.transform.Simplify()]
    )(mod)
    stmt = mod["main"].body

    # The two outer prefetch loops survive with their original extents.
    assert stmt.extent.value == 2
    assert isinstance(stmt.body, tvm.tir.For)
    assert stmt.body.extent.value == 2

    def _check_load(node):
        if isinstance(node, tvm.tir.BufferLoad):
            assert len(node.indices) == 1, "All prefetch indices should be flattened"

    tvm.tir.stmt_functor.post_order_visit(stmt, _check_load)
def test_flatten_storage_align():
    """storage_align padding shows up in the flattened allocation size."""
    rows, cols = 8, 16
    A = te.placeholder((rows, cols), name="A")
    A1 = te.compute((rows, cols), lambda i, j: A[i, j], name="A1")
    A2 = te.compute((rows, cols), lambda i, j: A1[i, j] + 3, name="A2")

    sch = te.create_schedule(A2.op)
    sch[A1].storage_align(A1.op.axis[0], 2, 1)

    mod = schedule_to_module(sch, [A, A2])
    mod = tvm.transform.Sequential(
        [tvm.tir.transform.StorageFlatten(64), tvm.tir.transform.Simplify()]
    )(mod)
    body = mod["main"].body
    # Each of the 8 rows is padded from 16 to 17 elements by the alignment.
    assert body.extents[0].value == 17 * 8
def test_flatten_double_buffer():
    """StorageFlatten composes with InjectDoubleBuffer and ThreadSync.

    The shared buffer B is marked with "double_buffer_scope"; after
    flattening and double-buffer injection its allocation doubles from
    4 to 8 elements, and ThreadSync inserts four storage syncs.
    """

    @tvm.script.ir_module
    class ModFromScript:
        @T.prim_func
        def main(A_param: T.handle, C_param: T.handle):
            A = T.match_buffer(A_param, (400,), "float32", strides=[1])
            C = T.match_buffer(C_param, (4,), "float32", strides=[1])
            T.func_attr({"from_legacy_te_schedule": True})
            threadIdx_x = T.env_thread("threadIdx.x")
            T.launch_thread(threadIdx_x, 1)
            for i in T.serial(0, 100):
                B = T.decl_buffer([4], "float32", scope="shared")
                with T.attr(B.data, "double_buffer_scope", 1):
                    for j in T.serial(0, 4):
                        B[j] = A[4 * i + j]
                for j in T.serial(0, 4):
                    C[j] = B[j] + 1.0

    mod = ModFromScript
    with tvm.transform.PassContext(config={"tir.InjectDoubleBuffer": {"split_loop": 2}}):
        mod = tvm.transform.Sequential(
            [
                tvm.tir.transform.StorageFlatten(64),
                tvm.tir.transform.InjectDoubleBuffer(),
                tvm.tir.transform.Simplify(),
            ]
        )(mod)
    stmt = mod["main"].body
    # Double buffering doubles the flattened 4-element allocation to 8.
    assert isinstance(stmt.body, tvm.tir.Allocate)
    assert list(stmt.body.extents) == [8]
    mod = tvm.tir.transform.ThreadSync("shared")(mod)
    f = mod["main"]
    count = [0]

    # Count tvm_storage_sync intrinsic calls emitted by ThreadSync.
    def count_sync(op):
        if isinstance(op, tvm.tir.Call) and op.op.same_as(tvm.ir.Op.get("tir.tvm_storage_sync")):
            count[0] += 1

    tvm.tir.stmt_functor.post_order_visit(f.body, count_sync)
    assert count[0] == 4
def test_flatten_let_buffer():
    """StorageFlatten accepts buffers whose backing pointer comes from a LetStmt."""

    @tvm.script.ir_module
    class module:
        @T.prim_func
        def main():
            T.func_attr({"from_legacy_te_schedule": True})
            # If a pointer defined using a LetStmt,
            A_data: T.handle("int32") = T.call_extern("dummy_extern_function", dtype="handle")
            # and a buffer is backed by that pointer,
            A = T.decl_buffer([1], dtype="float32", data=A_data)
            T.evaluate(A[0])

    # then the call to StorageFlatten would result in an exception
    # being thrown.
    tvm.tir.transform.StorageFlatten(64)(module)
@T.prim_func
def tir_func(a: T.handle, b: T.handle) -> None:
    # TIR-native function (no "from_legacy_te_schedule" attr) used to check
    # that StorageFlatten leaves such functions untouched.
    # Fix: the second match_buffer previously bound handle `a` (a typo),
    # leaving parameter `b` unused; bind `b` as intended.
    A = T.match_buffer(a, [2, 2])
    B = T.match_buffer(b, [2, 2])
    A[0, 1] = B[1, 1]
def test_flatten_tir():
    """StorageFlatten must be a no-op on TIR-native PrimFuncs."""
    before = tvm.IRModule({GlobalVar("main"): tir_func})
    after = tvm.tir.transform.StorageFlatten(64)(before)
    # StorageFlatten should do nothing to TIR functions
    tvm.ir.assert_structural_equal(before, after)
class TestPreserveDeclBuffer(tvm.testing.CompareBeforeAfter):
    """A buffer introduced via T.decl_buffer stays declared after flattening."""

    transform = tvm.tir.transform.StorageFlatten(64)

    def before():
        T.func_attr({"from_legacy_te_schedule": True})
        A = T.decl_buffer([16, 16], "float32")
        for i, j in T.grid(16, 16):
            A[i, j] = 0.0

    def expected():
        T.func_attr({"from_legacy_te_schedule": True})
        # The 16x16 buffer is flattened to a single 256-element axis,
        # with row-major index i * 16 + j.
        A = T.decl_buffer([256], "float32")
        for i, j in T.grid(16, 16):
            A[i * 16 + j] = 0.0
if __name__ == "__main__":
    # Delegate to TVM's pytest-based test entry point.
    tvm.testing.main()
| 6,193 | 32.301075 | 97 | py |
tvm | tvm-main/tests/python/unittest/test_tir_usmp_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import sys
import tvm
from tvm.script import tir as T
from tvm.tir import stmt_functor
from tvm.tir.usmp import utils as usmp_utils
from tvm.target import Target
from tvm import WorkspacePoolInfo, PoolInfoProperties
# fmt: off
# Hand-written TVMScript snapshot of a small cast/conv2d/max_pool pipeline;
# the USMP tests below use it as input for buffer-info extraction.
@tvm.script.ir_module
class LinearStructure:
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast(placeholder_62: T.handle, placeholder_63: T.handle, placeholder_64: T.handle, T_cast_20: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", "tir.noalias": True})
        placeholder_65 = T.match_buffer(placeholder_62, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_66 = T.match_buffer(placeholder_63, [9408], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_67 = T.match_buffer(placeholder_64, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_21 = T.match_buffer(T_cast_20, [289], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_7 = T.decl_buffer([157323], "int16")
        for i0_i1_fused_7 in T.serial(0, 229):
            for i2_7, i3_7 in T.grid(229, 3):
                PaddedInput_7[(((i0_i1_fused_7*687) + (i2_7*3)) + i3_7)] = T.if_then_else(((((2 <= i0_i1_fused_7) and (i0_i1_fused_7 < 226)) and (2 <= i2_7)) and (i2_7 < 226)), placeholder_65[((((i0_i1_fused_7*672) + (i2_7*3)) + i3_7) - 1350)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_7 in T.serial(0, 12544):
            Conv2dOutput_7 = T.decl_buffer([64], "int32")
            for ff_3 in T.serial(0, 64):
                Conv2dOutput_7[ff_3] = 0
                for ry_2, rx_2, rc_7 in T.grid(7, 7, 3):
                    Conv2dOutput_7[ff_3] = (Conv2dOutput_7[ff_3] + (T.cast(PaddedInput_7[(((((T.floordiv(ax0_ax1_fused_ax2_fused_7, 112)*1374) + (ry_2*687)) + (T.floormod(ax0_ax1_fused_ax2_fused_7, 112)*6)) + (rx_2*3)) + rc_7)], "int32")*T.cast(placeholder_66[((((ry_2*1344) + (rx_2*192)) + (rc_7*64)) + ff_3)], "int32")))
            for ax3_inner_7 in T.serial(0, 64):
                T_cast_21[((ax0_ax1_fused_ax2_fused_7*64) + ax3_inner_7)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_7[ax3_inner_7] + placeholder_67[ax3_inner_7]), 1939887962, 31, -9, dtype="int32"), 255), 0), "uint8")
    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d_cast(placeholder_28: T.handle, T_cast_6: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast", "tir.noalias": True})
        placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_7 = T.match_buffer(T_cast_6, [177], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        tensor_2 = T.decl_buffer([200704], "uint8")
        for ax0_ax1_fused_4 in T.serial(0, 56):
            for ax2_4 in T.serial(0, 56):
                for ax3_init in T.serial(0, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_init)] = T.uint8(0)
                for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)] = T.max(tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)], T.if_then_else(((((ax0_ax1_fused_4*2) + T.floordiv(rv0_rv1_fused_1, 3)) < 112) and (((ax2_4*2) + T.floormod(rv0_rv1_fused_1, 3)) < 112)), placeholder_29[(((((ax0_ax1_fused_4*14336) + (T.floordiv(rv0_rv1_fused_1, 3)*7168)) + (ax2_4*128)) + (T.floormod(rv0_rv1_fused_1, 3)*64)) + ax3_2)], T.uint8(0), dtype="uint8"))
        for ax0_ax1_fused_5 in T.serial(0, 56):
            for ax2_5, ax3_3 in T.grid(56, 64):
                T_cast_7[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)] = T.cast(tensor_2[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)], "int16")
    @T.prim_func
    def tvmgen_default_run_model(input: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        # The two intermediate activations; the USMP tests attach pool
        # candidates to these Allocate nodes.
        sid_9 = T.allocate([301056], "int8", "global")
        sid_8 = T.allocate([802816], "int8", "global")
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8, output, dtype="int32"))
__tvm_meta__ = None
# fmt: on
def test_create_pool_info():
    """WorkspacePoolInfo carries its name and an optional size hint."""
    target = Target("c")

    default_pool = WorkspacePoolInfo(
        "foo_workspace",
        [target],
    )
    assert default_pool.pool_name == "foo_workspace"
    # A pool without explicit properties is unconstrained in size (-1).
    assert default_pool.size_hint_bytes == -1

    sized_pool = WorkspacePoolInfo(
        "bar_workspace",
        [target],
        PoolInfoProperties(size_hint_bytes=1425),
    )
    assert sized_pool.pool_name == "bar_workspace"
    assert sized_pool.size_hint_bytes == 1425
def test_create_buffer_info():
    """BufferInfo stores name, size, pool candidates and alignment."""
    pool = WorkspacePoolInfo(
        "global_workspace",
        [Target("c")],
    )

    info = tvm.tir.usmp.BufferInfo(
        name_hint="buf1", size_bytes=256, pool_candidates=[pool]
    )
    assert info.name_hint == "buf1"
    assert info.size_bytes == 256
    assert list(info.pool_candidates) == [pool]
    # Alignment defaults to a single byte.
    assert info.alignment == 1

    aligned = tvm.tir.usmp.BufferInfo("buf2", 512, [pool], 8)
    assert aligned.name_hint == "buf2"
    assert aligned.size_bytes == 512
    assert list(aligned.pool_candidates) == [pool]
    assert aligned.alignment == 8
def test_create_pool_allocation():
    """PoolAllocation ties a pool to a byte offset within it."""
    pool = WorkspacePoolInfo(
        "foo_workspace",
        [Target("c")],
    )
    allocation = usmp_utils.PoolAllocation(pool_info=pool, byte_offset=64)
    assert allocation.pool_info == pool
    assert allocation.byte_offset == 64
def _assign_poolinfos_to_allocates_in_primfunc(primfunc, pool_infos):
    """Helper to assign pool infos to the allocate nodes of a tir.PrimFunc."""

    def set_poolinfos(stmt):
        # Only Allocate nodes are rewritten; returning None keeps other
        # statements unchanged in ir_transform.
        if not isinstance(stmt, tvm.tir.Allocate):
            return None
        return tvm.tir.Allocate(
            buffer_var=stmt.buffer_var,
            dtype=stmt.dtype,
            extents=stmt.extents,
            condition=stmt.condition,
            body=stmt.body,
            annotations={tvm.tir.usmp.utils.CANDIDATE_MEMORY_POOL_ATTR: pool_infos},
        )

    return primfunc.with_body(stmt_functor.ir_transform(primfunc.body, None, set_poolinfos))
def _assign_poolinfos_to_allocates_in_irmodule(mod, pool_infos):
    """Helper to assign pool infos to allocate nodes across an IRModule."""
    result = tvm.IRModule()
    for gvar, func in mod.functions.items():
        # Only PrimFuncs are carried over (and annotated).
        if isinstance(func, tvm.tir.PrimFunc):
            result[gvar] = _assign_poolinfos_to_allocates_in_primfunc(func, pool_infos)
    return result
def _assign_targets_to_primfuncs_irmodule(mod, target):
    """Helper to attach a target attribute to every PrimFunc in an IRModule."""
    result = tvm.IRModule()
    for gvar, func in mod.functions.items():
        if isinstance(func, tvm.tir.PrimFunc):
            result[gvar] = func.with_attr("target", target)
    return result
def test_create_array_buffer_info():
    """CreateArrayBufferInfo flattens the analysis map into an array."""
    target = Target("c")
    pool = WorkspacePoolInfo(
        "global_workspace",
        [target],
    )
    fcreate_array_bi = tvm.get_global_func("tir.usmp.CreateArrayBufferInfo")

    mod = _assign_targets_to_primfuncs_irmodule(LinearStructure, target)
    mod = _assign_poolinfos_to_allocates_in_irmodule(mod, [pool])
    main_func = mod["tvmgen_default_run_model"]
    analysis = tvm.tir.usmp.analysis.extract_buffer_info(main_func, mod)
    buffer_info_array = fcreate_array_bi(analysis.buffer_info_stmts)
    # Every entry of the array must originate from the analysis map.
    for buffer_info in buffer_info_array:
        assert buffer_info in analysis.buffer_info_stmts.keys()
if __name__ == "__main__":
    # Delegate to TVM's pytest-based test entry point.
    tvm.testing.main()
| 10,342 | 50.20297 | 459 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_postproc_rewrite_layout.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import tvm
import tvm.testing
from tvm import meta_schedule as ms
from tvm.script import tir as T
from tvm.target import Target
def _target() -> Target:
    """Return the CUDA target (with an LLVM host) shared by these tests."""
    tgt = Target("cuda", host="llvm")
    return tgt
def _create_context(mod, target) -> ms.TuneContext:
    """Build a TuneContext whose only postprocessor is RewriteLayout."""
    space = ms.space_generator.PostOrderApply(
        sch_rules=[],
        postprocs=[ms.postproc.RewriteLayout()],
        mutator_probs={},
    )
    return ms.TuneContext(
        mod=mod,
        target=target,
        space_generator=space,
        task_name="test",
    )
class BaseBeforeAfter(tvm.testing.CompareBeforeAfter):
    """Shared harness that applies the RewriteLayout postproc to `before`.

    Fix: the previous `inner` duplicated the TuneContext construction of
    the module-level `_create_context` / `_target` helpers verbatim; reuse
    them instead so the configuration is defined in one place.
    """

    def transform(self):
        def inner(mod):
            # Identical context to the one test_layout_rewrite builds.
            ctx = _create_context(mod, _target())
            sch = tvm.tir.Schedule(mod, debug_mask="all")
            sch.enter_postproc()
            # RewriteLayout must report success on these workloads.
            assert ctx.space_generator.postprocs[0].apply(sch)
            return sch.mod

        return inner
class TestTIRMatmul(BaseBeforeAfter):
    """Main functionality test

    A new block should be inserted to transform the layout, with the
    compute block operating on the temporary transformed buffer.
    """

    def before(
        A: T.Buffer((16, 16), "float32"),
        B: T.Buffer((16, 16), "float32"),
        C: T.Buffer((16, 16), "float32"),
    ) -> None:
        # Buffer index 1 (B) is marked as free to re-layout.
        T.func_attr({"layout_free_buffers": [1]})
        for i0, j, k0, i1, k1 in T.grid(4, 16, 4, 4, 4):
            with T.block("matmul"):
                vi = T.axis.S(16, i0 * 4 + i1)
                vj = T.axis.S(16, j)
                vk = T.axis.R(16, k0 * 4 + k1)
                with T.init():
                    C[vi, vj] = T.float32(0)
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]

    def expected(
        A: T.Buffer((16, 16), "float32"),
        B: T.Buffer((16, 16), "float32"),
        C: T.Buffer((16, 16), "float32"),
    ) -> None:
        T.func_attr({"layout_free_buffers": [1]})
        # B is copied into a (j, k_outer, k_inner)-ordered temporary by a
        # dedicated preproc block; the compute then reads the temporary.
        B_reindex = T.alloc_buffer([16, 4, 4], dtype="float32")
        for ax0, ax1 in T.grid(16, 16):
            with T.block("layout_rewrite"):
                i0, i1 = T.axis.remap("SS", [ax0, ax1])
                T.block_attr({"meta_schedule.layout_rewrite_preproc": True})
                B_reindex[i1, i0 // 4, i0 % 4] = B[i0, i1]
        for i0, j, k0, i1, k1 in T.grid(4, 16, 4, 4, 4):
            with T.block("matmul"):
                vi = T.axis.spatial(16, i0 * 4 + i1)
                vj = T.axis.spatial(16, j)
                vk = T.axis.reduce(16, k0 * 4 + k1)
                with T.init():
                    C[vi, vj] = T.float32(0)
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B_reindex[vj, vk // 4, vk % 4]
class TestRewrittenBuffersMustOccurWithinBlock(BaseBeforeAfter):
    """Buffers must occur within a Block"""

    def before(
        A: T.Buffer((16, 16), "float32"),
    ) -> None:
        T.func_attr({"layout_free_buffers": [0]})
        # A is accessed outside of any T.block, so the rewrite cannot
        # determine an access pattern for it.
        for i, j in T.grid(16, 16):
            T.evaluate(A[i, j])

    # The postproc is expected to raise rather than produce a module.
    expected = tvm.TVMError
class TestExtentOne(BaseBeforeAfter):
    """Buffers with dimensions of extent 1 can be transformed

    Regression test for a previous bug, in which the removal of
    trivial variables resulted in an error in `IndexMap::Inverse`.
    """

    def before(
        A: T.Buffer((16, 1), "float32"),
    ) -> None:
        T.func_attr({"layout_free_buffers": [0]})
        for i, j in T.grid(16, 1):
            with T.block("block"):
                vi, vj = T.axis.remap("SS", [i, j])
                T.evaluate(A[vi, vj])

    def expected(A: T.Buffer((16, 1), "float32")):
        T.func_attr({"layout_free_buffers": [0]})
        # The extent-1 axis is dropped: the transformed buffer is rank 1.
        A_global = T.alloc_buffer([16], dtype="float32")
        for ax0, ax1 in T.grid(16, 1):
            with T.block("A_global"):
                v0, v1 = T.axis.remap("SS", [ax0, ax1])
                T.block_attr({"meta_schedule.layout_rewrite_preproc": True})
                A_global[v0] = A[v0, v1]
        for i, j in T.grid(16, 1):
            with T.block("block"):
                vi, vj = T.axis.remap("SS", [i, j])
                T.evaluate(A_global[vi])
@T.prim_func
def tir_matmul(
    A: T.Buffer((16, 16), "float32"),
    B: T.Buffer((16, 16), "float32"),
    C: T.Buffer((16, 16), "float32"),
) -> None:
    # 16x16 matmul with B (buffer index 1) marked layout-free; input for
    # test_layout_rewrite below.
    T.func_attr({"layout_free_buffers": [1]})
    for i0, j, k0, i1, k1 in T.grid(4, 16, 4, 4, 4):
        with T.block("matmul"):
            vi = T.axis.S(16, i0 * 4 + i1)
            vj = T.axis.S(16, j)
            vk = T.axis.R(16, k0 * 4 + k1)
            with T.init():
                C[vi, vj] = T.float32(0)
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
@T.prim_func
def rewritten_tir_matmul(
    A: T.Buffer((16, 16), "float32"),
    B: T.Buffer((16, 16), "float32"),
    C: T.Buffer((16, 16), "float32"),
) -> None:
    # Expected output of RewriteLayout on tir_matmul: B is copied into a
    # reindexed temporary by a preproc block, and the compute reads it.
    T.func_attr({"layout_free_buffers": [1]})
    B_reindex = T.alloc_buffer([16, 4, 4], dtype="float32")
    for ax0, ax1 in T.grid(16, 16):
        with T.block("layout_rewrite"):
            i0, i1 = T.axis.remap("SS", [ax0, ax1])
            T.block_attr({"meta_schedule.layout_rewrite_preproc": True})
            B_reindex[i1, i0 // 4, i0 % 4] = B[i0, i1]
    for i0, j, k0, i1, k1 in T.grid(4, 16, 4, 4, 4):
        with T.block("matmul"):
            vi = T.axis.spatial(16, i0 * 4 + i1)
            vj = T.axis.spatial(16, j)
            vk = T.axis.reduce(16, k0 * 4 + k1)
            with T.init():
                C[vi, vj] = T.float32(0)
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B_reindex[vj, vk // 4, vk % 4]
def test_layout_rewrite():
    """RewriteLayout turns tir_matmul into rewritten_tir_matmul."""
    ctx = _create_context(tir_matmul, _target())
    sch = tvm.tir.Schedule(tir_matmul, debug_mask="all")
    sch.enter_postproc()
    assert ctx.space_generator.postprocs[0].apply(sch)
    tvm.ir.assert_structural_equal(sch.mod["main"], rewritten_tir_matmul)
# fmt: off
# Tiled conv2d schedule with cache_read stages (pad_temp_global, p1_global);
# p1 (buffer index 1) is layout-free. Input for the cache-read rewrite test.
@tvm.script.ir_module
class Conv2dCacheRead:
    @T.prim_func
    def main(p0: T.Buffer((1, 56, 56, 64), "float32"), p1: T.Buffer((3, 3, 64, 64), "float32"), conv2d_nhwc: T.Buffer((1, 56, 56, 64), "float32")):
        T.func_attr({"layout_free_buffers": [1], "tir.noalias": True, "global_symbol": "main"})
        pad_temp = T.alloc_buffer([1, 58, 58, 64], dtype="float32")
        conv2d_nhwc_global = T.alloc_buffer([1, 56, 56, 64], dtype="float32")
        pad_temp_global = T.alloc_buffer([1, 58, 58, 64], dtype="float32")
        p1_global = T.alloc_buffer([3, 3, 64, 64], dtype="float32")
        for i0_0_i1_0_i2_0_fused in T.parallel(4, annotations={"pragma_auto_unroll_max_step":16, "pragma_unroll_explicit":1}):
            for ax0, ax1, ax2 in T.grid(1, 30, 30):
                for ax3_fused in T.vectorized(64):
                    with T.block("pad_temp"):
                        i0 = T.axis.spatial(1, ax0)
                        i1 = T.axis.spatial(58, i0_0_i1_0_i2_0_fused // 2 * 28 + ax1)
                        i2 = T.axis.spatial(58, i0_0_i1_0_i2_0_fused % 2 * 28 + ax2)
                        i3 = T.axis.spatial(64, ax3_fused)
                        T.reads(p0[i0, i1 - 1, i2 - 1, i3])
                        T.writes(pad_temp[i0, i1, i2, i3])
                        pad_temp[i0, i1, i2, i3] = T.if_then_else(1 <= i1 and i1 < 57 and 1 <= i2 and i2 < 57, p0[i0, i1 - 1, i2 - 1, i3], T.float32(0), dtype="float32")
            for i3_0 in T.serial(16):
                for ax0_ax1_ax2_ax3_fused in T.serial(57600):
                    with T.block("pad_temp_global"):
                        v0 = T.axis.spatial(1, 0)
                        v1 = T.axis.spatial(58, i0_0_i1_0_i2_0_fused // 2 * 28 + ax0_ax1_ax2_ax3_fused // 1920)
                        v2 = T.axis.spatial(58, i0_0_i1_0_i2_0_fused % 2 * 28 + ax0_ax1_ax2_ax3_fused % 1920 // 64)
                        v3 = T.axis.spatial(64, ax0_ax1_ax2_ax3_fused % 64)
                        T.reads(pad_temp[v0, v1, v2, v3])
                        T.writes(pad_temp_global[v0, v1, v2, v3])
                        pad_temp_global[v0, v1, v2, v3] = pad_temp[v0, v1, v2, v3]
                for ax0_ax1_ax2_ax3_fused in T.serial(2304):
                    with T.block("p1_global"):
                        v0 = T.axis.spatial(3, ax0_ax1_ax2_ax3_fused // 768)
                        v1 = T.axis.spatial(3, ax0_ax1_ax2_ax3_fused % 768 // 256)
                        v2 = T.axis.spatial(64, ax0_ax1_ax2_ax3_fused % 256 // 4)
                        v3 = T.axis.spatial(64, i3_0 * 4 + ax0_ax1_ax2_ax3_fused % 4)
                        T.reads(p1[v0, v1, v2, v3])
                        T.writes(p1_global[v0, v1, v2, v3])
                        p1_global[v0, v1, v2, v3] = p1[v0, v1, v2, v3]
                for i0_1, i1_1, i2_1, i3_1 in T.grid(1, 7, 2, 1):
                    for i0_2_init, i1_2_init, i2_2_init, i3_2_init, i0_3_init, i1_3_init, i2_3_init in T.grid(1, 1, 14, 2, 1, 4, 1):
                        for i3_3_fused_init in T.vectorized(2):
                            with T.block("conv2d_nhwc_init"):
                                nn = T.axis.spatial(1, i0_1 + i0_2_init + i0_3_init)
                                yy = T.axis.spatial(56, i0_0_i1_0_i2_0_fused // 2 * 28 + i1_1 * 4 + i1_2_init * 4 + i1_3_init)
                                xx = T.axis.spatial(56, i2_3_init + i0_0_i1_0_i2_0_fused % 2 * 28 + i2_1 * 14 + i2_2_init)
                                ff = T.axis.spatial(64, i3_0 * 4 + i3_1 * 4 + i3_2_init * 2 + i3_3_fused_init)
                                T.reads()
                                T.writes(conv2d_nhwc_global[nn, yy, xx, ff])
                                T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                                conv2d_nhwc_global[nn, yy, xx, ff] = T.float32(0)
                    for i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3 in T.grid(1, 1, 2, 1, 1, 14, 2, 3, 3, 32, 1, 4, 1):
                        for i3_3_fused in T.vectorized(2):
                            with T.block("conv2d_nhwc_update"):
                                nn = T.axis.spatial(1, i0_1 + i0_2 + i0_3)
                                yy = T.axis.spatial(56, i0_0_i1_0_i2_0_fused // 2 * 28 + i1_1 * 4 + i1_2 * 4 + i1_3)
                                xx = T.axis.spatial(56, i2_3 + i0_0_i1_0_i2_0_fused % 2 * 28 + i2_1 * 14 + i2_2)
                                ff = T.axis.spatial(64, i3_0 * 4 + i3_1 * 4 + i3_2 * 2 + i3_3_fused)
                                ry = T.axis.reduce(3, i4_0 * 3 + i4_1)
                                rx = T.axis.reduce(3, i5_0 * 3 + i5_1)
                                rc = T.axis.reduce(64, i6_0 * 32 + i6_1)
                                T.reads(conv2d_nhwc_global[nn, yy, xx, ff], pad_temp_global[nn, yy + ry, xx + rx, rc], p1_global[ry, rx, rc, ff])
                                T.writes(conv2d_nhwc_global[nn, yy, xx, ff])
                                T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                                conv2d_nhwc_global[nn, yy, xx, ff] = conv2d_nhwc_global[nn, yy, xx, ff] + pad_temp_global[nn, yy + ry, xx + rx, rc] * p1_global[ry, rx, rc, ff]
                    for ax0, ax1, ax2 in T.grid(1, 4, 14):
                        for ax3_fused in T.vectorized(4):
                            with T.block("conv2d_nhwc_global"):
                                v0 = T.axis.spatial(1, ax0)
                                v1 = T.axis.spatial(56, i0_0_i1_0_i2_0_fused // 2 * 28 + i1_1 * 4 + ax1)
                                v2 = T.axis.spatial(56, i0_0_i1_0_i2_0_fused % 2 * 28 + i2_1 * 14 + ax2)
                                v3 = T.axis.spatial(64, i3_0 * 4 + ax3_fused)
                                T.reads(conv2d_nhwc_global[v0, v1, v2, v3])
                                T.writes(conv2d_nhwc[v0, v1, v2, v3])
                                conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_global[v0, v1, v2, v3]
# Expected output of RewriteLayout on Conv2dCacheRead: p1 is re-laid-out into
# a 7-D buffer by a preproc block, and the cache-read chain follows suit.
@tvm.script.ir_module
class Conv2dCacheReadRewritten:
    @T.prim_func
    def main(p0: T.Buffer((1, 56, 56, 64), "float32"), p1: T.Buffer((3, 3, 64, 64), "float32"), conv2d_nhwc: T.Buffer((1, 56, 56, 64), "float32")):
        T.func_attr({"layout_free_buffers": [1], "tir.noalias": True, "global_symbol": "main"})
        pad_temp = T.alloc_buffer([1, 58, 58, 64], dtype="float32")
        conv2d_nhwc_global = T.alloc_buffer([1, 56, 56, 64], dtype="float32")
        pad_temp_global = T.alloc_buffer([1, 58, 58, 64], dtype="float32")
        p1_global = T.alloc_buffer([16, 2, 2, 3, 3, 32, 2], dtype="float32")
        p1_global_1 = T.alloc_buffer([16, 2, 2, 3, 3, 32, 2], dtype="float32")
        for ax0, ax1, ax2, ax3 in T.grid(3, 3, 64, 64):
            with T.block("p1_global"):
                v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
                T.reads(p1[v0, v1, v2, v3])
                T.writes(p1_global_1[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2])
                T.block_attr({"meta_schedule.layout_rewrite_preproc":True})
                p1_global_1[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2] = p1[v0, v1, v2, v3]
        for i0_0_i1_0_i2_0_fused in T.parallel(4, annotations={"pragma_auto_unroll_max_step":16, "pragma_unroll_explicit":1}):
            for ax0, ax1, ax2 in T.grid(1, 30, 30):
                for ax3_fused in T.vectorized(64):
                    with T.block("pad_temp"):
                        i0 = T.axis.spatial(1, ax0)
                        i1 = T.axis.spatial(58, i0_0_i1_0_i2_0_fused // 2 * 28 + ax1)
                        i2 = T.axis.spatial(58, i0_0_i1_0_i2_0_fused % 2 * 28 + ax2)
                        i3 = T.axis.spatial(64, ax3_fused)
                        T.reads(p0[i0, i1 - 1, i2 - 1, i3])
                        T.writes(pad_temp[i0, i1, i2, i3])
                        pad_temp[i0, i1, i2, i3] = T.if_then_else(1 <= i1 and i1 < 57 and 1 <= i2 and i2 < 57, p0[i0, i1 - 1, i2 - 1, i3], T.float32(0), dtype="float32")
            for i3_0 in T.serial(16):
                for ax0_ax1_ax2_ax3_fused in T.serial(57600):
                    with T.block("pad_temp_global"):
                        v0 = T.axis.spatial(1, 0)
                        v1 = T.axis.spatial(58, i0_0_i1_0_i2_0_fused // 2 * 28 + ax0_ax1_ax2_ax3_fused // 1920)
                        v2 = T.axis.spatial(58, i0_0_i1_0_i2_0_fused % 2 * 28 + ax0_ax1_ax2_ax3_fused % 1920 // 64)
                        v3 = T.axis.spatial(64, ax0_ax1_ax2_ax3_fused % 64)
                        T.reads(pad_temp[v0, v1, v2, v3])
                        T.writes(pad_temp_global[v0, v1, v2, v3])
                        pad_temp_global[v0, v1, v2, v3] = pad_temp[v0, v1, v2, v3]
                for ax0_ax1_ax2_ax3_fused in T.serial(2304):
                    with T.block("p1_global"):
                        v0 = T.axis.spatial(3, ax0_ax1_ax2_ax3_fused // 768)
                        v1 = T.axis.spatial(3, ax0_ax1_ax2_ax3_fused % 768 // 256)
                        v2 = T.axis.spatial(64, ax0_ax1_ax2_ax3_fused % 256 // 4)
                        v3 = T.axis.spatial(64, i3_0 * 4 + ax0_ax1_ax2_ax3_fused % 4)
                        T.reads(p1_global_1[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2])
                        T.writes(p1_global[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2])
                        p1_global[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2] = p1_global_1[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2]
                for i0_1, i1_1, i2_1, i3_1 in T.grid(1, 7, 2, 1):
                    for i0_2_init, i1_2_init, i2_2_init, i3_2_init, i0_3_init, i1_3_init, i2_3_init in T.grid(1, 1, 14, 2, 1, 4, 1):
                        for i3_3_fused_init in T.vectorized(2):
                            with T.block("conv2d_nhwc_init"):
                                nn = T.axis.spatial(1, i0_1 + i0_2_init + i0_3_init)
                                yy = T.axis.spatial(56, i0_0_i1_0_i2_0_fused // 2 * 28 + i1_1 * 4 + i1_2_init * 4 + i1_3_init)
                                xx = T.axis.spatial(56, i2_3_init + i0_0_i1_0_i2_0_fused % 2 * 28 + i2_1 * 14 + i2_2_init)
                                ff = T.axis.spatial(64, i3_0 * 4 + i3_1 * 4 + i3_2_init * 2 + i3_3_fused_init)
                                T.reads()
                                T.writes(conv2d_nhwc_global[nn, yy, xx, ff])
                                T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                                conv2d_nhwc_global[nn, yy, xx, ff] = T.float32(0)
                    for i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3 in T.grid(1, 1, 2, 1, 1, 14, 2, 3, 3, 32, 1, 4, 1):
                        for i3_3_fused in T.vectorized(2):
                            with T.block("conv2d_nhwc_update"):
                                nn = T.axis.spatial(1, i0_1 + i0_2 + i0_3)
                                yy = T.axis.spatial(56, i0_0_i1_0_i2_0_fused // 2 * 28 + i1_1 * 4 + i1_2 * 4 + i1_3)
                                xx = T.axis.spatial(56, i2_3 + i0_0_i1_0_i2_0_fused % 2 * 28 + i2_1 * 14 + i2_2)
                                ff = T.axis.spatial(64, i3_0 * 4 + i3_1 * 4 + i3_2 * 2 + i3_3_fused)
                                ry = T.axis.reduce(3, i4_0 * 3 + i4_1)
                                rx = T.axis.reduce(3, i5_0 * 3 + i5_1)
                                rc = T.axis.reduce(64, i6_0 * 32 + i6_1)
                                T.reads(conv2d_nhwc_global[nn, yy, xx, ff], pad_temp_global[nn, yy + ry, xx + rx, rc], p1_global[ff // 4, rc // 32, ff % 4 // 2, ry, rx, rc % 32, ff % 2])
                                T.writes(conv2d_nhwc_global[nn, yy, xx, ff])
                                T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                                conv2d_nhwc_global[nn, yy, xx, ff] = conv2d_nhwc_global[nn, yy, xx, ff] + pad_temp_global[nn, yy + ry, xx + rx, rc] * p1_global[ff // 4, rc // 32, ff % 4 // 2, ry, rx, rc % 32, ff % 2]
                    for ax0, ax1, ax2 in T.grid(1, 4, 14):
                        for ax3_fused in T.vectorized(4):
                            with T.block("conv2d_nhwc_global"):
                                v0 = T.axis.spatial(1, ax0)
                                v1 = T.axis.spatial(56, i0_0_i1_0_i2_0_fused // 2 * 28 + i1_1 * 4 + ax1)
                                v2 = T.axis.spatial(56, i0_0_i1_0_i2_0_fused % 2 * 28 + i2_1 * 14 + ax2)
                                v3 = T.axis.spatial(64, i3_0 * 4 + ax3_fused)
                                T.reads(conv2d_nhwc_global[v0, v1, v2, v3])
                                T.writes(conv2d_nhwc[v0, v1, v2, v3])
                                conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_global[v0, v1, v2, v3]
# Expected IRModule after RewriteLayout when the layout-free buffer p1 is
# cached twice: the rewritten 7-D layout propagates through the whole cache
# chain p1 -> p1_global_1 (preproc block) -> p1_global2 -> p1_global.
@tvm.script.ir_module
class Conv2dCacheReadMultipleRewritten:
    @T.prim_func
    def main(p0: T.Buffer((1, 56, 56, 64), "float32"), p1: T.Buffer((3, 3, 64, 64), "float32"), conv2d_nhwc: T.Buffer((1, 56, 56, 64), "float32")):
        T.func_attr({"layout_free_buffers": [1], "tir.noalias": True, "global_symbol": "main"})
        pad_temp = T.alloc_buffer([1, 58, 58, 64], dtype="float32")
        conv2d_nhwc_global = T.alloc_buffer([1, 56, 56, 64], dtype="float32")
        pad_temp_global = T.alloc_buffer([1, 58, 58, 64], dtype="float32")
        p1_global = T.alloc_buffer([16, 2, 2, 3, 3, 32, 2], dtype="float32")
        p1_global2 = T.alloc_buffer([16, 2, 2, 3, 3, 32, 2], dtype="float32", scope="global2")
        p1_global_1 = T.alloc_buffer([16, 2, 2, 3, 3, 32, 2], dtype="float32")
        # Preproc block: materialize p1 in the rewritten 7-D layout once.
        for ax0, ax1, ax2, ax3 in T.grid(3, 3, 64, 64):
            with T.block("p1_global"):
                v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
                T.reads(p1[v0, v1, v2, v3])
                T.writes(p1_global_1[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2])
                T.block_attr({"meta_schedule.layout_rewrite_preproc":True})
                p1_global_1[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2] = p1[v0, v1, v2, v3]
        # Second cache stage (added by cache_read(..., "global2")), already
        # indexed with the rewritten layout.
        for ax0, ax1, ax2, ax3 in T.grid(3, 3, 64, 64):
            with T.block("p1_global2"):
                v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
                T.reads(p1_global_1[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2])
                T.writes(p1_global2[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2])
                p1_global2[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2] = p1_global_1[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2]
        for i0_0_i1_0_i2_0_fused in T.parallel(4, annotations={"pragma_auto_unroll_max_step":16, "pragma_unroll_explicit":1}):
            # Zero-pad the input tile for this spatial block.
            for ax0, ax1, ax2 in T.grid(1, 30, 30):
                for ax3_fused in T.vectorized(64):
                    with T.block("pad_temp"):
                        i0 = T.axis.spatial(1, ax0)
                        i1 = T.axis.spatial(58, i0_0_i1_0_i2_0_fused // 2 * 28 + ax1)
                        i2 = T.axis.spatial(58, i0_0_i1_0_i2_0_fused % 2 * 28 + ax2)
                        i3 = T.axis.spatial(64, ax3_fused)
                        T.reads(p0[i0, i1 - 1, i2 - 1, i3])
                        T.writes(pad_temp[i0, i1, i2, i3])
                        pad_temp[i0, i1, i2, i3] = T.if_then_else(1 <= i1 and i1 < 57 and 1 <= i2 and i2 < 57, p0[i0, i1 - 1, i2 - 1, i3], T.float32(0), dtype="float32")
            for i3_0 in T.serial(16):
                for ax0_ax1_ax2_ax3_fused in T.serial(57600):
                    with T.block("pad_temp_global"):
                        v0 = T.axis.spatial(1, 0)
                        v1 = T.axis.spatial(58, i0_0_i1_0_i2_0_fused // 2 * 28 + ax0_ax1_ax2_ax3_fused // 1920)
                        v2 = T.axis.spatial(58, i0_0_i1_0_i2_0_fused % 2 * 28 + ax0_ax1_ax2_ax3_fused % 1920 // 64)
                        v3 = T.axis.spatial(64, ax0_ax1_ax2_ax3_fused % 64)
                        T.reads(pad_temp[v0, v1, v2, v3])
                        T.writes(pad_temp_global[v0, v1, v2, v3])
                        pad_temp_global[v0, v1, v2, v3] = pad_temp[v0, v1, v2, v3]
                # Innermost cache stage of p1, read from p1_global2 in the
                # rewritten layout.
                for ax0_ax1_ax2_ax3_fused in T.serial(2304):
                    with T.block("p1_global"):
                        v0 = T.axis.spatial(3, ax0_ax1_ax2_ax3_fused // 768)
                        v1 = T.axis.spatial(3, ax0_ax1_ax2_ax3_fused % 768 // 256)
                        v2 = T.axis.spatial(64, ax0_ax1_ax2_ax3_fused % 256 // 4)
                        v3 = T.axis.spatial(64, i3_0 * 4 + ax0_ax1_ax2_ax3_fused % 4)
                        T.reads(p1_global2[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2])
                        T.writes(p1_global[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2])
                        p1_global[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2] = p1_global2[v3 // 4, v2 // 32, v3 % 4 // 2, v0, v1, v2 % 32, v3 % 2]
                for i0_1, i1_1, i2_1, i3_1 in T.grid(1, 7, 2, 1):
                    for i0_2_init, i1_2_init, i2_2_init, i3_2_init, i0_3_init, i1_3_init, i2_3_init in T.grid(1, 1, 14, 2, 1, 4, 1):
                        for i3_3_fused_init in T.vectorized(2):
                            with T.block("conv2d_nhwc_init"):
                                nn = T.axis.spatial(1, i0_1 + i0_2_init + i0_3_init)
                                yy = T.axis.spatial(56, i0_0_i1_0_i2_0_fused // 2 * 28 + i1_1 * 4 + i1_2_init * 4 + i1_3_init)
                                xx = T.axis.spatial(56, i2_3_init + i0_0_i1_0_i2_0_fused % 2 * 28 + i2_1 * 14 + i2_2_init)
                                ff = T.axis.spatial(64, i3_0 * 4 + i3_1 * 4 + i3_2_init * 2 + i3_3_fused_init)
                                T.reads()
                                T.writes(conv2d_nhwc_global[nn, yy, xx, ff])
                                T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                                conv2d_nhwc_global[nn, yy, xx, ff] = T.float32(0)
                    for i4_0, i5_0, i6_0, i0_2, i1_2, i2_2, i3_2, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3 in T.grid(1, 1, 2, 1, 1, 14, 2, 3, 3, 32, 1, 4, 1):
                        for i3_3_fused in T.vectorized(2):
                            with T.block("conv2d_nhwc_update"):
                                nn = T.axis.spatial(1, i0_1 + i0_2 + i0_3)
                                yy = T.axis.spatial(56, i0_0_i1_0_i2_0_fused // 2 * 28 + i1_1 * 4 + i1_2 * 4 + i1_3)
                                xx = T.axis.spatial(56, i2_3 + i0_0_i1_0_i2_0_fused % 2 * 28 + i2_1 * 14 + i2_2)
                                ff = T.axis.spatial(64, i3_0 * 4 + i3_1 * 4 + i3_2 * 2 + i3_3_fused)
                                ry = T.axis.reduce(3, i4_0 * 3 + i4_1)
                                rx = T.axis.reduce(3, i5_0 * 3 + i5_1)
                                rc = T.axis.reduce(64, i6_0 * 32 + i6_1)
                                T.reads(conv2d_nhwc_global[nn, yy, xx, ff], pad_temp_global[nn, yy + ry, xx + rx, rc], p1_global[ff // 4, rc // 32, ff % 4 // 2, ry, rx, rc % 32, ff % 2])
                                T.writes(conv2d_nhwc_global[nn, yy, xx, ff])
                                T.block_attr({"meta_schedule.tiling_structure":"SSRSRS"})
                                conv2d_nhwc_global[nn, yy, xx, ff] = conv2d_nhwc_global[nn, yy, xx, ff] + pad_temp_global[nn, yy + ry, xx + rx, rc] * p1_global[ff // 4, rc // 32, ff % 4 // 2, ry, rx, rc % 32, ff % 2]
                    # Write the accumulated tile back to the output buffer.
                    for ax0, ax1, ax2 in T.grid(1, 4, 14):
                        for ax3_fused in T.vectorized(4):
                            with T.block("conv2d_nhwc_global"):
                                v0 = T.axis.spatial(1, ax0)
                                v1 = T.axis.spatial(56, i0_0_i1_0_i2_0_fused // 2 * 28 + i1_1 * 4 + ax1)
                                v2 = T.axis.spatial(56, i0_0_i1_0_i2_0_fused % 2 * 28 + i2_1 * 14 + ax2)
                                v3 = T.axis.spatial(64, i3_0 * 4 + ax3_fused)
                                T.reads(conv2d_nhwc_global[v0, v1, v2, v3])
                                T.writes(conv2d_nhwc[v0, v1, v2, v3])
                                conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_global[v0, v1, v2, v3]
# fmt: on
def test_layout_rewrite_cache_read():
    """The RewriteLayout postproc rewrites the cached read of the layout-free buffer."""
    tuning_ctx = _create_context(Conv2dCacheRead, Target("llvm"))
    schedule = tvm.tir.Schedule(Conv2dCacheRead, debug_mask="all")
    schedule.enter_postproc()
    # The context registers exactly the RewriteLayout postproc; it must succeed.
    rewrite_layout = tuning_ctx.space_generator.postprocs[0]
    assert rewrite_layout.apply(schedule)
    tvm.ir.assert_structural_equal(schedule.mod, Conv2dCacheReadRewritten)
def test_layout_rewrite_cache_read_multiple():
    """Layout rewriting also works when the layout-free buffer is cached twice."""
    tuning_ctx = _create_context(Conv2dCacheRead, Target("llvm"))
    schedule = tvm.tir.Schedule(Conv2dCacheRead, debug_mask="all")
    # Insert a second cache stage so the rewrite must propagate through a chain.
    schedule.cache_read(schedule.get_block("p1_global"), 0, "global2")
    schedule.enter_postproc()
    rewrite_layout = tuning_ctx.space_generator.postprocs[0]
    assert rewrite_layout.apply(schedule)
    tvm.ir.assert_structural_equal(schedule.mod, Conv2dCacheReadMultipleRewritten)
class TestLayoutRewriteInt64Index(BaseBeforeAfter):
    """Layout rewriting must handle buffers indexed with int64 expressions."""

    # Batch matmul with all shapes/indices expressed as T.int64; buffer 1 (p1)
    # is marked layout-free.
    def before(
        p0: T.Buffer((T.int64(12), T.int64(197), T.int64(64)), "int8"),
        p1: T.Buffer((T.int64(12), T.int64(197), T.int64(64)), "int8"),
        T_batch_matmul_NT: T.Buffer((T.int64(12), T.int64(197), T.int64(197)), "int32"),
    ):
        T.func_attr({"layout_free_buffers": [1], "global_symbol": "main", "tir.noalias": True})
        for b_0_i_0_fused in T.parallel(T.int64(394)):
            for j_0 in T.serial(T.int64(1)):
                for b_1, i_1, j_1 in T.grid(T.int64(1), T.int64(1), T.int64(1)):
                    for b_2_init, i_2_init, j_2_init, b_3_init, i_3_init, j_3_init in T.grid(
                        T.int64(6), T.int64(1), T.int64(197), T.int64(1), T.int64(1), T.int64(1)
                    ):
                        with T.block("T_batch_matmul_NT_init"):
                            v_b = T.axis.spatial(
                                T.int64(12),
                                b_3_init
                                + b_0_i_0_fused // T.int64(197) * T.int64(6)
                                + b_1 * T.int64(6)
                                + b_2_init,
                            )
                            v_i = T.axis.spatial(
                                T.int64(197),
                                b_0_i_0_fused % T.int64(197) + i_1 + i_2_init + i_3_init,
                            )
                            v_j = T.axis.spatial(
                                T.int64(197),
                                j_3_init + j_0 * T.int64(197) + j_1 * T.int64(197) + j_2_init,
                            )
                            T_batch_matmul_NT[v_b, v_i, v_j] = 0
                    for k_0, b_2, i_2, j_2, k_1, b_3, i_3, j_3 in T.grid(
                        T.int64(64),
                        T.int64(6),
                        T.int64(1),
                        T.int64(197),
                        T.int64(1),
                        T.int64(1),
                        T.int64(1),
                        T.int64(1),
                    ):
                        with T.block("T_batch_matmul_NT_update"):
                            v_b = T.axis.spatial(
                                T.int64(12),
                                b_3
                                + b_0_i_0_fused // T.int64(197) * T.int64(6)
                                + b_1 * T.int64(6)
                                + b_2,
                            )
                            v_i = T.axis.spatial(
                                T.int64(197), b_0_i_0_fused % T.int64(197) + i_1 + i_2 + i_3
                            )
                            v_j = T.axis.spatial(
                                T.int64(197), j_3 + j_0 * T.int64(197) + j_1 * T.int64(197) + j_2
                            )
                            v_k = T.axis.reduce(T.int64(64), k_0 + k_1)
                            T_batch_matmul_NT[v_b, v_i, v_j] = T_batch_matmul_NT[
                                v_b, v_i, v_j
                            ] + T.Cast("int32", p0[v_b, v_i, v_k]) * T.Cast(
                                "int32", p1[v_b, v_j, v_k]
                            )

    # Same computation with p1 rewritten into a 4-D layout via a preproc
    # block; all rewritten indices stay int64.
    def expected(
        p0: T.Buffer((T.int64(12), T.int64(197), T.int64(64)), "int8"),
        p1: T.Buffer((T.int64(12), T.int64(197), T.int64(64)), "int8"),
        T_batch_matmul_NT: T.Buffer((T.int64(12), T.int64(197), T.int64(197)), "int32"),
    ):
        T.func_attr({"tir.noalias": True, "global_symbol": "main", "layout_free_buffers": [1]})
        p1_global = T.alloc_buffer(
            [T.int64(2), T.int64(64), T.int64(6), T.int64(197)], dtype="int8"
        )
        for ax0, ax1, ax2 in T.grid(T.int64(12), T.int64(197), T.int64(64)):
            with T.block("p1_global"):
                v0, v1, v2 = T.axis.remap("SSS", [ax0, ax1, ax2])
                T.reads(p1[v0, v1, v2])
                T.writes(p1_global[v0 // T.int64(6), v2, v0 % T.int64(6), v1])
                T.block_attr({"meta_schedule.layout_rewrite_preproc": True})
                p1_global[v0 // T.int64(6), v2, v0 % T.int64(6), v1] = p1[v0, v1, v2]
        for b_0_i_0_fused in T.parallel(T.int64(394)):
            for j_0, b_1, i_1, j_1 in T.grid(T.int64(1), T.int64(1), T.int64(1), T.int64(1)):
                for b_2_init, i_2_init, j_2_init, b_3_init, i_3_init, j_3_init in T.grid(
                    T.int64(6), T.int64(1), T.int64(197), T.int64(1), T.int64(1), T.int64(1)
                ):
                    with T.block("T_batch_matmul_NT_init"):
                        v_b = T.axis.spatial(
                            T.int64(12),
                            b_3_init
                            + b_0_i_0_fused // T.int64(197) * T.int64(6)
                            + b_1 * T.int64(6)
                            + b_2_init,
                        )
                        v_i = T.axis.spatial(
                            T.int64(197), b_0_i_0_fused % T.int64(197) + i_1 + i_2_init + i_3_init
                        )
                        v_j = T.axis.spatial(
                            T.int64(197),
                            j_3_init + j_0 * T.int64(197) + j_1 * T.int64(197) + j_2_init,
                        )
                        T_batch_matmul_NT[v_b, v_i, v_j] = 0
                for k_0, b_2, i_2, j_2, k_1, b_3, i_3, j_3 in T.grid(
                    T.int64(64),
                    T.int64(6),
                    T.int64(1),
                    T.int64(197),
                    T.int64(1),
                    T.int64(1),
                    T.int64(1),
                    T.int64(1),
                ):
                    with T.block("T_batch_matmul_NT_update"):
                        v_b = T.axis.spatial(
                            T.int64(12),
                            b_3
                            + b_0_i_0_fused // T.int64(197) * T.int64(6)
                            + b_1 * T.int64(6)
                            + b_2,
                        )
                        v_i = T.axis.spatial(
                            T.int64(197), b_0_i_0_fused % T.int64(197) + i_1 + i_2 + i_3
                        )
                        v_j = T.axis.spatial(
                            T.int64(197), j_3 + j_0 * T.int64(197) + j_1 * T.int64(197) + j_2
                        )
                        v_k = T.axis.reduce(T.int64(64), k_0 + k_1)
                        T_batch_matmul_NT[v_b, v_i, v_j] = T_batch_matmul_NT[
                            v_b, v_i, v_j
                        ] + T.Cast("int32", p0[v_b, v_i, v_k]) * T.Cast(
                            "int32", p1_global[v_b // T.int64(6), v_k, v_b % T.int64(6), v_j]
                        )
# Allow invoking this file directly; tvm.testing.main runs every test in it.
if __name__ == "__main__":
    tvm.testing.main()
| 34,965 | 55.855285 | 216 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_inject_ptx_async_copy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.script import tir as T
import pytest
import numpy as np
def count_cp_async(stmt):
    """Count the tir.ptx_cp_async intrinsic calls appearing anywhere in *stmt*."""
    counter = [0]  # mutable cell so the visitor closure can update it

    def visit(node):
        if isinstance(node, tvm.tir.Call) and node.op.name == "tir.ptx_cp_async":
            counter[0] += 1

    tvm.tir.stmt_functor.post_order_visit(stmt, visit)
    return counter[0]
def generate_global_to_shared_vectorized_copy(dtype, vector_size):
    """Build a prim_func that copies a (32, 128) buffer through shared memory
    with the given element dtype and vectorization width, wrapping the copy in
    an async_scope so it can be lowered to cp.async."""
    num_iters = 128 // vector_size
    vector_size_expr = tvm.runtime.convert(vector_size)
    @T.prim_func
    def ptx_global_to_shared_copy(
        A: T.Buffer((32, 128), dtype), B: T.Buffer((32, 128), dtype)
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        bx = T.env_thread("blockIdx.x")
        tx = T.env_thread("threadIdx.x")
        T.launch_thread(bx, 1)
        T.launch_thread(tx, 32)
        with T.block():
            A_shared = T.alloc_buffer([32, 128], dtype, scope="shared")
            T.reads(A[0:32, 0:128])
            T.writes(B[0:32, 0:128])
            # Mark the copy as an async scope so InjectPTXAsyncCopy can turn
            # the global->shared store into cp.async.
            T.attr("default", "async_scope", 1)
            for i in T.serial(num_iters):
                for j in T.vectorized(vector_size):
                    A_shared[tx, i * vector_size_expr + j] = A[tx, i * vector_size_expr + j]
            T.evaluate(T.ptx_commit_group(dtype=""))
            T.evaluate(T.ptx_wait_group(0, dtype=""))
            for i in range(128):
                B[tx, i] = A_shared[tx, i]
    return ptx_global_to_shared_copy
# Scalar (vector width 1) fp32 variant of the global->shared round-trip copy;
# used by test_inject_async_copy for the non-vectorized case.
@T.prim_func
def ptx_global_to_shared_copy_fp32x1(
    A: T.Buffer((32, 128), "float32"), B: T.Buffer((32, 128), "float32")
) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    bx = T.env_thread("blockIdx.x")
    tx = T.env_thread("threadIdx.x")
    T.launch_thread(bx, 1)
    T.launch_thread(tx, 32)
    with T.block():
        A_shared = T.alloc_buffer([32, 128], "float32", scope="shared")
        T.reads(A[0:32, 0:128])
        T.writes(B[0:32, 0:128])
        # async_scope marks the copy for cp.async lowering.
        T.attr("default", "async_scope", 1)
        for i in T.serial(128):
            A_shared[tx, i] = A[tx, i]
        T.evaluate(T.ptx_commit_group(dtype=""))
        T.evaluate(T.ptx_wait_group(0, dtype=""))
        for i in range(128):
            B[tx, i] = A_shared[tx, i]
# Two fp16x8 copies into *dynamic* shared memory inside one async scope; each
# should become its own cp.async (test_inject_async_copy_shared_dyn asserts 2).
@T.prim_func
def ptx_global_to_shared_dyn_copy_fp16x8(
    A: T.Buffer((32, 128), "float16"),
    B: T.Buffer((32, 128), "float16"),
    C: T.Buffer((32, 128), "float16"),
) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    bx = T.env_thread("blockIdx.x")
    tx = T.env_thread("threadIdx.x")
    T.launch_thread(bx, 1)
    T.launch_thread(tx, 32)
    with T.block():
        A_shared = T.alloc_buffer([32, 128], "float16", scope="shared.dyn")
        B_shared = T.alloc_buffer([32, 128], "float16", scope="shared.dyn")
        T.reads(A[0:32, 0:128], B[0:32, 0:128])
        T.writes(C[0:32, 0:128])
        T.attr("default", "async_scope", 1)
        for i in T.serial(16):
            for j in T.vectorized(8):
                A_shared[tx, i * 8 + j] = A[tx, i * 8 + j]
                B_shared[tx, i * 8 + j] = B[tx, i * 8 + j]
        T.evaluate(T.ptx_commit_group(dtype=""))
        T.evaluate(T.ptx_wait_group(0, dtype=""))
        for i in range(128):
            C[tx, i] = A_shared[tx, i] + B_shared[tx, i]
@tvm.testing.requires_cuda
def test_inject_async_copy():
    """InjectPTXAsyncCopy turns each eligible global->shared copy into one cp.async."""
    cases = [("float16", 8), ("float16", 4), ("float32", 4), ("float32", 1)]
    for dtype, vec_size in cases:
        if vec_size == 1:
            func = ptx_global_to_shared_copy_fp32x1
        else:
            func = generate_global_to_shared_vectorized_copy(dtype, vec_size)
        lowered = tvm.IRModule.from_expr(func)
        lowered = tvm.tir.transform.LowerOpaqueBlock()(lowered)
        lowered = tvm.tir.transform.FlattenBuffer()(lowered)
        if vec_size > 1:
            lowered = tvm.tir.transform.VectorizeLoop()(lowered)
        lowered = tvm.tir.transform.InjectPTXAsyncCopy()(lowered)
        # The whole copy loop collapses into a single cp.async call.
        assert count_cp_async(lowered["main"].body) == 1
        # The end-to-end execution check only runs on Ampere-class hardware.
        if not tvm.testing.is_ampere_or_newer():
            continue
        with tvm.transform.PassContext(config={"tir.use_async_copy": 1}):
            built = tvm.build(tvm.IRModule.from_expr(func), target="cuda")
        device = tvm.cuda(0)
        host_a = np.random.rand(32, 128).astype(dtype)
        host_b = np.zeros((32, 128)).astype(dtype)
        dev_a = tvm.nd.array(host_a, device=device)
        dev_b = tvm.nd.array(host_b, device=device)
        built(dev_a, dev_b)
        tvm.testing.assert_allclose(dev_b.numpy(), host_a)
@tvm.testing.requires_cuda
def test_inject_async_copy_shared_dyn():
    """Two dynamic-shared-memory copies each lower to their own cp.async."""
    func = ptx_global_to_shared_dyn_copy_fp16x8
    lowered = tvm.IRModule.from_expr(func)
    for transform in (
        tvm.tir.transform.LowerOpaqueBlock(),
        tvm.tir.transform.FlattenBuffer(),
        tvm.tir.transform.VectorizeLoop(),
        tvm.tir.transform.MergeDynamicSharedMemoryAllocations(),
        tvm.tir.transform.InjectPTXAsyncCopy(),
    ):
        lowered = transform(lowered)
    # One async copy for A_shared, one for B_shared.
    assert count_cp_async(lowered["main"].body) == 2
    # The end-to-end execution check only runs on Ampere-class hardware.
    if not tvm.testing.is_ampere_or_newer():
        return
    with tvm.transform.PassContext(config={"tir.use_async_copy": 1}):
        built = tvm.build(tvm.IRModule.from_expr(func), target="cuda")
    device = tvm.cuda(0)
    host_a = np.random.rand(32, 128).astype("float16")
    host_b = np.random.rand(32, 128).astype("float16")
    host_c = np.zeros((32, 128)).astype("float16")
    dev_a = tvm.nd.array(host_a, device=device)
    dev_b = tvm.nd.array(host_b, device=device)
    dev_c = tvm.nd.array(host_c, device=device)
    built(dev_a, dev_b, dev_c)
    tvm.testing.assert_allclose(dev_c.numpy(), host_a + host_b)
expected_cuda_script = r"""
#if (((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 4)) || \
(__CUDACC_VER_MAJOR__ > 11))
#define TVM_ENABLE_L2_PREFETCH 1
#else
#define TVM_ENABLE_L2_PREFETCH 0
#endif
#ifdef _WIN32
using uint = unsigned int;
using uchar = unsigned char;
using ushort = unsigned short;
using int64_t = long long;
using uint64_t = unsigned long long;
#else
#define uint unsigned int
#define uchar unsigned char
#define ushort unsigned short
#define int64_t long long
#define uint64_t unsigned long long
#endif
extern "C" __global__ void __launch_bounds__(16) main_kernel(float* __restrict__ A, float* __restrict__ B, float* __restrict__ C) {
__shared__ float A_shared[64];
__shared__ float B_shared[64];
A_shared[((int)threadIdx.x)] = 0.000000e+00f;
B_shared[((int)threadIdx.x)] = 0.000000e+00f;
__asm__ __volatile__("cp.async.commit_group;");
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(A_shared + (((int)threadIdx.x) + 16)))
);
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.ca.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.ca.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(A + (((int)threadIdx.x) * 14))), "n"(4)
);
}
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(B_shared + (((int)threadIdx.x) + 16)))
);
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.ca.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.ca.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(B + (((int)threadIdx.x) * 14))), "n"(4)
);
}
__asm__ __volatile__("cp.async.commit_group;");
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(A_shared + (((int)threadIdx.x) + 32)))
);
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.ca.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.ca.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(A + ((((int)threadIdx.x) * 14) + 1))), "n"(4)
);
}
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(B_shared + (((int)threadIdx.x) + 32)))
);
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.ca.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.ca.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(B + ((((int)threadIdx.x) * 14) + 1))), "n"(4)
);
}
__asm__ __volatile__("cp.async.commit_group;");
for (int i = 0; i < 13; ++i) {
bool cse_var_1 = (i < 12);
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }"
: "=r"(addr)
: "l"((void *)(A_shared + ((((i + 3) & 3) * 16) + ((int)threadIdx.x))))
);
int pred_guard = (int)cse_var_1;
__asm__ __volatile__(
"{ .reg .pred p;"
" setp.ne.b32 p, %0, 0;"
#if TVM_ENABLE_L2_PREFETCH
" @p cp.async.ca.shared.global.L2::128B [%1], [%2], %3;"
#else
" @p cp.async.ca.shared.global [%1], [%2], %3;"
#endif
" @!p st.shared.u32 [%1], {%4};}"
:: "r"(pred_guard), "r"(addr), "l"((void*)(A + (((((int)threadIdx.x) * 14) + i) + 2))), "n"(4), "r"(0)
);
}
__asm__ __volatile__("cp.async.commit_group;");
__asm__ __volatile__("cp.async.wait_group 5;");
__syncthreads();
C[((((int)threadIdx.x) * 16) + i)] = (A_shared[(((i & 3) * 16) + ((int)threadIdx.x))] + B_shared[(((i & 3) * 16) + ((int)threadIdx.x))]);
__syncthreads();
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }"
: "=r"(addr)
: "l"((void *)(B_shared + ((((i + 3) & 3) * 16) + ((int)threadIdx.x))))
);
int pred_guard = (int)cse_var_1;
__asm__ __volatile__(
"{ .reg .pred p;"
" setp.ne.b32 p, %0, 0;"
#if TVM_ENABLE_L2_PREFETCH
" @p cp.async.ca.shared.global.L2::128B [%1], [%2], %3;"
#else
" @p cp.async.ca.shared.global [%1], [%2], %3;"
#endif
" @!p st.shared.u32 [%1], {%4};}"
:: "r"(pred_guard), "r"(addr), "l"((void*)(B + (((((int)threadIdx.x) * 14) + i) + 2))), "n"(4), "r"(0)
);
}
__asm__ __volatile__("cp.async.commit_group;");
}
__asm__ __volatile__("cp.async.wait_group 2;");
__syncthreads();
C[((((int)threadIdx.x) * 16) + 13)] = (A_shared[(((int)threadIdx.x) + 16)] + B_shared[(((int)threadIdx.x) + 16)]);
__asm__ __volatile__("cp.async.wait_group 1;");
__syncthreads();
C[((((int)threadIdx.x) * 16) + 14)] = (A_shared[(((int)threadIdx.x) + 32)] + B_shared[(((int)threadIdx.x) + 32)]);
__asm__ __volatile__("cp.async.wait_group 0;");
__syncthreads();
C[((((int)threadIdx.x) * 16) + 15)] = (A_shared[(((int)threadIdx.x) + 48)] + B_shared[(((int)threadIdx.x) + 48)]);
}
"""
@pytest.fixture
def postproc_if_missing_async_support():
    """Capture generated CUDA source, and truncate it when cp.async is unsupported.

    Yields a zero-argument callable returning the source captured before any
    post-processing.  On GPUs with compute capability < 8 the registered
    postproc truncates the kernel body so compilation still succeeds, while
    the captured original source can still be compared against the expected
    output.  The previously registered postproc (if any) is restored on
    teardown.
    """
    arch = tvm.contrib.nvcc.get_target_compute_version()
    major, _ = tvm.contrib.nvcc.parse_compute_version(arch)
    support_async = major >= 8
    func_name = "tvm_callback_cuda_postproc"
    prev_postproc = tvm.get_global_func(func_name, allow_missing=True)
    # Store the generated code prior to the post-processing. This
    # way, even though the generated code doesn't compile on platforms
    # that do not support async, the comparison against an expected
    # output can still be performed. We cannot use
    # `mod.get_source()`, as that contains the source after all
    # post-processing.
    original_code = None
    def get_original_code():
        nonlocal original_code
        return original_code
    @tvm.register_func(func_name, override=True)
    def tvm_callback_cuda_postproc(code, _):
        nonlocal original_code
        original_code = code
        if support_async:
            return code
        else:
            # Keep only the preamble up to (and including) the kernel's
            # opening line, then close the body immediately.
            ret = []
            for line in code.split("\n"):
                ret.append(line)
                ret.append("\n")
                if line.startswith('extern "C" __global__') and line.endswith("{"):
                    break
            ret.append("}")
            return "".join(ret)
    yield get_original_code
    # Restore previous postproc func to avoid impacting other tests
    if prev_postproc is None:
        tvm._ffi.registry.remove_global_func(func_name)
    else:
        tvm.register_func(func_name, prev_postproc, override=True)
@tvm.testing.requires_cuda
def test_cp_async_in_if_then_else(postproc_if_missing_async_support):
    """if_then_else copies in an async software pipeline must lower to
    predicated cp.async; the generated CUDA is compared byte-for-byte against
    expected_cuda_script."""
    @T.prim_func
    def simple_compute(
        A: T.Buffer((16, 14), "float32"),
        B: T.Buffer((16, 14), "float32"),
        C: T.Buffer((16, 16), "float32"),
    ):
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        for tx in T.thread_binding(0, 16, thread="threadIdx.x"):
            # Stage 0 holds the two async copies; stage 3 the compute.
            for i in T.serial(
                16,
                annotations={
                    "software_pipeline_stage": [0, 0, 3],
                    "software_pipeline_order": [0, 2, 1],
                    "software_pipeline_async_stages": [0],
                },
            ):
                with T.block("compute"):
                    T.reads(A[tx, i])
                    T.writes(C[tx, i])
                    A_shared = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                    B_shared = T.alloc_buffer((16, 1), dtype="float32", scope="shared")
                    with T.block():
                        T.reads(A[tx, i])
                        T.writes(A_shared[tx, 0])
                        A_shared[tx, 0] = T.if_then_else(
                            1 <= i and i < 15, A[tx, i - 1], T.float32(0), dtype="float32"
                        )
                    with T.block():
                        T.reads(B[tx, i])
                        T.writes(B_shared[tx, 0])
                        B_shared[tx, 0] = T.if_then_else(
                            1 <= i and i < 15, B[tx, i - 1], T.float32(0), dtype="float32"
                        )
                    with T.block():
                        T.reads(A_shared[tx, 0], B_shared[tx, 0])
                        T.writes(C[tx, i])
                        C[tx, i] = A_shared[tx, 0] + B_shared[tx, 0]
    mod = tvm.IRModule.from_expr(simple_compute)
    with tvm.transform.PassContext(config={"tir.use_async_copy": 1}):
        tvm.build(mod, target="cuda")
    generated_code = postproc_if_missing_async_support()
    assert generated_code == expected_cuda_script
@tvm.testing.requires_cuda
def test_vectorize_cp_async_in_if_then_else(postproc_if_missing_async_support):
    """Vectorized, predicated copies (if_then_else inside a vectorized loop
    feeding an async pipeline stage) must still lower to predicated cp.async;
    the generated CUDA is checked for the setp.ne.b32 predicate setup."""
    # Tensor-core conv2d via im2col: global->shared.dyn async copies, wmma
    # fragment loads, mma, and a write-back of the accumulator.
    @T.prim_func
    def complex_compute(
        A: T.Buffer((2, 16, 16, 1280), "float16"),
        W: T.Buffer((1280, 3, 3, 1280), "float16"),
        Conv: T.Buffer((512, 1280), "float16"),
    ):
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # with T.block("root"):
        data_im2col_reindex_shared_dyn = T.alloc_buffer((512, 11520), "float16", scope="shared.dyn")
        data_im2col_reindex_shared_dyn_wmma_matrix_a = T.alloc_buffer(
            (512, 11520), "float16", scope="wmma.matrix_a"
        )
        weight_flatten_reindex_shared_dyn = T.alloc_buffer(
            (1280, 11520), "float16", scope="shared.dyn"
        )
        weight_flatten_reindex_shared_dyn_wmma_matrix_b = T.alloc_buffer(
            (1280, 11520), "float16", scope="wmma.matrix_b"
        )
        Conv_reindex_wmma_accumulator = T.alloc_buffer(
            (512, 1280), "float16", scope="wmma.accumulator"
        )
        for x_0_0 in T.thread_binding(8, thread="blockIdx.y"):
            for y_0_0 in T.thread_binding(20, thread="blockIdx.x"):
                for x_0_1 in T.thread_binding(2, thread="threadIdx.y"):
                    for y_0_1 in T.thread_binding(2, thread="threadIdx.z"):
                        # Initialize the accumulator fragments.
                        for x_0_2_init, y_0_2_init in T.grid(2, 2):
                            with T.block("Conv_init_o"):
                                v_x_o = T.axis.spatial(32, x_0_0 * 4 + x_0_1 * 2 + x_0_2_init)
                                v_y_o = T.axis.spatial(80, y_0_0 * 4 + y_0_1 * 2 + y_0_2_init)
                                T.reads()
                                T.writes(
                                    Conv_reindex_wmma_accumulator[
                                        v_x_o * 16 : v_x_o * 16 + 16, v_y_o * 16 : v_y_o * 16 + 16
                                    ]
                                )
                                C_s0 = T.int32()
                                C_s1 = T.int32()
                                C = T.match_buffer(
                                    Conv_reindex_wmma_accumulator[
                                        v_x_o * 16 : v_x_o * 16 + 16, v_y_o * 16 : v_y_o * 16 + 16
                                    ],
                                    (16, 16),
                                    "float16",
                                    strides=(C_s0, C_s1),
                                    scope="wmma.accumulator",
                                    offset_factor=16,
                                )
                                T.tvm_fill_fragment(
                                    C.data,
                                    16,
                                    16,
                                    16,
                                    C.elem_offset // C_s0 // 16 * (C_s0 // 16)
                                    + C.elem_offset % C_s0 // 16,
                                    T.float32(0),
                                )
                        # Reduction loop: stage 0 (the two shared-memory
                        # loads) is marked async for cp.async lowering.
                        for k_0_0 in T.serial(
                            180,
                            annotations={
                                "software_pipeline_stage": [0, 0, 1],
                                "software_pipeline_order": [0, 1, 2],
                                "software_pipeline_async_stages": [0],
                            },
                        ):
                            for ax0_ax1_0_fused_0 in range(4):
                                for ax0_ax1_0_fused_1 in T.thread_binding(2, thread="threadIdx.z"):
                                    for ax0_ax1_0_fused_2 in T.thread_binding(
                                        2, thread="threadIdx.y"
                                    ):
                                        for ax0_ax1_0_fused_3 in T.thread_binding(
                                            32, thread="threadIdx.x"
                                        ):
                                            with T.block("data_im2col_reindex_shared.dyn_o"):
                                                v0 = T.axis.spatial(
                                                    512,
                                                    x_0_0 * 64
                                                    + (
                                                        ax0_ax1_0_fused_0 * 128
                                                        + ax0_ax1_0_fused_1 * 64
                                                        + ax0_ax1_0_fused_2 * 32
                                                        + ax0_ax1_0_fused_3
                                                    )
                                                    // 8,
                                                )
                                                v1_o = T.axis.spatial(
                                                    1440,
                                                    k_0_0 * 8
                                                    + (
                                                        ax0_ax1_0_fused_0 * 128
                                                        + ax0_ax1_0_fused_1 * 64
                                                        + ax0_ax1_0_fused_2 * 32
                                                        + ax0_ax1_0_fused_3
                                                    )
                                                    % 8,
                                                )
                                                T.reads(
                                                    A[
                                                        v0 // 256,
                                                        v1_o // 480 + v0 % 256 // 16 - 1,
                                                        v1_o % 480 // 160 + v0 % 16 - 1,
                                                        v1_o % 160 * 8 : v1_o % 160 * 8 + 8,
                                                    ]
                                                )
                                                T.writes(
                                                    data_im2col_reindex_shared_dyn[
                                                        v0, v1_o * 8 : v1_o * 8 + 8
                                                    ]
                                                )
                                                # Vectorized predicated copy:
                                                # the if_then_else handles the
                                                # im2col padding border.
                                                for ax1_1 in T.vectorized(8):
                                                    with T.block("data_im2col_reindex_shared.dyn"):
                                                        v1_i = T.axis.spatial(8, ax1_1)
                                                        T.reads(
                                                            A[
                                                                v0 // 256,
                                                                v1_o // 480 + v0 % 256 // 16 - 1,
                                                                v1_o % 480 // 160 + v0 % 16 - 1,
                                                                v1_o % 160 * 8 + v1_i,
                                                            ]
                                                        )
                                                        T.writes(
                                                            data_im2col_reindex_shared_dyn[
                                                                v0, v1_o * 8 + v1_i
                                                            ]
                                                        )
                                                        T.block_attr(
                                                            {"buffer_dim_align": [[0, 0, 32, 8]]}
                                                        )
                                                        data_im2col_reindex_shared_dyn[
                                                            v0, v1_o * 8 + v1_i
                                                        ] = T.if_then_else(
                                                            1 <= v1_o // 480 + v0 % 256 // 16
                                                            and v1_o // 480 + v0 % 256 // 16 < 17
                                                            and 1 <= v1_o % 480 // 160 + v0 % 16
                                                            and v1_o % 480 // 160 + v0 % 16 < 17,
                                                            A[
                                                                v0 // 256,
                                                                v1_o // 480 + v0 % 256 // 16 - 1,
                                                                v1_o % 480 // 160 + v0 % 16 - 1,
                                                                v1_o % 160 * 8 + v1_i,
                                                            ],
                                                            T.float16(0),
                                                        )
                            for ax0_ax1_0_fused_0 in range(4):
                                for ax0_ax1_0_fused_1 in T.thread_binding(2, thread="threadIdx.z"):
                                    for ax0_ax1_0_fused_2 in T.thread_binding(
                                        2, thread="threadIdx.y"
                                    ):
                                        for ax0_ax1_0_fused_3 in T.thread_binding(
                                            32, thread="threadIdx.x"
                                        ):
                                            for ax1_1 in T.vectorized(8):
                                                with T.block("weight_flatten_reindex_shared.dyn"):
                                                    v0 = T.axis.spatial(
                                                        1280,
                                                        y_0_0 * 64
                                                        + (
                                                            ax0_ax1_0_fused_0 * 128
                                                            + ax0_ax1_0_fused_1 * 64
                                                            + ax0_ax1_0_fused_2 * 32
                                                            + ax0_ax1_0_fused_3
                                                        )
                                                        // 8,
                                                    )
                                                    v1 = T.axis.spatial(
                                                        11520,
                                                        k_0_0 * 64
                                                        + (
                                                            ax0_ax1_0_fused_0 * 128
                                                            + ax0_ax1_0_fused_1 * 64
                                                            + ax0_ax1_0_fused_2 * 32
                                                            + ax0_ax1_0_fused_3
                                                        )
                                                        % 8
                                                        * 8
                                                        + ax1_1,
                                                    )
                                                    T.reads(
                                                        W[
                                                            v0,
                                                            v1 // 3840,
                                                            v1 % 3840 // 1280,
                                                            v1 % 1280,
                                                        ]
                                                    )
                                                    T.writes(
                                                        weight_flatten_reindex_shared_dyn[v0, v1]
                                                    )
                                                    T.block_attr(
                                                        {"buffer_dim_align": [[0, 0, 32, 8]]}
                                                    )
                                                    weight_flatten_reindex_shared_dyn[v0, v1] = W[
                                                        v0,
                                                        v1 // 1280 // 3,
                                                        v1 // 1280 % 3,
                                                        v1 % 1280,
                                                    ]
                            for k_0_1 in range(4):
                                # Load A fragments from dynamic shared memory.
                                for ax0_0, ax1_0 in T.grid(2, 1):
                                    with T.block("data_im2col_reindex_shared.dyn_wmma.matrix_a_o"):
                                        v0_o = T.axis.spatial(32, x_0_0 * 4 + x_0_1 * 2 + ax0_0)
                                        v1_o = T.axis.spatial(720, k_0_0 * 4 + k_0_1 + ax1_0)
                                        T.reads(
                                            data_im2col_reindex_shared_dyn[
                                                v0_o * 16 : v0_o * 16 + 16,
                                                v1_o * 16 : v1_o * 16 + 16,
                                            ]
                                        )
                                        T.writes(
                                            data_im2col_reindex_shared_dyn_wmma_matrix_a[
                                                v0_o * 16 : v0_o * 16 + 16,
                                                v1_o * 16 : v1_o * 16 + 16,
                                            ]
                                        )
                                        A_s0 = T.int32()
                                        A_s1 = T.int32()
                                        A_1 = T.match_buffer(
                                            data_im2col_reindex_shared_dyn[
                                                v0_o * 16 : v0_o * 16 + 16,
                                                v1_o * 16 : v1_o * 16 + 16,
                                            ],
                                            (16, 16),
                                            "float16",
                                            strides=(A_s0, A_s1),
                                            scope="shared.dyn",
                                            offset_factor=16,
                                        )
                                        C_s0 = T.int32()
                                        C_s1 = T.int32()
                                        C = T.match_buffer(
                                            data_im2col_reindex_shared_dyn_wmma_matrix_a[
                                                v0_o * 16 : v0_o * 16 + 16,
                                                v1_o * 16 : v1_o * 16 + 16,
                                            ],
                                            (16, 16),
                                            "float16",
                                            strides=(C_s0, C_s1),
                                            scope="wmma.matrix_a",
                                            offset_factor=16,
                                        )
                                        T.tvm_load_matrix_sync(
                                            C.data,
                                            16,
                                            16,
                                            16,
                                            C.elem_offset // C_s0 // 16 * (C_s0 // 16)
                                            + C.elem_offset % C_s0 // 16,
                                            T.tvm_access_ptr(
                                                T.type_annotation("float16"),
                                                A_1.data,
                                                A_1.elem_offset,
                                                A_s0 * 16,
                                                1,
                                            ),
                                            A_s0,
                                            "row_major",
                                        )
                                # Load B fragments (column-major weight tiles).
                                for ax0_0, ax1_0 in T.grid(2, 1):
                                    with T.block(
                                        "weight_flatten_reindex_shared.dyn_wmma.matrix_b_o"
                                    ):
                                        v0_o = T.axis.spatial(80, y_0_0 * 4 + y_0_1 * 2 + ax0_0)
                                        v1_o = T.axis.spatial(720, k_0_0 * 4 + k_0_1 + ax1_0)
                                        T.reads(
                                            weight_flatten_reindex_shared_dyn[
                                                v0_o * 16 : v0_o * 16 + 16,
                                                v1_o * 16 : v1_o * 16 + 16,
                                            ]
                                        )
                                        T.writes(
                                            weight_flatten_reindex_shared_dyn_wmma_matrix_b[
                                                v0_o * 16 : v0_o * 16 + 16,
                                                v1_o * 16 : v1_o * 16 + 16,
                                            ]
                                        )
                                        A_s0 = T.int32()
                                        A_s1 = T.int32()
                                        A_1 = T.match_buffer(
                                            weight_flatten_reindex_shared_dyn[
                                                v0_o * 16 : v0_o * 16 + 16,
                                                v1_o * 16 : v1_o * 16 + 16,
                                            ],
                                            (16, 16),
                                            "float16",
                                            strides=(A_s0, A_s1),
                                            scope="shared.dyn",
                                            offset_factor=16,
                                        )
                                        C_s0 = T.int32()
                                        C_s1 = T.int32()
                                        C = T.match_buffer(
                                            weight_flatten_reindex_shared_dyn_wmma_matrix_b[
                                                v0_o * 16 : v0_o * 16 + 16,
                                                v1_o * 16 : v1_o * 16 + 16,
                                            ],
                                            (16, 16),
                                            "float16",
                                            strides=(C_s0, C_s1),
                                            scope="wmma.matrix_b",
                                            offset_factor=16,
                                        )
                                        T.tvm_load_matrix_sync(
                                            C.data,
                                            16,
                                            16,
                                            16,
                                            C.elem_offset // C_s0 // 16 * (C_s0 // 16)
                                            + C.elem_offset % C_s0 // 16,
                                            T.tvm_access_ptr(
                                                T.type_annotation("float16"),
                                                A_1.data,
                                                A_1.elem_offset,
                                                A_s0 * 16,
                                                1,
                                            ),
                                            A_s0,
                                            "col_major",
                                        )
                                for x_0_2, y_0_2 in T.grid(2, 2):
                                    with T.block("Conv_update_o"):
                                        v_x_o = T.axis.spatial(32, x_0_0 * 4 + x_0_1 * 2 + x_0_2)
                                        v_y_o = T.axis.spatial(80, y_0_0 * 4 + y_0_1 * 2 + y_0_2)
                                        v_k_o = T.axis.reduce(720, k_0_0 * 4 + k_0_1)
                                        T.reads(
                                            Conv_reindex_wmma_accumulator[
                                                v_x_o * 16 : v_x_o * 16 + 16,
                                                v_y_o * 16 : v_y_o * 16 + 16,
                                            ],
                                            data_im2col_reindex_shared_dyn_wmma_matrix_a[
                                                v_x_o * 16 : v_x_o * 16 + 16,
                                                v_k_o * 16 : v_k_o * 16 + 16,
                                            ],
                                            weight_flatten_reindex_shared_dyn_wmma_matrix_b[
                                                v_y_o * 16 : v_y_o * 16 + 16,
                                                v_k_o * 16 : v_k_o * 16 + 16,
                                            ],
                                        )
                                        T.writes(
                                            Conv_reindex_wmma_accumulator[
                                                v_x_o * 16 : v_x_o * 16 + 16,
                                                v_y_o * 16 : v_y_o * 16 + 16,
                                            ]
                                        )
                                        A_s0 = T.int32()
                                        A_s1 = T.int32()
                                        A_1 = T.match_buffer(
                                            data_im2col_reindex_shared_dyn_wmma_matrix_a[
                                                v_x_o * 16 : v_x_o * 16 + 16,
                                                v_k_o * 16 : v_k_o * 16 + 16,
                                            ],
                                            (16, 16),
                                            "float16",
                                            strides=(A_s0, A_s1),
                                            scope="wmma.matrix_a",
                                            offset_factor=16,
                                        )
                                        B_s0 = T.int32()
                                        B_s1 = T.int32()
                                        B = T.match_buffer(
                                            weight_flatten_reindex_shared_dyn_wmma_matrix_b[
                                                v_y_o * 16 : v_y_o * 16 + 16,
                                                v_k_o * 16 : v_k_o * 16 + 16,
                                            ],
                                            (16, 16),
                                            "float16",
                                            strides=(B_s0, B_s1),
                                            scope="wmma.matrix_b",
                                            offset_factor=16,
                                        )
                                        C_s0 = T.int32()
                                        C_s1 = T.int32()
                                        C = T.match_buffer(
                                            Conv_reindex_wmma_accumulator[
                                                v_x_o * 16 : v_x_o * 16 + 16,
                                                v_y_o * 16 : v_y_o * 16 + 16,
                                            ],
                                            (16, 16),
                                            "float16",
                                            strides=(C_s0, C_s1),
                                            scope="wmma.accumulator",
                                            offset_factor=16,
                                        )
                                        T.tvm_mma_sync(
                                            C.data,
                                            C.elem_offset // C_s0 // 16 * (C_s0 // 16)
                                            + C.elem_offset % C_s0 // 16,
                                            A_1.data,
                                            A_1.elem_offset // A_s0 // 16 * (A_s0 // 16)
                                            + A_1.elem_offset % A_s0 // 16,
                                            B.data,
                                            B.elem_offset // B_s0 // 16 * (B_s0 // 16)
                                            + B.elem_offset % B_s0 // 16,
                                            C.data,
                                            C.elem_offset // C_s0 // 16 * (C_s0 // 16)
                                            + C.elem_offset % C_s0 // 16,
                                        )
                        # Epilogue: store accumulator fragments to Conv.
                        for ax0_0, ax1_0 in T.grid(2, 2):
                            with T.block("Conv_reindex_wmma.accumulator_o"):
                                v0_o = T.axis.spatial(32, x_0_0 * 4 + x_0_1 * 2 + ax0_0)
                                v1_o = T.axis.spatial(80, y_0_0 * 4 + y_0_1 * 2 + ax1_0)
                                T.reads(
                                    Conv_reindex_wmma_accumulator[
                                        v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
                                    ]
                                )
                                T.writes(
                                    Conv[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16]
                                )
                                A_s0 = T.int32()
                                A_s1 = T.int32()
                                A_1 = T.match_buffer(
                                    Conv_reindex_wmma_accumulator[
                                        v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
                                    ],
                                    (16, 16),
                                    "float16",
                                    strides=(A_s0, A_s1),
                                    scope="wmma.accumulator",
                                    offset_factor=16,
                                )
                                C_s0 = T.int32()
                                C_s1 = T.int32()
                                C = T.match_buffer(
                                    Conv[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16],
                                    (16, 16),
                                    "float16",
                                    strides=(C_s0, C_s1),
                                    offset_factor=16,
                                )
                                T.tvm_store_matrix_sync(
                                    A_1.data,
                                    16,
                                    16,
                                    16,
                                    A_1.elem_offset // A_s0 // 16 * (A_s0 // 16)
                                    + A_1.elem_offset % A_s0 // 16,
                                    T.tvm_access_ptr(
                                        T.type_annotation("float16"),
                                        C.data,
                                        C.elem_offset,
                                        C_s0 * 16,
                                        2,
                                    ),
                                    C_s0,
                                    "row_major",
                                )
    mod = tvm.IRModule.from_expr(complex_compute)
    with tvm.transform.PassContext(config={"tir.use_async_copy": 1}):
        tvm.build(mod, target="cuda")
    generated_code = postproc_if_missing_async_support()
    # generated_code must contain " setp.ne.b32 p, %0, 0;"
    assert "setp.ne.b32" in generated_code
# Allow invoking this file directly; tvm.testing.main runs every test in it.
if __name__ == "__main__":
    tvm.testing.main()
| 44,040 | 47.503304 | 141 | py |
tvm | tvm-main/tests/python/unittest/test_micro_transport.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for common micro transports."""
import logging
import sys
import unittest
import pytest
import tvm.testing
# Implementing as a fixture so that the tvm.micro import doesn't occur
# until fixture setup time. This is necessary for pytest's collection
# phase to work when USE_MICRO=OFF, while still explicitly listing the
# tests as skipped.
@tvm.testing.fixture
def transport():
    """Build a scripted Transport test double.

    The next read/write outcome is armed by assigning either `exc` (the call
    raises it) or `to_return` (the call returns it); each armed value is
    consumed exactly once.
    """
    import tvm.micro

    class _ScriptedTransport(tvm.micro.transport.Transport):
        def __init__(self):
            self.exc = None
            self.to_return = None

        def _pop_scripted_result(self):
            # Consume exactly one pre-armed outcome, clearing it so each
            # scripted result is used at most once.
            if self.exc is not None:
                pending, self.exc = self.exc, None
                raise pending
            if self.to_return is not None:
                result, self.to_return = self.to_return, None
                return result
            assert False, "should not get here"

        def open(self):
            pass

        def close(self):
            pass

        def timeouts(self):
            raise NotImplementedError()

        def read(self, n, timeout_sec):
            return self._pop_scripted_result()

        def write(self, data, timeout_sec):
            return self._pop_scripted_result()

    return _ScriptedTransport()
@tvm.testing.fixture
def transport_logger(transport):
    """Wrap the mock transport in a TransportLogger writing to a dedicated logger."""
    test_logger = logging.getLogger("transport_logger_test")
    return tvm.micro.transport.TransportLogger("foo", transport, logger=test_logger)
@tvm.testing.fixture
def get_latest_log(caplog):
    """Yield a callable that returns the most recently captured log message."""

    def _latest():
        return caplog.records[-1].getMessage()

    with caplog.at_level(logging.INFO, "transport_logger_test"):
        yield _latest
@tvm.testing.requires_micro
def test_open(transport_logger, get_latest_log):
    """Opening the transport emits a single 'opening' log line."""
    transport_logger.open()
    expected = "foo: opening transport"
    assert get_latest_log() == expected
@tvm.testing.requires_micro
def test_close(transport_logger, get_latest_log):
    """Closing the transport emits a single 'closing' log line."""
    transport_logger.close()
    expected = "foo: closing transport"
    assert get_latest_log() == expected
@tvm.testing.requires_micro
def test_read_normal(transport, transport_logger, get_latest_log):
    """A successful short read logs timeout, requested size, and a one-line hex dump."""
    transport.to_return = b"data"
    transport_logger.read(23, 3.0)
    assert get_latest_log() == (
        "foo: read { 3.00s} 23 B -> [ 4 B]: 64 61 74 61"
        " data"
    )
@tvm.testing.requires_micro
def test_read_multiline(transport, transport_logger, get_latest_log):
    """A read longer than 16 bytes logs a multi-row hex dump with offsets."""
    transport.to_return = b"data" * 6
    transport_logger.read(23, 3.0)
    assert get_latest_log() == (
        "foo: read { 3.00s} 23 B -> [ 24 B]:\n"
        "0000 64 61 74 61 64 61 74 61 64 61 74 61 64 61 74 61 datadatadatadata\n"
        "0010 64 61 74 61 64 61 74 61 datadata"
    )
@tvm.testing.requires_micro
def test_read_no_timeout_prints(transport, transport_logger, get_latest_log):
    """A read with timeout_sec=None logs '{ None }' in place of a timeout value."""
    transport.to_return = b"data"
    transport_logger.read(15, None)
    assert get_latest_log() == (
        "foo: read { None } 15 B -> [ 4 B]: 64 61 74 61"
        " data"
    )
@tvm.testing.requires_micro
def test_read_io_timeout(transport, transport_logger, get_latest_log):
    """An IoTimeoutError is re-raised and logged together with the timeout value."""
    # IoTimeoutError includes the timeout value.
    transport.exc = tvm.micro.transport.IoTimeoutError()
    with pytest.raises(tvm.micro.transport.IoTimeoutError):
        transport_logger.read(23, 0.0)
    assert get_latest_log() == ("foo: read { 0.00s} 23 B -> [IoTimeoutError 0.00s]")
@tvm.testing.requires_micro
def test_read_other_exception(transport, transport_logger, get_latest_log):
    """Non-timeout transport errors are re-raised and logged by exception class name."""
    # Other exceptions are logged by name.
    transport.exc = tvm.micro.transport.TransportClosedError()
    with pytest.raises(tvm.micro.transport.TransportClosedError):
        transport_logger.read(8, 0.0)
    assert get_latest_log() == ("foo: read { 0.00s} 8 B -> [err: TransportClosedError]")
@tvm.testing.requires_micro
def test_read_keyboard_interrupt(transport, transport_logger, get_latest_log):
    """A KeyboardInterrupt during read propagates and leaves no log record behind."""
    transport.exc = KeyboardInterrupt()
    with pytest.raises(KeyboardInterrupt):
        transport_logger.read(8, 0.0)
    # Nothing was logged, so fetching the "latest" record must fail.
    with pytest.raises(IndexError):
        get_latest_log()
@tvm.testing.requires_micro
def test_write_normal(transport, transport_logger, get_latest_log):
    """A successful short write logs timeout, byte count, and a one-line hex dump."""
    transport.to_return = 3
    transport_logger.write(b"data", 3.0)
    assert get_latest_log() == (
        "foo: write { 3.00s} <- [ 4 B]: 64 61 74 61"
        " data"
    )
@tvm.testing.requires_micro
def test_write_multiline(transport, transport_logger, get_latest_log):
    """A write longer than 16 bytes logs a multi-row hex dump with offsets."""
    # Normal log, multi-line data written.
    transport.to_return = 20
    transport_logger.write(b"data" * 6, 3.0)
    assert get_latest_log() == (
        "foo: write { 3.00s} <- [ 24 B]:\n"
        "0000 64 61 74 61 64 61 74 61 64 61 74 61 64 61 74 61 datadatadatadata\n"
        "0010 64 61 74 61 64 61 74 61 datadata"
    )
@tvm.testing.requires_micro
def test_write_no_timeout_prints(transport, transport_logger, get_latest_log):
    """A write with timeout_sec=None logs '{ None }' in place of a timeout value."""
    transport.to_return = 3
    transport_logger.write(b"data", None)
    assert get_latest_log() == (
        "foo: write { None } <- [ 4 B]: 64 61 74 61"
        " data"
    )
@tvm.testing.requires_micro
def test_write_io_timeout(transport, transport_logger, get_latest_log):
    """An IoTimeoutError during write is re-raised and logged with the timeout value."""
    # IoTimeoutError includes the timeout value.
    transport.exc = tvm.micro.transport.IoTimeoutError()
    with pytest.raises(tvm.micro.transport.IoTimeoutError):
        transport_logger.write(b"data", 0.0)
    assert get_latest_log() == ("foo: write { 0.00s} <- [ 4 B]: [IoTimeoutError 0.00s]")
@tvm.testing.requires_micro
def test_write_other_exception(transport, transport_logger, get_latest_log):
    """Non-timeout transport errors during write are re-raised and logged by name."""
    # Other exceptions are logged by name.
    transport.exc = tvm.micro.transport.TransportClosedError()
    with pytest.raises(tvm.micro.transport.TransportClosedError):
        transport_logger.write(b"data", 0.0)
    assert get_latest_log() == ("foo: write { 0.00s} <- [ 4 B]: [err: TransportClosedError]")
@tvm.testing.requires_micro
def test_write_keyboard_interrupt(transport, transport_logger, get_latest_log):
    """A KeyboardInterrupt during write propagates and leaves no log record behind."""
    transport.exc = KeyboardInterrupt()
    with pytest.raises(KeyboardInterrupt):
        transport_logger.write(b"data", 0.0)
    # Nothing was logged, so fetching the "latest" record must fail.
    with pytest.raises(IndexError):
        get_latest_log()
# Run all tests in this file when executed directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 7,400 | 31.747788 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_arith_detect_linear_equation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
def test_basic():
    """detect_linear_equation over one variable returns [coefficient, residual]."""
    a = te.var("a")
    b = te.var("b")

    # Linear in `a`: coefficient 4, remainder b*6 + 7.
    coeffs = tvm.arith.detect_linear_equation(a * 4 + b * 6 + 7, [a])
    assert coeffs[0].value == 4
    tvm.testing.assert_prim_expr_equal(coeffs[1], b * 6 + 7)

    # Quadratic in `a`: detection fails and yields an empty result.
    coeffs = tvm.arith.detect_linear_equation(a * 4 * (a + 1) + b * 6 + 7, [a])
    assert len(coeffs) == 0

    # Linear terms accumulate: 4*a + a gives coefficient 5.
    coeffs = tvm.arith.detect_linear_equation(a * 4 + (a + 1) + b * 6 + 7, [a])
    assert coeffs[0].value == 5
    tvm.testing.assert_prim_expr_equal(coeffs[1], b * 6 + 7 + 1)

    # A symbolic coefficient is returned as-is.
    coeffs = tvm.arith.detect_linear_equation(a * b + 7, [a])
    assert coeffs[0] == b

    # Variable absent from the expression: zero coefficient.
    coeffs = tvm.arith.detect_linear_equation(b * 7, [a])
    assert coeffs[0].value == 0

    # No variables given: the whole expression is the constant term.
    coeffs = tvm.arith.detect_linear_equation(b * 7, [])
    assert len(coeffs) == 1
    tvm.testing.assert_prim_expr_equal(coeffs[0], b * 7)
def test_multivariate():
    """detect_linear_equation over several variables at once."""
    vs = [te.var("v%d" % i) for i in range(4)]
    b = te.var("b")

    # Coefficients gather per variable: (b + 4) + 1 for v0, 8 for v1.
    res = tvm.arith.detect_linear_equation(vs[0] * (b + 4) + vs[0] + vs[1] * 8, vs)
    tvm.testing.assert_prim_expr_equal(res[0], b + 5)
    assert res[1].value == 8

    # Products of two target variables are not linear: empty result.
    res = tvm.arith.detect_linear_equation(vs[0] * (b + 4) + vs[0] + vs[1] * 8 * vs[2], vs)
    assert len(res) == 0
    res = tvm.arith.detect_linear_equation(vs[0] * (b + 4) + vs[0] + vs[1] * 8 * vs[1] + vs[3], vs)
    assert len(res) == 0

    # Distribution through the outer multiply-by-2.
    res = tvm.arith.detect_linear_equation(((vs[0] * b + vs[1]) * 8 + vs[2] + 1) * 2, vs)
    assert res[1].value == 16
    assert res[2].value == 2
    assert res[len(res) - 1].value == 2

    # Expression free of the target variable: coefficient 0, rest is constant.
    res = tvm.arith.detect_linear_equation((vs[0] - vs[1]), [vs[2]])
    assert res[0].value == 0
    tvm.testing.assert_prim_expr_equal(res[1], vs[0] - vs[1])

    # No variables given: single constant term.
    res = tvm.arith.detect_linear_equation((vs[0] - vs[1]), [])
    assert len(res) == 1
    tvm.testing.assert_prim_expr_equal(res[0], vs[0] - vs[1])
# Run both tests when executed directly as a script.
if __name__ == "__main__":
    test_basic()
    test_multivariate()
| 2,608 | 31.6125 | 91 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_analysis.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from typing import List
import pytest
import tvm
import tvm.testing
from tvm.meta_schedule.testing import te_workload
from tvm.script import tir as T
from tvm.te import create_prim_func
from tvm.tir import (
Evaluate,
For,
ForKind,
IndexMap,
Schedule,
Var,
decl_buffer,
floordiv,
floormod,
)
from tvm.tir.analysis import expr_deep_equal
from tvm.tir.function import TensorIntrin
from tvm.tir.schedule.analysis import (
TensorizeInfo,
get_auto_tensorize_mapping_info,
get_tensorize_loop_mapping,
is_output_block,
suggest_index_map,
)
from tvm.tir.stmt_functor import pre_order_visit
from tvm.tir.tensor_intrin.cuda import (
WMMA_SYNC_16x16x16_f16f16f16_INTRIN,
WMMA_SYNC_16x16x16_f16f16f32_INTRIN,
)
from tvm.tir.tensor_intrin.x86 import dot_product_16x4_u8i8i32_desc
def _make_vars(*args: str) -> List[Var]:
    """Create one int32 TIR variable per given name."""
    return [Var(name, dtype="int32") for name in args]
def _make_loops(loop_vars: List[Var], extents: List[int]) -> List[For]:
    """Build trivial serial For nodes (min 0, empty body), one per var/extent pair."""
    assert len(loop_vars) == len(extents)
    loops = []
    for loop_var, extent in zip(loop_vars, extents):
        loops.append(
            For(
                loop_var=loop_var,
                min_val=0,
                extent=extent,
                kind=ForKind.SERIAL,
                body=Evaluate(0),
            )
        )
    return loops
def test_suggest_index_map_simple():
    """A two-level tiled access pattern suggests a 4-D tiling index map."""
    i, j = _make_vars("i", "j")
    index_map = suggest_index_map(
        buffer=decl_buffer(shape=[8, 256]),
        indices=[
            floordiv(i, 16) * 4 + floordiv(j, 16),
            floormod(i, 16) * 16 + floormod(j, 16),
        ],
        loops=_make_loops(
            loop_vars=[i, j],
            extents=[32, 64],
        ),
        predicate=True,
    )
    expected_index_map = IndexMap.from_func(
        lambda x, y: [
            floordiv(x, 4),
            floordiv(y, 16),
            floormod(x, 4),
            floormod(y, 16),
        ],
    )
    assert index_map.is_equivalent_to(expected_index_map)
def test_suggest_index_map_bijective():
    """Suggest an index map for a 1-D buffer accessed with an interleaved pattern."""
    i, j = _make_vars("i", "j")
    index_map = suggest_index_map(
        buffer=decl_buffer(shape=[8]),
        indices=[floormod(j, 4) * 2 + i],
        loops=_make_loops(
            loop_vars=[i, j],
            extents=[2, 32],
        ),
        predicate=True,
    )
    # Expected: split the flat index into (inner, outer) by mod/div 2.
    expected_index_map = IndexMap.from_func(
        lambda x: [
            floormod(x, 2),
            floordiv(x, 2),
        ],
    )
    assert index_map.is_equivalent_to(expected_index_map)
def test_suggest_index_map_winograd():
    """use case in winograd conv where the indices are complicated"""
    fused_outer, i3_3_fused, i4_0, i4_1 = _make_vars("fused_outer", "i3_3_fused", "i4_0", "i4_1")
    # Reconstruct the (eps, nu, co, ci) access indices from the fused loop vars.
    eps = floordiv(fused_outer, 336) * 2 + floordiv(floormod(fused_outer, 16), 8)
    nu = floordiv(floormod(fused_outer, 336), 112) * 2 + floordiv(floormod(fused_outer, 8), 4)
    co = floormod(fused_outer, 4) * 32 + i3_3_fused
    ci = (i4_0 * 32) + i4_1
    buffer = decl_buffer(shape=[6, 6, 128, 128])
    index_map = suggest_index_map(
        buffer=buffer,
        indices=[eps, nu, co, ci],
        loops=_make_loops(
            loop_vars=[fused_outer, i3_3_fused, i4_0, i4_1],
            extents=[1008, 32, 4, 32],
        ),
        predicate=True,
    )
    expected_index_map = IndexMap.from_func(
        lambda i0, i1, i2, i3: (
            floordiv(i0, 2),
            floordiv(i1, 2),
            floormod(i0, 2),
            floormod(i1, 2) * 4 + floordiv(i2, 32),
            floormod(i2, 32),
            floordiv(i3, 32),
            floormod(i3, 32),
        )
    )
    assert index_map.is_equivalent_to(expected_index_map)
    # The suggested map must also be invertible over the buffer's shape.
    inverse_index_map = index_map.inverse(buffer.shape)
    expected_inverse_index_map = IndexMap.from_func(
        lambda i0, i1, i2, i3, i4, i5, i6: (
            ((i0 * 2) + i2),
            i1 * 2 + floordiv(i3, 4),
            floormod(i3, 4) * 32 + i4,
            ((i5 * 32) + i6),
        )
    )
    assert inverse_index_map.is_equivalent_to(expected_inverse_index_map)
@tvm.script.ir_module
class DenseTIRModule:
    # 1024x1024 dense over uint8 activations and pre-packed int8 weights;
    # placeholder_1 is laid out as (j // 16, k // 4, j % 16, k % 4) so the
    # innermost 16x4 tile lines up with the 16x4 dot-product intrinsic.
    @T.prim_func
    def main(
        placeholder: T.Buffer((1024, 1024), "uint8"),
        placeholder_1: T.Buffer((64, 256, 16, 4), "int8"),
        compute: T.Buffer((1024, 1024), "int32"),
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            for i0, i1, i2 in T.grid(1024, 1024, 1024):
                with T.block("compute"):
                    i, j, k = T.axis.remap("SSR", [i0, i1, i2])
                    T.reads(placeholder[i, k], placeholder_1[j // 16, k // 4, j % 16, k % 4])
                    T.writes(compute[i, j])
                    with T.init():
                        compute[i, j] = 0
                    compute[i, j] = compute[i, j] + T.cast(placeholder[i, k], "int32") * T.cast(
                        placeholder_1[j // 16, k // 4, j % 16, k % 4], "int32"
                    )
@tvm.script.ir_module
class Conv2dNCHWcTIRModule:
    # NCHWc int8 conv2d whose innermost (oc_block, ic_s_inner) = (16, 4) tile
    # matches the 16x4 dot-product intrinsic shape.
    @T.prim_func
    def main(
        placeholder: T.Buffer((1, 4, 56, 56, 16), "uint8"),
        placeholder_1: T.Buffer((16, 4, 1, 1, 4, 16, 4), "int8"),
        conv2d_NCHWc_int8: T.Buffer((1, 16, 56, 56, 16), "int32"),
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        for i0, i1, i2, i3, i4, i5, i6, i7, i8, i9 in T.grid(1, 16, 56, 56, 16, 1, 1, 4, 4, 4):
            with T.block("conv2d_NCHWc_int8"):
                (
                    n,
                    oc_chunk,
                    oh,
                    ow,
                    oc_block,
                    kh,
                    kw,
                    ic_outer,
                    ic_f_inner,
                    ic_s_inner,
                ) = T.axis.remap("SSSSSRRRRR", [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9])
                T.reads(
                    placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
                    placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
                )
                T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
                with T.init():
                    conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = 0
                conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[
                    n, oc_chunk, oh, ow, oc_block
                ] + T.cast(
                    placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
                    "int32",
                ) * T.cast(
                    placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
                    "int32",
                )
def collect_loops(prim_func):
    """Gather every tir.For node in `prim_func`'s body, in pre-order."""
    found = []

    def _visit(node):
        if isinstance(node, tvm.tir.For):
            found.append(node)
        return True

    pre_order_visit(prim_func.body, _visit)
    return found
def test_get_tensorize_loop_mapping_dense_16x4():
    """The dense compute maps its j/k loops onto the 16x4 dot-product intrinsic."""
    sch = Schedule(DenseTIRModule)
    block = sch.get_block("compute")
    info = get_tensorize_loop_mapping(sch, block, dot_product_16x4_u8i8i32_desc)
    assert isinstance(info, TensorizeInfo)
    # Invert loop_map so we can look up the schedule loop for each desc loop.
    sref_of_desc_loop = {v: k for k, v in info.loop_map.items()}
    desc_loops = collect_loops(dot_product_16x4_u8i8i32_desc)
    _, loop_j, loop_k = sch.get_loops(block)
    assert desc_loops[0] in sref_of_desc_loop and desc_loops[1] in sref_of_desc_loop
    assert sch.get(sref_of_desc_loop[desc_loops[0]]) == sch.get(loop_j)
    assert sch.get(sref_of_desc_loop[desc_loops[1]]) == sch.get(loop_k)
def test_get_tensorize_loop_mapping_conv2d_nchwc_16x4():
    """The NCHWc conv maps its inner-oc and inner-ic loops to the 16x4 intrinsic."""
    s = Schedule(Conv2dNCHWcTIRModule)
    block = s.get_block("conv2d_NCHWc_int8")
    info = get_tensorize_loop_mapping(s, block, dot_product_16x4_u8i8i32_desc)
    desc_loop_to_sref = dict((v, k) for k, v in info.loop_map.items())
    desc_loops = collect_loops(dot_product_16x4_u8i8i32_desc)
    # i4 corresponds to the inner output channel axis of the NCHWc output tensor
    # for i0, i1, i2, i3, i4, i5, i6, i7, i8, i9 in T.grid(1, 16, 56, 56, 16, 1, 1, 4, 4, 4):
    _, _, _, _, i4, _, _, _, _, i9 = s.get_loops(block)
    assert desc_loops[0] in desc_loop_to_sref and desc_loops[1] in desc_loop_to_sref
    assert s.get(desc_loop_to_sref[desc_loops[0]]) == s.get(i4)
    assert s.get(desc_loop_to_sref[desc_loops[1]]) == s.get(i9)
def test_get_tensorize_loop_mapping_matmul_mma():
    """Matmul loop mapping onto a 16x16x16 MMA pattern is loop-permutation invariant."""
    @T.prim_func
    def matmul_16x16x16xf16f16f16_desc(
        A: T.Buffer((16, 16), "float16", align=64, offset_factor=1),
        B: T.Buffer((16, 16), "float16", align=64, offset_factor=1),
        C: T.Buffer((16, 16), "float16", align=64, offset_factor=1),
    ) -> None:
        with T.block("root"):
            T.reads(C[0:16, 0:16], A[0:16, 0:16], B[0:16, 0:16])
            T.writes(C[0:16, 0:16])
            for i, j, k in T.grid(16, 16, 16):
                with T.block("update"):
                    vii, vjj, vkk = T.axis.remap("SSR", [i, j, k])
                    C[vii, vjj] = C[vii, vjj] + A[vii, vkk] * B[vjj, vkk]
    matmul = create_prim_func(
        te_workload.matmul_relu(
            n=512,
            m=512,
            k=512,
        )
    )
    s = Schedule(matmul)
    block = s.get_block("C")
    i0, i1, i2 = s.get_loops(block)
    desc_loops = collect_loops(matmul_16x16x16xf16f16f16_desc)
    for do_reorder in [False, True]:
        # Mapping should be invariant to the loop permutation
        if do_reorder:
            s.reorder(i2, i0, i1)
        info = get_tensorize_loop_mapping(s, block, matmul_16x16x16xf16f16f16_desc)
        assert info is not None
        desc_loop_to_sref = dict((v, k) for k, v in info.loop_map.items())
        for i in range(3):
            assert desc_loops[i] in desc_loop_to_sref
        assert s.get(desc_loop_to_sref[desc_loops[0]]) == s.get(i0)
        assert s.get(desc_loop_to_sref[desc_loops[1]]) == s.get(i1)
        assert s.get(desc_loop_to_sref[desc_loops[2]]) == s.get(i2)
def test_get_tensorize_loop_mapping_padding_matmul():
    """With allow_padding, an odd-shaped fp16 matmul maps with paddings 16/1/16."""
    workload = create_prim_func(
        te_workload.matmul_relu(
            n=127,
            m=256,
            k=65,
            in_dtype="float16",
            out_dtype="float16",
        )
    )
    sch = Schedule(workload)
    block = sch.get_block("C")
    desc = TensorIntrin.get(WMMA_SYNC_16x16x16_f16f16f16_INTRIN).desc
    info = get_tensorize_loop_mapping(sch, block, desc, allow_padding=True)
    assert info is not None
    paddings = info.block_iter_paddings
    assert paddings is not None
    wanted = [16, 1, 16]
    assert len(paddings) == len(wanted)
    for got, want in zip(paddings, wanted):
        assert got == want
def check_index_map(workload, block_name, intrin_name, expected_index_map):
    """Assert the auto-tensorize mapping for `block_name` equals `expected_index_map`.

    Pass None as `expected_index_map` to assert that no mapping exists.
    """
    sch = Schedule(workload)
    blk = sch.get_block(block_name)
    desc = TensorIntrin.get(intrin_name).desc
    info = get_auto_tensorize_mapping_info(sch, blk, desc)
    if expected_index_map is None:
        assert info is None
        return
    assert len(info.mappings) == 1
    assert IndexMap.from_func(expected_index_map).is_equivalent_to(info.mappings[0])
def test_get_auto_tensorize_mapping_info_conv2d():
    """conv2d_nhwc mapping flattens (n,h,w) and (rh,rw,rc) for the WMMA intrinsic."""
    conv2d = create_prim_func(
        te_workload.conv2d_nhwc(4, 16, 16, 64, 64, 3, 1, 1, in_dtype="float16", out_dtype="float32")
    )
    check_index_map(
        conv2d,
        "conv2d_nhwc",
        WMMA_SYNC_16x16x16_f16f16f32_INTRIN,
        lambda n, h, w, c, rh, rw, rc: (n * 256 + h * 16 + w, c, rh * 192 + rw * 64 + rc),
    )
def test_get_auto_tensorize_mapping_info_conv2d_unit_batch():
    """Same mapping as the batched conv2d case, but with batch size 1."""
    conv2d = create_prim_func(
        te_workload.conv2d_nhwc(1, 16, 16, 64, 64, 3, 1, 1, in_dtype="float16", out_dtype="float32")
    )
    check_index_map(
        conv2d,
        "conv2d_nhwc",
        WMMA_SYNC_16x16x16_f16f16f32_INTRIN,
        lambda n, h, w, c, rh, rw, rc: (n * 256 + h * 16 + w, c, rh * 192 + rw * 64 + rc),
    )
@pytest.mark.parametrize("b,m,n,k", [(1, 512, 512, 512), (16, 32, 32, 32)])
def test_get_auto_tensorize_mapping_info_batch_matmul(b, m, n, k):
    """Batch matmul maps identically, (b, m, n, k), onto the WMMA intrinsic."""
    matmul = create_prim_func(
        te_workload.batch_matmul_nkkm(b, m, n, k, in_dtype="float16", out_dtype="float32")
    )
    check_index_map(
        matmul, "Z", WMMA_SYNC_16x16x16_f16f16f32_INTRIN, lambda b, m, n, k: (b, m, n, k)
    )
@pytest.mark.parametrize(
    "n,m,k,expected",
    [
        (
            512,
            512,
            512,
            lambda n, m, k: (
                n,
                m,
                k,
            ),
        ),
        (1, 32, 32, lambda n, m, k: (n, m, k)),
    ],
)
def test_get_auto_tensorize_mapping_info_matmul(n, m, k, expected):
    """Plain fp16 matmuls map identity (n, m, k) onto the WMMA f16f16f32 intrinsic."""
    matmul = create_prim_func(te_workload.matmul(n, m, k, in_dtype="float16", out_dtype="float32"))
    check_index_map(matmul, "C", WMMA_SYNC_16x16x16_f16f16f32_INTRIN, expected)
def test_is_output_block():
    """is_output_block is True for the block writing the function's output buffer."""
    @T.prim_func
    def two_elementwise(a: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (128, 128), "float32")
        B = T.alloc_buffer((128, 128), "float32")
        C = T.match_buffer(c, (128, 128), "float32")
        for i, j in T.grid(128, 128):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = A[vi, vj] * 2.0
        for i, j in T.grid(128, 128):
            with T.block("C"):
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = B[vi, vj] + 1.0
    sch = tvm.tir.Schedule(two_elementwise)
    # "C" writes the output buffer C; "B" only writes the intermediate.
    block_rv = sch.get_block("C")
    assert is_output_block(sch, block_rv)
def test_empty_grid():
    """Building a Schedule over a PrimFunc with a zero-extent grid must not crash."""
    @T.prim_func
    def foo(out: T.Buffer((T.int64(1), T.int64(8), T.int64(8)), "int32")):
        act = T.alloc_buffer((1, 8, 8), "int32")
        for z2, y2, x2 in T.grid(1, 8, 8):
            with T.block("b0"):
                az, ay, ax = T.axis.remap("SSS", [z2, y2, x2])
                T.writes(act[az, ay, ax])
                # NOTE(review): the store index repeats `az` while T.writes
                # declares [az, ay, ax] -- looks like a typo for `ax`, though
                # harmless here since the test only constructs the Schedule.
                act[az, ay, az] = T.int32(0)
        # Empty grid:
        for z1, y1, x1 in T.grid(0, 8, 8):
            with T.block("b1"):
                az, ay, ax = T.axis.remap("SSS", [z1, y1, x1])
                T.reads(act[az + 1, ay, ax])
                T.writes(out[az, ay, ax])
                out[az, ay, ax] = act[az + 1, ay, ax]
        # The block below is not needed to show the bug, but the 'out'
        # buffer would be undefined without it.
        for z2, y2, x2 in T.grid(1, 8, 8):
            with T.block("b2"):
                az, ay, ax = T.axis.remap("SSS", [z2, y2, x2])
                T.writes(out[az, ay, ax])
                # NOTE(review): same repeated-`az` index pattern as block "b0".
                out[az, ay, az] = T.int32(0)
    # This caused a crash before.
    sch = tvm.tir.Schedule(foo)
# Run all tests in this file when executed directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 15,647 | 33.091503 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_te_schedule_tensorize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def intrin_vadd(xo, m, n):
    """Declare an n-lane vector-add tensor intrinsic.

    When n does not evenly divide m, tail lanes (xo * n + i >= m) are defined
    as 0 via a Select so the pattern still matches the split loop body.
    """
    x = te.placeholder((n,), name="vx")
    y = te.placeholder((n,), name="vy")
    if m % n == 0:
        body = lambda i: x[i] + y[i]
    else:
        body = lambda i: tvm.tir.Select(
            xo * n + i < m, x[i] + y[i], tvm.tir.const(0, dtype=x.dtype)
        )
    z = te.compute(x.shape, body, name="z")
    def intrin_func(ins, outs):
        # Lowering: a single packed call replacing the whole pattern.
        xx, yy = ins
        zz = outs[0]
        # special handle needed to tackle tail loop part when m % n != 0
        # here is tvm.min(n, m - xo * n)
        return tvm.tir.call_packed("vadd", xx, yy, zz)
    buffer_params = {"offset_factor": 16}
    return te.decl_tensor_intrin(z.op, intrin_func, default_buffer_params=buffer_params)
def intrin_gemv(m, n):
    """Declare an m x n GEMV intrinsic returning (body, reset, update) lowerings."""
    w = te.placeholder((m, n), name="w")
    x = te.placeholder((n,), name="x")
    k = te.reduce_axis((0, n), name="k")
    z = te.compute((m,), lambda i: te.sum(w[i, k] * x[k], axis=k), name="z")
    # Bind w to a buffer with a symbolic leading stride "ldw".
    Wb = tvm.tir.decl_buffer(
        w.shape, w.dtype, name="W", offset_factor=16, strides=[te.var("ldw"), 1]
    )
    def intrin_func(ins, outs):
        ww, xx = ins
        zz = outs[0]
        ww_ptr = ww.access_ptr("r")
        xx_ptr = xx.access_ptr("r")
        zz_ptr = zz.access_ptr("w")
        body = tvm.tir.call_packed("gemv", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
        reset = tvm.tir.call_packed("fill_zero", zz_ptr, n)
        update = tvm.tir.call_packed("gemv_add", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
        return body, reset, update
    buffer_params = {"offset_factor": 16, "data_alignment": 16}
    return te.decl_tensor_intrin(
        z.op, intrin_func, binds={w: Wb}, default_buffer_params=buffer_params
    )
def intrin_gemv_no_reset(m, n):
    """Declare an m x n GEMV intrinsic whose reset lowering is None.

    Used to exercise tensorize on intrinsics that only provide body/update.
    """
    w = te.placeholder((m, n), name="w")
    x = te.placeholder((n,), name="x")
    k = te.reduce_axis((0, n), name="k")
    z = te.compute((m,), lambda i: te.sum(w[i, k] * x[k], axis=k), name="z")
    Wb = tvm.tir.decl_buffer(
        w.shape, w.dtype, name="W", offset_factor=16, strides=[te.var("ldw"), 1]
    )
    def intrin_func(ins, outs):
        ww, xx = ins
        zz = outs[0]
        ww_ptr = ww.access_ptr("r")
        xx_ptr = xx.access_ptr("r")
        zz_ptr = zz.access_ptr("w")
        body = tvm.tir.call_packed("gemv", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
        update = tvm.tir.call_packed("gemv_add", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0])
        # No reset lowering on purpose.
        return body, None, update
    buffer_params = {"offset_factor": 16, "data_alignment": 16}
    return te.decl_tensor_intrin(
        z.op, intrin_func, binds={w: Wb}, default_buffer_params=buffer_params
    )
def test_tensorize_vadd():
    """Tensorize an elementwise add and validate inferred regions and matched body."""
    def add(m):
        x = te.placeholder((m,), name="x")
        y = te.placeholder((m,), name="y")
        z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
        return x, y, z
    def check(m, factor):
        # Split by `factor` and tensorize the inner loop; the inferred output
        # region must start at xo * factor with extent `factor`.
        x, y, z = add(m)
        s = te.create_schedule(z.op)
        xo, xi = s[z].split(z.op.axis[0], factor=factor)
        vadd = intrin_vadd(xo, m, factor)
        s[z].tensorize(xi, vadd)
        s = s.normalize()
        dom_map = tvm.te.schedule.InferBound(s)
        finfer = tvm.get_global_func("test.op.InferTensorizeRegion")
        out_dom, in_dom = finfer(s[z], dom_map)
        assert tvm.ir.structural_equal(out_dom[z.op.axis[0]].extent, factor)
        assert tvm.ir.structural_equal(out_dom[z.op.axis[0]].min, xo * factor)
        assert tvm.ir.structural_equal(in_dom.items()[0][1][0].extent, factor)
        fmatch = tvm.get_global_func("test.op.MatchTensorizeBody")
        body = fmatch(s[z], out_dom, in_dom, vadd)
        ana = tvm.arith.Analyzer()
        assert tvm.ir.structural_equal(ana.simplify(body[0]), ana.simplify(vadd.op.body[0]))
        stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
        tvm.lower(s, [x, y, z])
    def check_cache_write(m, factor):
        # Tensorize on the cache-write stage; the rebased outer loop var becomes
        # the region's min with extent 1.
        x, y, z = add(m)
        s = te.create_schedule(z.op)
        _, _ = s[z].split(z.op.axis[0], factor=factor)
        z_global = s.cache_write(z, "global")
        xo, xi = z_global.op.axis
        vadd = intrin_vadd(xo, m, factor)
        s[z_global].tensorize(xi, vadd)
        s = s.normalize()
        dom_map = tvm.te.schedule.InferBound(s)
        finfer = tvm.get_global_func("test.op.InferTensorizeRegion")
        out_dom, in_dom = finfer(s[z_global], dom_map)
        # outer loop var will be rebased, so min value is the new loop var and extent is 1
        assert tvm.ir.structural_equal(out_dom[xo].extent, 1)
        assert isinstance(out_dom[xo].min, tvm.tir.Var)
        assert xo.var.name == out_dom[xo].min.name
        fmatch = tvm.get_global_func("test.op.MatchTensorizeBody")
        body = fmatch(s[z_global], out_dom, in_dom, vadd)[0]
        ana = tvm.arith.Analyzer()
        # Substitute the rebased loop var before comparing bodies.
        vars = tvm.runtime.convert({xo.var: out_dom[xo].min})
        vadd_body = tvm.tir.stmt_functor.substitute(vadd.op.body[0], vars)
        assert tvm.ir.structural_equal(ana.simplify(body), ana.simplify(vadd_body))
        stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
        tvm.lower(s, [x, y, z])
    def check_compute_reuse():
        # Declaring the intrinsic from the compute op itself must also lower.
        x, y, z = add(32)
        def _intrin_vadd():
            def _intrin_func(ins, outs):
                return tvm.tir.call_packed("vadd", ins[0], ins[1], outs[0])
            return tvm.te.decl_tensor_intrin(z.op, _intrin_func)
        s = tvm.te.create_schedule(z.op)
        s[z].tensorize(z.op.axis[0], _intrin_vadd())
        tvm.lower(s, [x, y, z])
    check(128, 16)
    check_cache_write(129, 16)
    check_compute_reuse()
def test_tensorize_matmul():
    """Tensorize the inner output axis of a matmul with a GEMV intrinsic and
    validate the inferred tensorize regions and the matched intrinsic body.

    The four original checks (plain, rfactor, rfactor without reset, and
    multi-level reduction split without reset) shared all of their assertion
    logic; they are consolidated into one parameterized helper.
    """
    n = 1024
    m = n
    l = n
    A = te.placeholder((n, l), name="A")
    B = te.placeholder((m, l), name="B")
    k = te.reduce_axis((0, l), name="k")
    C = te.compute((n, m), lambda i, j: te.sum(B[j, k] * A[i, k], axis=k), name="C")

    def _check(factor, rfactor=None, intrin_fn=intrin_gemv, multi_reduction=False):
        """Build the schedule, tensorize the inner `y` loop, and assert that the
        inferred output region and matched body agree with the intrinsic.

        factor: split factor of the inner output axis (intrinsic M dimension).
        rfactor: optional split factor of the reduction axis; None = no split.
        intrin_fn: intrinsic factory (with or without a reset lowering).
        multi_reduction: additionally split the outer reduction loop by 2.
        """
        s = te.create_schedule(C.op)
        x, y = C.op.axis
        yo, yi = s[C].split(y, factor=factor)
        if rfactor is None:
            gemv = intrin_fn(factor, l)
        else:
            rk = C.op.reduce_axis[0]
            ro, ri = s[C].split(rk, factor=rfactor)
            if multi_reduction:
                roo, roi = s[C].split(ro, factor=2)
                s[C].reorder(yo, roo, roi, yi, ri)
            else:
                s[C].reorder(yo, ro, yi, ri)
            gemv = intrin_fn(factor, rfactor)
        s[C].tensorize(yi, gemv)
        s = s.normalize()
        dom_map = tvm.te.schedule.InferBound(s)
        finfer = tvm.get_global_func("test.op.InferTensorizeRegion")
        out_dom, in_dom = finfer(s[C], dom_map)
        # The tensorized region covers one output row and `factor` columns,
        # starting at the split point yo * factor.
        assert tvm.ir.structural_equal(out_dom[x].extent, 1)
        assert tvm.ir.structural_equal(out_dom[y].extent, factor)
        assert tvm.ir.structural_equal(out_dom[y].min, yo * factor)
        fmatch = tvm.get_global_func("test.op.MatchTensorizeBody")
        body = fmatch(s[C], out_dom, in_dom, gemv)
        ana = tvm.arith.Analyzer()
        # The matched compute body must simplify to the intrinsic's pattern body.
        assert tvm.ir.structural_equal(ana.simplify(body[0]), ana.simplify(gemv.op.body[0]))
        tvm.te.schedule.ScheduleOps(s, dom_map)
        tvm.lower(s, [A, B, C])

    _check(16)
    _check(16, rfactor=16)
    _check(16, rfactor=16, intrin_fn=intrin_gemv_no_reset)
    _check(16, rfactor=16, intrin_fn=intrin_gemv_no_reset, multi_reduction=True)
# This tests whether algorithm and intrinsics expressions are simplified
# as much as possible first and then checked for equality. See Issue #696
def test_tensorize_op():
    """Algorithm and intrinsic bodies are simplified before the equality check (#696)."""
    idxd = tvm.tir.indexdiv
    idxm = tvm.tir.indexmod
    def op_intrin():
        # 9x9 gather pattern written with div/mod index arithmetic.
        bh = 9
        bw = 9
        x = te.placeholder((5, 5), name="A")
        y = te.compute((bh, bw), lambda i, j: x[idxd(j, 3) + idxm(i, 3), idxm(j, 3) + idxd(i, 3)])
        def intrin_func(ins, outs):
            (xx,) = ins
            zz = outs[0]
            return tvm.tir.call_packed("op", xx, zz)
        return te.decl_tensor_intrin(y.op, intrin_func, default_buffer_params={"offset_factor": 2})
    A = te.placeholder((5, 5), name="A")
    B = te.compute((9, 9), lambda i, j: A[idxd(j, 3) + idxm(i, 3), idxm(j, 3) + idxd(i, 3)])
    bt = op_intrin()
    s = te.create_schedule(B.op)
    x, y = B.op.axis
    s[B].tensorize(x, bt)
    s = s.normalize()
    tvm.lower(s, [A, B])
# This test asserts that tensorize does not have any effect on
# TensorComputeOp operations
def test_tensorize_tensor_compute_op():
    """Tensorize must be a no-op on TensorComputeOp operations."""
    # an intrinsic called "multivadd" whose definition (pattern)
    # is a loop of another intrinsic called "vadd"
    def intrin_multivadd(n):
        n_a = te.var("n_a")
        Ab = tvm.tir.decl_buffer((n,), "float32", strides=[n_a])
        n_b = te.var("n_b")
        Bb = tvm.tir.decl_buffer((n,), "float32", strides=[n_b])
        n_c = te.var("n_c")
        Cb = tvm.tir.decl_buffer((n,), "float32", strides=[n_c])
        z = te.compute(
            (n,),
            lambda i: tvm.tir.call_extern(
                "float32",
                "vadd",
                Ab.access_ptr("w", offset=n_a * i),
                Bb.access_ptr("r", offset=n_b * i),
                Cb.access_ptr("r", offset=n_c * i),
            ),
        )
        # replace the pattern with the multivadd call. I need to figure out
        # how to pass it the right parameters.
        def intrin_func(ins, outs):
            return tvm.tir.call_packed("multivadd")
        return te.decl_tensor_intrin(z.op, intrin_func, name="multivadd")
    def intrin_vadd(n):
        dtype = "float32"
        x = te.placeholder((n,), dtype=dtype, name="vx")
        y = te.placeholder((n,), dtype=dtype, name="vy")
        z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
        s = te.create_schedule(z.op)
        def create_buffer(t):
            return tvm.tir.decl_buffer(t.shape, t.dtype, name="W" + t.name, offset_factor=16)
        def intrin_func(ins, outs):
            ib = tvm.tir.ir_builder.create()
            ib.emit(
                tvm.tir.call_extern(
                    "float32",
                    "vadd",
                    ins[0].access_ptr("r"),
                    ins[1].access_ptr("r"),
                    outs[0].access_ptr("wr"),
                )
            )
            return ib.get()
        return te.decl_tensor_intrin(
            z.op, intrin_func, binds={x: create_buffer(x), y: create_buffer(y), z: create_buffer(z)}
        )
    # cache_read, cache_write
    M = 1024
    factor = 16
    dtype = "float32"
    A = te.placeholder((M // factor, factor), name="A", dtype=dtype)
    B = te.placeholder((M // factor, factor), name="B", dtype=dtype)
    vadd = intrin_vadd(factor)
    # C is a TensorComputeOp (built by calling the vadd intrinsic per row).
    C = te.compute((M // factor, factor), lambda i: vadd(A[i, 0:factor], B[i, 0:factor]), name="C")
    s = te.create_schedule(C.op)
    multivadd = intrin_multivadd(64)
    s[C].tensorize(C.op.axis[0], multivadd)
    s = s.normalize()
    dom_map = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, dom_map)
    # The loop that we tried to tensorize still exists in the code
    # That means tensorize didn't work as expected
    assert isinstance(stmt.body, tvm.tir.For)
    assert stmt.body.loop_var.name == C.op.axis[0].var.name
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_tensorize_vadd()
    test_tensorize_matmul()
    test_tensorize_op()
    test_tensorize_tensor_compute_op()
| 14,977 | 37.306905 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_aot_legalize_packed_call.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
# Input IRModule for the test: `tir_packed_call` invokes the cpacked callee
# with raw T.handle values; LegalizePackedCalls is expected to wrap each of
# them in a stack-allocated DLTensor (compare with `Expected` below).
@tvm.script.ir_module
class Module:
    @T.prim_func
    def tvm_test_cpacked(
        A: T.Buffer((1,), "float32"),
        B: T.Buffer((1,), "float32"),
        C: T.Buffer((1,), "float32"),
        device_context: T.Buffer((1,), "float32"),
    ) -> T.handle:
        T.evaluate(C.data)

    @T.prim_func
    def tir_packed_call() -> None:
        A = T.handle()
        B = T.handle()
        C = T.handle()
        device_context = T.handle()
        # body
        T.evaluate(
            T.tvm_call_cpacked(
                "tvm_test_cpacked",
                A,
                B,
                C,
                device_context,
                dtype="int32",
            )
        )
# Expected output of LegalizePackedCalls: every data handle (A, B, C) is
# wrapped via tvm_stack_make_array into a 1-element DLTensor; the
# device_context argument is passed through unchanged.
@tvm.script.ir_module
class Expected:
    @T.prim_func
    def tvm_test_cpacked(
        A: T.Buffer((1,), "float32"),
        B: T.Buffer((1,), "float32"),
        C: T.Buffer((1,), "float32"),
        device_context: T.Buffer((1,), "float32"),
    ) -> T.handle:
        T.evaluate(C.data)

    @T.prim_func
    def tir_packed_call() -> None:
        A = T.handle()
        B = T.handle()
        C = T.handle()
        device_context = T.handle()
        # body
        T.evaluate(
            T.tvm_call_cpacked(
                "tvm_test_cpacked",
                T.tvm_stack_make_array(
                    A,
                    T.tvm_stack_make_shape(1, dtype="handle"),
                    T.reinterpret(T.uint64(0), dtype="handle"),
                    T.uint32(1),
                    T.Cast("float32", 0),
                    0,
                    dtype="handle",
                ),
                T.tvm_stack_make_array(
                    B,
                    T.tvm_stack_make_shape(1, dtype="handle"),
                    T.reinterpret(T.uint64(0), dtype="handle"),
                    T.uint32(1),
                    T.Cast("float32", 0),
                    0,
                    dtype="handle",
                ),
                T.tvm_stack_make_array(
                    C,
                    T.tvm_stack_make_shape(1, dtype="handle"),
                    T.reinterpret(T.uint64(0), dtype="handle"),
                    T.uint32(1),
                    T.Cast("float32", 0),
                    0,
                    dtype="handle",
                ),
                device_context,
                dtype="int32",
            )
        )
def test_aot_packed_call():
    """LegalizePackedCalls must wrap the raw handles of the cpacked call
    into DLTensors, turning `Module` into `Expected`."""
    transformed = tir.transform.LegalizePackedCalls()(Module)
    tvm.ir.assert_structural_equal(Expected, transformed, map_free_vars=True)
# Delegate test discovery/execution to tvm.testing when run as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 3,564 | 28.957983 | 69 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_postproc_rewrite_reduction_block.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import tvm
from tvm import meta_schedule as ms
from tvm import tir
from tvm.script import tir as T
from tvm.target import Target
def _target() -> Target:
    """CUDA target with an LLVM host, shared by all tests in this file."""
    cuda_target = Target("cuda", host="llvm")
    return cuda_target
def _create_context(mod, target) -> ms.TuneContext:
    """Build a TuneContext whose only postproc is RewriteReductionBlock."""
    space_gen = ms.space_generator.PostOrderApply(
        sch_rules=[],
        postprocs=[ms.postproc.RewriteReductionBlock()],
        mutator_probs={},
    )
    return ms.TuneContext(
        mod=mod,
        target=target,
        space_generator=space_gen,
        task_name="test",
    )
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,no-self-argument,line-too-long,chained-comparison,not-callable,too-many-nested-blocks
# Tiled 512x512 matmul with cooperative-fetched shared-memory copies.
# The reduction block "C" still carries its T.init(); the
# RewriteReductionBlock postproc is expected to split it into an init
# block plus an update block (see Matmul_after_rewrite).
@tvm.script.ir_module
class Matmul_before_rewrite:
    @T.prim_func
    def main(var_A: T.handle, var_B: T.handle, var_C: T.handle) -> None:
        A = T.match_buffer(var_A, [512, 512], dtype="float32")
        B = T.match_buffer(var_B, [512, 512], dtype="float32")
        C = T.match_buffer(var_C, [512, 512], dtype="float32")
        C_local = T.alloc_buffer([512, 512], dtype="float32", scope="local")
        A_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
        B_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
        for i0_0_i1_0_fused in T.thread_binding(0, 16, thread="blockIdx.x"):
            for i0_1_i1_1_fused in T.thread_binding(0, 16, thread="vthread.x"):
                for i0_2_i1_2_fused in T.thread_binding(0, 8, thread="threadIdx.x"):
                    for i2_0 in T.serial(0, 1):
                        for ax0_ax1_fused_0 in T.serial(0, 32768):
                            for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.x"):
                                with T.block("A_shared"):
                                    v0 = T.axis.spatial(512, (ax0_ax1_fused_0 * 8 + ax0_ax1_fused_1) // 512)
                                    v1 = T.axis.spatial(512, (ax0_ax1_fused_0 * 8 + ax0_ax1_fused_1) % 512)
                                    T.reads([A[v0, v1]])
                                    T.writes([A_shared[v0, v1]])
                                    T.block_attr({"meta_schedule.cooperative_fetch":1})
                                    A_shared[v0, v1] = A[v0, v1]
                        for ax0_ax1_fused_0 in T.serial(0, 1024):
                            for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.x"):
                                for ax0_ax1_fused_2 in T.vectorized(0, 2):
                                    with T.block("B_shared"):
                                        v0 = T.axis.spatial(512, (ax0_ax1_fused_0 * 16 + ax0_ax1_fused_1 * 2 + ax0_ax1_fused_2) // 32)
                                        v1 = T.axis.spatial(512, i0_0_i1_0_fused * 32 + (ax0_ax1_fused_0 * 16 + ax0_ax1_fused_1 * 2 + ax0_ax1_fused_2) % 32)
                                        T.reads([B[v0, v1]])
                                        T.writes([B_shared[v0, v1]])
                                        T.block_attr({"meta_schedule.cooperative_fetch":2})
                                        B_shared[v0, v1] = B[v0, v1]
                        # Reduction block with an inline T.init() — the target of the postproc.
                        for i2_1, i0_3, i1_3, i2_2, i0_4, i1_4 in T.grid(16, 2, 2, 32, 16, 2):
                            with T.block("C"):
                                i = T.axis.spatial(512, i0_1_i1_1_fused * 32 + i0_3 * 16 + i0_4)
                                j = T.axis.spatial(512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + i1_3 * 2 + i1_4)
                                k = T.axis.reduce(512, i2_1 * 32 + i2_2)
                                T.reads([C_local[i, j], A_shared[i, k], B_shared[k, j]])
                                T.writes([C_local[i, j]])
                                with T.init():
                                    C_local[i, j] = T.float32(0)
                                C_local[i, j] = C_local[i, j] + A_shared[i, k] * B_shared[k, j]
                    for ax0, ax1 in T.grid(32, 4):
                        with T.block("C_local"):
                            v0 = T.axis.spatial(512, i0_1_i1_1_fused * 32 + ax0)
                            v1 = T.axis.spatial(512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + ax1)
                            T.reads([C_local[v0, v1]])
                            T.writes([C[v0, v1]])
                            C[v0, v1] = C_local[v0, v1]
# Expected result of RewriteReductionBlock on Matmul_before_rewrite: the
# single reduction block "C" has been split into "C_init" (zero-fills
# C_local over the spatial tile) followed by "C_update" (the accumulate).
@tvm.script.ir_module
class Matmul_after_rewrite:
    @T.prim_func
    def main(var_A: T.handle, var_B: T.handle, var_C: T.handle) -> None:
        A = T.match_buffer(var_A, [512, 512], dtype="float32")
        B = T.match_buffer(var_B, [512, 512], dtype="float32")
        C = T.match_buffer(var_C, [512, 512], dtype="float32")
        C_local = T.alloc_buffer([512, 512], dtype="float32", scope="local")
        A_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
        B_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
        for i0_0_i1_0_fused in T.thread_binding(0, 16, thread="blockIdx.x"):
            for i0_1_i1_1_fused in T.thread_binding(0, 16, thread="vthread.x"):
                for i0_2_i1_2_fused in T.thread_binding(0, 8, thread="threadIdx.x"):
                    for i2_0 in T.serial(0, 1):
                        for ax0_ax1_fused_0 in T.serial(0, 32768):
                            for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.x"):
                                with T.block("A_shared"):
                                    v0 = T.axis.spatial(512, (ax0_ax1_fused_0 * 8 + ax0_ax1_fused_1) // 512)
                                    v1 = T.axis.spatial(512, (ax0_ax1_fused_0 * 8 + ax0_ax1_fused_1) % 512)
                                    T.reads([A[v0, v1]])
                                    T.writes([A_shared[v0, v1]])
                                    T.block_attr({"meta_schedule.cooperative_fetch":1})
                                    A_shared[v0, v1] = A[v0, v1]
                        for ax0_ax1_fused_0 in T.serial(0, 1024):
                            for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.x"):
                                for ax0_ax1_fused_2 in T.vectorized(0, 2):
                                    with T.block("B_shared"):
                                        v0 = T.axis.spatial(512, (ax0_ax1_fused_0 * 16 + ax0_ax1_fused_1 * 2 + ax0_ax1_fused_2) // 32)
                                        v1 = T.axis.spatial(512, i0_0_i1_0_fused * 32 + (ax0_ax1_fused_0 * 16 + ax0_ax1_fused_1 * 2 + ax0_ax1_fused_2) % 32)
                                        T.reads([B[v0, v1]])
                                        T.writes([B_shared[v0, v1]])
                                        T.block_attr({"meta_schedule.cooperative_fetch":2})
                                        B_shared[v0, v1] = B[v0, v1]
                        # Decomposed init block: no reduce axis, zero-initializes C_local.
                        for i0_3_init, i1_3_init, i0_4_init, i1_4_init in T.grid(2, 2, 16, 2):
                            with T.block("C_init"):
                                i = T.axis.spatial(512, i0_1_i1_1_fused * 32 + i0_3_init * 16 + i0_4_init)
                                j = T.axis.spatial(512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + i1_3_init * 2 + i1_4_init)
                                T.reads([])
                                T.writes([C_local[i, j]])
                                C_local[i, j] = T.float32(0)
                        # Update block: pure accumulation, no T.init() remains.
                        for i2_1, i0_3, i1_3, i2_2, i0_4, i1_4 in T.grid(16, 2, 2, 32, 16, 2):
                            with T.block("C_update"):
                                i = T.axis.spatial(512, i0_1_i1_1_fused * 32 + i0_3 * 16 + i0_4)
                                j = T.axis.spatial(512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + i1_3 * 2 + i1_4)
                                k = T.axis.reduce(512, i2_1 * 32 + i2_2)
                                T.reads([C_local[i, j], A_shared[i, k], B_shared[k, j]])
                                T.writes([C_local[i, j]])
                                C_local[i, j] = C_local[i, j] + A_shared[i, k] * B_shared[k, j]
                    for ax0, ax1 in T.grid(32, 4):
                        with T.block("C_local"):
                            v0 = T.axis.spatial(512, i0_1_i1_1_fused * 32 + ax0)
                            v1 = T.axis.spatial(512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + ax1)
                            T.reads([C_local[v0, v1]])
                            T.writes([C[v0, v1]])
                            C[v0, v1] = C_local[v0, v1]
# Softmax whose max/expsum reductions are bound to threadIdx.x
# (cross-thread reductions). Used by test_rewrite_softmax below, which
# expects RewriteReductionBlock to leave this module unchanged.
@tvm.script.ir_module
class Softmax_cross_thread_reduction:
    @T.prim_func
    def main(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None:
        T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
        T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
        for i0 in T.serial(256):
            # Row-wise max, reduced across threadIdx.x.
            for ax0, ax1_0 in T.grid(1, 8):
                for ax1_1 in T.thread_binding(32, thread="threadIdx.x"):
                    with T.block("T_softmax_maxelem"):
                        i0_1 = T.axis.spatial(256, i0)
                        k = T.axis.reduce(256, ax1_0 * 32 + ax1_1)
                        T.reads(T_softmax_maxelem_shared[i0_1], A[i0_1, k])
                        T.writes(T_softmax_maxelem_shared[i0_1])
                        with T.init():
                            T_softmax_maxelem_shared[i0_1] = T.float32(-3.4028234663852886e+38)
                        T_softmax_maxelem_shared[i0_1] = T.max(T_softmax_maxelem_shared[i0_1], A[i0_1, k])
            # Row-wise sum of exp(x - max), reduced across threadIdx.x.
            for ax0, ax1_0 in T.grid(1, 8):
                for ax1_1 in T.thread_binding(32, thread="threadIdx.x"):
                    with T.block("T_softmax_expsum"):
                        i0_2 = T.axis.spatial(256, i0)
                        k = T.axis.reduce(256, ax1_0 * 32 + ax1_1)
                        T.reads(T_softmax_expsum_shared[i0_2], A[i0_2, k], T_softmax_maxelem_shared[i0_2])
                        T.writes(T_softmax_expsum_shared[i0_2])
                        with T.init():
                            T_softmax_expsum_shared[i0_2] = T.float32(0)
                        T_softmax_expsum_shared[i0_2] = T_softmax_expsum_shared[i0_2] + T.exp(A[i0_2, k] - T_softmax_maxelem_shared[i0_2], dtype="float32")
            # Final normalization (spatial only, no reduction).
            for i1_0 in T.serial(8):
                for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
                    with T.block("T_softmax_norm"):
                        i0_3 = T.axis.spatial(256, i0)
                        i1 = T.axis.spatial(256, i1_0 * 32 + i1_1)
                        T.reads(A[i0_3, i1], T_softmax_maxelem_shared[i0_3], T_softmax_expsum_shared[i0_3])
                        T.writes(T_softmax_norm[i0_3, i1])
                        T.block_attr({"axis":1})
                        T_softmax_norm[i0_3, i1] = T.exp(A[i0_3, i1] - T_softmax_maxelem_shared[i0_3], dtype="float32") / T_softmax_expsum_shared[i0_3]
# pylint: enable=no-member,invalid-name,unused-variable,no-self-argument,line-too-long,chained-comparison,not-callable,too-many-nested-blocks
# fmt: on
def test_rewrite_tiled_matmul():
    """RewriteReductionBlock must split block "C" into "C_init" + "C_update"."""
    before = Matmul_before_rewrite
    ctx = _create_context(before, _target())
    sch = tir.Schedule(before, debug_mask="all")
    sch.enter_postproc()
    postproc = ctx.space_generator.postprocs[0]
    assert postproc.apply(sch)
    tvm.ir.assert_structural_equal(sch.mod, Matmul_after_rewrite)
def test_rewrite_softmax():
    """The postproc must succeed on a cross-thread-reduction softmax while
    leaving the module structurally unchanged."""
    before = Softmax_cross_thread_reduction
    ctx = _create_context(before, _target())
    sch = tir.Schedule(before, debug_mask="all")
    sch.enter_postproc()
    postproc = ctx.space_generator.postprocs[0]
    assert postproc.apply(sch)
    # The module should not be rewritten
    tvm.ir.assert_structural_equal(sch.mod, Softmax_cross_thread_reduction)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_rewrite_tiled_matmul()
    test_rewrite_softmax()
| 12,819 | 56.232143 | 156 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_plan_update_buffer_allocation_location.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
from tvm import relay, tir
from tvm.relay.backend.te_compiler import lower_to_primfunc
from tvm.tir.tensor_intrin.hexagon import VRMPY_u8u8i32_INTRIN
def _check(original, transformed):
    """Run PlanAndUpdateBufferAllocationLocation on `original` and assert the
    result is structurally equal to `transformed`."""
    before = tvm.IRModule.from_expr(original)
    after = tvm.tir.transform.PlanAndUpdateBufferAllocationLocation()(before)
    tvm.ir.assert_structural_equal(after["main"], transformed)
# Before: intermediate B is allocated at function scope, although both its
# producer and its consumer live under the same i0 loop.
@T.prim_func
def element_func(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (16, 16))
    C = T.match_buffer(c, (16, 16))
    B = T.alloc_buffer((16, 16))
    for i0 in range(0, 16):
        for j0 in range(0, 16):
            with T.block():
                i, j = T.axis.remap("SS", [i0, j0])
                B[i, j] = A[i, j] + 1.0
        for j0 in range(0, 16):
            with T.block():
                i, j = T.axis.remap("SS", [i0, j0])
                C[i, j] = B[i, j] * 2.0
# After: B's allocation has been sunk into a new opaque block under the
# i_0 loop, the smallest scope covering all of B's accesses.
@T.prim_func
def transformed_element_func(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [16, 16])
    C = T.match_buffer(c, [16, 16])
    for i_0 in range(0, 16):
        with T.block():
            T.reads([A[i_0, 0:16]])
            T.writes([C[i_0, 0:16]])
            B = T.alloc_buffer([16, 16])
            for j_0 in T.serial(0, 16):
                with T.block():
                    i, j = T.axis.remap("SS", [i_0, j_0])
                    B[i, j] = A[i, j] + 1.0
            for j_0 in T.serial(0, 16):
                with T.block():
                    i, j = T.axis.remap("SS", [i_0, j_0])
                    C[i, j] = B[i, j] * 2.0
# Before: B, C, D are all allocated at the top of the SSR block even though
# C and D are only used in inner sub-scopes.
@T.prim_func
def original_func() -> None:
    A = T.alloc_buffer((128, 128), "float32")
    for i0, j0 in T.grid(128, 128):
        with T.block():
            i, j = T.axis.remap("SS", [i0, j0])
            A[i, j] = T.float32(0)
    for i0, j0, k0 in T.grid(32, 32, 32):
        with T.block():
            i, j, k = T.axis.remap("SSR", [i0, j0, k0])
            B = T.alloc_buffer((128, 128), "float32")
            C = T.alloc_buffer((128, 128), "float32")
            D = T.alloc_buffer((128, 128), "float32")
            if k == 0:
                for ii, jj in T.grid(4, 4):
                    B[i * 4 + ii, j * 4 + jj] = A[i * 4 + ii, j * 4 + jj]
            for ii, jj in T.grid(4, 4):
                for kk in range(0, 4):
                    B[i * 4 + ii, j * 4 + jj] += C[i * 4 + ii, k * 4 + kk]
                for kk in range(0, 4):
                    B[i * 4 + ii, j * 4 + jj] += (
                        D[j * 4 + jj, k * 4 + kk] * C[i * 4 + ii, k * 4 + kk]
                    )
# After: C is sunk into the ii/jj sub-block it is used in, and D is sunk
# further into the inner kk sub-block; B stays at the SSR block scope.
@T.prim_func
def transformed_func() -> None:
    A = T.alloc_buffer([128, 128])
    for i0, j0 in T.grid(128, 128):
        with T.block():
            i, j = T.axis.remap("SS", [i0, j0])
            A[i, j] = T.float32(0)
    for i0, j0, k0 in T.grid(32, 32, 32):
        with T.block():
            i, j, k = T.axis.remap("SSR", [i0, j0, k0])
            B = T.alloc_buffer([128, 128])
            if k == 0:
                for ii, jj in T.grid(4, 4):
                    B[i * 4 + ii, j * 4 + jj] = A[i * 4 + ii, j * 4 + jj]
            for ii, jj in T.grid(4, 4):
                with T.block(""):
                    T.reads([B[((i * 4) + ii), ((j * 4) + jj)]])
                    T.writes([B[((i * 4) + ii), ((j * 4) + jj)]])
                    C = T.alloc_buffer([128, 128])
                    for kk in T.serial(0, 4):
                        B[((i * 4) + ii), ((j * 4) + jj)] = (
                            B[((i * 4) + ii), ((j * 4) + jj)] + C[((i * 4) + ii), ((k * 4) + kk)]
                        )
                    for kk in T.serial(0, 4):
                        with T.block(""):
                            T.reads(
                                [
                                    B[((i * 4) + ii), ((j * 4) + jj)],
                                    C[((i * 4) + ii), ((k * 4) + kk)],
                                ]
                            )
                            T.writes([B[((i * 4) + ii), ((j * 4) + jj)]])
                            D = T.alloc_buffer([128, 128])
                            B[((i * 4) + ii), ((j * 4) + jj)] = B[
                                ((i * 4) + ii), ((j * 4) + jj)
                            ] + (
                                D[((j * 4) + jj), ((k * 4) + kk)]
                                * C[((i * 4) + ii), ((k * 4) + kk)]
                            )
# Before: C is allocated at function scope but only accessed (through
# nested match_buffer views C0/C1) inside the i loop.
@T.prim_func
def match_buffer_func() -> None:
    C = T.alloc_buffer((128, 128))
    for i in range(128):
        with T.block():
            vi = T.axis.S(128, i)
            C0 = T.match_buffer(C[vi, 0:128], (128))
            for j in range(128):
                with T.block():
                    jj = T.axis.S(128, j)
                    C1 = T.match_buffer(C0[jj], ())
                    C1[()] = 0
# After: C's allocation has been sunk under the i loop; the match_buffer
# views are preserved unchanged.
@T.prim_func
def transformed_match_buffer_func() -> None:
    for i in range(0, 128):
        with T.block():
            vi = T.axis.S(128, i)
            C = T.alloc_buffer((128, 128))
            C0 = T.match_buffer(C[vi, 0:128], (128))
            for j in range(128):
                with T.block():
                    jj = T.axis.S(128, j)
                    C1 = T.match_buffer(C0[jj], ())
                    C1[()] = 0
# Before: A_cache is at function scope although it is only used inside the
# i loop — written via an opaque call_extern and read back element-wise.
@T.prim_func
def opaque_access(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [1024])
    B = T.match_buffer(b, [1024])
    A_cache = T.alloc_buffer([1024])
    for i in T.serial(0, 8):
        with T.block():
            vi = T.axis.S(8, i)
            with T.block():
                v = T.axis.S(8, vi)
                T.reads([A[(v * 128) : ((v * 128) + 128)]])
                T.writes([A_cache[(v * 128) : ((v * 128) + 128)]])
                # Opaque write: the extern call touches A_cache through its
                # raw data pointer, so only the declared reads/writes apply.
                T.evaluate(
                    T.call_extern(
                        "test",
                        A_cache.data,
                        (v * 128),
                        128,
                        A.data,
                        (v * 128),
                        128,
                        dtype="float32",
                    )
                )
            for j in T.serial(0, 128):
                with T.block():
                    v = T.axis.S(1024, vi * 128 + j)
                    T.reads([A_cache[v]])
                    T.writes([B[v]])
                    B[v] = A_cache[v]
# After: A_cache has been sunk into the block under the i loop, which now
# also carries the aggregated reads/writes for its body.
@T.prim_func
def transformed_opaque_access(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [1024])
    B = T.match_buffer(b, [1024])
    for i in T.serial(0, 8):
        with T.block():
            vi = T.axis.S(8, i)
            T.reads(A[vi * 128 : vi * 128 + 128])
            T.writes(B[vi * 128 : vi * 128 + 128])
            A_cache = T.alloc_buffer([1024])
            with T.block():
                v = T.axis.S(8, vi)
                T.reads([A[v * 128 : v * 128 + 128]])
                T.writes([A_cache[v * 128 : v * 128 + 128]])
                T.evaluate(
                    T.call_extern(
                        "test", A_cache.data, v * 128, 128, A.data, v * 128, 128, dtype="float32"
                    )
                )
            for j in T.serial(0, 128):
                with T.block():
                    v = T.axis.S(1024, vi * 128 + j)
                    T.reads([A_cache[v]])
                    T.writes([B[v]])
                    B[v] = A_cache[v]
def test_elementwise():
    """B is sunk into the i_0 loop shared by its producer and consumer."""
    _check(element_func, transformed_element_func)


def test_locate_buffer_allocation():
    """C and D are relocated into the innermost scopes covering their accesses."""
    _check(original_func, transformed_func)


def test_match_buffer_allocation():
    """The allocation is sunk even when accessed through match_buffer views."""
    _check(match_buffer_func, transformed_match_buffer_func)


def test_opaque_access():
    """A_cache is sunk despite being written through an opaque extern call."""
    _check(opaque_access, transformed_opaque_access)
def test_lower_te():
    """The pass must leave TE-derived (non-TensorIR) modules untouched."""
    placeholder = te.placeholder((1,))
    result = te.compute((1,), lambda i: placeholder[i] + 2)
    sched = te.create_schedule(result.op)
    orig_mod = tvm.driver.build_module.schedule_to_module(sched, [placeholder, result])
    transformed = tvm.tir.transform.PlanAndUpdateBufferAllocationLocation()(orig_mod)
    # PlanAndUpdateBufferAllocationLocation should do nothing on TE
    tvm.ir.assert_structural_equal(transformed, orig_mod)
def test_loop_carried_dependency():
    """The buffer allocation should be above opaque iter var's loop scopes
    such that buffer accesses with loop carried dependencies are covered,
    and the allocate buffer should keep the order."""

    @T.prim_func
    def before(A: T.Buffer((8, 8, 8), "int32"), B: T.Buffer((8, 8, 8), "int32")):
        C = T.alloc_buffer([8, 8, 8], dtype="int32")
        D = T.alloc_buffer([8, 8, 8], dtype="int32")
        for i in T.serial(8):
            for j in T.serial(8):
                for k in T.serial(8):
                    with T.block("b0"):
                        vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                        C[vi, vj, vk] = A[vi, vj, vk] + 1
                for k in T.serial(8):
                    with T.block("b1"):
                        vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                        D[vi, vj, vk] = A[vi, vj, vk] + 2
                for k in T.serial(8):
                    # b2 reads C at vj - 1 with vj opaque: a loop-carried
                    # dependency across the j loop.
                    with T.block("b2"):
                        vi, vk = T.axis.remap("SS", [i, k])
                        vj = T.axis.opaque(8, j)
                        B[vi, vj, vk] = (
                            C[vi, vj, vk]
                            + T.if_then_else(0 < vj, C[vi, vj - 1, vk], 0, dtype="int32")
                            + D[vi, vj, vk]
                        )

    @T.prim_func
    def after(A: T.Buffer((8, 8, 8), "int32"), B: T.Buffer((8, 8, 8), "int32")) -> None:
        for i in T.serial(8):
            with T.block():
                T.reads(A[i, 0:8, 0:8])
                T.writes(B[i, 0:8, 0:8])
                # C and D stay above the j loop (not inside it) because of the
                # carried dependency, and keep their original declaration order.
                C = T.alloc_buffer([8, 8, 8], dtype="int32")
                D = T.alloc_buffer([8, 8, 8], dtype="int32")
                for j in T.serial(8):
                    for k in T.serial(8):
                        with T.block("b0"):
                            vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                            C[vi, vj, vk] = A[vi, vj, vk] + 1
                    for k in T.serial(8):
                        with T.block("b1"):
                            vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                            D[vi, vj, vk] = A[vi, vj, vk] + 2
                    for k in T.serial(8):
                        with T.block("b2"):
                            vi, vk = T.axis.remap("SS", [i, k])
                            vj = T.axis.opaque(8, j)
                            B[vi, vj, vk] = (
                                C[vi, vj, vk]
                                + T.if_then_else(0 < vj, C[vi, vj - 1, vk], 0, dtype="int32")
                                + D[vi, vj, vk]
                            )

    _check(before, after)
def test_1D_cascade_op_rolling_buffer():
    """The intermediate buffer must be allocated above rolling buffer's rolling loop,
    which is marked as opaque in consumer block's iter mappings."""

    @T.prim_func
    def before(A: T.Buffer((4, 16), "int32"), C: T.Buffer((4, 8), "int32")):
        B = T.alloc_buffer((4, 6), "int32")
        for c in T.serial(4):
            for i in T.serial(0, 2):
                # Producer P1 writes B modulo 6 (rolling-buffer indexing).
                for j in T.serial(0, 6):
                    for k in T.serial(3):
                        with T.block("P1"):
                            T.where(i < 1 or j >= 2)
                            cc, vi, vj, vk = T.axis.remap("SSSR", [c, i, j, k])
                            if vk == 0:
                                B[cc, T.floormod(vi * 4 + vj, 6)] = 0
                            B[cc, T.floormod(vi * 4 + vj, 6)] = (
                                B[cc, T.floormod(vi * 4 + vj, 6)] + A[cc, vi * 4 + vj + vk]
                            )
                # Consumer P2 treats the rolling loop var i as opaque.
                for j in T.serial(0, 4):
                    for k in T.serial(3):
                        with T.block("P2"):
                            vi = T.axis.opaque(2, i)
                            cc, vj, vk = T.axis.remap("SSR", [c, j, k])
                            if vk == 0:
                                C[cc, vi * 4 + vj] = 0
                            C[cc, vi * 4 + vj] = (
                                C[cc, vi * 4 + vj] + B[cc, T.floormod(vi * 4 + vj + vk, 6)]
                            )

    @T.prim_func
    def after(A: T.Buffer((4, 16), "int32"), C: T.Buffer((4, 8), "int32")):
        for c in T.serial(4):
            with T.block():
                T.reads(A[c, 0:12], C[c, 0:8])
                T.writes(C[c, 0:8])
                # B lands under the c loop but above the rolling i loop.
                B = T.alloc_buffer([4, 6], dtype="int32")
                for i in T.serial(2):
                    for j, k in T.grid(6, 3):
                        with T.block("P1"):
                            T.where(i < 1 or j >= 2)
                            cc, vi, vj, vk = T.axis.remap("SSSR", [c, i, j, k])
                            if vk == 0:
                                B[cc, (vi * 4 + vj) % 6] = 0
                            B[cc, (vi * 4 + vj) % 6] = (
                                B[cc, (vi * 4 + vj) % 6] + A[cc, vi * 4 + vj + vk]
                            )
                    for j, k in T.grid(4, 3):
                        with T.block("P2"):
                            vi = T.axis.opaque(2, i)
                            cc, vj, vk = T.axis.remap("SSR", [c, j, k])
                            if vk == 0:
                                C[cc, vi * 4 + vj] = 0
                            C[cc, vi * 4 + vj] = C[cc, vi * 4 + vj] + B[cc, (vi * 4 + vj + vk) % 6]

    _check(before, after)
def test_allocate_const_after_tensorize():
    """Regression test: the pass must not crash on PrimFuncs containing
    constant allocations (produced by link-params) after tensorization.
    Running the pass pipeline without raising is the whole check."""
    i_size, o_size, h_size, w_size = 64, 64, 56, 56
    k_height_size = k_width_size = 3
    w_shape = (o_size, i_size, k_height_size, k_width_size)

    data = relay.var("data", shape=(1, i_size, h_size, w_size), dtype="uint8")
    weight = relay.var("weight", shape=w_shape, dtype="uint8")
    conv2d = relay.nn.conv2d(
        data=data,
        weight=weight,
        kernel_size=(k_height_size, k_width_size),
        channels=o_size,
        padding=(0, 0),
        strides=(1, 1),
        out_dtype="int32",
    )

    mod = tvm.IRModule.from_expr(conv2d)
    # link-params embeds the bound weights into the lowered function
    # (presumably as AllocateConst nodes — see the comment at the bottom).
    executor = relay.backend.Executor("graph", {"link-params": True})
    mod = mod.with_attr("executor", executor)

    weight_np = np.random.uniform(1, 10, size=w_shape).astype("uint8")
    target = tvm.target.Target("hexagon")

    with tvm.transform.PassContext(opt_level=3):
        opt_mod, _ = relay.optimize(mod, params={"weight": weight_np}, target=target)

    conv2d_func = opt_mod["main"].body.args[0].op
    prim_func = lower_to_primfunc(conv2d_func, target)

    # Tensorize the inner reduction with Hexagon's vrmpy intrinsic.
    sch = tir.Schedule(prim_func)
    block = sch.get_block("conv2d_NCHWc_int8")
    loops = sch.get_loops(block)
    sch.reorder(loops[8], loops[4], loops[-1])
    sch.decompose_reduction(block, loops[1])
    sch.tensorize(loops[4], VRMPY_u8u8i32_INTRIN)

    seq = tvm.transform.Sequential(
        [
            tvm.tir.transform.LowerInitBlock(),
            tvm.tir.transform.PlanAndUpdateBufferAllocationLocation(),
        ]
    )

    # The following error is emitted if AllocateConst nodes are not correctly handled:
    # Check failed: (buffer_data_to_buffer_.count(source_var)) is false:
    _ = seq(sch.mod)
def test_buffer_conditional_lowering():
    """
    Confirm that the `tir.PlanAndUpdateBufferAllocationLocation` pass
    leaves (Buffer nodes corresponding to pointer-typed PrimFunc arguments)
    unchanged, rather than lowering them to `reads`, `writes`, and `alloc_buffer` nodes.
    """

    @T.prim_func
    def before(A: T.handle("float32")):
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        for i in range(1):
            # A_1 aliases the pointer argument A; it is a view, not an allocation.
            A_1 = T.Buffer((1,), data=A)
            A_1[i] = 0

    # The pass must be the identity on this function.
    after = before
    _check(before, after)
# Delegate test discovery/execution to tvm.testing when run as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 16,680 | 36.997722 | 99 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_vectorize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_vectorize_loop():
    """An inner `vectorize` loop over a scalar buffer must be replaced by a
    single store with a Ramp index and a Broadcast value.

    Fix: removed the dead local `dtype = "int64"`, which was never used.
    """
    n = te.var("n")
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    with ib.for_range(0, n) as i:
        with ib.for_range(0, 4, kind="vectorize") as j:
            A[j] = tvm.tir.const(1, A.dtype)
    stmt = ib.get()

    assert isinstance(stmt.body, tvm.tir.For)

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
    stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body

    # The outer serial loop survives; the inner vectorized loop is gone.
    assert isinstance(stmt, tvm.tir.For)
    assert not isinstance(stmt.body, tvm.tir.For)
    # The remaining store is ramp-indexed with a broadcast RHS.
    assert len(stmt.body.indices) == 1
    assert isinstance(stmt.body.indices[0], tvm.tir.Ramp)
    assert isinstance(stmt.body.value, tvm.tir.Broadcast)
def test_vectorize_vector():
    """Same as test_vectorize_loop but over an already-vector ("float32x4")
    buffer: vectorization must still yield a Ramp index and Broadcast value.

    Fix: removed the dead local `dtype = "int64"`, which was never used.
    """
    n = te.var("n")
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32x4", name="A")
    with ib.for_range(0, n) as i:
        with ib.for_range(0, 4, kind="vectorize") as j:
            A[j] = tvm.tir.const(1, A.dtype)
    stmt = ib.get()

    assert isinstance(stmt.body, tvm.tir.For)

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
    stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body

    # The outer serial loop survives; the inner vectorized loop is gone.
    assert isinstance(stmt, tvm.tir.For)
    assert not isinstance(stmt.body, tvm.tir.For)
    assert len(stmt.body.indices) == 1
    assert isinstance(stmt.body.indices[0], tvm.tir.Ramp)
    assert isinstance(stmt.body.value, tvm.tir.Broadcast)
def test_vectorize_with_if():
    """An if whose condition is loop-invariant vectorizes in the then-branch,
    while the branch whose condition depends on the loop var stays a For."""
    bound = te.var("n")
    flag = te.var("x")
    builder = tvm.tir.ir_builder.create()
    buf = builder.pointer("float32", name="A")
    with builder.for_range(0, 4, kind="vectorize") as i:
        with builder.if_scope(flag < bound):
            buf[i] = buf[i] + 1
        with builder.else_scope():
            with builder.if_scope(i < bound):
                buf[i] = 2.0

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([buf, bound, flag], builder.get()))
    transformed = tvm.tir.transform.VectorizeLoop()(mod)["main"].body

    assert isinstance(transformed, tvm.tir.IfThenElse)
    # Loop-invariant condition: then-branch became a single vector store.
    then_store = transformed.then_case
    assert len(then_store.indices) == 1
    assert isinstance(then_store.indices[0], tvm.tir.Ramp)
    assert isinstance(then_store.value, tvm.tir.Add)
    assert then_store.value.dtype == "float32x4"
    # Loop-var-dependent condition: else-branch stays a scalar loop.
    assert isinstance(transformed.else_case, tvm.tir.For)
def test_vectorize_with_if_cond_int64():
    """Vectorizing an `if_then_else` whose condition uses an int64 iter var
    must lower and build cleanly; building without raising is the check.

    Fix: dropped the unused bindings for the split's outer axis and the
    build result.
    """
    m = te.size_var("m", dtype="int64")
    A = te.placeholder((m,), name="A", dtype="float32")
    B = te.compute((m,), lambda i: te.if_then_else(i < 2, A[i], A[i] * 2), name="B")
    s = te.create_schedule(B.op)
    _, y = s[B].split(B.op.axis[0], factor=4)
    s[B].vectorize(y)
    tvm.build(s, [A, B], "llvm")
def test_vectorize_let():
    """A Let bound inside a vectorized loop is widened to a vector Let."""
    let_var = tvm.tir.Var("v", "float32")
    builder = tvm.tir.ir_builder.create()
    buf = builder.pointer("float32", name="A")
    with builder.for_range(0, 4, kind="vectorize") as i:
        builder.emit(lambda body: tvm.tir.LetStmt(let_var, buf[i] + 1, body))
        buf[i] = let_var + 2

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([buf], builder.get()))
    result = tvm.tir.transform.VectorizeLoop()(mod)["main"].body

    # The loop is gone; the Let now binds a 4-lane vector value.
    assert isinstance(result, tvm.tir.LetStmt)
    assert result.value.dtype == "float32x4"
def test_vectorize_with_le_cond():
    """A guard `i <= n` on the vectorized loop var prevents vectorization:
    the For must survive the pass."""
    bound = te.var("n")
    builder = tvm.tir.ir_builder.create()
    buf = builder.pointer("float32", name="A")
    with builder.for_range(0, 4, kind="vectorize") as i:
        with builder.if_scope(i <= bound):
            buf[i] = buf[i] + 1

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([buf, bound], builder.get()))
    transformed = tvm.tir.transform.VectorizeLoop()(mod)["main"].body

    # VectorizeLoop falls back to a scalar loop for this pattern.
    assert isinstance(transformed, tvm.tir.For)
def test_vectorize_with_ge_cond():
    """A guard `i >= n` on the vectorized loop var prevents vectorization:
    the For must survive the pass."""
    bound = te.var("n")
    builder = tvm.tir.ir_builder.create()
    buf = builder.pointer("float32", name="A")
    with builder.for_range(0, 4, kind="vectorize") as i:
        with builder.if_scope(i >= bound):
            buf[i] = buf[i] + 1

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([buf, bound], builder.get()))
    transformed = tvm.tir.transform.VectorizeLoop()(mod)["main"].body

    # VectorizeLoop falls back to a scalar loop for this pattern.
    assert isinstance(transformed, tvm.tir.For)
def test_vectorize_if_then_else():
    """Vectorization of `tir.if_then_else` calls."""
    n = te.var("n")
    x = te.var("x")

    # Case 1: the condition depends on the vectorized loop var `i`,
    # so the loop cannot be vectorized and remains a For.
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    with ib.for_range(0, 4, kind="vectorize") as i:
        A[i] = tvm.tir.call_intrin("float32", "tir.if_then_else", i > 0, A[i] + 1, A[i])
    stmt = ib.get()

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n, x], stmt))
    stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body

    assert isinstance(stmt, tvm.tir.For)

    # Case 2: the condition only depends on the outer loop var `k`, so the
    # inner loop vectorizes and the scalar `0` operand is broadcast.
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    with ib.for_range(0, n) as k:
        with ib.for_range(0, 4, kind="vectorize") as i:
            A[k * 4 + i] = tvm.tir.call_intrin(
                "float32", "tir.if_then_else", k > 0, A[k * 4 + i], 0
            )
    stmt = ib.get()

    assert isinstance(stmt.body, tvm.tir.For)

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, n], stmt))
    stmt = tvm.tir.transform.VectorizeLoop()(mod)["main"].body

    assert not isinstance(stmt.body, tvm.tir.For)
    assert isinstance(stmt.body.value.args[2], tvm.tir.Broadcast)
def test_vectorize_while_fail():
    """A while loop inside a vectorized loop should fail."""
    n = 64
    num_iter = 10

    def test_ir(A, B, C):
        # Hand-written TIR body: a data-dependent while loop placed inside
        # the vectorized j loop (the unsupported construct under test).
        ib = tvm.tir.ir_builder.create()
        n = C.shape[0]
        A = ib.buffer_ptr(A)
        B = ib.buffer_ptr(B)
        C = ib.buffer_ptr(C)
        i = ib.allocate("int32", (1,), name="i", scope="local")
        i[0] = 0

        with ib.for_range(0, n) as j:
            C[j] = 0.0

        with ib.for_range(0, n, kind="vectorize") as j:
            with ib.while_loop(i[0] < num_iter):
                C[j] += A[j] + B[j]
                i[0] += 1

        return ib.get()

    dtype = "float32"
    A = te.placeholder((n,), name="A", dtype=dtype)
    B = te.placeholder((n,), name="B", dtype=dtype)

    C = te.extern(
        (n,),
        [A, B],
        lambda ins, outs: test_ir(ins[0], ins[1], outs[0]),
        name="while_vectorize",
        dtype=dtype,
    )
    s = te.create_schedule(C.op)

    # Lowering must raise; the last line of the error carries the message.
    try:
        tvm.lower(s, [A, B, C], "llvm")
        assert False
    except tvm.error.TVMError as e:
        error_msg = str(e).split("\n")[-1]
        expected = "A while loop inside a vectorized loop not supported"
        assert expected in error_msg
def test_vectorize_dtype_mismatch():
    """Vectorizing an axis whose extent is int64 while the body mixes int64/int32.

    Only checks that lowering completes without raising.
    """
    n = tvm.tir.IntImm("int64", 4)
    A = te.compute((n,), lambda i: tvm.tir.IntImm("int64", 2**31 - 1) + i, name="A")
    s = te.create_schedule(A.op)
    s[A].vectorize(A.op.axis[0])
    tvm.lower(s, [A], "llvm", simple_mode=True)
if __name__ == "__main__":
    # Allow running this test file directly, outside of pytest.
    test_vectorize_vector()
    test_vectorize_with_if()
    test_vectorize_loop()
    test_vectorize_if_then_else()
    test_vectorize_with_le_cond()
    test_vectorize_with_ge_cond()
    test_vectorize_let()
    test_vectorize_while_fail()
    test_vectorize_dtype_mismatch()
| 7,739 | 31.384937 | 88 | py |
tvm | tvm-main/tests/python/unittest/test_tvmscript_regression.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy
import tvm
import tvm.testing
from tvm.script import tir as T
# Module-level numpy array: regression fixture ensuring the TVMScript parser
# does not confuse global objects with names from the `tvm.script.tir` module.
np_array = numpy.array([0, 1, 2, 3])
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    # C[i, j] = sum_k A[i, k] * B[j, k]  (note: B is accessed transposed).
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                # Zero-initialize the accumulator on the first reduction step.
                C[vi, vj] = T.float32(0)
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
def test_multi_element_array_in_outmost_namespace():
    """Printing then re-parsing a PrimFunc round-trips even with `np_array` in globals."""
    func = matmul
    rt_func = tvm.script.from_source(func.script())
    tvm.ir.assert_structural_equal(func, rt_func)
def test_different_dtype_assignment_to_var():
    """`a = T.alloc_buffer(...)` parses the same with tuple or list shape syntax."""

    @T.prim_func
    def test_case():
        a = T.alloc_buffer((10, 10), dtype="int8")

    @T.prim_func
    def func_ref():
        a = T.alloc_buffer([10, 10], dtype="int8")
        T.evaluate(0)

    tvm.ir.assert_structural_equal(test_case, func_ref)
def test_var_capturing_order():
    """A Python variable defined before the prim_func is captured by value at parse time."""
    b = 2

    @T.prim_func
    def test_case():
        k: T.int32 = b

    @T.prim_func
    def func_ref():
        k: T.int32 = 2
        T.evaluate(0)

    tvm.ir.assert_structural_equal(test_case, func_ref)
def test_tir_buffer_region_extent_correct_dtype():
    """A BufferRegion extent built from int64 slice bounds must keep dtype int64."""

    @T.prim_func
    def func(A: T.Buffer((T.int64(16), T.int64(1)), "float32")):
        for i in T.grid(T.int64(16)):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                T.reads(A[vi, T.int64(0) : T.int64(1)])
                T.evaluate(0)

    # Walk down to the read region of the inner block and inspect its extent dtype.
    assert func.body.block.body.body.block.reads[0].region[0].extent.dtype == "int64"
if __name__ == "__main__":
    # Allow running this test file directly, outside of pytest.
    tvm.testing.main()
| 2,661 | 28.910112 | 85 | py |
tvm | tvm-main/tests/python/unittest/test_filter_untracked.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import shutil
import subprocess
import sys
import tempfile
def setup_git_repo(worktree=False):
    """Create a throwaway git repo exercising every tracked/untracked/ignored case.

    Parameters
    ----------
    worktree : bool
        When True, add a `git worktree` checkout and populate *that* directory
        instead of the primary clone.

    Returns
    -------
    (str, list[str])
        Path of the directory to run the filter in, and the list of temp
        directories the caller must remove when done.

    On any failure the temp directories are cleaned up before re-raising.
    """
    git_repo_dir = tempfile.mkdtemp()
    to_rm = [git_repo_dir]
    try:
        subprocess.check_output(["git", "init", "."], cwd=git_repo_dir)

        # Committed file: must pass the filter.
        with open(f"{git_repo_dir}/committed", "w") as committed_f:
            committed_f.write("normal committed file\n")
        subprocess.check_output(["git", "add", "committed"], cwd=git_repo_dir)

        # Committed *before* being gitignored: still tracked, must pass.
        with open(f"{git_repo_dir}/committed-ignored", "w") as gitignore_f:
            gitignore_f.write("this file is gitignored, but committed already")
        subprocess.check_output(["git", "add", "committed-ignored"], cwd=git_repo_dir)
        with open(f"{git_repo_dir}/.gitignore", "w") as gitignore_f:
            gitignore_f.write("ignored\n" "committed-ignored\n")
        subprocess.check_output(["git", "add", ".gitignore"], cwd=git_repo_dir)
        # NOTE: explicitly set the author so this test passes in the CI.
        subprocess.check_output(
            [
                "git",
                "-c",
                "user.name=Unit Test",
                "-c",
                "user.email=unit.test@testing.tvm.ai",
                "commit",
                "-m",
                "initial commit",
            ],
            cwd=git_repo_dir,
        )

        if worktree:
            worktree_dir = tempfile.mkdtemp()
            to_rm.append(worktree_dir)
            subprocess.check_output(["git", "worktree", "add", worktree_dir], cwd=git_repo_dir)
            # All remaining files are created in the worktree checkout.
            git_repo_dir = worktree_dir

        # Ignored (never added): must be filtered out.
        with open(f"{git_repo_dir}/ignored", "w") as gitignore_f:
            gitignore_f.write("this file is gitignored")

        # In the index but not committed: must pass.
        with open(f"{git_repo_dir}/added-to-index", "w") as added_f:
            added_f.write("only added to git index\n")
        subprocess.check_output(["git", "add", "added-to-index"], cwd=git_repo_dir)

        # Force-added despite .gitignore: tracked, must pass.
        with open(f"{git_repo_dir}/ignored-added-to-index", "w") as ignored_f:
            ignored_f.write("this file is gitignored but in the index already\n")
        subprocess.check_output(["git", "add", "-f", "ignored-added-to-index"], cwd=git_repo_dir)

        # Untracked files (top level and in a subdirectory): must be filtered out.
        with open(f"{git_repo_dir}/untracked", "w") as untracked_f:
            untracked_f.write("this file is untracked\n")

        os.mkdir(f"{git_repo_dir}/subdir")
        with open(f"{git_repo_dir}/subdir/untracked", "w") as untracked_f:
            untracked_f.write("this file is untracked\n")

        with open(f"{git_repo_dir}/subdir/untracked2", "w") as untracked_f:
            untracked_f.write("this file is also untracked\n")

        return git_repo_dir, to_rm

    except Exception:
        for rm_dir in to_rm:
            shutil.rmtree(rm_dir)
        raise
def run_test(repo_path, passed_files, filtered_files):
    """Feed paths through lint/filter_untracked.py and check what survives.

    Parameters
    ----------
    repo_path : str
        Root of the git checkout (or worktree) to run the filter script in.
    passed_files : list[str]
        Paths (relative to repo_path) expected to be emitted by the filter.
    filtered_files : list[str]
        Paths expected to be removed from the filter output.

    Each path is presented to the script both bare and prefixed with "./",
    and both spellings are asserted on.
    """
    test_input = (
        "\n".join(
            passed_files
            + filtered_files
            + [f"./{f}" for f in passed_files]
            + [f"./{f}" for f in filtered_files]
        )
        + "\n"
    )

    # Copy the filter script into the repo so it runs with repo_path as its cwd.
    test_script_dir = f"{repo_path}/test-script-dir"
    os.mkdir(test_script_dir)

    filter_script_path = f"{test_script_dir}/filter_untracked.py"
    test_script_dirname = os.path.dirname(__file__) or os.getcwd()
    shutil.copy(
        os.path.realpath(f"{test_script_dirname}/../../lint/filter_untracked.py"),
        filter_script_path,
    )
    filter_proc = subprocess.Popen(
        [sys.executable, filter_script_path],
        cwd=repo_path,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        encoding="utf-8",
    )
    filter_output, _ = filter_proc.communicate(test_input)
    filter_output_lines = [l for l in filter_output.split("\n") if l]

    for pass_f in passed_files:
        # BUGFIX: the failure messages previously used "\f" (a form-feed
        # escape); "\n" followed by "filter output" was clearly intended.
        assert (
            pass_f in filter_output_lines
        ), f"expected in filter output: {pass_f}\nfilter output: {filter_output}"
        assert (
            f"./{pass_f}" in filter_output_lines
        ), f"expected in filter output: ./{pass_f}\nfilter output: {filter_output}"

    for filter_f in filtered_files:
        assert (
            filter_f not in filter_output_lines
        ), f"expected not in filter output: {filter_f}\nfilter_output: {filter_output}"
        assert (
            f"./{filter_f}" not in filter_output_lines
        ), f"expected not in filter output: ./{filter_f}\nfilter_output: {filter_output}"

    # Every passed file appears exactly twice (bare and "./"-prefixed).
    assert len(filter_output_lines) == 2 * len(
        passed_files
    ), f"expected {len(filter_output_lines)} == 2 * {len(passed_files)}"
def test_filter_untracked():
    """filter_untracked.py keeps tracked/indexed paths, drops untracked/ignored ones."""
    repo_path, to_rm = setup_git_repo()
    try:
        kept = [
            "committed",
            "committed-ignored",
            "added-to-index",
            "ignored-added-to-index",
        ]
        dropped = [
            "ignored",
            "untracked",
            "subdir/untracked",
            "subdir/untracked2",
        ]
        run_test(repo_path, kept, dropped)
    finally:
        for tmp_dir in to_rm:
            shutil.rmtree(tmp_dir)
def test_worktree():
    """Same expectations as test_filter_untracked, but inside a git worktree.

    The worktree's `.git` file must additionally be filtered out.
    """
    repo_path, to_rm = setup_git_repo(worktree=True)
    try:
        kept = [
            "committed",
            "committed-ignored",
            "added-to-index",
            "ignored-added-to-index",
        ]
        dropped = [
            "ignored",
            "untracked",
            "subdir/untracked",
            "subdir/untracked2",
            ".git",
        ]
        run_test(repo_path, kept, dropped)
    finally:
        for tmp_dir in to_rm:
            shutil.rmtree(tmp_dir)
if __name__ == "__main__":
    # Allow running this test file directly, outside of pytest.
    test_filter_untracked()
    test_worktree()
| 6,471 | 31.852792 | 97 | py |
tvm | tvm-main/tests/python/unittest/test_arith_const_int_bound.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
def test_dtype_bound():
    """Bounds inferred purely from a variable's dtype."""
    analyzer = tvm.arith.Analyzer()

    # int64: no useful finite bound, both ends are infinite.
    bd = analyzer.const_int_bound(te.var("x", dtype="int64"))
    assert bd.min_value == bd.NEG_INF and bd.max_value == bd.POS_INF

    # int8: full signed byte range.
    bd = analyzer.const_int_bound(te.var("x", dtype="int8"))
    assert bd.min_value == -128 and bd.max_value == 127

    # uint8: full unsigned byte range.
    bd = analyzer.const_int_bound(te.var("x", dtype="uint8"))
    assert bd.min_value == 0 and bd.max_value == 255
def test_cast_bound():
    """Bounds propagate through casts, including an intermediate float cast."""
    analyzer = tvm.arith.Analyzer()
    x = te.var("x", dtype="int8")
    tmod = tvm.tir.truncmod
    # Casting to unsigned drops the negative half of the truncmod range.
    bd = analyzer.const_int_bound(tmod(x, 3).astype("uint32"))
    assert bd.min_value == 0
    assert bd.max_value == 2

    # Round-tripping through float32 keeps the signed range intact.
    bd = analyzer.const_int_bound(tmod(x, 3).astype("float32").astype("int32"))
    assert bd.min_value == -2
    assert bd.max_value == 2
def test_add_sub_bound():
    """Bound propagation through addition and subtraction, including infinities."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x", "int64"), te.var("y", "int64")
    bd = analyzer.const_int_bound(x + y)
    assert bd.min_value == bd.NEG_INF
    assert bd.max_value == bd.POS_INF

    analyzer.update(x, tvm.arith.ConstIntBound(0, 4))
    analyzer.update(y, tvm.arith.ConstIntBound(1, 10))
    bd = analyzer.const_int_bound(x + y)
    assert bd.min_value == 1
    assert bd.max_value == 14
    bd = analyzer.const_int_bound(x - y)
    assert bd.min_value == -10
    assert bd.max_value == 3

    analyzer.update(x, tvm.arith.ConstIntBound(0, bd.POS_INF), override=True)
    bd = analyzer.const_int_bound(x - y)
    assert bd.min_value == -10
    assert bd.max_value == bd.POS_INF
    bd = analyzer.const_int_bound(1 - x)
    assert bd.min_value == bd.NEG_INF
    assert bd.max_value == 1

    ## constants with negative or positive max(int64) occasionally show up
    ## in models, this is to ensure we can handle those cases
    analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, bd.NEG_INF), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(bd.NEG_INF, bd.POS_INF), override=True)
    bd = analyzer.const_int_bound(x + y)
    assert bd.min_value == bd.NEG_INF
    assert bd.max_value == bd.POS_INF

    analyzer.update(x, tvm.arith.ConstIntBound(bd.POS_INF, bd.POS_INF), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(bd.NEG_INF, bd.POS_INF), override=True)
    bd = analyzer.const_int_bound(x + y)
    assert bd.min_value == bd.NEG_INF
    assert bd.max_value == bd.POS_INF
def test_mul_bound():
    """Bound propagation through multiplication with mixed-sign operands."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x"), te.var("y")

    analyzer.update(x, tvm.arith.ConstIntBound(-2, 4))
    analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
    bd = analyzer.const_int_bound(x * y + 20)
    assert bd.min_value == 0
    assert bd.max_value == 60

    analyzer.update(x, tvm.arith.ConstIntBound(-3, 4), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(-8, 2), override=True)
    bd = analyzer.const_int_bound(x * y)
    assert bd.min_value == -32
    assert bd.max_value == 24

    # An infinite operand makes the product unbounded on both sides.
    analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, 4), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(-8, 2), override=True)
    bd = analyzer.const_int_bound(x * y)
    assert bd.min_value == bd.NEG_INF
    assert bd.max_value == bd.POS_INF
def test_truncdiv_bound():
    """Bound propagation through truncated (C-style) division."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x"), te.var("y")

    tdiv = tvm.tir.truncdiv

    analyzer.update(x, tvm.arith.ConstIntBound(-9, 4))
    analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
    bd = analyzer.const_int_bound(tdiv(x, y))
    # NOTE(review): only the lower bound is asserted for this first case.
    assert bd.min_value == -2

    analyzer.update(x, tvm.arith.ConstIntBound(-9, 4), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(-2, 0), override=True)
    bd = analyzer.const_int_bound(tdiv(x, y))
    assert bd.min_value == -4
    assert bd.max_value == 9

    analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, 4), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(-2, 1), override=True)
    bd = analyzer.const_int_bound(tdiv(x, y))
    assert bd.min_value == bd.NEG_INF
    assert bd.max_value == bd.POS_INF

    # Divisor range straddling zero widens the quotient range symmetrically.
    analyzer.update(x, tvm.arith.ConstIntBound(-9, 4), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(-4, 12), override=True)
    bd = analyzer.const_int_bound(tdiv(x, y))
    assert bd.min_value == -9
    assert bd.max_value == 9
def test_truncmod_bound():
    """Bound propagation through truncated modulo (sign follows the dividend)."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x"), te.var("y")

    tmod = tvm.tir.truncmod

    analyzer.update(x, tvm.arith.ConstIntBound(-9, 4))
    analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
    bd = analyzer.const_int_bound(tmod(x, y))
    assert bd.min_value == -9
    assert bd.max_value == 4

    analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, bd.POS_INF), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
    bd = analyzer.const_int_bound(tmod(x, y))
    assert bd.min_value == -9
    assert bd.max_value == 9

    # A strictly positive dividend yields a non-negative remainder.
    analyzer.update(x, tvm.arith.ConstIntBound(1, bd.POS_INF), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
    bd = analyzer.const_int_bound(tmod(x, y))
    assert bd.min_value == 0
    assert bd.max_value == 9
def test_floordiv_bound():
    """Bound propagation through floor division, including unsigned operands."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x"), te.var("y")
    fld = tvm.te.floordiv
    analyzer.update(x, tvm.arith.ConstIntBound(-9, 4))
    analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
    bd = analyzer.const_int_bound(fld(x, y))
    # NOTE(review): only the lower bound is asserted for this first case.
    assert bd.min_value == -9 // 4

    analyzer.update(x, tvm.arith.ConstIntBound(-9, 4), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(-2, 0), override=True)
    bd = analyzer.const_int_bound(fld(x, y))
    assert bd.min_value == -4
    assert bd.max_value == 9

    analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, 4), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(-2, 1), override=True)
    bd = analyzer.const_int_bound(fld(x, y))
    assert bd.min_value == bd.NEG_INF
    assert bd.max_value == bd.POS_INF

    analyzer.update(x, tvm.arith.ConstIntBound(-9, 4), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(-4, 12), override=True)
    bd = analyzer.const_int_bound(fld(x, y))
    assert bd.min_value == -9
    assert bd.max_value == 9

    # Test handling unsigned integers well
    x, y = te.var("x", dtype="uint32"), te.var("y", dtype="uint32")
    analyzer.update(x, tvm.arith.ConstIntBound(1, 4), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(0, 12), override=True)
    bd = analyzer.const_int_bound(fld(x, y))
    assert bd.min_value == 0
    assert bd.max_value == 4
def test_floormod_bound():
    """Bound propagation through floor modulo (result sign follows the divisor)."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x"), te.var("y")
    flm = tvm.te.floormod

    analyzer.update(x, tvm.arith.ConstIntBound(-9, 4))
    analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
    bd = analyzer.const_int_bound(flm(x, y))
    assert bd.min_value == 0
    assert bd.max_value == 9

    analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, bd.POS_INF), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
    bd = analyzer.const_int_bound(flm(x, y))
    assert bd.min_value == 0
    assert bd.max_value == 9

    analyzer.update(x, tvm.arith.ConstIntBound(1, bd.POS_INF), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
    bd = analyzer.const_int_bound(flm(x, y))
    assert bd.min_value == 0
    assert bd.max_value == 9
def test_min_max_bound():
    """Bound propagation through min/max, with one operand possibly unbounded."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x"), te.var("y")

    analyzer.update(x, tvm.arith.ConstIntBound(-9, 11))
    analyzer.update(y, tvm.arith.ConstIntBound(4, 10))
    bd = analyzer.const_int_bound(tvm.te.min(x, y))
    assert bd.min_value == -9
    assert bd.max_value == 10

    # min keeps the finite upper bound; max keeps the finite lower bound.
    analyzer.update(x, tvm.arith.ConstIntBound(bd.NEG_INF, bd.POS_INF), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
    bd = analyzer.const_int_bound(tvm.te.min(x, y))
    assert bd.min_value == bd.NEG_INF
    assert bd.max_value == 10
    bd = analyzer.const_int_bound(tvm.te.max(x, y))
    assert bd.min_value == 4
    assert bd.max_value == bd.POS_INF

    analyzer.update(x, tvm.arith.ConstIntBound(1, bd.POS_INF), override=True)
    analyzer.update(y, tvm.arith.ConstIntBound(4, 10), override=True)
    bd = analyzer.const_int_bound(tvm.te.max(x, y))
    assert bd.min_value == 4
    assert bd.max_value == bd.POS_INF
def test_select_bound():
    """A Select's bound is the union of both branches' bounds."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x"), te.var("y")

    analyzer.update(x, tvm.arith.ConstIntBound(-9, 11))
    analyzer.update(y, tvm.arith.ConstIntBound(4, 10))

    # True branch is a bool cast (0/1); false branch is y + 1 (5..11).
    bd = analyzer.const_int_bound(tvm.tir.Select(x > 1, (y < 0).astype("int32"), y + 1))
    assert bd.min_value == 0
    assert bd.max_value == 11
def test_shift_and_bound():
    """Bound propagation through right-shift and bitwise-and."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x"), te.var("y")

    analyzer.update(x, tvm.arith.ConstIntBound(-9, 11))
    analyzer.update(y, tvm.arith.ConstIntBound(2, 10))

    bd = analyzer.const_int_bound(x >> y)
    assert bd.min_value == -3
    assert bd.max_value == 2

    # x & y with a non-negative operand is bounded by that operand's max.
    bd = analyzer.const_int_bound(x & y)
    assert bd.min_value == 0
    assert bd.max_value == 10

    analyzer.update(x, tvm.arith.ConstIntBound(10, 11), override=True)
    bd = analyzer.const_int_bound(x & y)
    assert bd.min_value == 0
    assert bd.max_value == 10
def test_mix_index_bound():
    """Bounds of composite index expressions (mod/div recombination, affine mix)."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x"), te.var("y")
    tdiv = tvm.tir.truncdiv
    tmod = tvm.tir.truncmod

    analyzer.update(x, tvm.arith.ConstIntBound(0, 24 - 1))
    analyzer.update(y, tvm.arith.ConstIntBound(0, 3 - 1))
    # x%8 + (x/8)*8 reconstructs x exactly when 8 divides the extent.
    bd = analyzer.const_int_bound(tmod(x, 8) + tdiv(x, 8) * 8)
    assert bd.min_value == 0
    assert bd.max_value == 24 - 1

    bd = analyzer.const_int_bound(y + x * 3)
    assert bd.min_value == 0
    assert bd.max_value == 24 * 3 - 1

    # With a non-dividing modulus the recombination over-approximates.
    bd = analyzer.const_int_bound(tmod(x, 7) + tdiv(x, 7) * 7)
    assert bd.min_value == 0
    assert bd.max_value == (23 // 7) * 7 + 6
def test_size_var_bound():
    """A SizeVar is known to be non-negative by construction."""
    analyzer = tvm.arith.Analyzer()
    x = te.size_var("x")
    bd = analyzer.const_int_bound(x)
    assert bd.min_value == 0
    assert bd.max_value == bd.POS_INF
def test_let_bound():
    """A Let binding's constant value flows into the bound of its body."""
    analyzer = tvm.arith.Analyzer()
    x = te.var("x")
    bd = analyzer.const_int_bound(tvm.tir.Let(x, 1, x + 1))
    assert bd.min_value == 2
    assert bd.max_value == 2
def test_floormod_negative_divisor():
    """floormod with a divisor range crossing zero can yield negative results."""
    analyzer = tvm.arith.Analyzer()
    flm, fld = tvm.te.floormod, tvm.te.floordiv
    a, b = te.var("a"), te.var("b")

    analyzer.update(a, tvm.arith.ConstIntBound(0, 6))
    analyzer.update(b, tvm.arith.ConstIntBound(-5, 7))
    bd = analyzer.const_int_bound(flm(a, b))
    assert bd.min_value == -4
    assert bd.max_value == 6
def test_divmod_assume_no_zero_divsor():
    # Divmod non negative expression makes assumption that divide by zero won't occur
    # this assumption is important to get best result from symbolic shape programs
    # (NOTE(review): "divsor" in the name is a typo for "divisor", kept for
    # compatibility with external test selection.)
    analyzer = tvm.arith.Analyzer()
    flm, fld = tvm.te.floormod, tvm.te.floordiv
    a, b = te.var("a"), te.var("b")

    analyzer.update(a, tvm.arith.ConstIntBound(0, 6))
    analyzer.update(b, tvm.arith.ConstIntBound(0, tvm.arith.ConstIntBound.POS_INF))
    bd = analyzer.const_int_bound(fld(a, b))
    assert bd.min_value == 0
    assert bd.max_value == 6

    bd = analyzer.const_int_bound(flm(a, b))
    assert bd.min_value == 0
    assert bd.max_value == 6
def test_multiple_condition():
    """Constraints from a conjunction (tvm.tir.all) tighten the bound together."""
    analyzer = tvm.arith.Analyzer()
    flm, fld = tvm.te.floormod, tvm.te.floordiv
    a = te.var("a")
    analyzer.update(a, tvm.arith.ConstIntBound(0, 128))
    # Inside the scope, 1 <= a % 58 < 57 holds, so (a % 58) - 1 >= 0.
    with analyzer.constraint_scope(tvm.tir.all(1 <= flm(a, 58), flm(a, 58) < 57)):
        bound = analyzer.const_int_bound(flm(a, 58) - 1)
    assert bound.min_value == 0
def test_broadcast_bound():
    """A Broadcast inherits the bound of its scalar value."""
    analyzer = tvm.arith.Analyzer()
    a = te.var("a")
    analyzer.update(a, tvm.arith.ConstIntBound(0, 128))
    bound = analyzer.const_int_bound(tvm.tir.Broadcast(a, 4))
    assert bound.min_value == 0
    assert bound.max_value == 128
def test_ramp_bound():
    """A Ramp's bound covers base through base + stride * (lanes - 1)."""
    analyzer = tvm.arith.Analyzer()
    a = te.var("a")
    analyzer.update(a, tvm.arith.ConstIntBound(0, 128))
    bound = analyzer.const_int_bound(tvm.tir.Ramp(a, 2, 4) + 2)
    assert bound.min_value == 2
    assert bound.max_value == 128 + 2 * 3 + 2
if __name__ == "__main__":
    # Allow running this test file directly, outside of pytest.
    tvm.testing.main()
| 13,315 | 33.231362 | 88 | py |
tvm | tvm-main/tests/python/unittest/test_tir_nodes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import ir, te
def test_const():
    """tvm.tir.const produces an IntImm with the requested dtype."""
    x = tvm.tir.const(1, "int32")
    assert x.dtype == "int32"
    assert isinstance(x, tvm.tir.IntImm)
def test_te_const():
    """tvm.te.const mirrors tvm.tir.const and yields an IntImm."""
    x = tvm.te.const(1, "int32")
    assert x.dtype == "int32"
    assert isinstance(x, tvm.tir.IntImm)
def test_scalar_dtype_inference():
    """Python/NumPy scalars infer the same dtype NumPy itself would assign."""
    scalars = [
        True,
        bool(1),
        np.uint8(1),
        np.uint16(1),
        np.uint32(1),
        np.uint64(1),
        np.int8(1),
        np.int16(1),
        np.int32(1),
        np.int64(1),
        np.float16(1),
        np.float32(1),
        np.float64(1),
    ]
    # Both tir.const and runtime.convert must agree with NumPy's inference.
    for value in scalars:
        expected = str(np.array(value).dtype)
        assert tvm.tir.const(value).dtype == expected
        assert tvm.runtime.convert(value).dtype == expected

    # Bare Python int/float default to 32-bit types.
    assert tvm.tir.const(1).dtype == "int32"
    assert tvm.tir.const(1.0).dtype == "float32"
    assert tvm.runtime.convert(1).dtype == "int32"
    assert tvm.runtime.convert(1.0).dtype == "float32"
def test_make():
    """tvm.tir.max / tvm.tir.min build Max / Min nodes from mixed operands."""
    x = tvm.tir.const(1, "int32")
    y = te.var("x")
    z = x + y
    assert isinstance(tvm.tir.max(x, y), tvm.tir.Max)
    assert isinstance(tvm.tir.min(x, y), tvm.tir.Min)
def test_ir():
    """An expression can be wrapped in an Evaluate statement."""
    x = tvm.tir.const(1, "int32")
    y = tvm.tir.IntImm("int32", 1)
    z = x + y
    stmt = tvm.tir.Evaluate(z)
    assert isinstance(stmt, tvm.tir.Evaluate)
def test_ir2():
    """BufferStore construction keeps its buffer and backing data var."""
    buf_size = te.var("size")
    x = te.var("n")

    storage_type = ir.PrimType("int32")
    handle_type = ir.PointerType(storage_type)
    array = te.var("array", handle_type)
    buf = tvm.tir.decl_buffer([buf_size], "int32", data=array)

    st = tvm.tir.BufferStore(buf, x + 1, [1])
    assert isinstance(st, tvm.tir.BufferStore)
    assert st.buffer == buf
    assert st.buffer.data == array
def test_let():
    """LetStmt construction does not raise (no further assertions)."""
    x = te.var("x")
    y = te.var("y")
    stmt = tvm.tir.LetStmt(x, 10, tvm.tir.Evaluate(x + 1))
def test_cast():
    """astype yields Cast for scalars and Broadcast for vector dtypes; handles can't cast."""
    x = te.var("x", dtype="float32")
    y = x.astype("int32")
    z = x.astype("float32x4")
    assert isinstance(y, tvm.tir.Cast)
    assert isinstance(z, tvm.tir.Broadcast)
    assert z.lanes == 4

    s = tvm.tir.StringImm("s")
    with pytest.raises(tvm.error.TVMError):
        # Verify both the exception type and its message.
        try:
            s.astype("int")
        except Exception as e:
            assert "Can't cast a handle to other types" in str(e)
            raise
def test_attr():
    """AttrStmt keeps its node; missing attributes raise AttributeError."""
    x = te.var("x")
    y = te.var("y")
    stmt = tvm.tir.AttrStmt(y, "stride", 10, tvm.tir.Evaluate(x + 1))
    assert stmt.node == y

    a = tvm.runtime.convert(1)
    assert a.value == 1
    try:
        a.no_field
        assert False
    except AttributeError:
        pass
def test_basic():
    """String form of an Add expression is 'a + b' using the var names."""
    a = te.var("a")
    b = te.var("b")
    c = a + b
    assert str(c) == "%s + %s" % (a.name, b.name)
def test_stmt():
    """For-node construction does not raise (no further assertions)."""
    x = tvm.tir.Evaluate(0)
    tvm.tir.For(te.var("i"), 0, 1, tvm.tir.ForKind.SERIAL, x)
def test_dir():
    """dir() on an FFI-backed node must not raise."""
    x = te.var("x")
    dir(x)
def test_dtype():
    """Vars default to int32; comparisons produce bool-typed expressions."""
    x = te.var("x")
    assert x.dtype == "int32"
    y = te.var("y")
    assert (x > y).dtype == "bool"
def test_any():
    """tvm.tir.any builds an 'or' chain; Python `or` and empty calls are rejected."""
    x = te.var("x")
    y = te.var("y")
    z = te.var("z")

    # Python's short-circuit `or` cannot be overloaded for symbolic exprs.
    try:
        t = x or x
        assert False
    except ValueError:
        pass
    # At least one argument is required.
    try:
        tvm.tir.any()
        assert False
    except ValueError:
        pass
    assert str(tvm.tir.any(x < y)) == "%s < %s" % (x.name, y.name)
    assert str(tvm.tir.any(x < y, x > z)) == "%s < %s or %s > %s" % (
        x.name,
        y.name,
        x.name,
        z.name,
    )
    assert str(
        tvm.tir.any(x < y, y > z + 1, x < z * 2)
    ) == "%s < %s or %s > %s + 1 or %s < %s * 2" % (
        x.name,
        y.name,
        y.name,
        z.name,
        x.name,
        z.name,
    )
def test_all():
    """tvm.tir.all builds an 'and' chain; Python `and` and empty calls are rejected."""
    x = te.var("x")
    y = te.var("y")
    z = te.var("z")

    # Python's short-circuit `and` cannot be overloaded for symbolic exprs.
    try:
        t = x and x
        assert False
    except ValueError:
        pass
    # At least one argument is required.
    try:
        tvm.tir.all()
        assert False
    except ValueError:
        pass
    assert str(tvm.tir.all(x < y)) == "%s < %s" % (x.name, y.name)
    assert str(tvm.tir.all(x < y, x > z)) == "%s < %s and %s > %s" % (
        x.name,
        y.name,
        x.name,
        z.name,
    )
    assert str(
        tvm.tir.all(x < y, y > z + 1, x < z * 2)
    ) == "%s < %s and %s > %s + 1 and %s < %s * 2" % (
        x.name,
        y.name,
        y.name,
        z.name,
        x.name,
        z.name,
    )
def test_bitwise():
    """Bitwise operators print as T.* intrinsics and preserve vector lanes."""
    x = te.var("x")
    y = te.var("y")
    assert str(x << y) == "T.shift_left(x, y)"
    assert str(x >> y) == "T.shift_right(x, y)"
    assert str(x & y) == "T.bitwise_and(x, y)"
    assert str(x | y) == "T.bitwise_or(x, y)"
    assert str(x ^ y) == "T.bitwise_xor(x, y)"
    # Reflected (int-on-the-left) forms.
    assert str(10 & x) == "T.bitwise_and(10, x)"
    assert str(10 | x) == "T.bitwise_or(10, x)"
    assert str(10 ^ x) == "T.bitwise_xor(10, x)"
    assert str(10 >> x) == "T.shift_right(10, x)"
    assert str(10 << x) == "T.shift_left(10, x)"
    assert str(10 % x) == "10 % x"
    assert str(~x) == "T.bitwise_not(x)"
    # Vector dtypes propagate through the shift operators.
    assert (tvm.tir.const(1, "int8x2") >> 1).dtype == "int8x2"
    assert (x >> tvm.tir.const(1, "int32x2")).dtype == "int32x2"
    assert (te.var("z", "int8x2") << tvm.tir.const(1, "int8x2")).dtype == "int8x2"
def test_float_bitwise():
    """Bitwise/shift operators on float operands must raise."""
    t = tvm.tir.const(1.5, dtype="float32")
    for test in [
        lambda lhs, rhs: lhs << rhs,
        lambda lhs, rhs: lhs >> rhs,
        lambda lhs, rhs: lhs | rhs,
        lambda lhs, rhs: lhs ^ rhs,
        lambda lhs, rhs: lhs & rhs,
    ]:
        try:
            test(t, 10.0)
            assert False
        except tvm.TVMError:
            pass
    # Bitwise-not on a float also fails (surfaces as RuntimeError).
    try:
        ~t
        assert False
    except RuntimeError:
        pass
def test_shift_bounds():
    """Shift amounts must be inside [0, 31] for int32 operands."""
    x = te.var("x")
    for test in [lambda lhs, rhs: lhs << rhs, lambda lhs, rhs: lhs >> rhs]:
        # negative case
        for testcase in [(x, -1), (x, 32)]:
            try:
                test(*testcase)
                assert False
            except tvm.TVMError:
                pass

        # positive case
        for testcase in [(x, 0), (x, 16), (x, 31)]:
            test(*testcase)
def test_divide_by_zero():
    """Constant division/modulo by zero is rejected at expression-build time."""
    for test in [
        lambda lhs, rhs: tvm.tir.floormod(lhs, rhs),
        lambda lhs, rhs: tvm.tir.floordiv(lhs, rhs),
        lambda lhs, rhs: tvm.tir.truncmod(lhs, rhs),
        lambda lhs, rhs: tvm.tir.truncdiv(lhs, rhs),
        lambda lhs, rhs: tvm.tir.div(lhs, rhs),
    ]:
        try:
            test(tvm.tir.const(5, "int32"), tvm.tir.const(0, "int32"))
            assert False
        except tvm.TVMError:
            pass
def test_infinity():
    """tvm.tir.infinity prints as a typed float literal with value "inf"."""
    assert str(tvm.tir.infinity("float16")) == 'T.float16("inf")'
    assert str(tvm.tir.infinity("float32")) == 'T.float32("inf")'
    assert str(tvm.tir.infinity("float64")) == 'T.float64("inf")'
def test_isnan():
    """tir.isnan: bool result, float16 is widened, ints are constant False."""
    x = te.var("x", "float32")
    assert str(tvm.tir.isnan(x)) == "T.isnan(x)"
    assert str(tvm.tir.isnan(x).dtype) == "bool"
    # float16 is cast up to float32 before the check.
    y = te.var("y", "float16")
    assert str(tvm.tir.isnan(y)) == 'T.isnan(T.Cast("float32", y))'
    # Integers can never be NaN.
    z = te.var("z", "int32")
    assert str(tvm.tir.isnan(z)) == "T.bool(False)"
    # Vector input yields a vector of 1-bit results.
    k = te.var("k", "int8x2")
    assert str(tvm.tir.isnan(k).dtype) == "uint1x2"
def test_equality():
    """Symbolic ==/!= between distinct vars evaluates falsy in a bool context."""
    a = te.var("a")
    b = te.var("b")
    c = a == b
    assert not c
    d = c != c
    assert not d
def test_equality_string_imm():
    """A StringImm compares equal to its backing Python string.

    The original test only evaluated the comparisons (guarding against a
    crash); the results were discarded.  Asserting them pins the actual
    equality semantics as well.
    """
    x = "a"
    y = tvm.tir.StringImm(x)
    assert x == y.value
    # str.__eq__ returns NotImplemented, so Python falls back to
    # StringImm.__eq__, which compares against the stored value.
    assert x == y
    assert y == x
def test_prim_func():
    """PrimFunc buffer_map and with_attr behave as expected; attrs start empty."""
    x = te.var("x")
    y = te.var("y")
    b = tvm.tir.decl_buffer((x,), "float32")
    stmt = tvm.tir.LetStmt(x, 10, tvm.tir.Evaluate(x + 1))

    func = tvm.tir.PrimFunc([x, y, b], stmt)
    # make sure we can print
    # Only the buffer parameter lands in buffer_map.
    assert func.buffer_map[func.params[2]].same_as(b)
    assert len(func.buffer_map) == 1
    f2 = func.with_attr({"calling_conv": 1, "tir.noalias": True})
    assert f2.attrs["calling_conv"].value == 1
    # with_attr returns a copy; the original stays attribute-free.
    assert func.attrs is None
def test_vars():
    """Var dtype comes from a primitive dtype or a pointer type annotation."""
    x = tvm.tir.Var("xyz", "int8")
    assert x.dtype == "int8"
    ptype = tvm.ir.PointerType(tvm.ir.PrimType("float"))
    x = tvm.tir.Var("xyz", ptype)
    # Pointer-typed vars report dtype "handle" and keep the full annotation.
    assert x.dtype == "handle"
    assert x.type_annotation == ptype
    assert isinstance(ptype.element_type, tvm.ir.PrimType)
def test_scoped_storage_vars():
    """PointerType carries a storage scope that survives Var construction."""
    dtype = "float"
    storage_scope = "global.texture"
    ptype = tvm.ir.PointerType(tvm.ir.PrimType(dtype), storage_scope)
    x = tvm.tir.Var("xyz", ptype)
    assert x.dtype == "handle"
    assert x.type_annotation == ptype
    assert x.type_annotation.storage_scope == storage_scope
    assert isinstance(ptype.element_type, tvm.ir.PrimType)
def test_buffer_load_store():
    """BufferLoad / BufferStore / BufferRealize node construction sanity checks."""
    b = tvm.tir.decl_buffer((10,), "float32")
    x = tvm.tir.BufferLoad(b, [0])
    assert isinstance(x, tvm.tir.BufferLoad)
    # A load takes its dtype from the buffer.
    assert x.dtype == "float32"
    assert x.buffer == b
    s = tvm.tir.BufferStore(b, 0.1, [0])
    assert isinstance(s, tvm.tir.BufferStore)

    s = tvm.tir.BufferRealize(b, [tvm.ir.Range(0, 1)], True, tvm.tir.Evaluate(0))
    assert isinstance(s, tvm.tir.BufferRealize)
def test_intimm_cond():
    """Converted IntImm constants hash, compare, and behave like Python ints."""
    x = tvm.runtime.convert(1)
    y = tvm.runtime.convert(1)
    # Equal constants are interchangeable as set members.
    s = {x}
    assert y in s
    assert x == y
    assert x < 20
    assert not (x >= 20)
    assert x < 10 and y < 10
    assert not tvm.runtime.convert(x != 1)
    assert x == 1
if __name__ == "__main__":
    # Allow running this test file directly, outside of pytest.
    tvm.testing.main()
| 10,476 | 24.805419 | 82 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_state.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import gc
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.ir import IRModule
from tvm.script import tir as T
# pylint: disable=no-member,invalid-name,unused-variable
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
    # C = A * 2.0 + 1.0, staged through the intermediate buffer B.
    A = T.match_buffer(a, (128, 128), "float32")
    C = T.match_buffer(c, (128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    # C[i, j] = sum_k A[i, k] * B[j, k], with init and update in separate blocks.
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])

    for i, j in T.grid(128, 128):
        with T.block("init"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = T.float32(0)
        for k in range(0, 128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def block_in_opaque_block(a: T.handle, b: T.handle) -> None:
    # Fixture: named blocks ("C"/"E") with no iter vars wrap data-dependent
    # if/else branches, so their inner loops live inside opaque blocks.
    A = T.match_buffer(a, (128, 128), "float32")
    B = T.match_buffer(b, (128, 128), "float32")
    for i in range(128):
        with T.block("B"):
            vi = T.axis.S(128, i)
            T.reads([A[0:128, 0:128]])
            T.writes([B[0:128, 0:128]])
            B[vi, 0] = A[vi, 0]
            if A[vi, 0] == 0.0:
                with T.block("C"):
                    T.reads([A[0:128, 0:128]])
                    T.writes([B[0:128, 0:128]])
                    for j in range(128):
                        with T.block("D"):
                            vj = T.axis.S(128, j)
                            B[vi, vj] = A[vi, vj] * 3.0
            else:
                with T.block("E"):
                    T.reads([A[0:128, 0:128]])
                    T.writes([B[0:128, 0:128]])
                    for j in range(128):
                        with T.block("F"):
                            vj = T.axis.S(128, j)
                            B[vi, vj] = A[vi, vj] * 2.0
# pylint: enable=no-member,invalid-name,unused-variable
def replace_ir_builder(deep_copy=False, realize=False):
    """Build a ScheduleState over a fresh copy of `elementwise` plus a
    replacement `Block` named "target" that reuses the second statement of
    the root block, optionally wrapped in a BlockRealize and/or deep-copied.
    """
    func = tvm.script.from_source(elementwise.script())
    state = tir.ScheduleState(func, debug_mask="all")
    reused_body = state.mod["main"].body.block.body[1]
    block = tvm.tir.Block(
        iter_vars=[],
        reads=[],
        writes=[],
        name_hint="target",
        body=reused_body,
        init=None,
        alloc_buffers=None,
        match_buffers=None,
        annotations=None,
    )
    if realize:
        block = tvm.tir.BlockRealize(iter_values=[], predicate=True, block=block)
    if deep_copy:
        # Round-trip through pickle state to force a structurally-equal copy.
        block.__setstate__(block.__getstate__())
    # Drop transient references so copy-on-write refcounts are exact.
    gc.collect()
    return state, block
def replace_ir_builder_module(deep_copy=False, realize=False):
    """Like `replace_ir_builder`, but over a two-function IRModule
    ("main" and "other", both copies of `elementwise`)."""
    main_func = tvm.script.from_source(elementwise.script())
    other_func = tvm.script.from_source(elementwise.script())
    module = IRModule(functions={"main": main_func, "other": other_func})
    state = tir.ScheduleState(module, debug_mask="all")
    block = tvm.tir.Block(
        iter_vars=[],
        reads=[],
        writes=[],
        name_hint="target",
        body=state.mod["main"].body.block.body[1],
        init=None,
        alloc_buffers=None,
        match_buffers=None,
        annotations=None,
    )
    if realize:
        block = tvm.tir.BlockRealize(iter_values=[], predicate=True, block=block)
    if deep_copy:
        # Round-trip through pickle state to force a structurally-equal copy.
        block.__setstate__(block.__getstate__())
    # Drop transient references so copy-on-write refcounts are exact.
    gc.collect()
    return state, block
def replace_ir_builder_with_opaque():
    """Return a ScheduleState over a fresh copy of `block_in_opaque_block`."""
    parsed = tvm.script.from_source(block_in_opaque_block.script())
    state = tir.ScheduleState(parsed, debug_mask="all")
    gc.collect()
    return state
def test_replace_direct_write0():
    """An unreferenced PrimFunc is mutated in place by `replace`."""
    state, target = replace_ir_builder(realize=True)

    func_hash = state.mod["main"].__hash__()
    sref = state.get_sref(state.mod["main"].body.block.body[1])
    state.replace(sref, target)

    # In-place write: the function object itself was reused.
    assert func_hash == state.mod["main"].__hash__()
    # The replaced slot now matches the target.
    tvm.ir.assert_structural_equal(state.mod["main"].body.block.body[1], target)
    # The target reuses `sref.stmt`, so the sref stays valid.
    assert sref.stmt is not None
def test_replace_direct_write1():
    """A SeqStmt with a unique reference is written in place by `replace`."""
    state, target = replace_ir_builder(realize=True)

    seq_hash = state.mod["main"].body.block.body.__hash__()
    held = state.mod["main"].body.block.body[1]
    sref = state.get_sref(held)
    state.replace(sref, target)

    # In-place write: the SeqStmt object itself was reused.
    assert seq_hash == state.mod["main"].body.block.body.__hash__()
    assert not tvm.ir.structural_equal(held.body, target)
    # The replaced slot now matches the target.
    tvm.ir.assert_structural_equal(state.mod["main"].body.block.body[1], target)
    # The target reuses `sref.stmt`, so the sref stays valid.
    assert sref.stmt is not None
def test_replace_copy():
    """Holding an extra reference to the func forces a whole-function copy."""
    state, target = replace_ir_builder(deep_copy=True, realize=True)

    func_hash = state.mod["main"].__hash__()
    original_func = state.mod["main"]  # extra reference triggers copy-on-write
    sref = state.get_sref(state.mod["main"].body.block.body[0])
    state.replace(sref, target)

    # The whole function was copied so `original_func` stays untouched.
    assert func_hash != state.mod["main"].__hash__()
    assert not tvm.ir.structural_equal(original_func.body, state.mod["main"].body)
    assert func_hash == original_func.__hash__()
    # The replaced slot now matches the target.
    tvm.ir.assert_structural_equal(state.mod["main"].body.block.body[0], target)
    # The old AST node is gone, so the sref is invalidated.
    assert sref.stmt is None
def test_replace_partial_copy0():
    """Only the subtree with an extra reference is copied; the rest is reused."""
    state, target = replace_ir_builder(deep_copy=True, realize=True)

    func_hash = state.mod["main"].__hash__()
    held = state.mod["main"].body.block.body[0]
    held_hash = held.__hash__()
    sref = state.get_sref(state.mod["main"].body.block.body[0].body)
    sibling_hash = state.mod["main"].body.block.body[1].__hash__()
    state.replace(sref, target)

    # `held` keeps a second reference, so copy-on-write duplicates that stmt.
    assert held_hash != state.mod["main"].body.block.body[0].__hash__()
    assert not tvm.ir.structural_equal(held.body, target)
    # The function and the untouched sibling are written directly.
    assert func_hash == state.mod["main"].__hash__()
    assert sibling_hash == state.mod["main"].body.block.body[1].__hash__()
    # The replaced slot now matches the target.
    tvm.ir.assert_structural_equal(state.mod["main"].body.block.body[0].body, target)
    # The old AST node was deleted, invalidating the sref.
    assert sref.stmt is None
def test_replace_partial_copy1():
    """A uniquely-referenced parent stmt keeps its identity across `replace`."""
    state, target = replace_ir_builder(deep_copy=True)

    func_hash = state.mod["main"].__hash__()
    held = state.mod["main"].body.block.body[0].body
    stmt_hash = state.mod["main"].body.block.body[0].__hash__()
    sref = state.get_sref(state.mod["main"].body.block.body[0].body.body.block)
    sibling_hash = state.mod["main"].body.block.body[1].__hash__()
    state.replace(sref, target)

    # The parent stmt held the only reference, so it is written in place.
    assert stmt_hash == state.mod["main"].body.block.body[0].__hash__()
    assert not tvm.ir.structural_equal(held.body, target)
    # The function and the untouched sibling are written directly.
    assert func_hash == state.mod["main"].__hash__()
    assert sibling_hash == state.mod["main"].body.block.body[1].__hash__()
    # The replaced slot now matches the target.
    tvm.ir.assert_structural_equal(state.mod["main"].body.block.body[0].body.body.block, target)
    # The old AST node was deleted, invalidating the sref.
    assert sref.stmt is None
def test_replace_root_write():
    """Replacing the root block of an unreferenced func happens in place."""
    state, target = replace_ir_builder()

    func_hash = state.mod["main"].__hash__()
    sref = state.get_sref(state.mod["main"].body.block)
    state.replace(sref, target)

    # No copy happened, and the root block now equals the target.
    assert func_hash == state.mod["main"].__hash__()
    tvm.ir.assert_structural_equal(state.mod["main"].body.block, target)
def test_replace_root_copy0():
    """A held function reference survives a root-block replacement."""
    state, target = replace_ir_builder(deep_copy=True)

    func_hash = state.mod["main"].__hash__()
    held_func = state.mod["main"]
    sref = state.get_sref(state.mod["main"].body.block)
    state.replace(sref, target)

    # The function was copied; the new root block equals the target.
    assert func_hash != state.mod["main"].__hash__()
    tvm.ir.assert_structural_equal(state.mod["main"].body.block, target)
    # The held reference is untouched.
    assert func_hash == held_func.__hash__()
    assert not tvm.ir.structural_equal(held_func.body, target)
def test_replace_root_copy1():
    """A held root-block reference survives replacing one of its children."""
    state, target = replace_ir_builder(deep_copy=True, realize=True)

    block_hash = state.mod["main"].body.block.__hash__()
    held_block = state.mod["main"].body.block
    sref = state.get_sref(state.mod["main"].body.block.body[0])
    state.replace(sref, target)

    # The root block was copied; the replaced child equals the target.
    assert block_hash != state.mod["main"].body.block.__hash__()
    tvm.ir.assert_structural_equal(state.mod["main"].body.block.body[0], target)
    # The held reference is untouched.
    assert block_hash == held_block.__hash__()
    assert not tvm.ir.structural_equal(held_block.body, target)
def test_replace_root_copy2():
    """A held function-map reference survives a root-block replacement."""
    state, target = replace_ir_builder(deep_copy=True)

    map_hash = state.mod.functions.__hash__()
    held_functions = state.mod.functions
    sref = state.get_sref(state.mod["main"].body.block)
    state.replace(sref, target)

    # The function map was copied; the new root block equals the target.
    assert map_hash != state.mod.functions.__hash__()
    tvm.ir.assert_structural_equal(state.mod["main"].body.block, target)
    # The held reference is untouched.
    assert map_hash == held_functions.__hash__()
    for _, func in held_functions.items():
        assert not tvm.ir.structural_equal(func.body.block, target)
def test_replace_root_copy3():
    """A held IRModule reference survives a root-block replacement."""
    state, target = replace_ir_builder(deep_copy=True)

    mod_hash = state.mod.__hash__()
    held_mod = state.mod
    sref = state.get_sref(state.mod["main"].body.block)
    state.replace(sref, target)

    # The whole module was copied; the new root block equals the target.
    assert mod_hash != state.mod.__hash__()
    tvm.ir.assert_structural_equal(state.mod["main"].body.block, target)
    # The held reference is untouched.
    assert mod_hash == held_mod.__hash__()
    assert not tvm.ir.structural_equal(held_mod["main"].body.block, target)
def test_replace_block_remap():
    """Passing a block_sref_reuse map remaps the old sref to the new block."""
    state = tir.ScheduleState(elementwise, debug_mask="all")
    # Replacement block taken from `matmul`.
    target = matmul.body.block.body.body.body[0].block
    sref = state.get_sref(state.mod["main"].body.block.body[0].body.body.block)
    state.replace(sref, target, {sref.stmt: target})

    sref_after = state.get_sref(state.mod["main"].body.block.body[0].body.body.block)
    # The original sref was remapped in place onto the new block.
    assert sref.__hash__() == sref_after.__hash__()
    tvm.ir.assert_structural_equal(sref.stmt, target)
def test_replace_block_in_opaque_block():
    """Replacing a loop nested inside an opaque block happens in place."""
    state = replace_ir_builder_with_opaque()
    root_hash = state.mod["main"].__hash__()

    old_loop = state.mod["main"].body.block.body.body.block.body[1].then_case.block.body
    sref = state.get_sref(old_loop)
    replacement = tir.For(
        loop_var=old_loop.loop_var,
        min_val=0,
        extent=128,
        kind=tir.ForKind.SERIAL,
        body=tir.Evaluate(0),
        thread_binding=None,
        annotations=None,
    )
    state.replace(sref, replacement)

    # The function was written directly, and the sref tracks the new loop.
    assert root_hash == state.mod["main"].__hash__()
    tvm.ir.assert_structural_equal(sref.stmt, replacement)
def test_replace_ir_module():
    """Replacing inside `main` leaves the sibling function `other` untouched."""
    state, target = replace_ir_builder_module(deep_copy=True)

    main_hash = state.mod["main"].__hash__()
    other_hash = state.mod["other"].__hash__()
    held_main = state.mod["main"]
    sref = state.get_sref(state.mod["main"].body.block)
    state.replace(sref, target)

    # `main` was copied; the new root block equals the target.
    assert main_hash != state.mod["main"].__hash__()
    tvm.ir.assert_structural_equal(state.mod["main"].body.block, target)
    # The held reference is untouched.
    assert main_hash == held_main.__hash__()
    assert not tvm.ir.structural_equal(held_main.body, target)
    # The sibling function keeps its identity.
    assert other_hash == state.mod["other"].__hash__()
if __name__ == "__main__":
    # Delegate to TVM's pytest-based test entry point.
    tvm.testing.main()
| 13,521 | 36.983146 | 92 | py |
tvm | tvm-main/tests/python/unittest/test_micro_project_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
import io
import json
import sys
import unittest
from unittest import mock
import pytest
import tvm
import tvm.testing
# Implementing as a fixture so that the tvm.micro import doesn't occur
# until fixture setup time. This is necessary for pytest's collection
# phase to work when USE_MICRO=OFF, while still explicitly listing the
# tests as skipped.
@tvm.testing.fixture
def BaseTestHandler():
    """Return a ProjectAPIHandler subclass whose methods all fail by default.

    Only `server_info_query` is implemented; every other method asserts with a
    "<method> is not implemented for this test" message so individual tests can
    mock exactly what they need. Implemented as a fixture so the `tvm.micro`
    import doesn't occur until fixture setup time (see module comment above).
    """
    from tvm.micro import project_api
    class BaseTestHandler_Impl(project_api.server.ProjectAPIHandler):
        # Canonical server-info reply; tests assert against these exact values.
        DEFAULT_TEST_SERVER_INFO = project_api.server.ServerInfo(
            platform_name="platform_name",
            is_template=True,
            model_library_format_path="./model-library-format-path.sh",
            project_options=[
                project_api.server.ProjectOption(
                    name="foo", optional=["build"], type="bool", help="Option foo"
                ),
                project_api.server.ProjectOption(
                    name="bar",
                    required=["generate_project"],
                    type="str",
                    choices=["qux"],
                    help="Option bar",
                ),
            ],
        )
        def server_info_query(self, tvm_version):
            return self.DEFAULT_TEST_SERVER_INFO
        def generate_project(self, model_library_format_path, crt_path, project_path, options):
            assert False, "generate_project is not implemented for this test"
        def build(self, options):
            assert False, "build is not implemented for this test"
        def flash(self, options):
            assert False, "flash is not implemented for this test"
        def open_transport(self, options):
            assert False, "open_transport is not implemented for this test"
        def close_transport(self, options):
            # Bug fix: the message previously said "open_transport", a
            # copy-paste error that mislabeled failures from this method.
            assert False, "close_transport is not implemented for this test"
        def read_transport(self, n, timeout_sec):
            assert False, "read_transport is not implemented for this test"
        def write_transport(self, data, timeout_sec):
            assert False, "write_transport is not implemented for this test"
    return BaseTestHandler_Impl
class Transport:
    """In-memory file-like pipe used to connect the API client and server."""

    closed = False

    def __init__(self):
        self.data = bytearray()  # everything ever written
        self.rpos = 0  # read cursor into `data`
        self.items = []

    def readable(self):
        return True

    def writable(self):
        return True

    def seekable(self):
        return False

    def read(self, size=-1):
        """Return up to `size` unread bytes (all remaining when size == -1)."""
        remaining = len(self.data) - self.rpos
        count = remaining if size == -1 else min(size, remaining)
        start, self.rpos = self.rpos, self.rpos + count
        return self.data[start : self.rpos]

    def write(self, data):
        """Append `data` to the buffer; it becomes visible to future reads."""
        self.data.extend(data)
class ClientServerFixture:
    """Wires a ProjectAPIClient to a ProjectAPIServer over in-memory Transports."""
    def __init__(self, handler):
        from tvm.micro import project_api
        self.handler = handler
        # Two one-way pipes: client writes requests, server writes replies.
        self.client_to_server = Transport()
        self.server_to_client = Transport()
        self.server = project_api.server.ProjectAPIServer(
            self.client_to_server, self.server_to_client, handler
        )
        self.client = project_api.client.ProjectAPIClient(
            self.server_to_client,
            self.client_to_server,
            # Pump the server synchronously right after every client write.
            testonly_did_write_request=self._process_server_request,
        )
        # Tests flip this when the server is expected to reject a request.
        self.expect_failure = False
    def _process_server_request(self):
        # Serve exactly one request; its success must match the expectation.
        assert self.server.serve_one_request() == (
            not self.expect_failure
        ), "Server failed to process request"
@tvm.testing.requires_micro
def test_server_info_query(BaseTestHandler):
    """Spell out the default server-info reply field by field; derivative test
    cases rely on exactly these defaults."""
    fixture = ClientServerFixture(BaseTestHandler())
    reply = fixture.client.server_info_query(tvm.__version__)

    expected_options = [
        {
            "name": "foo",
            "choices": None,
            "default": None,
            "type": "bool",
            "required": None,
            "optional": ["build"],
            "help": "Option foo",
        },
        {
            "name": "bar",
            "choices": ["qux"],
            "default": None,
            "type": "str",
            "required": ["generate_project"],
            "optional": None,
            "help": "Option bar",
        },
    ]
    assert reply["protocol_version"] == 1
    assert reply["platform_name"] == "platform_name"
    assert reply["is_template"] is True
    assert reply["model_library_format_path"] == "./model-library-format-path.sh"
    assert reply["project_options"] == expected_options
@tvm.testing.requires_micro
def test_server_info_query_wrong_tvm_version(BaseTestHandler):
    """An UnsupportedTVMVersionError raised server-side reaches the client."""
    from tvm.micro import project_api

    def raise_unsupported(tvm_version):
        raise project_api.server.UnsupportedTVMVersionError()

    with mock.patch.object(BaseTestHandler, "server_info_query", side_effect=raise_unsupported):
        fixture = ClientServerFixture(BaseTestHandler())
        with pytest.raises(project_api.server.UnsupportedTVMVersionError) as exc_info:
            fixture.client.server_info_query(tvm.__version__)
        assert "UnsupportedTVMVersionError" in str(exc_info.value)
@tvm.testing.requires_micro
def test_server_info_query_wrong_protocol_version(BaseTestHandler):
    """A protocol_version other than 1 in the reply is rejected client-side."""
    from tvm.micro import project_api

    ServerInfoProtocol = collections.namedtuple(
        "ServerInfoProtocol", list(project_api.server.ServerInfo._fields) + ["protocol_version"]
    )

    def stale_server_info_query(tvm_version):
        # Same payload as the default reply, but claiming protocol version 0.
        return ServerInfoProtocol(
            protocol_version=0, **BaseTestHandler.DEFAULT_TEST_SERVER_INFO._asdict()
        )

    with mock.patch.object(
        BaseTestHandler, "server_info_query", side_effect=stale_server_info_query
    ):
        fixture = ClientServerFixture(BaseTestHandler())
        with pytest.raises(project_api.client.UnsupportedProtocolVersionError) as exc_info:
            fixture.client.server_info_query(tvm.__version__)
        assert "microTVM API Server supports protocol version 0; want 1" in str(exc_info.value)
@tvm.testing.requires_micro
def test_base_test_handler(BaseTestHandler):
    """All methods should raise AssertionError on BaseTestHandler."""
    fixture = ClientServerFixture(BaseTestHandler())
    for method in dir(fixture.handler):
        # NOTE(review): `method` is a str from dir(), so `callable(method)` is
        # always False -- every name is skipped and the loop body below never
        # executes, making this test vacuous.
        if method.startswith("_") or not callable(method) or method == "server_info_query":
            continue
        # NOTE(review): `self` does not exist in this pytest-style function and
        # unittest's API is `assertRaises`, not `assertThrows`; comparing the
        # exception *object* to an f-string below is also wrong. A real fix
        # must call each client method with valid arguments and account for
        # how the server transports handler AssertionErrors back to the
        # client -- confirm against project_api.client before rewriting.
        with self.assertThrows(AssertionError) as exc_info:
            getattr(fixture.client, method)()
        assert (exc_info.exception) == f"{method} is not implemented for this test"
@tvm.testing.requires_micro
def test_build(BaseTestHandler):
    """`build` forwards its options unchanged to the handler."""
    with mock.patch.object(BaseTestHandler, "build", return_value=None):
        fixture = ClientServerFixture(BaseTestHandler())
        fixture.client.build(options={"bar": "baz"})
        fixture.handler.build.assert_called_once_with(options={"bar": "baz"})
@tvm.testing.requires_micro
def test_flash(BaseTestHandler):
    """`flash` forwards its options unchanged to the handler."""
    with mock.patch.object(BaseTestHandler, "flash", return_value=None):
        fixture = ClientServerFixture(BaseTestHandler())
        fixture.client.flash(options={"bar": "baz"})
        fixture.handler.flash.assert_called_once_with(options={"bar": "baz"})
@tvm.testing.requires_micro
def test_open_transport(BaseTestHandler):
    """`open_transport` returns the handler's timeouts serialized as a dict."""
    from tvm.micro import project_api

    timeouts = project_api.server.TransportTimeouts(
        session_start_retry_timeout_sec=1.0,
        session_start_timeout_sec=2.0,
        session_established_timeout_sec=3.0,
    )
    with mock.patch.object(BaseTestHandler, "open_transport", return_value=timeouts):
        fixture = ClientServerFixture(BaseTestHandler())
        reply = fixture.client.open_transport(options={"bar": "baz"})
        assert reply == {"timeouts": dict(timeouts._asdict())}
        fixture.handler.open_transport.assert_called_once_with({"bar": "baz"})
@tvm.testing.requires_micro
def test_close_transport(BaseTestHandler):
    """`close_transport` takes no arguments and forwards to the handler."""
    with mock.patch.object(BaseTestHandler, "close_transport", return_value=None):
        fixture = ClientServerFixture(BaseTestHandler())
        fixture.client.close_transport()
        fixture.handler.close_transport.assert_called_once_with()
@tvm.testing.requires_micro
def test_read_transport(BaseTestHandler):
    """`read_transport` forwards args and propagates transport exceptions."""
    from tvm.micro import project_api

    with mock.patch.object(BaseTestHandler, "read_transport", return_value=b"foo\x1b"):
        fixture = ClientServerFixture(BaseTestHandler())
        assert fixture.client.read_transport(128, timeout_sec=5.0) == {"data": b"foo\x1b"}
        fixture.handler.read_transport.assert_called_with(128, 5.0)

        # Each transport-level exception class must surface on the client.
        for exc, size, timeout in (
            (project_api.server.IoTimeoutError, 256, 10.0),
            (project_api.server.TransportClosedError, 512, 15.0),
        ):
            fixture.handler.read_transport.side_effect = exc
            with pytest.raises(exc):
                fixture.client.read_transport(size, timeout_sec=timeout)
            fixture.handler.read_transport.assert_called_with(size, timeout)

        assert fixture.handler.read_transport.call_count == 3
@tvm.testing.requires_micro
def test_write_transport(BaseTestHandler):
    """`write_transport` forwards args and propagates transport exceptions."""
    from tvm.micro import project_api

    with mock.patch.object(BaseTestHandler, "write_transport", return_value=None):
        fixture = ClientServerFixture(BaseTestHandler())
        assert fixture.client.write_transport(b"foo", timeout_sec=5.0) is None
        fixture.handler.write_transport.assert_called_with(b"foo", 5.0)

        # Each transport-level exception class must surface on the client.
        for exc, payload, timeout in (
            (project_api.server.IoTimeoutError, b"bar", 10.0),
            (project_api.server.TransportClosedError, b"baz", 15.0),
        ):
            fixture.handler.write_transport.side_effect = exc
            with pytest.raises(exc):
                fixture.client.write_transport(payload, timeout_sec=timeout)
            fixture.handler.write_transport.assert_called_with(payload, timeout)

        assert fixture.handler.write_transport.call_count == 3
class ProjectAPITestError(Exception):
    """An error raised in test; used to simulate a failing handler method."""
@tvm.testing.requires_micro
def test_method_raises_error(BaseTestHandler):
    """An arbitrary handler exception surfaces on the client as a ServerError."""
    from tvm.micro import project_api

    with mock.patch.object(
        BaseTestHandler, "close_transport", side_effect=ProjectAPITestError
    ):
        fixture = ClientServerFixture(BaseTestHandler())
        with pytest.raises(project_api.server.ServerError) as exc_info:
            fixture.client.close_transport()
        fixture.handler.close_transport.assert_called_once_with()
        # The original exception class name is preserved in the message.
        assert "ProjectAPITestError" in str(exc_info.value)
@tvm.testing.requires_micro
def test_method_not_found(BaseTestHandler):
    """Unknown RPC methods are rejected with METHOD_NOT_FOUND."""
    from tvm.micro import project_api

    fixture = ClientServerFixture(BaseTestHandler())
    with pytest.raises(project_api.server.JSONRPCError) as exc_info:
        fixture.client._request_reply("invalid_method", {"bar": None})
    assert exc_info.value.code == project_api.server.ErrorCode.METHOD_NOT_FOUND
@tvm.testing.requires_micro
def test_extra_param(BaseTestHandler):
    """Unexpected request parameters are rejected with INVALID_PARAMS."""
    from tvm.micro import project_api

    fixture = ClientServerFixture(BaseTestHandler())
    # Cover one method without a _dispatch_ preprocessor and one with.
    for method, has_dispatch in (("build", False), ("open_transport", True)):
        assert hasattr(fixture.server, f"_dispatch_{method}") == has_dispatch
        with pytest.raises(project_api.server.JSONRPCError) as exc_info:
            fixture.client._request_reply(method, {"invalid_param_name": None, "options": {}})
        assert exc_info.value.code == project_api.server.ErrorCode.INVALID_PARAMS
        assert f"{method}: extra parameters: invalid_param_name" in str(exc_info.value)
@tvm.testing.requires_micro
def test_missing_param(BaseTestHandler):
    """A request missing a required parameter is rejected with INVALID_PARAMS."""
    from tvm.micro import project_api

    fixture = ClientServerFixture(BaseTestHandler())
    # Cover one method without a _dispatch_ preprocessor and one with.
    for method, has_dispatch in (("build", False), ("open_transport", True)):
        assert hasattr(fixture.server, f"_dispatch_{method}") == has_dispatch
        with pytest.raises(project_api.server.JSONRPCError) as exc_info:
            fixture.client._request_reply(method, {})
        assert exc_info.value.code == project_api.server.ErrorCode.INVALID_PARAMS
        assert f"{method}: parameter options not given" in str(exc_info.value)
@tvm.testing.requires_micro
def test_incorrect_param_type(BaseTestHandler):
    """A wrongly-typed parameter is rejected with INVALID_PARAMS."""
    from tvm.micro import project_api

    fixture = ClientServerFixture(BaseTestHandler())
    # The error message given at the JSON-RPC server level doesn't make sense
    # when preprocessing is used, so only the non-preprocessed method is tested.
    assert not hasattr(fixture.server, "_dispatch_build")
    with pytest.raises(project_api.server.JSONRPCError) as exc_info:
        fixture.client._request_reply("build", {"options": None})
    assert exc_info.value.code == project_api.server.ErrorCode.INVALID_PARAMS
    message = str(exc_info.value)
    assert "build: parameter options: want <class 'dict'>, got <class 'NoneType'>" in message
@tvm.testing.requires_micro
def test_invalid_request(BaseTestHandler):
    """Malformed requests: unparseable JSON gets no reply; parseable JSON with
    a bad JSON-RPC schema gets an INVALID_REQUEST error reply."""
    from tvm.micro import project_api
    fixture = ClientServerFixture(BaseTestHandler())
    # Invalid JSON does not get a reply.
    fixture.client_to_server.write(b"foobar\n")
    assert fixture.server.serve_one_request() == False
    assert fixture.server_to_client.read() == b""
    # EOF causes a clean return
    assert fixture.server.serve_one_request() == False
    assert fixture.server_to_client.read() == b""
    def _request_reply(request):
        # Bypass the client: write raw bytes, serve once, decode the reply.
        fixture.client_to_server.write(request + b"\n")
        assert fixture.server.serve_one_request() == False
        return json.loads(fixture.server_to_client.read())
    # Parseable JSON with the wrong schema gets a reply.
    assert _request_reply(b"1") == {
        "error": {
            "code": project_api.server.ErrorCode.INVALID_REQUEST,
            "data": None,
            "message": "request: want dict; got 1",
        },
        "id": None,
        "jsonrpc": "2.0",
    }
    # Incorrect JSON-RPC spec version.
    assert _request_reply(b'{"jsonrpc": 1.0}') == {
        "error": {
            "code": project_api.server.ErrorCode.INVALID_REQUEST,
            "data": None,
            "message": 'request["jsonrpc"]: want "2.0"; got 1.0',
        },
        "id": None,
        "jsonrpc": "2.0",
    }
    # Method not a str
    assert _request_reply(b'{"jsonrpc": "2.0", "method": 123}') == {
        "error": {
            "code": project_api.server.ErrorCode.INVALID_REQUEST,
            "data": None,
            "message": 'request["method"]: want str; got 123',
        },
        "id": None,
        "jsonrpc": "2.0",
    }
    # Method name has invalid characters
    assert _request_reply(b'{"jsonrpc": "2.0", "method": "bar!"}') == {
        "error": {
            "code": project_api.server.ErrorCode.INVALID_REQUEST,
            "data": None,
            "message": "request[\"method\"]: should match regex ^[a-zA-Z0-9_]+$; got 'bar!'",
        },
        "id": None,
        "jsonrpc": "2.0",
    }
    # params not a dict
    assert _request_reply(b'{"jsonrpc": "2.0", "method": "bar", "params": 123}') == {
        "error": {
            "code": project_api.server.ErrorCode.INVALID_REQUEST,
            "data": None,
            "message": "request[\"params\"]: want dict; got <class 'int'>",
        },
        "id": None,
        "jsonrpc": "2.0",
    }
    # id not valid
    assert _request_reply(b'{"jsonrpc": "2.0", "method": "bar", "params": {}, "id": {}}') == {
        "error": {
            "code": project_api.server.ErrorCode.INVALID_REQUEST,
            "data": None,
            "message": 'request["id"]: want str, number, null; got {}',
        },
        "id": None,
        "jsonrpc": "2.0",
    }
@tvm.testing.requires_micro
def test_default_project_options():
    """Spot-check names and required/optional wiring of the default options."""
    from tvm.micro import project_api

    options_by_name = {
        option.name: option for option in project_api.server.default_project_options()
    }
    for expected in ["verbose", "project_type", "board", "cmsis_path", "warning_as_error"]:
        assert expected in options_by_name
    assert "generate_project" in options_by_name["verbose"].optional
    assert "generate_project" in options_by_name["warning_as_error"].optional
    for name in ("project_type", "board"):
        assert "generate_project" in options_by_name[name].required
@tvm.testing.requires_micro
def test_modified_project_options():
    """Overrides passed to default_project_options() appear in the result."""
    from tvm.micro import project_api

    modified = project_api.server.default_project_options(
        verbose={"optional": ["flash"], "required": ["build"]},
        board={"choices": ["board1", "board2"]},
    )
    for option in modified:
        if option.name == "verbose":
            assert option.optional == ["flash"]
            assert option.required == ["build"]
        elif option.name == "board":
            assert option.choices == ["board1", "board2"]
if __name__ == "__main__":
    # Delegate to TVM's pytest-based test entry point.
    tvm.testing.main()
| 18,979 | 34.543071 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_usmp_algo_hill_climb.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import random
import tvm
import tvm.testing
from tvm.tir.usmp.utils import BufferInfo
from tvm import WorkspacePoolInfo, PoolInfoProperties
def _check_max_workspace_size(buffer_pool_allocations, pool_info, size, tolerance=0):
    """Helper to check maximum allocated memory size"""
    # Peak usage of this pool = max over its allocations of (offset + extent).
    max_workspace_size = 0
    for buffer_info, pool_allocation in buffer_pool_allocations.items():
        if pool_allocation.pool_info == pool_info:
            size_candidate = pool_allocation.byte_offset + buffer_info.size_bytes
            if size_candidate > max_workspace_size:
                max_workspace_size = size_candidate
    # NOTE(review): assumes at least one allocation landed in `pool_info`;
    # otherwise `max_workspace_size` is a plain int and `.value` raises -- confirm.
    _diff = max_workspace_size.value - size
    return (
        # tolerance == 0 demands an exact match; otherwise accept while the
        # relative difference (in percent) stays below `tolerance`.
        (max_workspace_size.value == size if tolerance == 0 else tolerance > 100 * _diff / size),
        "'{}': expected {} got {}, diff {:0.2f}% ({} bytes)".format(
            pool_info.pool_name, size, max_workspace_size, 100 * _diff / size, _diff
        ),
    )
def _verify_conflicts(buffer_info, pool_allocation, buffer_info_map):
    """Helper to check expected liveness conflicts"""
    for conflict in buffer_info.conflicts:
        conflict_pool_allocation = buffer_info_map[conflict]
        # Only allocations placed in the same pool can physically overlap.
        if conflict_pool_allocation.pool_info == pool_allocation.pool_info:
            assert conflict_pool_allocation.byte_offset != pool_allocation.byte_offset
            # l2 = total span covered by both allocations; if the two sizes fit
            # within the span, their byte ranges are disjoint.
            l2 = max(
                conflict_pool_allocation.byte_offset + conflict.size_bytes,
                pool_allocation.byte_offset + buffer_info.size_bytes,
            ) - min(conflict_pool_allocation.byte_offset, pool_allocation.byte_offset)
            assert (
                conflict.size_bytes + buffer_info.size_bytes <= l2
            ), 'Conflicting: \n"{} @{}"\n"{} @{}"'.format(
                conflict, conflict_pool_allocation, buffer_info, pool_allocation
            )
def _verify_all_conflicts(buffer_pool_allocations):
    """Check liveness-conflict consistency for every allocated buffer."""
    for info, allocation in buffer_pool_allocations.items():
        _verify_conflicts(info, allocation, buffer_pool_allocations)
def test_bounded(random_len=150, pools=None):
    """Tests two pools, one is bounded and one is not limited.

    Builds a random conflict graph of `random_len` buffers (every buffer gets
    at least one symmetric conflict partner) and runs the hill-climb USMP
    algorithm over it, then verifies the resulting placement.
    """
    if pools is None:
        # Built per call: the previous mutable default argument shared the same
        # WorkspacePoolInfo objects (and any mutation of them) across every
        # invocation of this test.
        pools = [
            WorkspacePoolInfo("default", [], PoolInfoProperties(65535)),
            WorkspacePoolInfo("slow", []),
        ]
    random.seed(0)
    mem_range = [BufferInfo(str(i), random.randrange(1, 65535), pools) for i in range(random_len)]
    for mr in mem_range:
        # Pick a random distinct partner and record the conflict symmetrically.
        pr = random.choice(mem_range)
        while pr in (*mr.conflicts, mr):
            pr = random.choice(mem_range)
        mr.set_conflicts([*mr.conflicts, pr])
        pr.set_conflicts([*pr.conflicts, mr])
    fusmp_algo = tvm.get_global_func("tir.usmp.algo.hill_climb")
    result_map = fusmp_algo(mem_range, 0)
    _verify_all_conflicts(result_map)
def __test_data_alloc_max():
    """Small (start, end, size) interval set exercising peak allocation."""
    return [
        (0, 159, 2048),
        (0, 13, 7904),
        (4, 35, 16),
        (12, 17, 32768),
        (16, 21, 32768),
    ]
def __test_data_deep_speech():
    """Test data: (start, end, size) liveness intervals taken from a
    DeepSpeech-like model; large regression case for the allocator."""
    intervals = [
        (0, 159, 2048),
        (0, 151, 2048),
        (0, 13, 7904),
        (2, 49, 16),
        (4, 35, 16),
        (6, 21, 16),
        (12, 17, 32768),
        (16, 21, 32768),
        (20, 27, 32768),
        (26, 31, 32768),
        (30, 35, 32768),
        (34, 41, 32768),
        (40, 45, 32768),
        (44, 49, 32768),
        (48, 145, 32768),
        (54, 59, 2048),
        (58, 483, 4096),
        (60, 65, 2048),
        (64, 461, 4096),
        (66, 71, 2048),
        (70, 439, 4096),
        (72, 77, 2048),
        (76, 417, 4096),
        (78, 83, 2048),
        (82, 395, 4096),
        (84, 89, 2048),
        (88, 373, 4096),
        (90, 95, 2048),
        (94, 351, 4096),
        (96, 101, 2048),
        (100, 329, 4096),
        (102, 107, 2048),
        (106, 307, 4096),
        (108, 113, 2048),
        (112, 285, 4096),
        (114, 119, 2048),
        (118, 263, 4096),
        (120, 125, 2048),
        (124, 241, 4096),
        (126, 131, 2048),
        (130, 219, 4096),
        (132, 137, 2048),
        (136, 197, 4096),
        (138, 143, 2048),
        (142, 175, 4096),
        (144, 149, 2048),
        (148, 153, 4096),
        (152, 163, 8192),
        (154, 171, 2048),
        (156, 181, 2048),
        (160, 167, 2048),
        (162, 165, 2048),
        (168, 171, 2048),
        (170, 509, 2048),
        (174, 185, 8192),
        (176, 193, 2048),
        (178, 203, 2048),
        (182, 189, 2048),
        (184, 187, 2048),
        (190, 193, 2048),
        (192, 511, 2048),
        (196, 207, 8192),
        (198, 215, 2048),
        (200, 225, 2048),
        (204, 211, 2048),
        (206, 209, 2048),
        (212, 215, 2048),
        (214, 513, 2048),
        (218, 229, 8192),
        (220, 237, 2048),
        (222, 247, 2048),
        (226, 233, 2048),
        (228, 231, 2048),
        (234, 237, 2048),
        (236, 515, 2048),
        (240, 251, 8192),
        (242, 259, 2048),
        (244, 269, 2048),
        (248, 255, 2048),
        (250, 253, 2048),
        (256, 259, 2048),
        (258, 517, 2048),
        (262, 273, 8192),
        (264, 281, 2048),
        (266, 291, 2048),
        (270, 277, 2048),
        (272, 275, 2048),
        (278, 281, 2048),
        (280, 519, 2048),
        (284, 295, 8192),
        (286, 303, 2048),
        (288, 313, 2048),
        (292, 299, 2048),
        (294, 297, 2048),
        (300, 303, 2048),
        (302, 521, 2048),
        (306, 317, 8192),
        (308, 325, 2048),
        (310, 335, 2048),
        (314, 321, 2048),
        (316, 319, 2048),
        (322, 325, 2048),
        (324, 523, 2048),
        (328, 339, 8192),
        (330, 347, 2048),
        (332, 357, 2048),
        (336, 343, 2048),
        (338, 341, 2048),
        (344, 347, 2048),
        (346, 525, 2048),
        (350, 361, 8192),
        (352, 369, 2048),
        (354, 379, 2048),
        (358, 365, 2048),
        (360, 363, 2048),
        (366, 369, 2048),
        (368, 527, 2048),
        (372, 383, 8192),
        (374, 391, 2048),
        (376, 401, 2048),
        (380, 387, 2048),
        (382, 385, 2048),
        (388, 391, 2048),
        (390, 529, 2048),
        (394, 405, 8192),
        (396, 413, 2048),
        (398, 423, 2048),
        (402, 409, 2048),
        (404, 407, 2048),
        (410, 413, 2048),
        (412, 531, 2048),
        (416, 427, 8192),
        (418, 435, 2048),
        (420, 445, 2048),
        (424, 431, 2048),
        (426, 429, 2048),
        (432, 435, 2048),
        (434, 533, 2048),
        (438, 449, 8192),
        (440, 457, 2048),
        (442, 467, 2048),
        (446, 453, 2048),
        (448, 451, 2048),
        (454, 457, 2048),
        (456, 535, 2048),
        (460, 471, 8192),
        (462, 479, 2048),
        (464, 489, 2048),
        (468, 475, 2048),
        (470, 473, 2048),
        (476, 479, 2048),
        (478, 537, 2048),
        (482, 493, 8192),
        (484, 501, 2048),
        (486, 497, 2048),
        (490, 497, 2048),
        (492, 495, 2048),
        (496, 626, 2048),
        (498, 501, 2048),
        (500, 626, 2048),
        (504, 549, 16),
        (508, 543, 32768),
        (542, 549, 32768),
        (548, 555, 32768),
        (554, 563, 464),
        (560, 563, 256),
        (562, 617, 2048),
        (564, 567, 1856),
        (566, 573, 1024),
        (568, 619, 1024),
        (570, 573, 1024),
        (572, 577, 1024),
        (576, 579, 1024),
        (578, 605, 1024),
        (580, 593, 1024),
        (584, 587, 1024),
        (586, 603, 1024),
        (594, 597, 1024),
        (596, 613, 1024),
        (604, 607, 1024),
        (606, 617, 1024),
        (616, 621, 2048),
        (618, 621, 1024),
        (620, 626, 464),
    ]
    return intervals
def __test_data_five():
    """Small five-interval test data set of (start, end, size) triples."""
    intervals = [
        (4, 5, 95),
        (1, 4, 52135),
        (3, 4, 12136),
        (3, 5, 62099),
        (4, 5, 50458),
    ]
    return intervals
def __test_data_simple():
    """Simple ten-interval test data set of (start, end, size) triples."""
    # Trailing numbers are the buffer indices assigned by run_intervals().
    intervals = [
        (0, 23, 131072),  # 0
        (4, 5, 65568),  # 1
        (4, 9, 8192),  # 2
        (8, 30, 15360),  # 3
        (10, 11, 65568),  # 4
        (10, 15, 4096),  # 5
        (16, 17, 65552),  # 6
        (16, 21, 2048),  # 7
        (22, 23, 32784),  # 8
        (22, 27, 1024),  # 9
    ]
    return intervals
def find_maximum_from_intervals(intervals):
    """Return the peak simultaneous memory usage of (start, end, size) intervals.

    The memory alive at time ``t`` is the sum of ``size`` over every interval
    with ``start <= t <= end`` (both endpoints inclusive).  That sum only
    changes at interval boundaries, so instead of re-summing all intervals at
    every time step (O(time_range * n)) we sweep over +size/-size boundary
    events in O(n log n).

    Parameters
    ----------
    intervals : list of (int, int, int)
        (start, end, size) triples; ``end`` is inclusive and sizes are
        assumed non-negative.

    Returns
    -------
    int
        Maximum total size alive at any single time step; 0 for an empty
        input (the previous per-step scan raised IndexError on empty input).
    """
    from itertools import groupby

    events = []
    for start, end, size in intervals:
        events.append((start, size))  # buffer becomes live at `start`
        events.append((end + 1, -size))  # freed just after its last step
    events.sort(key=lambda event: event[0])

    max_mem = 0
    current = 0
    for _, same_time in groupby(events, key=lambda event: event[0]):
        # Apply every change at this time point before sampling the total,
        # so an allocation and a free at the same step are not miscounted.
        for _, delta in same_time:
            current += delta
        max_mem = max(max_mem, current)
    return max_mem
@pytest.mark.parametrize(
    "intervals",
    [__test_data_alloc_max(), __test_data_simple(), __test_data_deep_speech(), __test_data_five()],
)
def test_intervals(intervals):
    """Run the USMP solvers on each supplied interval set.

    Asserts that the hill-climb algorithm stays within 5% of the theoretical
    minimum workspace size computed by find_maximum_from_intervals().
    """
    # Fixed seed keeps the stochastic hill-climb search deterministic.
    random.seed(0)
    result = run_intervals(intervals, 5)
    # Plain truthiness instead of `== True` (PEP 8 / flake8 E712); the message
    # dumps the per-algorithm results on failure.
    assert result["tir.usmp.algo.hill_climb"], f" {result}"
def generate_range(sz, max_segment_sz=65535):
    """Yield ``sz`` random (start, stop, size) triples.

    Each start lies in [idx, sz), each stop strictly after its start, and each
    size in [1, max_segment_sz).  Uses the module-level `random` state, so the
    output is reproducible after random.seed().
    """
    for idx in range(sz):
        begin = random.randrange(idx, sz)
        end = random.randrange(begin + 1, begin + 2 + ((sz - begin) // 2))
        assert end - begin > 0
        yield begin, end, random.randrange(1, max_segment_sz)
def test_random_intervals(interval_len=16):
    """Run the solvers on a reproducible, randomly generated interval set."""
    # Seed before generating so the interval set is identical on every run.
    random.seed(0)
    return run_intervals(list(generate_range(interval_len)))
def _set_liveness_conflicts(intervals, buffers):
    """Mark every pair of buffers whose inclusive live ranges overlap as conflicting."""
    for i, (i_start, i_stop, _) in enumerate(intervals):
        conflicts = set()
        for j, (j_start, j_stop, _) in enumerate(intervals):
            if i == j:
                continue
            # Two inclusive ranges overlap iff their combined span is shorter
            # than the sum of their individual durations.
            span = max(i_stop, j_stop) - min(i_start, j_start) + 1
            i_dur = i_stop - i_start + 1
            j_dur = j_stop - j_start + 1
            if span < i_dur + j_dur:
                conflicts.add(buffers[j])
        # Sort by name for a deterministic conflict order across runs.
        buffers[i].set_conflicts(sorted(conflicts, key=lambda c: c.name_hint))
def run_intervals(intervals, tolerance=0):
    """Run the USMP allocation algorithms on a set of liveness intervals.
    Builds one BufferInfo per (start, stop, size) interval, wires up their
    liveness conflicts, then runs both the hill-climb and greedy-by-size
    allocators and checks each result against the theoretical minimum
    workspace size.
    Parameters
    ----------
    intervals : list of (int, int, int)
        (start, stop, size) triples with inclusive endpoints.
    tolerance : int
        Allowed percentage overshoot of the theoretical minimum.
    Returns
    -------
    dict
        Maps algorithm name to True/False workspace-size check result.
    """
    expected_mem = find_maximum_from_intervals(intervals)
    pools = [WorkspacePoolInfo("default", [])]
    # One buffer per interval, named by its index.
    buffers = [BufferInfo(str(i), size, pools) for i, (_, _, size) in enumerate(intervals)]
    _set_liveness_conflicts(intervals, buffers)
    result = {}
    for alg, params in [
        ("tir.usmp.algo.hill_climb", (expected_mem,)),
        ("tir.usmp.algo.greedy_by_size", (expected_mem,)),
    ]:
        fusmp_algo = tvm.get_global_func(alg)
        print("\n", "started", alg)
        buffer_info_arr = fusmp_algo(buffers, *params)
        print()
        _verify_all_conflicts(buffer_info_arr)
        result[alg], msg = _check_max_workspace_size(
            buffer_info_arr, pools[0], expected_mem, tolerance
        )
        if not result[alg]:
            print(alg, msg)
    return result
# Allow running this test file directly; tvm.testing.main() delegates to
# pytest discovery over the tests defined above.
if __name__ == "__main__":
    tvm.testing.main()
| 12,082 | 28.834568 | 99 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.