repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/tests/python/unittest/test_arith_narrow_predicate_expression.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import tir
from tvm.runtime import convert
i = tir.Var("i", "int32")
j = tir.Var("j", "int32")
n = tir.Var("n", "int32")
m = tir.Var("m", "int32")
b = tir.Var("b", "bool")
buf = tir.decl_buffer(16, "int32", "buf")
tir_false = tir.IntImm("bool", False)
tir_true = tir.IntImm("bool", True)
before, expected = tvm.testing.parameters(
# General arithmatic
[tir_true, tir_true],
[tir_false, tir_false],
[b, b],
[i > 5, i > 5],
[i > n, i > 7],
[i < n, i < 0],
[i <= n, i <= 0],
[i >= n, i >= 7],
[n > i, convert(0) > i],
[n < i, convert(7) < i],
[n <= i, convert(7) <= i],
[n >= i, convert(0) >= i],
[i == n, tir.all(i <= 0, convert(7) <= i)],
[n == i, tir.all(convert(7) <= i, i <= 0)],
[i != n, tir.any(i < 0, convert(7) < i)],
[n != i, tir.any(convert(7) < i, i < 0)],
[i // 4 > n, i // 4 > 7],
[n < i // 4, convert(7) < i // 4],
[(i + n) // 4 > 0, tir.Add(i, 0) // 4 > 0],
[(i + n) // 4 == 0, tir.all(tir.Add(i, 7) // 4 <= 0, convert(0) <= tir.Add(i, 0) // 4)],
[i + n < 10, i + 7 < 10],
[i - n < 10, tir.Sub(i, 0) < 10],
[tir.Not(i < n), tir.Not(i < 7)],
# Use of FloorMod should make the narrowing strategy bail out, as
# it is non-monotonic.
[i % 8 == n, tir_false],
# Ensure that dividing by a free parameter doesn't generate a
# divide-by-zero to be triggered later.
[i // n == 0, tir_false],
### Buffer handling
[buf.vload(0) > 0, tir_false],
[buf.vload(0) > i, tir_false],
[buf.vload(i) > 0, tir_false],
[tir.And(buf.vload(i) > 0, i <= 0), tir.And(tir_false, i <= 0)],
[tir.Or(buf.vload(i) > 0, i <= n), tir.Or(tir_false, i <= 0)],
[tir.Or(tir.Not(buf.vload(i) > 0), i <= n), tir.Or(tir_false, i <= 0)],
)
def test_narrow_expression(before, expected):
ranges = {n: tvm.ir.Range(0, 8)}
after = tvm.arith._ffi_api.NarrowPredicateExpression(before, ranges)
if expected is None:
assert after is None
else:
tvm.ir.assert_structural_equal(after, expected)
if __name__ == "__main__":
tvm.testing.main()
| 2,920 | 32.193182 | 92 | py |
tvm | tvm-main/tests/python/unittest/test_autotvm_droplet_tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test droplet algorithm tuner"""
from tvm.testing.autotvm import DummyRunner, get_sample_task, get_sample_records
from tvm import autotvm
def test_tuner():
"""Test Droplet Tuner"""
task, _ = get_sample_task()
measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())
# When no range index, range_length should be the length of config space
tuner = autotvm.tuner.DropletTuner(task)
assert len(tuner.best_choice) == 3
assert tuner.execution == 1
assert tuner.batch == 16
assert tuner.total_execution == max(tuner.dims)
assert tuner.step == 1
def test_multi_filter():
# Test with multi-filter
task, _ = get_sample_task()
task.config_space.multi_filter(
filter=lambda entity: 0 <= (entity["tile_x"].size[1] * entity["tile_y"].size[1]) < 1024
)
tuner = autotvm.tuner.DropletTuner(task)
valid_indexes = list(
filter(lambda idx: tuner.space.is_index_valid(idx), range(tuner.space.range_length))
)
assert tuner.visited.issubset(valid_indexes)
measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())
tuner.tune(n_trial=8, measure_option=measure_option)
assert tuner.visited.issubset(valid_indexes)
if __name__ == "__main__":
test_tuner()
test_multi_filter()
| 2,128 | 35.706897 | 97 | py |
tvm | tvm-main/tests/python/unittest/test_tvmscript_printer_python_doc_printer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import itertools
import pytest
import tvm
from tvm.script.printer.doc import (
AssertDoc,
AssignDoc,
CallDoc,
ClassDoc,
CommentDoc,
DictDoc,
DocStringDoc,
ExprStmtDoc,
ForDoc,
FunctionDoc,
IdDoc,
IfDoc,
LambdaDoc,
ListDoc,
LiteralDoc,
OperationDoc,
OperationKind,
ReturnDoc,
ScopeDoc,
SliceDoc,
StmtBlockDoc,
TupleDoc,
WhileDoc,
)
from tvm.script.printer.doc_printer import to_python_script
def format_script(s: str) -> str:
"""
Remove leading and trailing blank lines, and make the minimum idention 0
"""
s = s.strip("\n")
non_empty_lines = [line for line in s.splitlines() if line and not line.isspace()]
if not non_empty_lines:
# no actual content
return ""
line_indents = [len(line) - len(line.lstrip(" ")) for line in non_empty_lines]
spaces_to_remove = min(line_indents)
cleaned_lines = "\n".join(line[spaces_to_remove:] for line in s.splitlines())
if not cleaned_lines.endswith("\n"):
cleaned_lines += "\n"
return cleaned_lines.strip()
@pytest.mark.parametrize(
"doc,expected",
[
(LiteralDoc(None), "None"),
(LiteralDoc(True), "True"),
(LiteralDoc(False), "False"),
(LiteralDoc("test"), '"test"'),
(LiteralDoc(""), '""'),
(LiteralDoc('""'), r'"\"\""'),
(LiteralDoc("\n\t\\test\r"), r'"\n\t\\test\r"'),
# TODO: fix the roundatrippable problem caused by utf8
pytest.param(LiteralDoc("\x88"), r'"\x88"', marks=pytest.mark.xfail),
(LiteralDoc(0), "0"),
(LiteralDoc(-1), "-1"),
(LiteralDoc(3.25), "3.25"),
(LiteralDoc(-0.5), "-0.5"),
],
ids=itertools.count(),
)
def test_print_literal_doc(doc, expected):
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"name",
[
"test",
"_test",
"TestCase",
"test_case",
"test123",
],
ids=itertools.count(),
)
def test_print_id_doc(name):
doc = IdDoc(name)
assert to_python_script(doc) == format_script(name)
@pytest.mark.parametrize(
"attr",
[
"attr",
"_attr",
"Attr",
"attr_1",
],
ids=itertools.count(),
)
def test_print_attr_doc(attr):
doc = IdDoc("x").attr(attr)
assert to_python_script(doc) == format_script(f"x.{attr}")
@pytest.mark.parametrize(
"indices, expected",
[
(
(),
"[()]",
),
(
(LiteralDoc(1),),
"[1]",
),
(
(LiteralDoc(2), IdDoc("x")),
"[2, x]",
),
(
(SliceDoc(LiteralDoc(1), LiteralDoc(2)),),
"[1:2]",
),
(
(SliceDoc(LiteralDoc(1)), IdDoc("y")),
"[1:, y]",
),
(
(SliceDoc(), IdDoc("y")),
"[:, y]",
),
(
(IdDoc("x"), IdDoc("y"), IdDoc("z")),
"[x, y, z]",
),
],
ids=itertools.count(),
)
def test_print_index_doc(indices, expected):
doc = IdDoc("x")[indices]
assert to_python_script(doc) == format_script(f"x{expected}")
UNARY_OP_TOKENS = {
OperationKind.USub: "-",
OperationKind.Invert: "~",
OperationKind.Not: "not ",
}
@pytest.mark.parametrize(
"op_kind, expected_token",
list(UNARY_OP_TOKENS.items()),
ids=UNARY_OP_TOKENS.keys(),
)
def test_print_unary_operation_doc(op_kind, expected_token):
doc = OperationDoc(op_kind, [IdDoc("x")])
assert to_python_script(doc) == format_script(f"{expected_token}x")
BINARY_OP_TOKENS = {
OperationKind.Add: "+",
OperationKind.Sub: "-",
OperationKind.Mult: "*",
OperationKind.Div: "/",
OperationKind.FloorDiv: "//",
OperationKind.Mod: "%",
OperationKind.Pow: "**",
OperationKind.LShift: "<<",
OperationKind.RShift: ">>",
OperationKind.BitAnd: "&",
OperationKind.BitOr: "|",
OperationKind.BitXor: "^",
OperationKind.Lt: "<",
OperationKind.LtE: "<=",
OperationKind.Eq: "==",
OperationKind.NotEq: "!=",
OperationKind.Gt: ">",
OperationKind.GtE: ">=",
OperationKind.And: "and",
OperationKind.Or: "or",
}
@pytest.mark.parametrize(
"op_kind, expected_token",
list(BINARY_OP_TOKENS.items()),
ids=BINARY_OP_TOKENS.keys(),
)
def test_print_binary_operation_doc(op_kind, expected_token):
doc = OperationDoc(op_kind, [IdDoc("x"), IdDoc("y")])
assert to_python_script(doc) == format_script(f"x {expected_token} y")
SPECIAL_OP_CASES = [
(
OperationKind.IfThenElse,
[LiteralDoc(True), LiteralDoc("true"), LiteralDoc("false")],
'"true" if True else "false"',
),
(
OperationKind.IfThenElse,
[IdDoc("x"), LiteralDoc(None), LiteralDoc(1)],
"None if x else 1",
),
]
@pytest.mark.parametrize(
"op_kind, operands, expected", SPECIAL_OP_CASES, ids=[kind for (kind, *_) in SPECIAL_OP_CASES]
)
def test_print_special_operation_doc(op_kind, operands, expected):
doc = OperationDoc(op_kind, operands)
assert to_python_script(doc) == format_script(expected)
def test_operation_doc_test_exhaustive():
special_op_covered = {k for k, *_ in SPECIAL_OP_CASES}
for op_kind in OperationKind:
if OperationKind._UnaryStart < op_kind < OperationKind._UnaryEnd:
assert op_kind in UNARY_OP_TOKENS, (
f"{op_kind.name} not covered in test_print_unary_operation_doc. "
f"Please add the expected token to UNARY_OP_TOKENS"
)
elif OperationKind._BinaryStart < op_kind < OperationKind._BinaryEnd:
assert op_kind in BINARY_OP_TOKENS, (
f"{op_kind.name} not covered in test_print_binary_operation_doc. "
f"Please add the expected token to BINARY_OP_TOKENS"
)
elif not op_kind.name.startswith("_"):
# Special Op
assert op_kind in special_op_covered, (
f"{op_kind.name} not covered in test_print_special_operation_doc. "
f"Please add the test cases for it to SPECIAL_OP_CASES"
)
@pytest.mark.parametrize(
"args, kwargs, expected",
[
(
(),
{},
"()",
),
(
(),
{"key0": IdDoc("u")},
"(key0=u)",
),
(
(),
{"key0": IdDoc("u"), "key1": IdDoc("v")},
"(key0=u, key1=v)",
),
(
(IdDoc("x"),),
{},
"(x)",
),
(
(IdDoc("x"),),
{"key0": IdDoc("u")},
"(x, key0=u)",
),
(
(IdDoc("x"),),
{"key0": IdDoc("u"), "key1": IdDoc("v")},
"(x, key0=u, key1=v)",
),
(
(IdDoc("x"), (IdDoc("y"))),
{},
"(x, y)",
),
(
(IdDoc("x"), (IdDoc("y"))),
{"key0": IdDoc("u")},
"(x, y, key0=u)",
),
(
(IdDoc("x"), (IdDoc("y"))),
{"key0": IdDoc("u"), "key1": IdDoc("v")},
"(x, y, key0=u, key1=v)",
),
],
ids=itertools.count(),
)
def test_print_call_doc(args, kwargs, expected):
doc = CallDoc(IdDoc("f"), *args, **kwargs)
assert to_python_script(doc) == format_script(f"f{expected}")
@pytest.mark.parametrize(
"args, expected",
[
(
(),
"lambda : 0",
),
(
(IdDoc("x"),),
"lambda x: 0",
),
(
(IdDoc("x"), IdDoc("y")),
"lambda x, y: 0",
),
(
(IdDoc("x"), IdDoc("y"), IdDoc("z")),
"lambda x, y, z: 0",
),
],
ids=itertools.count(),
)
def test_print_lambda_doc(args, expected):
doc = LambdaDoc(args, body=LiteralDoc(0))
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"elements, expected",
[
(
(),
"[]",
),
(
[IdDoc("x")],
"[x]",
),
(
[IdDoc("x"), IdDoc("y")],
"[x, y]",
),
(
[IdDoc("x"), IdDoc("y"), IdDoc("z")],
"[x, y, z]",
),
],
ids=itertools.count(),
)
def test_print_list_doc(elements, expected):
doc = ListDoc(elements)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"elements, expected",
[
(
(),
"()",
),
(
[IdDoc("x")],
"(x,)",
),
(
[IdDoc("x"), IdDoc("y")],
"(x, y)",
),
(
[IdDoc("x"), IdDoc("y"), IdDoc("z")],
"(x, y, z)",
),
],
ids=itertools.count(),
)
def test_print_tuple_doc(elements, expected):
doc = TupleDoc(elements)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"content, expected",
[
(
{},
"{}",
),
(
{LiteralDoc("key_x"): IdDoc("x")},
'{"key_x": x}',
),
(
{LiteralDoc("key_x"): IdDoc("x"), LiteralDoc("key_y"): IdDoc("y")},
'{"key_x": x, "key_y": y}',
),
(
{
LiteralDoc("key_x"): IdDoc("x"),
LiteralDoc("key_y"): IdDoc("y"),
LiteralDoc("key_z"): IdDoc("z"),
},
'{"key_x": x, "key_y": y, "key_z": z}',
),
],
ids=itertools.count(),
)
def test_print_dict_doc(content, expected):
doc = DictDoc(content)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"slice_doc, expected",
[
(
SliceDoc(),
":",
),
(
SliceDoc(LiteralDoc(1)),
"1:",
),
(
SliceDoc(None, LiteralDoc(2)),
":2",
),
(
SliceDoc(LiteralDoc(1), LiteralDoc(2)),
"1:2",
),
(
SliceDoc(None, None, LiteralDoc(3)),
"::3",
),
(
SliceDoc(LiteralDoc(1), None, LiteralDoc(3)),
"1::3",
),
(
SliceDoc(None, LiteralDoc(2), LiteralDoc(3)),
":2:3",
),
(
SliceDoc(LiteralDoc(1), LiteralDoc(2), LiteralDoc(3)),
"1:2:3",
),
],
ids=itertools.count(),
)
def test_print_slice_doc(slice_doc, expected):
doc = IdDoc("x")[slice_doc]
assert to_python_script(doc) == format_script(f"x[{expected}]")
@pytest.mark.parametrize(
"stmts, expected",
[
(
[],
"",
),
(
[ExprStmtDoc(IdDoc("x"))],
"x",
),
(
[ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
"""
x
y
""",
),
],
ids=itertools.count(),
)
def test_print_stmt_block_doc(stmts, expected):
doc = StmtBlockDoc(stmts)
assert to_python_script(doc).strip() == format_script(expected).strip()
@pytest.mark.parametrize(
"doc, expected",
[
(
AssignDoc(IdDoc("x"), IdDoc("y"), None),
"x = y",
),
(
AssignDoc(IdDoc("x"), IdDoc("y"), IdDoc("int")),
"x: int = y",
),
(
AssignDoc(IdDoc("x"), None, IdDoc("int")),
"x: int",
),
(
AssignDoc(TupleDoc([IdDoc("x"), IdDoc("y")]), IdDoc("z"), None),
"x, y = z",
),
(
AssignDoc(TupleDoc([IdDoc("x"), TupleDoc([IdDoc("y"), IdDoc("z")])]), IdDoc("z"), None),
"x, (y, z) = z",
),
],
ids=itertools.count(),
)
def test_print_assign_doc(doc, expected):
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"then_branch, else_branch, expected",
[
(
[ExprStmtDoc(IdDoc("x"))],
[],
"""
if pred:
x
""",
),
(
[],
[ExprStmtDoc(IdDoc("y"))],
"""
if pred:
pass
else:
y
""",
),
(
[ExprStmtDoc(IdDoc("x"))],
[ExprStmtDoc(IdDoc("y"))],
"""
if pred:
x
else:
y
""",
),
],
ids=itertools.count(),
)
def test_print_if_doc(then_branch, else_branch, expected):
doc = IfDoc(IdDoc("pred"), then_branch, else_branch)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"body, expected",
[
(
[ExprStmtDoc(IdDoc("x"))],
"""
while pred:
x
""",
),
(
[],
"""
while pred:
pass
""",
),
],
ids=itertools.count(),
)
def test_print_while_doc(body, expected):
doc = WhileDoc(IdDoc("pred"), body)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"body, expected",
[
(
[ExprStmtDoc(IdDoc("x"))],
"""
for x in y:
x
""",
),
(
[],
"""
for x in y:
pass
""",
),
],
ids=itertools.count(),
)
def test_print_for_doc(body, expected):
doc = ForDoc(IdDoc("x"), IdDoc("y"), body)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"lhs, body, expected",
[
(
IdDoc("c"),
[ExprStmtDoc(IdDoc("x"))],
"""
with context() as c:
x
""",
),
(
IdDoc("c"),
[],
"""
with context() as c:
pass
""",
),
(
None,
[],
"""
with context():
pass
""",
),
(
None,
[ExprStmtDoc(IdDoc("x"))],
"""
with context():
x
""",
),
],
ids=itertools.count(),
)
def test_print_scope_doc(lhs, body, expected):
doc = ScopeDoc(lhs, CallDoc(IdDoc("context")), body)
assert to_python_script(doc) == format_script(expected)
def test_print_expr_stmt_doc():
doc = ExprStmtDoc(CallDoc(IdDoc("f"), IdDoc("x")))
assert to_python_script(doc) == format_script("f(x)")
@pytest.mark.parametrize(
"msg, expected",
[
(
None,
"""
assert True
""",
),
(
LiteralDoc("test message"),
"""
assert True, "test message"
""",
),
],
ids=itertools.count(),
)
def test_print_assert_doc(msg, expected):
test = LiteralDoc(True)
doc = AssertDoc(test, msg)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"value, expected",
[
(
LiteralDoc(None),
"""
return None
""",
),
(
IdDoc("x"),
"""
return x
""",
),
],
ids=itertools.count(),
)
def test_print_return_doc(value, expected):
doc = ReturnDoc(value)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"args, decorators, return_type, body, expected",
[
(
[],
[],
None,
[],
"""
def func():
pass
""",
),
(
[AssignDoc(IdDoc("x"), rhs=None, annotation=IdDoc("int"))],
[],
IdDoc("int"),
[],
"""
def func(x: int) -> int:
pass
""",
),
(
[AssignDoc(IdDoc("x"), rhs=LiteralDoc(1), annotation=IdDoc("int"))],
[],
LiteralDoc(None),
[],
"""
def func(x: int = 1) -> None:
pass
""",
),
(
[],
[IdDoc("wrap")],
LiteralDoc(None),
[],
"""
@wrap
def func() -> None:
pass
""",
),
(
[],
[IdDoc("wrap_outter"), IdDoc("wrap_inner")],
LiteralDoc(None),
[],
"""
@wrap_outter
@wrap_inner
def func() -> None:
pass
""",
),
(
[
AssignDoc(IdDoc("x"), rhs=None, annotation=IdDoc("int")),
AssignDoc(IdDoc("y"), rhs=LiteralDoc(1), annotation=IdDoc("int")),
],
[IdDoc("wrap")],
LiteralDoc(None),
[],
"""
@wrap
def func(x: int, y: int = 1) -> None:
pass
""",
),
(
[
AssignDoc(IdDoc("x"), rhs=None, annotation=IdDoc("int")),
AssignDoc(IdDoc("y"), rhs=LiteralDoc(1), annotation=IdDoc("int")),
],
[IdDoc("wrap")],
LiteralDoc(None),
[
AssignDoc(IdDoc("y"), OperationDoc(OperationKind.Add, [IdDoc("x"), LiteralDoc(1)])),
AssignDoc(IdDoc("y"), OperationDoc(OperationKind.Sub, [IdDoc("y"), LiteralDoc(1)])),
],
"""
@wrap
def func(x: int, y: int = 1) -> None:
y = x + 1
y = y - 1
""",
),
],
ids=itertools.count(),
)
def test_print_function_doc(args, decorators, body, return_type, expected):
doc = FunctionDoc(IdDoc("func"), args, decorators, return_type, body)
assert to_python_script(doc) == format_script(expected) # test
def get_func_doc_for_class(name):
args = [
AssignDoc(IdDoc("x"), rhs=None, annotation=IdDoc("int")),
AssignDoc(IdDoc("y"), rhs=LiteralDoc(1), annotation=IdDoc("int")),
]
body = [
AssignDoc(IdDoc("y"), OperationDoc(OperationKind.Add, [IdDoc("x"), LiteralDoc(1)])),
AssignDoc(IdDoc("y"), OperationDoc(OperationKind.Sub, [IdDoc("y"), LiteralDoc(1)])),
]
return FunctionDoc(
name=IdDoc(name),
args=args,
decorators=[IdDoc("wrap")],
return_type=LiteralDoc(None),
body=body,
)
@pytest.mark.parametrize(
"decorators, body, expected",
[
(
[],
[],
"""
class TestClass:
pass
""",
),
(
[IdDoc("wrap")],
[],
"""
@wrap
class TestClass:
pass
""",
),
(
[IdDoc("wrap_outter"), IdDoc("wrap_inner")],
[],
"""
@wrap_outter
@wrap_inner
class TestClass:
pass
""",
),
(
[IdDoc("wrap")],
[get_func_doc_for_class("f1")],
"""
@wrap
class TestClass:
@wrap
def f1(x: int, y: int = 1) -> None:
y = x + 1
y = y - 1
""",
),
(
[IdDoc("wrap")],
[get_func_doc_for_class("f1"), get_func_doc_for_class("f2")],
"""
@wrap
class TestClass:
@wrap
def f1(x: int, y: int = 1) -> None:
y = x + 1
y = y - 1
@wrap
def f2(x: int, y: int = 1) -> None:
y = x + 1
y = y - 1
""",
),
],
ids=itertools.count(),
)
def test_print_class_doc(decorators, body, expected):
doc = ClassDoc(IdDoc("TestClass"), decorators, body)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"comment, expected",
[
(
"",
"",
),
(
"test comment 1",
"# test comment 1",
),
(
"test comment 1\ntest comment 2",
"""
# test comment 1
# test comment 2
""",
),
],
ids=itertools.count(),
)
def test_print_comment_doc(comment, expected):
doc = CommentDoc(comment)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"comment, expected",
[
(
"",
"",
),
(
"test comment 1",
'''
"""
test comment 1
"""
''',
),
(
"test comment 1\ntest comment 2",
'''
"""
test comment 1
test comment 2
"""
''',
),
],
ids=itertools.count(),
)
def test_print_doc_string_doc(comment, expected):
doc = DocStringDoc(comment)
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"doc, comment, expected",
[
(
AssignDoc(IdDoc("x"), IdDoc("y"), IdDoc("int")),
"comment",
"""
x: int = y # comment
""",
),
(
IfDoc(IdDoc("x"), [ExprStmtDoc(IdDoc("y"))], [ExprStmtDoc(IdDoc("z"))]),
"comment",
"""
# comment
if x:
y
else:
z
""",
),
(
IfDoc(IdDoc("x"), [ExprStmtDoc(IdDoc("y"))], [ExprStmtDoc(IdDoc("z"))]),
"comment line 1\ncomment line 2",
"""
# comment line 1
# comment line 2
if x:
y
else:
z
""",
),
(
WhileDoc(
LiteralDoc(True),
[
AssignDoc(IdDoc("x"), IdDoc("y")),
],
),
"comment",
"""
# comment
while True:
x = y
""",
),
(
ForDoc(IdDoc("x"), IdDoc("y"), []),
"comment",
"""
# comment
for x in y:
pass
""",
),
(
ScopeDoc(IdDoc("x"), IdDoc("y"), []),
"comment",
"""
# comment
with y as x:
pass
""",
),
(
ExprStmtDoc(IdDoc("x")),
"comment",
"""
x # comment
""",
),
(
AssertDoc(LiteralDoc(True)),
"comment",
"""
assert True # comment
""",
),
(
ReturnDoc(LiteralDoc(1)),
"comment",
"""
return 1 # comment
""",
),
(
get_func_doc_for_class("f"),
"comment",
'''
@wrap
def f(x: int, y: int = 1) -> None:
"""
comment
"""
y = x + 1
y = y - 1
''',
),
(
get_func_doc_for_class("f"),
"comment line 1\n\ncomment line 3",
'''
@wrap
def f(x: int, y: int = 1) -> None:
"""
comment line 1
comment line 3
"""
y = x + 1
y = y - 1
''',
),
(
ClassDoc(IdDoc("TestClass"), decorators=[IdDoc("wrap")], body=[]),
"comment",
'''
@wrap
class TestClass:
"""
comment
"""
pass
''',
),
(
ClassDoc(IdDoc("TestClass"), decorators=[IdDoc("wrap")], body=[]),
"comment line 1\n\ncomment line 3",
'''
@wrap
class TestClass:
"""
comment line 1
comment line 3
"""
pass
''',
),
],
ids=itertools.count(),
)
def test_print_doc_comment(doc, comment, expected):
doc.comment = comment
assert to_python_script(doc) == format_script(expected)
@pytest.mark.parametrize(
"doc",
[
AssignDoc(IdDoc("x"), IdDoc("y"), IdDoc("int")),
ExprStmtDoc(IdDoc("x")),
AssertDoc(IdDoc("x")),
ReturnDoc(IdDoc("x")),
],
)
def test_print_invalid_multiline_doc_comment(doc):
doc.comment = "1\n2"
with pytest.raises(ValueError) as e:
to_python_script(doc)
assert "cannot have newline" in str(e.value)
def generate_expr_precedence_test_cases():
x = IdDoc("x")
y = IdDoc("y")
z = IdDoc("z")
def negative(a):
return OperationDoc(OperationKind.USub, [a])
def invert(a):
return OperationDoc(OperationKind.Invert, [a])
def not_(a):
return OperationDoc(OperationKind.Not, [a])
def add(a, b):
return OperationDoc(OperationKind.Add, [a, b])
def sub(a, b):
return OperationDoc(OperationKind.Sub, [a, b])
def mult(a, b):
return OperationDoc(OperationKind.Mult, [a, b])
def div(a, b):
return OperationDoc(OperationKind.Div, [a, b])
def mod(a, b):
return OperationDoc(OperationKind.Mod, [a, b])
def pow(a, b):
return OperationDoc(OperationKind.Pow, [a, b])
def lshift(a, b):
return OperationDoc(OperationKind.LShift, [a, b])
def bit_and(a, b):
return OperationDoc(OperationKind.BitAnd, [a, b])
def bit_or(a, b):
return OperationDoc(OperationKind.BitOr, [a, b])
def bit_xor(a, b):
return OperationDoc(OperationKind.BitXor, [a, b])
def lt(a, b):
return OperationDoc(OperationKind.Lt, [a, b])
def eq(a, b):
return OperationDoc(OperationKind.Eq, [a, b])
def not_eq(a, b):
return OperationDoc(OperationKind.NotEq, [a, b])
def and_(a, b):
return OperationDoc(OperationKind.And, [a, b])
def or_(a, b):
return OperationDoc(OperationKind.Or, [a, b])
def if_then_else(a, b, c):
return OperationDoc(OperationKind.IfThenElse, [a, b, c])
test_cases = {
"attr-call-index": [
(
add(x, y).attr("test"),
"(x + y).test",
),
(
add(x, y.attr("test")),
"x + y.test",
),
(
x[z].call(y),
"x[z](y)",
),
(
x.call(y)[z],
"x(y)[z]",
),
(
x.call(y).call(z),
"x(y)(z)",
),
(
x.call(y).attr("test"),
"x(y).test",
),
(
x.attr("test").call(y),
"x.test(y)",
),
(
x.attr("test").attr("test2"),
"x.test.test2",
),
(
LambdaDoc([x], x).call(y),
"(lambda x: x)(y)",
),
(
add(x, y)[z][add(z, z)].attr("name"),
"(x + y)[z][z + z].name",
),
],
"power": [
(
pow(pow(x, y), z),
"(x ** y) ** z",
),
(
pow(x, pow(y, z)),
"x ** y ** z",
),
(
pow(negative(x), negative(y)),
"(-x) ** -y",
),
(
pow(add(x, y), add(y, z)),
"(x + y) ** (y + z)",
),
],
"unary": [
(
invert(negative(y)),
"~-y",
),
(
negative(y).attr("test"),
"(-y).test",
),
(
negative(y.attr("test")),
"-y.test",
),
(
mult(negative(x), negative(y)),
"-x * -y",
),
(
negative(add(invert(x), negative(y))),
"-(~x + -y)",
),
],
"add-mult": [
(
mult(x, mult(y, z)),
"x * (y * z)",
),
(
mult(mult(x, y), z),
"x * y * z",
),
(
mult(x, add(y, z)),
"x * (y + z)",
),
(
mult(add(y, z), x),
"(y + z) * x",
),
(
add(x, mod(y, z)),
"x + y % z",
),
(
add(mult(y, z), x),
"y * z + x",
),
(
add(add(x, y), add(y, z)),
"x + y + (y + z)",
),
(
div(add(x, y), add(y, z)),
"(x + y) / (y + z)",
),
],
"shift": [
(
div(x, lshift(y, z)),
"x / (y << z)",
),
(
mult(lshift(y, z), x),
"(y << z) * x",
),
(
lshift(x, mult(y, z)),
"x << y * z",
),
(
lshift(mult(x, y), z),
"x * y << z",
),
(
lshift(mult(x, y), z),
"x * y << z",
),
(
lshift(lshift(x, y), z),
"x << y << z",
),
(
lshift(x, lshift(y, z)),
"x << (y << z)",
),
],
"bitwise": [
(
add(bit_or(x, y), bit_or(y, z)),
"(x | y) + (y | z)",
),
(
bit_and(bit_or(x, y), bit_or(y, z)),
"(x | y) & (y | z)",
),
(
bit_or(bit_and(x, y), bit_and(y, z)),
"x & y | y & z",
),
(
bit_and(bit_xor(x, bit_or(y, z)), z),
"(x ^ (y | z)) & z",
),
],
"comparison": [
(
not_eq(add(x, y), z),
"x + y != z",
),
(
eq(pow(x, y), z),
"x ** y == z",
),
(
lt(x, div(y, z)),
"x < y / z",
),
(
lt(x, if_then_else(y, y, y)),
"x < (y if y else y)",
),
],
"boolean": [
(
not_(and_(x, y)),
"not (x and y)",
),
(
and_(not_(x), y),
"not x and y",
),
(
and_(or_(x, y), z),
"(x or y) and z",
),
(
or_(x, or_(y, z)),
"x or (y or z)",
),
(
or_(or_(x, y), z),
"x or y or z",
),
(
or_(and_(x, y), z),
# Maybe we should consider adding parentheses here
# for readability, even though it's not necessary.
"x and y or z",
),
(
and_(or_(not_(x), y), z),
"(not x or y) and z",
),
(
and_(lt(x, y), lt(y, z)),
"x < y and y < z",
),
(
or_(not_(eq(x, y)), lt(y, z)),
# Same as the previous one, the code here is not
# readable without parentheses.
"not x == y or y < z",
),
(
and_(if_then_else(x, y, z), x),
"(y if x else z) and x",
),
(
not_(if_then_else(x, y, z)),
"not (y if x else z)",
),
],
"if-then-else": [
(
if_then_else(x, if_then_else(y, y, y), z),
"y if y else y if x else z",
),
(
if_then_else(if_then_else(x, x, x), y, z),
"y if (x if x else x) else z",
),
(
if_then_else(x, y, if_then_else(z, z, z)),
"y if x else (z if z else z)",
),
(
if_then_else(lt(x, x), add(y, y), mult(z, z)),
"y + y if x < x else z * z",
),
(
if_then_else(LambdaDoc([x], x), LambdaDoc([y], y), LambdaDoc([z], z)),
"(lambda y: y) if (lambda x: x) else (lambda z: z)",
),
],
"lambda": [
(
LambdaDoc([x, y], add(z, z)),
"lambda x, y: z + z",
),
(
add(LambdaDoc([x, y], z), z),
"(lambda x, y: z) + z",
),
(
LambdaDoc([x, y], add(z, z)).call(x, y),
"(lambda x, y: z + z)(x, y)",
),
(
LambdaDoc([x], LambdaDoc([y], z)),
"lambda x: lambda y: z",
),
],
}
return [
pytest.param(*args, id=f"{group_name}-{i}")
for group_name, cases in test_cases.items()
for i, args in enumerate(cases)
]
@pytest.mark.parametrize("doc, expected", generate_expr_precedence_test_cases())
def test_expr_precedence(doc, expected):
assert to_python_script(doc) == format_script(expected)
if __name__ == "__main__":
tvm.testing.main()
| 35,481 | 23.219795 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_ptx_ldmatrix.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
@T.prim_func
def ptx_ldmatrix(
A: T.Buffer((16, 16), "float16"), B: T.Buffer((16, 16), "float16"), num: T.int32, trans: T.uint8
) -> None:
T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
bx = T.env_thread("blockIdx.x")
tx = T.env_thread("threadIdx.x")
T.launch_thread(bx, 1)
T.launch_thread(tx, 32)
with T.block():
A_shared = T.alloc_buffer([16, 16], "float16", scope="shared")
A_local = T.alloc_buffer([8], "float16", scope="local")
for i in range(8):
A_shared[i * 2 + tx // 16, tx % 16] = A[i * 2 + tx // 16, tx % 16]
T.evaluate(
T.ptx_ldmatrix(
trans,
num,
".b16",
A_local.data,
0,
A_shared.data,
16 * (tx % 16) + 8 * (tx // 16),
dtype="float16",
)
)
for k in range(2):
for j in range(2):
for i in range(2):
B[8 * j + tx // 4, 8 * k + (tx % 4) * 2 + i] = A_local[4 * k + 2 * j + i]
@tvm.testing.requires_cuda_compute_version(7, 5)
def test_ptx_ldmatrix():
f = ptx_ldmatrix
_, _, param_num, param_trans = f.params
for num in [1, 2, 4]:
for trans in [False, True]:
mod = tvm.build(f.specialize({param_num: num, param_trans: trans}), target="cuda")
A_np = np.random.rand(16, 16).astype("float16")
A_mask_np = np.zeros_like(A_np)
if num == 1:
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
else:
A_mask_np[:8, :8] = A_np[:8, :8]
elif num == 2:
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
A_mask_np[8:16, :8] = A_np[8:16, :8].T
else:
A_mask_np[:16, :8] = A_np[:16, :8]
else: # num == 4
if trans:
A_mask_np[:8, :8] = A_np[:8, :8].T
A_mask_np[8:16, :8] = A_np[8:16, :8].T
A_mask_np[:8, 8:16] = A_np[:8, 8:16].T
A_mask_np[8:16, 8:16] = A_np[8:16, 8:16].T
else:
A_mask_np[:16, :16] = A_np[:16, :16]
B_np = np.zeros((16, 16)).astype("float16")
dev = tvm.cuda(0)
A_nd = tvm.nd.array(A_np, device=dev)
B_nd = tvm.nd.array(B_np, device=dev)
mod(A_nd, B_nd)
tvm.testing.assert_allclose(B_nd.numpy(), A_mask_np)
if __name__ == "__main__":
test_ptx_ldmatrix()
| 3,500 | 34.72449 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_arith_rewrite_simplify.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import pytest
import tvm
from tvm import te, tir
from tvm.tir import truncdiv as tdiv, truncmod as tmod, floordiv as fld, floormod as flm
class TestCase:
    """One rewrite-simplify example: an input expression, its expected
    simplified form, and optional preconditions under which the rewrite holds.
    """

    def __init__(self, before, expected, preconditions=None):
        # `a == b` on PrimExprs yields an EqualOp wrapper; unwrap it so the
        # stored values are plain PrimExprs.
        def _unwrap(value):
            if isinstance(value, tir.expr.EqualOp):
                return value.asobject()
            return value

        self.before = _unwrap(before)
        self.expected = _unwrap(expected)
        self.preconditions = preconditions

    @property
    def constraint(self):
        """Fold the preconditions into a single boolean constraint expression."""
        preconditions = self.preconditions
        if preconditions is None:
            return True
        if isinstance(preconditions, tvm.ir.PrimExpr):
            return preconditions
        return tvm.tir.all(*preconditions)

    @property
    def __name__(self):
        # pytest derives the parametrized test id from this string.
        return str(self.before)
class BaseCompare:
    """Shared test driver: simplify ``before`` under the case's constraint and
    check the result structurally matches ``expected`` (or raises it, when the
    expectation is an exception class).
    """

    def test_simplify(self, test_case):
        analyzer = tvm.arith.Analyzer()

        expects_exception = inspect.isclass(test_case.expected) and issubclass(
            test_case.expected, Exception
        )
        if expects_exception:
            # The expectation is an exception type: simplification must raise it.
            with pytest.raises(test_case.expected):
                with analyzer.constraint_scope(test_case.constraint):
                    analyzer.rewrite_simplify(test_case.before)
            return

        with analyzer.constraint_scope(test_case.constraint):
            after = analyzer.rewrite_simplify(test_case.before)
        message = (
            f"Rewrite didn't match expected.\n"
            f"Before = {test_case.before}\n"
            f"After = {after}\n"
            f"Expected = {test_case.expected}"
        )
        assert tvm.ir.structural_equal(after, test_case.expected), message
class TestVector(BaseCompare):
    """Rewrite rules for vectorized expressions (Ramp/Broadcast lanes)."""

    x, y, z = te.var("x"), te.var("y"), te.var("z")
    vx = te.var("vx", dtype="int32x2")
    vc = te.var("vc", dtype="uint1")
    test_case = tvm.testing.parameter(
        # Add rules
        TestCase(tvm.tir.Ramp(x, 1, 4) + tvm.tir.Ramp(y, 2, 4), tvm.tir.Ramp(x + y, 3, 4)),
        TestCase(tvm.tir.Ramp(x, 1, 2) + y, tvm.tir.Ramp(x + y, 1, 2)),
        TestCase(y + tvm.tir.Ramp(x, 1, 2), tvm.tir.Ramp(y + x, 1, 2)),
        TestCase(y.astype("int32x2") + x.astype("int32x2"), (y + x).astype("int32x2")),
        TestCase(tvm.tir.Broadcast(0, 4) + y, tvm.tir.Broadcast(y, 4)),
        TestCase(
            tvm.tir.Ramp(x, 1, 4).astype("float32x4") + tvm.tir.Broadcast(0.0, 4),
            tvm.tir.Ramp(x, 1, 4).astype("float32x4"),
        ),
        # Sub rules
        TestCase(tvm.tir.Ramp(x, 4, 4) - tvm.tir.Ramp(y, 2, 4), tvm.tir.Ramp(x - y, 2, 4)),
        TestCase(tvm.tir.Ramp(x, 1, 2) - y, tvm.tir.Ramp(x - y, 1, 2)),
        TestCase(y - tvm.tir.Ramp(x, 1, 2), tvm.tir.Ramp(y - x, -1, 2)),
        TestCase(y.astype("int32x2") - x.astype("int32x2"), (y - x).astype("int32x2")),
        # Mul rules
        TestCase(y.astype("int32x2") * x.astype("int32x2"), (y * x).astype("int32x2")),
        TestCase(tvm.tir.Ramp(x, 4, 4) * 2, tvm.tir.Ramp(x * 2, 8, 4)),
        TestCase(2 * tvm.tir.Ramp(x, 4, 4), tvm.tir.Ramp(x * 2, 8, 4)),
        TestCase(tvm.tir.Broadcast(0, 4) * x, tvm.tir.Broadcast(0, 4)),
        TestCase(tvm.tir.Broadcast(0.0, 4) * x, tvm.tir.Broadcast(0.0, 4)),
        ## DivMod rules
        # trunc div
        TestCase(tdiv(y.astype("int32x2"), x.astype("int32x2")), tdiv(y, x).astype("int32x2")),
        TestCase(tdiv(tvm.tir.Ramp(x, 4, 4), 2), tvm.tir.Ramp(tdiv(x, 2), 2, 4)),
        TestCase(tdiv(tvm.tir.Ramp(x * 8 + 1, 1, 4), 8), x.astype("int32x4"), x >= 0),
        TestCase(tdiv(tvm.tir.Ramp(x * 8 + 15, 1, 4), 8), tdiv(tvm.tir.Ramp(x * 8 + 15, 1, 4), 8)),
        # trunc mod
        TestCase(tmod(y.astype("int32x2"), x.astype("int32x2")), tmod(y, x).astype("int32x2")),
        TestCase(tmod(tvm.tir.Ramp(x, 4, 4), 2), tvm.tir.Broadcast(tmod(x, 2), 4)),
        TestCase(tmod(tvm.tir.Ramp(x * 8 + 1, 1, 4), 8), tvm.tir.Ramp(1, 1, 4), x >= 0),
        TestCase(tmod(tvm.tir.Ramp(x * 8 + 1, 15, 4), 8), tmod(tvm.tir.Ramp(1, 15, 4), 8), x >= 0),
        # floor div
        TestCase(fld(y.astype("int32x2"), x.astype("int32x2")), fld(y, x).astype("int32x2")),
        TestCase(fld(tvm.tir.Ramp(x, 4, 4), 2), tvm.tir.Ramp(fld(x, 2), 2, 4)),
        TestCase(fld(tvm.tir.Ramp(x * 8 + 1, 1, 4), 8), (x).astype("int32x4")),
        TestCase(fld(tvm.tir.Ramp(x * 8 + 15, 1, 4), 8), fld(tvm.tir.Ramp(x * 8 + 15, 1, 4), 8)),
        TestCase(
            fld(tvm.tir.Ramp(x, 8, 5), tvm.tir.Broadcast(4, 5)), tvm.tir.Ramp(fld(x, 4), 2, 5)
        ),
        TestCase(
            fld(tvm.tir.Ramp(flm(x * 4, 256), 1, 4), tvm.tir.Broadcast(8, 4)),
            tvm.tir.Broadcast(fld(flm(x * 4, 256), 8), 4),
        ),
        TestCase(
            fld(tvm.tir.Ramp(x, 7, 4), tvm.tir.Broadcast(4, 4)),
            fld(tvm.tir.Ramp(x, 7, 4), tvm.tir.Broadcast(4, 4)),
        ),
        TestCase(
            fld(tvm.tir.Ramp(x * 8, 1, 4), tvm.tir.Broadcast(4, 4)), tvm.tir.Broadcast(x * 2, 4)
        ),
        TestCase(
            fld(tvm.tir.Ramp(x * 8, 3, 4), tvm.tir.Broadcast(4, 4)),
            fld(tvm.tir.Ramp(x * 8, 3, 4), tvm.tir.Broadcast(4, 4)),
        ),
        TestCase(
            fld(tvm.tir.Ramp(x * 8 + 15, 1, 4), tvm.tir.Broadcast(4, 4)),
            fld(tvm.tir.Ramp(x * 8 + 15, 1, 4), tvm.tir.Broadcast(4, 4)),
        ),
        TestCase(
            fld(tvm.tir.Ramp(x * 4, 1, 4), tvm.tir.Broadcast(64, 4)),
            tvm.tir.Broadcast(fld(x, 16), 4),
        ),
        TestCase(
            fld(tvm.tir.Ramp(x * 8, 2, 4), tvm.tir.Broadcast(64, 4)),
            tvm.tir.Broadcast(fld(x, 8), 4),
        ),
        TestCase(
            fld(tvm.tir.Ramp(x * 4, 1, 5), tvm.tir.Broadcast(64, 5)),
            fld(tvm.tir.Ramp(x * 4, 1, 5), tvm.tir.Broadcast(64, 5)),
        ),  # Example negative case: x = 15; [60, 61, 62, 63, 64] / 64 = [0, 0, 0, 0, 1]
        TestCase(
            fld(tvm.tir.Ramp(x * 4 + 3, 1, 4), tvm.tir.Broadcast(64, 4)),
            fld(tvm.tir.Ramp(x * 4 + 3, 1, 4), tvm.tir.Broadcast(64, 4)),
        ),  # Example negative case: x = 15; [63, 64, 65, 66] % 64 = [0, 1, 1, 1]
        TestCase(
            fld(tvm.tir.Ramp(x * 7, 1, 4), tvm.tir.Broadcast(64, 4)),
            fld(tvm.tir.Ramp(x * 7, 1, 4), tvm.tir.Broadcast(64, 4)),
        ),  # Example negative case: x = 9; [63, 70, 77, 84] % 64 = [0, 1, 1, 1]
        # floor mod
        TestCase(flm(y.astype("int32x2"), x.astype("int32x2")), flm(y, x).astype("int32x2")),
        TestCase(flm(tvm.tir.Ramp(x, 4, 4), 2), tvm.tir.Broadcast(flm(x, 2), 4)),
        TestCase(flm(tvm.tir.Ramp(x * 8 + 1, 1, 4), 8), tvm.tir.Ramp(1, 1, 4)),
        TestCase(flm(tvm.tir.Ramp(x * 8 + 1, 15, 4), 8), flm(tvm.tir.Ramp(1, 15, 4), 8)),
        TestCase(
            flm(tvm.tir.Ramp(x, 8, 4), tvm.tir.Broadcast(4, 4)), tvm.tir.Broadcast(flm(x, 4), 4)
        ),
        TestCase(
            flm(tvm.tir.Ramp(x, 7, 4), tvm.tir.Broadcast(4, 4)),
            flm(tvm.tir.Ramp(x, 7, 4), tvm.tir.Broadcast(4, 4)),
        ),
        TestCase(flm(tvm.tir.Ramp(x * 8, 1, 4), tvm.tir.Broadcast(4, 4)), tvm.tir.Ramp(0, 1, 4)),
        TestCase(
            flm(tvm.tir.Ramp(x * 8, 1, 5), tvm.tir.Broadcast(4, 5)),
            flm(tvm.tir.Ramp(0, 1, 5), tvm.tir.Broadcast(4, 5)),
        ),
        TestCase(
            flm(tvm.tir.Ramp(x * 8 + 7, 1, 4), tvm.tir.Broadcast(4, 4)),
            flm(tvm.tir.Ramp(3, 1, 4), tvm.tir.Broadcast(4, 4)),
        ),
        TestCase(
            flm(tvm.tir.Ramp(x * 4, 1, 4), tvm.tir.Broadcast(64, 4)),
            tvm.tir.Ramp(flm(x * 4, 64), 1, 4),
        ),
        TestCase(
            flm(tvm.tir.Ramp(x * 8, 2, 4), tvm.tir.Broadcast(64, 4)),
            tvm.tir.Ramp(flm(x * 8, 64), 2, 4),
        ),
        TestCase(
            flm(tvm.tir.Ramp(x * 4, 1, 5), tvm.tir.Broadcast(64, 5)),
            flm(tvm.tir.Ramp(x * 4, 1, 5), tvm.tir.Broadcast(64, 5)),
        ),  # Example negative case: x = 15; [60, 61, 62, 63, 64] % 64 = [60, 61, 62, 63, 0]
        TestCase(
            flm(tvm.tir.Ramp(x * 4 + 3, 1, 4), tvm.tir.Broadcast(64, 4)),
            flm(tvm.tir.Ramp(x * 4 + 3, 1, 4), tvm.tir.Broadcast(64, 4)),
        ),  # Example negative case: x = 15; [63, 64, 65, 66] % 64 = [63, 0, 1, 2]
        TestCase(
            flm(tvm.tir.Ramp(x * 2, 1, 8), tvm.tir.Broadcast(20, 8)),
            flm(tvm.tir.Ramp(x * 2, 1, 8), tvm.tir.Broadcast(20, 8)),
        ),  # Example negative case: x = 9; [18, 19, 20, ..., 25] % 20 = [18, 19, 0, 1, ..., 5]
        TestCase(
            flm(tvm.tir.Ramp(x * 7, 1, 4), tvm.tir.Broadcast(64, 4)),
            flm(tvm.tir.Ramp(x * 7, 1, 4), tvm.tir.Broadcast(64, 4)),
        ),  # Example negative case: x = 9; [63, 70, 77, 84] % 64 = [63, 6, 13, 20]
        # Min/Max rules
        TestCase(
            tvm.te.min(y.astype("int32x2"), x.astype("int32x2")), tvm.te.min(y, x).astype("int32x2")
        ),
        TestCase(
            tvm.te.min(tvm.te.min(vx, y.astype("int32x2")), x.astype("int32x2")),
            tvm.te.min(vx, tvm.te.min(y, x).astype("int32x2")),
        ),
        TestCase(
            tvm.te.max(y.astype("int32x2"), x.astype("int32x2")), tvm.te.max(y, x).astype("int32x2")
        ),
        TestCase(
            tvm.te.max(tvm.te.max(vx, y.astype("int32x2")), x.astype("int32x2")),
            tvm.te.max(vx, tvm.te.max(y, x).astype("int32x2")),
        ),
        ## Logical rules
        TestCase(y.astype("int32x2").equal(x.astype("int32x2")), (y.equal(x)).astype("uint1x2")),
        TestCase(
            tvm.tir.NE(y.astype("int32x2"), (x.astype("int32x2"))),
            (tvm.tir.NE(y, x)).astype("uint1x2"),
        ),
        TestCase(y.astype("int32x2") > x.astype("int32x2"), (x < y).astype("uint1x2")),
        TestCase(y.astype("int32x2") >= x.astype("int32x2"), (x <= y).astype("uint1x2")),
        TestCase(y.astype("int32x2") < x.astype("int32x2"), (y < x).astype("uint1x2")),
        TestCase(y.astype("int32x2") <= x.astype("int32x2"), (y <= x).astype("uint1x2")),
        TestCase(
            tvm.tir.And(y.astype("int32x2") <= x.astype("int32x2"), vc.astype("uint1x2")),
            (tvm.tir.And(y <= x, vc)).astype("uint1x2"),
        ),
        TestCase(
            tvm.tir.Or(y.astype("int32x2") <= x.astype("int32x2"), vc.astype("uint1x2")),
            (tvm.tir.Or(y <= x, vc)).astype("uint1x2"),
        ),
    )
class TestSelect(BaseCompare):
    """Rewrite rules that combine or eliminate Select expressions."""

    x, y, z = te.var("x"), te.var("y"), te.var("z")
    test_case = tvm.testing.parameter(
        # Add rules
        TestCase(
            tvm.tir.Select(x < 0, y, 0) + tvm.tir.Select(x < 0, 1, z),
            tvm.tir.Select(x < 0, y + 1, z),
        ),
        TestCase(
            tvm.tir.Select(x < 0, y, 1) - tvm.tir.Select(x < 0, 1, z),
            tvm.tir.Select(x < 0, y + (-1), 1 - z),
        ),
        TestCase(tvm.tir.Select(x < 0, y, z) - y, tvm.tir.Select(x < 0, 0, z - y)),
        TestCase(tvm.tir.Select(x < 0, y, z) - z, tvm.tir.Select(x < 0, y - z, 0)),
        TestCase(
            tvm.te.min(tvm.tir.Select(x < 0, y, 0), tvm.tir.Select(x < 0, 1, z)),
            tvm.tir.Select(x < 0, tvm.te.min(y, 1), tvm.te.min(0, z)),
        ),
        TestCase(
            tvm.te.max(tvm.tir.Select(x < 0, y, 0), tvm.tir.Select(x < 0, 1, z)),
            tvm.tir.Select(x < 0, tvm.te.max(y, 1), tvm.te.max(0, z)),
        ),
        # A provably-constant condition selects one branch outright.
        TestCase(tvm.tir.Select(x * 3 + 1 != 0, y, z), y),
        TestCase(tvm.tir.Select(x * 3 + 1 == 0, y, z), z),
        # Identical branches make the condition irrelevant.
        TestCase(tvm.tir.Select(x > 0, y + 1, y + 1), y + 1),
    )
class TestAddIndex(BaseCompare):
    """Rewrite rules for integer addition in index expressions."""

    x, y, z = te.var("x"), te.var("y"), te.var("z")
    test_case = tvm.testing.parameter(
        TestCase(x + (y - x), y),
        TestCase(x - (y + 1) + (y + 1), x),
        TestCase((x - 10) + (10 - z), x - z),
        TestCase((x - y) + (z - x), z - y),
        TestCase(tvm.te.min(x, y - z) + z, tvm.te.min(x + z, y)),
        TestCase(tvm.te.min(x - z, y) + z, tvm.te.min(x, y + z)),
        TestCase(tvm.te.max(x, y - 10) + 10, tvm.te.max(x + 10, y)),
        TestCase(tvm.te.max(x - 11, y) + 11, tvm.te.max(x, y + 11)),
        TestCase(tvm.te.max(x, y * 2) + tvm.te.min(x, y * 2), x + y * 2),
        TestCase(tvm.te.min(x, y * 2) + tvm.te.max(x, y * 2), x + y * 2),
        TestCase(tvm.te.max(x, y + 2) + (-2), tvm.te.max(x + (-2), y)),
        TestCase(tvm.te.min(x, y + 2) + (-2), tvm.te.min(x + (-2), y)),
        TestCase(tvm.te.min(x + 2, y + 3) + (-2), tvm.te.min(x, y + 1)),
        TestCase(tvm.te.max(0, 1 - x * 4) + x * 4, tvm.te.max(x * 4, 1)),
        TestCase(tvm.te.max(2 - x * 4, 0) + x * 4, tvm.te.max(x * 4, 2)),
        TestCase(tvm.te.min(0, 1 - x * 4) + x * 4, tvm.te.min(x * 4, 1)),
        TestCase(tvm.te.min(2 - x * 4, 0) + x * 4, tvm.te.min(x * 4, 2)),
        # Factoring out a common multiplicand.
        TestCase(x * y + x * 10, x * (y + 10)),
        TestCase(y * x + x * 10, x * (y + 10)),
        TestCase(y * x + 10 * x, x * (y + 10)),
        TestCase(x * y + 10 * x, x * (y + 10)),
        TestCase((2 * z) + tvm.te.min(x, y - (2 * z)), tvm.te.min(x + (z * 2), y)),
        TestCase(y * x + x, x * (y + 1)),
        TestCase(x * y + x, x * (y + 1)),
        TestCase((x + 10) + 13, x + 23),
        TestCase((x + 10) + (13 + z), x + z + 23),
        TestCase(x * y + 10 * x, x * (y + 10)),
        TestCase(y * x + x * 3, x * (y + 3)),
        TestCase(x + 3 + y, x + y + 3),
        TestCase((3 - y) + x, x - y + 3),
        # canonicalization
        TestCase(x + 2 + 3 + 4 + x, x * 2 + 9),
        TestCase(x + 2 + 3 + 4 + x * 3, x * 4 + 9),
        # DivMod rules
        # trunc div
        TestCase(y * tmod(x, 8) + 10 * tmod(x, 8), tmod(x, 8) * (y + 10)),
        TestCase(tdiv(x, 8) * 8 + tmod(x, 8), x),
        # floor div
        TestCase(y * flm(x, 8) + 10 * flm(x, 8), flm(x, 8) * (y + 10)),
        TestCase(fld(x, 8) * 8 + flm(x, 8), x),
        TestCase(fld(flm(x, 2) + 7, 2) + fld(x, 2), fld(x + 7, 2)),
    )
class TestSubIndex(BaseCompare):
    """Rewrite rules for integer subtraction in index expressions."""

    x, y, z = te.var("x"), te.var("y"), te.var("z")
    a, b = tvm.tir.Any(), tvm.tir.Any()
    test_case = tvm.testing.parameter(
        TestCase(x + y - y, x),
        TestCase(x + y - x, y),
        TestCase(x - (y + x), 0 - y),
        TestCase(x - (x + y), 0 - y),
        TestCase(tvm.te.min(x, y) - x, tvm.te.min(0, y - x)),
        TestCase(tvm.te.min(x, y) - y, tvm.te.min(x - y, 0)),
        TestCase(tvm.te.max(x, y) - x, tvm.te.max(0, y - x)),
        TestCase(tvm.te.max(x, y) - y, tvm.te.max(x - y, 0)),
        TestCase(x - tvm.te.min(x, y), tvm.te.max(0, x - y)),
        TestCase(y - tvm.te.min(x, y), tvm.te.max(y - x, 0)),
        TestCase(x - tvm.te.max(x, y), tvm.te.min(0, x - y)),
        TestCase(y - tvm.te.max(x, y), tvm.te.min(y - x, 0)),
        # mul coefficient folding
        TestCase(x - x, 0),
        TestCase(a - a, 0),
        TestCase(a - b, a - b),
        TestCase(x * y - x, x * (y + (-1))),
        TestCase(x * y - 10 * x, x * (y + (-10))),
        TestCase(y * x - x * z, x * (y - z)),
        TestCase(y * x - z * x, x * (y - z)),
        TestCase(x + 10 - 20, x + (-10)),
        # 4-operands pattern
        TestCase((x + y) - (x + z), y - z),
        TestCase((y + x) - (x + z), y - z),
        TestCase((x + y) - (z + x), y - z),
        TestCase((y + x) - (z + x), y - z),
        TestCase(tvm.te.min(x + y, z) - x, tvm.te.min(y, z - x)),
        TestCase(tvm.te.min(y + x, z) - x, tvm.te.min(y, z - x)),
        TestCase(tvm.te.min(z, x + y) - x, tvm.te.min(z - x, y)),
        TestCase(tvm.te.min(z, y + x) - x, tvm.te.min(z - x, y)),
        TestCase(tvm.te.max(x + y, z) - x, tvm.te.max(y, z - x)),
        TestCase(tvm.te.max(y + x, z) - x, tvm.te.max(y, z - x)),
        TestCase(tvm.te.max(z, x + y) - x, tvm.te.max(z - x, y)),
        TestCase(tvm.te.max(z, y + x) - x, tvm.te.max(z - x, y)),
        TestCase(x - tvm.te.min(x + y, z), tvm.te.max(0 - y, x - z)),
        TestCase(x - tvm.te.min(y + x, z), tvm.te.max(0 - y, x - z)),
        TestCase(x - tvm.te.min(z, x + y), tvm.te.max(x - z, 0 - y)),
        TestCase(x - tvm.te.min(z, y + x), tvm.te.max(x - z, 0 - y)),
        TestCase(tvm.te.min(x, y) - tvm.te.min(y, x), 0),
        TestCase(tvm.te.max(x, y) - tvm.te.max(y, x), 0),
        TestCase(tvm.te.min(x, y) - tvm.te.min(x + 10, y + 10), -10),
        TestCase(tvm.te.min(x + 10, y + 1) - tvm.te.min(x, y - 9), 10),
        TestCase(x - tvm.te.max(x + y, 0), tvm.te.min(0 - y, x)),
        TestCase(x - tvm.te.max(0, x + y), tvm.te.min(x, 0 - y)),
        TestCase(x - tvm.te.min(x + y, 0), tvm.te.max(0 - y, x)),
        TestCase(x - tvm.te.min(0, x + y), tvm.te.max(x, 0 - y)),
        # DivMod patterns
        # trunc div
        TestCase(x - tdiv(x, 3) * 3, tmod(x, 3)),
        TestCase(tdiv(x + 5, 3) - tdiv(x, 3), tdiv(tmod(x, 3) + 5, 3), x >= 0),
        TestCase(tdiv(x + 5, 3) - tdiv(x + 1, 3), tdiv(tmod(x + 1, 3) + 4, 3), x >= -1),
        TestCase(y - tdiv(y, (-5)) * (-5), tmod(y, 5)),
        TestCase(tdiv(y, 3) * 3 - y, 0 - tmod(y, 3)),
        TestCase(y - tdiv(y - 6, 5) * 5, tmod(y + (-6), 5) + 6),
        TestCase(tdiv(y - 6, 5) * 5 - y, (-6) - tmod(y + (-6), 5)),
        TestCase(y - tdiv(y + z, 5) * 5, tmod(y + z, 5) - z),
        TestCase(tdiv(y + z, 5) * 5 - y, z - tmod(y + z, 5)),
        TestCase(y - tdiv(y - z, 5) * 5, tmod(y - z, 5) + z),
        TestCase(tdiv(y - z, 5) * 5 - y, 0 - tmod(y - z, 5) - z),
        TestCase(y * 3 - tdiv(y, 2) * 6, tmod(y, 2) * 3),
        TestCase(tdiv(y, 3) * 6 - y * 2, tmod(y, 3) * (-2)),
        TestCase(y * 5 - tdiv(y + z, 2) * 10, (tmod(y + z, 2) - z) * 5),
        TestCase(y * 5 - tdiv(y - z, 2) * 10, (tmod(y - z, 2) + z) * 5),
        TestCase(tdiv(y + z, 3) * 6 - y * 2, (z - tmod(y + z, 3)) * 2),
        TestCase(tdiv(y - z, 3) * 6 - y * 2, (0 - tmod(y - z, 3) - z) * 2),
        TestCase(5 * y - tdiv(y + z, 2) * 10, (tmod(y + z, 2) - z) * 5),
        TestCase(5 * y - 10 * tdiv(y - z, 2), (tmod(y - z, 2) + z) * 5),
        TestCase(6 * tdiv(y + z, 3) - y * 2, (z - tmod(y + z, 3)) * 2),
        TestCase(tdiv(y - z, 3) * 6 - 2 * y, (0 - tmod(y - z, 3) - z) * 2),
        # floor div
        TestCase(x - fld(x, 3) * 3, flm(x, 3)),
        TestCase(fld(x + 5, 3) - fld(x, 3), fld(flm(x, 3) + 5, 3)),
        TestCase(fld(x + 5, 3) - fld(x + 2, 3), fld(flm(x + 2, 3), 3) + 1),
        TestCase(fld(y, 3) * 3 - y, 0 - flm(y, 3)),
        TestCase(y - fld(y - 6, 5) * 5, flm(y + 4, 5) + 6),
        TestCase(fld(y - 6, 5) * 5 - y, (-6) - flm(y + 4, 5)),
        TestCase(y - fld(y + z, 5) * 5, flm(y + z, 5) - z),
        TestCase(fld(y + z, 5) * 5 - y, z - flm(y + z, 5)),
        TestCase(y - fld(y - z, 5) * 5, flm(y - z, 5) + z),
        TestCase(fld(y - z, 5) * 5 - y, 0 - flm(y - z, 5) - z),
        TestCase(y * 3 - fld(y, 2) * 6, flm(y, 2) * 3),
        TestCase(fld(y, 3) * 6 - y * 2, flm(y, 3) * (-2)),
        TestCase(y * 5 - fld(y + z, 2) * 10, (flm(y + z, 2) - z) * 5),
        TestCase(y * 5 - fld(y - z, 2) * 10, (flm(y - z, 2) + z) * 5),
        TestCase(fld(y + z, 3) * 6 - y * 2, (z - flm(y + z, 3)) * 2),
        TestCase(fld(y - z, 3) * 6 - y * 2, (0 - flm(y - z, 3) - z) * 2),
        TestCase(5 * y - fld(y + z, 2) * 10, (flm(y + z, 2) - z) * 5),
        TestCase(5 * y - 10 * fld(y - z, 2), (flm(y - z, 2) + z) * 5),
        TestCase(6 * fld(y + z, 3) - y * 2, (z - flm(y + z, 3)) * 2),
        TestCase(fld(y - z, 3) * 6 - 2 * y, (0 - flm(y - z, 3) - z) * 2),
    )
class TestMulIndex(BaseCompare):
    """Rewrite rules for integer multiplication in index expressions."""

    x, y, z = te.var("x"), te.var("y"), te.var("z")
    test_case = tvm.testing.parameter(
        TestCase((x + 2) * 3, x * 3 + 6),
        TestCase((x * 2) * 3, x * 6),
        # min(x,y) * max(x,y) == x * y regardless of ordering.
        TestCase(tvm.te.min(x, y) * tvm.te.max(x, y), x * y),
        TestCase(tvm.te.max(x, y) * tvm.te.min(x, y), x * y),
        TestCase((x - y) * (-2), (y - x) * 2),
    )
class TestDivIndex(BaseCompare):
    """Rewrite rules for truncated division; most require non-negative operands."""

    x, y, z = te.var("x"), te.var("y"), te.var("z")
    # Truncdiv rules are only valid for non-negative operands.
    non_negative = [x >= 0, y >= 0, z >= 0]
    test_case = tvm.testing.parameter(
        TestCase(tdiv(x, x), 1),
        TestCase(tdiv(tdiv(x, 2), 3), tdiv(x, 6)),
        TestCase(tdiv(tdiv(x, 2) + 1, 3), tdiv(x + 2, 6), non_negative),
        TestCase(tdiv(x * 2, 4), tdiv(x, 2)),
        TestCase(tdiv(x * 4, 2), x * 2),
        TestCase(tdiv(x * 4 + y, 2), x * 2 + tdiv(y, 2), non_negative),
        TestCase(tdiv(tvm.te.min(x * 6, y), 2), tvm.te.min(x * 3, tdiv(y, 2)), non_negative),
        TestCase(tdiv(tvm.te.max(x * 6, y), 2), tvm.te.max(x * 3, tdiv(y, 2)), non_negative),
        TestCase(tdiv(y + x * 4, 2), tdiv(y, 2) + x * 2, non_negative),
        TestCase(tdiv(tvm.te.min(y, x * 6), 2), tvm.te.min(tdiv(y, 2), x * 3), non_negative),
        TestCase(tdiv(tvm.te.max(y, x * 6), 2), tvm.te.max(tdiv(y, 2), x * 3), non_negative),
        # 3-operands
        TestCase(tdiv(x * 6 + y + z, 2), x * 3 + tdiv(y + z, 2), non_negative),
        TestCase(tdiv(x * 6 - y + (y + 3), 2), x * 3 + 1, non_negative),
        TestCase(tdiv(x * 6 + (y + 3) - y, 2), x * 3 + 1, non_negative),
        TestCase(tdiv(y + x * 6 + z, 2), x * 3 + tdiv(y + z, 2), non_negative),
        TestCase(tdiv(x + 4, 2), tdiv(x, 2) + 2, non_negative),
        TestCase(tdiv(x + y, x), tdiv(y, x) + 1, non_negative),
        TestCase(tdiv(y + x, x), tdiv(y, x) + 1, non_negative),
        TestCase(tdiv((x + y) + z, x), tdiv(y + z, x) + 1, non_negative),
        TestCase(tdiv((y + x) + z, x), tdiv(y + z, x) + 1, non_negative),
        TestCase(tdiv(y + (x + z), x), tdiv(y + z, x) + 1, non_negative),
        TestCase(tdiv(y + (z + x), x), tdiv(y + z, x) + 1, non_negative),
        TestCase(tdiv(x * y, y), x, non_negative),
        TestCase(tdiv(y * x, y), x, non_negative),
        TestCase(tdiv(x * z + y, z), x + tdiv(y, z), non_negative),
        TestCase(tdiv(z * x + y, z), x + tdiv(y, z), non_negative),
        TestCase(tdiv(y + x * z, z), tdiv(y, z) + x, non_negative),
        TestCase(tdiv(y + z * x, z), tdiv(y, z) + x, non_negative),
    )
class TestFloordivIndex(BaseCompare):
    """Rewrite rules for floor division in index expressions."""

    x, y, z = te.var("x"), te.var("y"), te.var("z")
    test_case = tvm.testing.parameter(
        TestCase(fld(fld(x, 2), 3), fld(x, 6)),
        TestCase(fld(fld(x, 2) + 1, 3), fld(x + 2, 6)),
        TestCase(fld(x - flm(x, 21), 21), fld(x, 21)),
        TestCase(fld(x * 2, 4), fld(x, 2)),
        TestCase(fld(x * 4, 2), x * 2),
        TestCase(fld(x * 8 + 7, 16), fld(x, 2)),
        TestCase(fld(x * 8 + 39, 16), fld(x, 2) + 2),
        TestCase(fld(x * 8 - 1, 16), fld(x * 8 + -1, 16)),
        TestCase(fld(x * 8 - 9, 16), fld(x, 2) + -1),
        # TODO(Lunderberg): Remove the necessity for the preconditions
        # in this section.  They shouldn't be necessary for floordiv,
        # where they would be required for truncdiv.
        TestCase(fld(x * 360 + y, 16), x * 22, [x >= 0, x < 2, y >= 0, y < 7]),
        TestCase(fld(x * 360 + y, 25), x * 14, [x >= 0, x < 2, y >= 0, y < 7]),
        TestCase(fld(x * 360 - 8, 25), fld(x * 360 + -8, 25)),
        TestCase(fld(x * 4 + y, 2), x * 2 + fld(y, 2)),
        TestCase(fld(tvm.te.min(x * 6, y), 2), tvm.te.min(x * 3, fld(y, 2))),
        TestCase(fld(tvm.te.max(x * 6, y), 2), tvm.te.max(x * 3, fld(y, 2))),
        TestCase(fld(y + x * 4, 2), x * 2 + fld(y, 2)),
        TestCase(fld(tvm.te.min(y, x * 6), 2), tvm.te.min(fld(y, 2), x * 3)),
        TestCase(fld(tvm.te.max(y, x * 6), 2), tvm.te.max(fld(y, 2), x * 3)),
        # 3-operands
        #
        # TODO(Lunderberg): Remove the necessity for the preconditions
        # in this section.  They shouldn't be required, since floordiv
        # has translational symmetry, even for negative.
        TestCase(fld(x * 6 + y + z, 2), x * 3 + fld(y + z, 2)),
        TestCase(fld(x * 6 - y + (y + 3), 2), x * 3 + 1),
        TestCase(fld(x * 6 + (y + 3) - y, 2), x * 3 + 1),
        TestCase(fld(y + x * 6 + z, 2), x * 3 + fld(y + z, 2)),
        TestCase(fld(x + 4, 2), fld(x, 2) + 2),
        TestCase(fld(x + y, x), fld(y, x) + 1, x >= 0),
        TestCase(fld(y + x, x), fld(y, x) + 1, x >= 0),
        TestCase(fld((x + y) + z, x), fld(y + z, x) + 1, x >= 0),
        TestCase(fld((y + x) + z, x), fld(y + z, x) + 1, x >= 0),
        TestCase(fld(y + (x + z), x), fld(y + z, x) + 1, x >= 0),
        TestCase(fld(y + (z + x), x), fld(y + z, x) + 1, x >= 0),
        TestCase(fld(x * y, y), x, y >= 0),
        TestCase(fld(y * x, y), x, y >= 0),
        TestCase(fld(x * z + y, z), x + fld(y, z), z >= 0),
        TestCase(fld(z * x + y, z), x + fld(y, z), z >= 0),
        TestCase(fld(y + x * z, z), fld(y, z) + x, z >= 0),
        TestCase(fld(y + z * x, z), fld(y, z) + x, z >= 0),
        TestCase(fld(x * 32 + y, 64), fld(x, 2), [y >= 0, y < 32]),
        TestCase(fld(x * 128 + y * 4 + z, 512), fld(x, 4), [y >= 0, y < 32, z >= 0, z < 4]),
    )
class TestModIndex(BaseCompare):
    """Rewrite rules for truncated modulo; sign preconditions are often needed."""

    x, y, nx, ny, z = te.var("x"), te.var("y"), te.var("nx"), te.var("ny"), te.var("z")
    test_case = tvm.testing.parameter(
        # TODO(Lunderberg): Loosen these preconditions.  When there's
        # a single term whose factor is divisible by the denominator,
        # the sign of the argument doesn't matter.
        TestCase(tmod(x * 10, 2), 0, x >= 0),
        TestCase(tmod(x * 10 + y, 2), tmod(y, 2), [x >= 0, y >= 0]),
        TestCase(tmod(x + 10, 2), tmod(x, 2), x >= 0),
        TestCase(tmod(x + y * 10, 2), tmod(x, 2), [x >= 0, y >= 0]),
        TestCase(tmod(x * 10 + 1 + y * 2 + 2, 2), 1, [x >= 0, y >= 0]),
        TestCase(tmod(x * 10, -2), 0, x <= 0),
        TestCase(tmod(x * 10 + y, -2), tmod(y, 2), [x >= 0, y >= 0]),
        TestCase(tmod(x + 10, -2), tmod(x, 2), x >= 0),
        TestCase(tmod(x + y * 10, -2), tmod(x, 2), [x >= 0, y >= 0]),
        TestCase(tmod(x * 10 + 1 + y * 2 + 2, -2), 1, [x >= 0, y >= 0]),
        # Without sign information, these cannot simplify.
        TestCase(tmod(x * (-10), 2), 0),
        TestCase(tmod(x * (-10) + y, 2), tmod(x * (-10) + y, 2)),
        TestCase(tmod(x + (-10), 2), tmod(x + (-10), 2)),
        TestCase(tmod(x + y * (-10), 2), tmod(x + y * (-10), 2)),
        TestCase(tmod(x * (-10), -2), 0),
        # Non-positive variables (nx, ny) behave symmetrically.
        TestCase(tmod(nx * 10, 2), 0),
        TestCase(tmod(nx * (-10) + y, 2), tmod(y, 2), [nx <= 0, y >= 0]),
        TestCase(tmod(x + ny * (-10), 2), tmod(x, 2), [x >= 0, ny <= 0]),
        TestCase(tmod(nx * (-10) + 1 + ny * (-2) + 2, 2), 1, [nx <= 0, ny <= 0]),
        TestCase(tmod(nx * 10, -2), 0),
        TestCase(tmod(nx * (-10) + y, -2), tmod(y, 2), [nx <= 0, y >= 0]),
        TestCase(tmod(x + ny * (-10), -2), tmod(x, 2), [x >= 0, ny <= 0]),
    )
class TestFloormodIndex(BaseCompare):
    """Rewrite rules for floor modulo in index expressions."""

    x, y, z = te.var("x"), te.var("y"), te.var("z")
    test_case = tvm.testing.parameter(
        TestCase(flm(x * 10, 2), 0),
        TestCase(flm(x * 9600, 6400), flm(x * 3200, 6400)),
        TestCase(flm(x * 10 + y, 2), flm(y, 2)),
        TestCase(flm(x * 360 + y, 16), flm(x * 8 + y, 16)),
        TestCase(flm(x + 10, 2), flm(x, 2)),
        TestCase(flm(x + y * 10, 2), flm(x, 2)),
        TestCase(flm(x + y * 360, 16), flm(x + y * 8, 16)),
        TestCase(flm(x * (-10), 2), 0),
        TestCase(flm(x * (-10) + y, 2), flm(y, 2)),
        TestCase(flm(x + (-10), 2), flm(x, 2)),
        TestCase(flm(x + y * (-10), 2), flm(x, 2)),
        TestCase(flm(x * 32 + y, 64), flm(x, 2) * 32 + y, [y >= 0, y < 32]),
        TestCase(flm(x * 32 - y, 64), flm(x * 32 - y, 64), [y >= 0, y < 32]),
        # NOTE: the following case is covered by canonical simplify
        # long range simplification in general can be covered by canonical simplify
        # TestCase(flm(x * 10 + 1 + y * 2 + 2, 2), 1),
    )
class TestFloorModTwo(BaseCompare):
    """Special-case simplifications for FloorMod(expr,2)

    Because FloorMod(expr,2) has only two possible values, it can be
    simplified more aggressively than most FloorMod expressions.  Some
    of these have analogues for other denominators (e.g. x%3 + (x+1)%3
    + (x+2)%3 == 0 + 1 + 2), but they don't appear as often and
    require identifying more related terms in order to apply.

    (x + c1)//2 - (x+c2)//2 => (x%2)*( c1%2 - c2%2 ) + (c1//2 - c2//2)

    We should not introduce extra negative coefficient to iterators
    however during simplification
    """

    x, y, z = te.var("x"), te.var("y"), te.var("z")
    test_case = tvm.testing.parameter(
        # Removing offsets from floormod
        TestCase(flm(x, 2) + flm(x + 1, 2), 1),
        TestCase(flm(x + 1, 2) + flm(x, 2), 1),
        # Difference of floordiv yields floormod
        TestCase(fld(x + 1, 2) - fld(x, 2), flm(x, 2)),
        TestCase(fld(x, 2) - fld(x - 1, 2), flm(x, 2) * -1 + 1),
        TestCase(fld(x + 5, 2) - fld(x - 2, 2), flm(x, 2) + 3),
        TestCase(fld(x + 5, 2) - fld(x - 3, 2), 4),
        TestCase(fld(flm(x, 2) + 1, 2), flm(x, 2)),
        # Sum of floordiv and floormod to yield floordiv
        TestCase(fld(x + 1, 2) - flm(x, 2), fld(x, 2)),
        TestCase(fld(x, 2) + flm(x, 2), fld(x + 1, 2)),
        # regression: although we can rewrite (x + 1) %2 => 1 - x%2
        # doing so would introduce negative co-efficient to iterators
        # which makes later iter map detection harder, in principle we
        # should not introduce additional negative signs of iterator in rewriting
        TestCase(flm(x + 1, 2), flm(x + 1, 2)),
        TestCase(flm(x + 5, 2), flm(x + 1, 2)),
        TestCase(flm(x + 1, 2) * 8192, flm(x + 1, 2) * 8192, [x >= 0, x < 2]),
    )
class TestMinIndex(BaseCompare):
    """Rewrite rules for min() in index expressions."""

    x, y, z = te.var("x"), te.var("y"), te.var("z")
    test_case = tvm.testing.parameter(
        # const int bound
        TestCase(tvm.te.min(tmod(x, 2), tmod(y, 2) + 10), tmod(x, 2)),
        TestCase(tvm.te.min(flm(x, 2), flm(y, 2) + 10), flm(x, 2)),
        TestCase(tvm.te.min(x + 1, x + 10), x + 1),
        TestCase(tvm.te.min(x + 111, x + 10), x + 10),
        TestCase(tvm.te.min(x + 1, x), x),
        TestCase(tvm.te.min(x, x + 2), x),
        TestCase(tvm.te.min(1 - x, 2 - x), 1 - x),
        TestCase(tvm.te.min(3 - x, 2 - x), 2 - x),
        TestCase(tvm.te.min(tvm.te.max(x, y), tvm.te.min(x, y)), tvm.te.min(x, y)),
        TestCase(tvm.te.min(tvm.te.max(x, y), tvm.te.min(y, x)), tvm.te.min(x, y)),
        TestCase(tvm.te.min(tvm.te.max(x, y), x), x),
        TestCase(tvm.te.min(tvm.te.max(y, x), x), x),
        TestCase(tvm.te.min(tvm.te.min(x, y), x), tvm.te.min(x, y)),
        TestCase(tvm.te.min(tvm.te.min(x, y), y), tvm.te.min(x, y)),
        TestCase(tvm.te.min(x, tvm.te.max(x, y)), x),
        TestCase(tvm.te.min(x, tvm.te.max(y, x)), x),
        TestCase(tvm.te.min(x, tvm.te.min(x, y)), tvm.te.min(x, y)),
        TestCase(tvm.te.min(y, tvm.te.min(x, y)), tvm.te.min(x, y)),
        TestCase(tvm.te.min(tvm.te.min(tvm.te.min(x, y), z), y), tvm.te.min(tvm.te.min(x, y), z)),
        TestCase(
            tvm.te.min(tvm.te.min(tvm.te.min(tvm.te.min(x, y), z), x * 2), y),
            tvm.te.min(tvm.te.min(tvm.te.min(x, y), z), x * 2),
        ),
        TestCase(
            tvm.te.min(tvm.te.min(tvm.te.min(tvm.te.min(tvm.te.min(x, y), z), x * 2), z * 2), y),
            tvm.te.min(tvm.te.min(tvm.te.min(tvm.te.min(x, y), z), x * 2), z * 2),
        ),
        TestCase(tvm.te.min(tvm.te.max(x, y), tvm.te.max(x, z)), tvm.te.max(tvm.te.min(y, z), x)),
        TestCase(tvm.te.min(tvm.te.max(x, y), tvm.te.max(z, x)), tvm.te.max(tvm.te.min(y, z), x)),
        TestCase(tvm.te.min(tvm.te.max(y, x), tvm.te.max(x, z)), tvm.te.max(tvm.te.min(y, z), x)),
        TestCase(tvm.te.min(tvm.te.max(y, x), tvm.te.max(z, x)), tvm.te.max(tvm.te.min(y, z), x)),
        TestCase(tvm.te.min(y + x, z + x), tvm.te.min(y, z) + x),
        TestCase(tvm.te.min(y + x, x + z), tvm.te.min(y, z) + x),
        TestCase(tvm.te.min(x + y, z + x), tvm.te.min(y, z) + x),
        TestCase(tvm.te.min(x + y, x + z), tvm.te.min(y, z) + x),
        TestCase(tvm.te.min(x - y, x - z), x - tvm.te.max(y, z)),
        TestCase(tvm.te.min(y - x, z - x), tvm.te.min(y, z) - x),
        TestCase(tvm.te.min(tvm.te.min(x, 1), 10), tvm.te.min(x, 1)),
        TestCase(tvm.te.min(tvm.te.min(x, 11), 10), tvm.te.min(x, 10)),
        TestCase(tvm.te.min(x * 3, 9), tvm.te.min(x, 3) * 3),
        TestCase(tvm.te.min(x * 2, 0), tvm.te.min(x, 0) * 2),
        TestCase(tvm.te.min(0 - x * 2, 0), tvm.te.max(x, 0) * -2),
        TestCase(tvm.te.min(3 - x, 2), 3 - tvm.te.max(x, 1)),
        TestCase(tvm.te.min(x * (-2), -4), tvm.te.max(x, 2) * -2),
        TestCase(tvm.te.min(x * (-2), 4), tvm.te.max(x, -2) * -2),
        TestCase(tvm.te.min(x * (0), 4), 0),
        TestCase(tvm.te.min(x * (0), -4), -4),
        # DivMod rules
        # trunc div
        TestCase(tvm.te.min(tdiv(x + 3, 4) * 4, x), x),
        TestCase(tvm.te.min(x, tdiv(x + 3, 4) * 4), x),
        TestCase(tvm.te.min(tdiv(x + 3, 4) * 4, tvm.te.max(x, 4)), tvm.te.max(x, 4), x > 0),
        TestCase(tvm.te.min(tvm.te.max(x, 4), tdiv(x + 3, 4) * 4), tvm.te.max(x, 4), x > 0),
        TestCase(tvm.te.min(tdiv(x, 10), tdiv(y, 10)), tdiv(tvm.te.min(x, y), 10)),
        TestCase(tvm.te.min(tdiv(x, (-10)), tdiv(y, (-10))), tdiv(tvm.te.max(x, y), (-10))),
        # floor div
        TestCase(tvm.te.min(fld(x + 3, 4) * 4, x), x),
        TestCase(tvm.te.min(x, fld(x + 3, 4) * 4), x),
        TestCase(tvm.te.min(x, fld(x, 4) * 4), fld(x, 4) * 4),
        TestCase(tvm.te.min(fld(x + 3, 4) * 4, tvm.te.max(x, 4)), tvm.te.max(x, 4), x > 0),
        TestCase(tvm.te.min(tvm.te.max(x, 4), fld(x + 3, 4) * 4), tvm.te.max(x, 4), x > 0),
        TestCase(tvm.te.min(fld(x, 10), fld(y, 10)), fld(tvm.te.min(x, y), 10)),
        TestCase(tvm.te.min(fld(x, (-10)), fld(y, (-10))), fld(tvm.te.max(x, y), (-10))),
    )
class TestMaxIndex(BaseCompare):
    """Rewrite rules for max() in index expressions (mirror of TestMinIndex)."""

    x, y, z = te.var("x"), te.var("y"), te.var("z")
    test_case = tvm.testing.parameter(
        # const int bound
        TestCase(tvm.te.max(tmod(x, 2), tmod(y, 2) + 10), tmod(y, 2) + 10),
        TestCase(tvm.te.max(flm(x, 2), flm(y, 2) + 10), flm(y, 2) + 10),
        TestCase(tvm.te.max(x + 1, x + 10), x + 10),
        TestCase(tvm.te.max(x + 111, x + 10), x + 111),
        TestCase(tvm.te.max(x + 1, x), x + 1),
        TestCase(tvm.te.max(x, x + 2), x + 2),
        TestCase(tvm.te.max(1 - x, 2 - x), 2 - x),
        TestCase(tvm.te.max(3 - x, 2 - x), 3 - x),
        TestCase(tvm.te.max(tvm.te.min(x, y), tvm.te.max(x, y)), tvm.te.max(x, y)),
        TestCase(tvm.te.max(tvm.te.min(x, y), tvm.te.max(y, x)), tvm.te.max(x, y)),
        TestCase(tvm.te.max(tvm.te.min(x, y), x), x),
        TestCase(tvm.te.max(tvm.te.min(y, x), x), x),
        TestCase(tvm.te.max(tvm.te.max(x, y), x), tvm.te.max(x, y)),
        TestCase(tvm.te.max(tvm.te.max(x, y), y), tvm.te.max(x, y)),
        TestCase(tvm.te.max(x, tvm.te.min(x, y)), x),
        TestCase(tvm.te.max(x, tvm.te.min(y, x)), x),
        TestCase(tvm.te.max(x, tvm.te.max(x, y)), tvm.te.max(x, y)),
        TestCase(tvm.te.max(y, tvm.te.max(x, y)), tvm.te.max(x, y)),
        TestCase(tvm.te.max(tvm.te.max(tvm.te.max(x, y), z), y), tvm.te.max(tvm.te.max(x, y), z)),
        TestCase(
            tvm.te.max(tvm.te.max(tvm.te.max(tvm.te.max(x, y), z), x * 2), y),
            tvm.te.max(tvm.te.max(tvm.te.max(x, y), z), x * 2),
        ),
        TestCase(
            tvm.te.max(tvm.te.max(tvm.te.max(tvm.te.max(tvm.te.max(x, y), z), x * 2), z * 2), y),
            tvm.te.max(tvm.te.max(tvm.te.max(tvm.te.max(x, y), z), x * 2), z * 2),
        ),
        TestCase(tvm.te.max(tvm.te.min(x, y), tvm.te.min(x, z)), tvm.te.min(tvm.te.max(y, z), x)),
        TestCase(tvm.te.max(tvm.te.min(x, y), tvm.te.min(z, x)), tvm.te.min(tvm.te.max(y, z), x)),
        TestCase(tvm.te.max(tvm.te.min(y, x), tvm.te.min(x, z)), tvm.te.min(tvm.te.max(y, z), x)),
        TestCase(tvm.te.max(tvm.te.min(y, x), tvm.te.min(z, x)), tvm.te.min(tvm.te.max(y, z), x)),
        TestCase(tvm.te.max(y + x, z + x), tvm.te.max(y, z) + x),
        TestCase(tvm.te.max(y + x, x + z), tvm.te.max(y, z) + x),
        TestCase(tvm.te.max(x + y, z + x), tvm.te.max(y, z) + x),
        TestCase(tvm.te.max(x + y, x + z), tvm.te.max(y, z) + x),
        TestCase(tvm.te.max(x - y, x - z), x - tvm.te.min(y, z)),
        TestCase(tvm.te.max(y - x, z - x), tvm.te.max(y, z) - x),
        TestCase(tvm.te.max(tvm.te.max(x, 1), 10), tvm.te.max(x, 10)),
        TestCase(tvm.te.max(tvm.te.max(x, 11), 10), tvm.te.max(x, 11)),
        TestCase(tvm.te.max(x * 3, 9), tvm.te.max(x, 3) * 3),
        TestCase(tvm.te.max(3 - x, 1), 3 - tvm.te.min(x, 2)),
        TestCase(tvm.te.max(x * 2, 0), tvm.te.max(x, 0) * 2),
        TestCase(tvm.te.max(0 - x * 2, 0), tvm.te.min(x, 0) * -2),
        TestCase(tvm.te.max(x * (-2), -4), tvm.te.min(x, 2) * -2),
        TestCase(tvm.te.max(x * (-2), 4), tvm.te.min(x, -2) * -2),
        TestCase(tvm.te.max(x * (0), 4), 4),
        TestCase(tvm.te.max(x * (0), -4), 0),
        # DivMod rules
        # trunc div
        TestCase(tvm.te.max(tdiv(x, 10), tdiv(y, 10)), tdiv(tvm.te.max(x, y), 10)),
        TestCase(tvm.te.max(tdiv(x, (-10)), tdiv(y, (-10))), tdiv(tvm.te.min(x, y), (-10))),
        TestCase(tvm.te.max(tdiv(x + 3, 4) * 4, x), tdiv(x + 3, 4) * 4),
        # floordiv
        TestCase(tvm.te.max(fld(x, 10), fld(y, 10)), fld(tvm.te.max(x, y), 10)),
        TestCase(tvm.te.max(fld(x, (-10)), fld(y, (-10))), fld(tvm.te.min(x, y), (-10))),
        TestCase(tvm.te.max(fld(x + 3, 4) * 4, x), fld(x + 3, 4) * 4),
        TestCase(tvm.te.max(fld(x, 4) * 4, x), x),
        TestCase(tvm.te.max(x, fld(x, 4) * 4), x),
    )
class TestComparisons(BaseCompare):
    """Simplification table for integer comparison expressions.

    Each TestCase pairs an input comparison with the expression it is
    expected to simplify to (checked by BaseCompare, defined earlier in
    this file).  Some cases at the end carry a third argument; it looks
    like a set of known constraints under which the simplification holds
    -- confirm against the TestCase helper defined earlier in the file.
    """

    x, y, z = te.var("x"), te.var("y"), te.var("z")
    test_case = tvm.testing.parameter(
        # const int bound
        TestCase((tmod(x, 2) + 10).equal(0), tvm.tir.const(0, "bool")),
        TestCase(tvm.tir.NE(tmod(x, 2) + 10, 0), tvm.tir.const(1, "bool")),
        TestCase(tmod(x, 2) + 10 > 1, tvm.tir.const(1, "bool")),
        TestCase(tmod(x, 2) + 10 <= 1, tvm.tir.const(0, "bool")),
        TestCase(flm(x, 2) + 2 > 1, tvm.tir.const(1, "bool")),
        TestCase(flm(x, 2) + 10 <= 1, tvm.tir.const(0, "bool")),
        TestCase(x * 3 + 10 == 0, tvm.tir.const(0, "bool")),
        TestCase(x * 3 + 10 != 0, tvm.tir.const(1, "bool")),
        # canonicalization
        TestCase((x - 10).equal(0), x.equal(10)),
        TestCase((10 - x).equal(0), x.equal(10)),
        TestCase((x * y).equal(0), tvm.tir.Or(x.equal(0), y.equal(0))),
        # Write LT as LE for integer arguments, if possible
        TestCase(x - 1 < y, x <= y),
        TestCase(x + (-1) < y, x <= y),
        TestCase(x < y - (-1), x <= y),
        TestCase(x < y + 1, x <= y),
        TestCase(x + 2 < y + 3, x <= y),
        TestCase(x - 3 < y - 2, x <= y),
        TestCase(x - 3 < y + (-2), x <= y),
        TestCase(x + (-3) < y - 2, x <= y),
        # Merge constants on the LHS/RHS of a LT expression.
        TestCase(x + 10 < y + 10, x < y),
        TestCase(x + 5 < y + 10, x < y + 5),
        TestCase(x + 10 < y + 5, x + 5 < y),
        TestCase(x - 5 < y - 10, x + 5 < y),
        TestCase(x - 10 < y - 5, x < y + 5),
        TestCase(x < y - 10, x + 10 < y),
        TestCase(x - 10 < y, x < y + 10),
        # cmp bound
        TestCase(x + y < x + z, y < z),
        TestCase(x + y < z + x, y < z),
        TestCase(y + x < x + z, y < z),
        TestCase(y + x < z + x, y < z),
        TestCase(y - x < z - x, y < z),
        TestCase(x - y < x - z, z < y),
        TestCase(x < z + x, tvm.tir.LT(0, z)),
        TestCase(x < x + z, tvm.tir.LT(0, z)),
        TestCase(100 < x + 1, tvm.tir.LT(99, x)),
        TestCase(1 < 100 - x, tvm.tir.LT(x, 99)),
        TestCase(x * 3 < y * 3, x < y),
        TestCase(x * (-3) < y * (-3), y < x),
        TestCase(x * 3 >= y * 3, y <= x),
        TestCase(x * 4 >= 2, tvm.tir.LE(1, x)),
        TestCase(x * 2 >= 50, tvm.tir.LE(25, x)),
        TestCase(x * 4 <= 2, x <= 0),
        TestCase((0 - x * 3) <= 0, tvm.tir.LE(0, x)),
        TestCase((0 - x * 3) >= 0, tvm.tir.LE(x, 0)),
        TestCase(2 * x <= 0, x <= 0),
        TestCase(x * 2 >= 3, tvm.tir.LE(2, x)),
        TestCase(x * 2 >= 2, tvm.tir.LE(1, x)),
        TestCase(x * 2 >= 1, tvm.tir.LE(1, x)),
        TestCase(x * 2 >= 0, tvm.tir.LE(0, x)),
        TestCase(x * 2 >= -1, tvm.tir.LE(0, x)),
        TestCase(x * 2 >= -2, tvm.tir.LE(-1, x)),
        TestCase(x * 2 >= -3, tvm.tir.LE(-1, x)),
        TestCase(x * 2 <= 3, tvm.tir.LE(x, 1)),
        TestCase(x * 2 <= 2, tvm.tir.LE(x, 1)),
        TestCase(x * 2 <= 1, tvm.tir.LE(x, 0)),
        TestCase(x * 2 <= 0, tvm.tir.LE(x, 0)),
        TestCase(x * 2 <= -1, tvm.tir.LE(x, -1)),
        TestCase(x * 2 <= -2, tvm.tir.LE(x, -1)),
        TestCase(x * 2 <= -3, tvm.tir.LE(x, -2)),
        TestCase(x * (-2) >= 3, tvm.tir.LE(x, -2)),
        TestCase(x * (-2) >= 2, tvm.tir.LE(x, -1)),
        TestCase(x * (-2) >= 1, tvm.tir.LE(x, -1)),
        TestCase(x * (-2) >= 0, tvm.tir.LE(x, 0)),
        TestCase(x * (-2) >= -1, tvm.tir.LE(x, 0)),
        TestCase(x * (-2) >= -2, tvm.tir.LE(x, 1)),
        TestCase(x * (-2) >= -3, tvm.tir.LE(x, 1)),
        TestCase(x * (-2) <= 3, tvm.tir.LE(-1, x)),
        TestCase(x * (-2) <= 2, tvm.tir.LE(-1, x)),
        TestCase(x * (-2) <= 1, tvm.tir.LE(0, x)),
        TestCase(x * (-2) <= 0, tvm.tir.LE(0, x)),
        TestCase(x * (-2) <= -1, tvm.tir.LE(1, x)),
        TestCase(x * (-2) <= -2, tvm.tir.LE(1, x)),
        TestCase(x * (-2) <= -3, tvm.tir.LE(2, x)),
        # DivMod rules
        # truc div
        TestCase(tdiv(x, 2) < 3, x < 6),
        TestCase(3 < tdiv(x, 2), tvm.tir.LT(7, x)),
        TestCase(tdiv(x, 3) >= 0, tvm.tir.LE(-2, x)),
        TestCase(tdiv(x, 2) >= 1, tvm.tir.LE(2, x)),
        TestCase(tdiv(x, 2) >= 0, tvm.tir.LE(-1, x)),
        TestCase(tdiv(x, 2) >= -1, tvm.tir.LE(-3, x)),
        TestCase(tdiv(x, 2) <= 1, tvm.tir.LE(x, 3)),
        TestCase(tdiv(x, 2) <= 0, tvm.tir.LE(x, 1)),
        TestCase(tdiv(x, 2) <= -1, tvm.tir.LE(x, -2)),
        TestCase(tdiv(x, 4) * 4 < x, tvm.tir.LT(0, tmod(x, 4))),
        TestCase(tdiv(x, 4) * 4 >= x, tvm.tir.LE(tmod(x, 4), 0)),
        TestCase(tdiv(x, 4) * 4 < x + y, tvm.tir.LT(0, tmod(x, 4) + y)),
        TestCase(tdiv(x, 4) * 4 < x - y, tvm.tir.LT(y, tmod(x, 4))),
        TestCase(tdiv(x + 2, 4) * 4 >= x, tvm.tir.LE(tmod(x + 2, 4), 2)),
        TestCase(tdiv(x + 2, 4) * 4 >= x + y, tvm.tir.LE(tmod(x + 2, 4) + y, 2)),
        TestCase(tdiv(x + 2, 4) * 4 >= x - y, tvm.tir.LE(tmod(x + 2, 4), y + 2)),
        # floor div
        TestCase(fld(x, 2) < 3, x < 6),
        TestCase(3 < fld(x, 2), tvm.tir.LT(7, x)),
        TestCase(-3 < fld(x, 2), tvm.tir.LT(-5, x)),
        TestCase(fld(x, 3) >= 0, tvm.tir.LE(0, x)),
        TestCase(fld(x, 2) >= 1, tvm.tir.LE(2, x)),
        TestCase(fld(x, 2) >= 0, tvm.tir.LE(0, x)),
        TestCase(fld(x, 2) >= -1, tvm.tir.LE(-2, x)),
        TestCase(fld(x, 2) <= 1, tvm.tir.LE(x, 3)),
        TestCase(fld(x, 2) <= 0, tvm.tir.LE(x, 1)),
        TestCase(fld(x, 2) <= -1, tvm.tir.LE(x, -1)),
        TestCase(fld(x, 4) * 4 < x, tvm.tir.LT(0, flm(x, 4))),
        TestCase(fld(x, 4) * 4 >= x, tvm.tir.EQ(flm(x, 4), 0)),
        TestCase(fld(x, 4) * 4 < x + y, tvm.tir.LT(0, flm(x, 4) + y)),
        TestCase(fld(x, 4) * 4 < x - y, tvm.tir.LT(y, flm(x, 4))),
        TestCase(fld(x + 2, 4) * 4 >= x, tvm.tir.LE(flm(x + 2, 4), 2)),
        TestCase(fld(x + 2, 4) * 4 >= x + y, tvm.tir.LE(flm(x + 2, 4) + y, 2)),
        TestCase(fld(x + 2, 4) * 4 >= x - y, tvm.tir.LE(flm(x + 2, 4), y + 2)),
        # End DivMod Rules
        # merging flm/fld into known value
        TestCase(tir.all(fld(x, 8) == 3, flm(x, 8) == 4), x == 28),
        TestCase(tir.all(flm(x, 8) == 4, fld(x, 8) == 3), x == 28),
        TestCase(tir.all(fld(x, 8) == -3, flm(x, 8) == 4), x == -20),
        TestCase(tir.all(flm(x, 8) == 4, fld(x, 8) == -3), x == -20),
        # Rewrite based on definition of integer division
        TestCase(tir.all(tvm.runtime.convert(0) <= x - y * 5, x - y * 5 < 5), y == fld(x, 5)),
        TestCase(tir.all(x - y * 5 < 5, tvm.runtime.convert(0) <= x - y * 5), y == fld(x, 5)),
        # Narrow upper bound using floormod
        TestCase(tir.all(x < 20, flm(x, 5) < 2), tir.all(x < 17, flm(x, 5) < 2)),
        TestCase(tir.all(x < 18, flm(x, 5) < 2), tir.all(x < 17, flm(x, 5) < 2)),
        TestCase(tir.all(x <= 19, flm(x, 5) < 2), tir.all(x < 17, flm(x, 5) < 2)),
        TestCase(tir.all(x <= 18, flm(x, 5) < 2), tir.all(x < 17, flm(x, 5) < 2)),
        TestCase(tir.all(x < -20, flm(x, 5) < 2), tir.all(x < -23, flm(x, 5) < 2)),
        TestCase(tir.all(x < 18 - 40, flm(x, 5) < 2), tir.all(x < 17 - 40, flm(x, 5) < 2)),
        TestCase(tir.all(x <= -21, flm(x, 5) < 2), tir.all(x < -23, flm(x, 5) < 2)),
        TestCase(tir.all(x <= -22, flm(x, 5) < 2), tir.all(x < -23, flm(x, 5) < 2)),
        # No change if the floormod cannot help narrow the upper bound
        TestCase(tir.all(x < 16, flm(x, 5) < 2), tir.all(x < 16, flm(x, 5) < 2)),
        TestCase(tir.all(x <= 15, flm(x, 5) < 2), tir.all(x <= 15, flm(x, 5) < 2)),
        # Merge a known floordiv and an upper bound of floormod into a value range
        TestCase(
            tir.all(fld(x, 10) == 5, flm(x, 10) < 7),
            tir.all(tvm.runtime.convert(50) <= x, x < 57),
        ),
        TestCase(
            tir.all(fld(x, 10) == 5, flm(x, 10) <= 7),
            tir.all(tvm.runtime.convert(50) <= x, x <= 57),
        ),
        TestCase(
            tir.all(fld(x, 10) == -5, flm(x, 10) < 7),
            tir.all(tvm.runtime.convert(-50) <= x, x < -43),
        ),
        TestCase(
            tir.all(fld(x, 10) == -5, flm(x, 10) <= 7),
            tir.all(tvm.runtime.convert(-50) <= x, x <= -43),
        ),
        # Merge a known floordiv and an lower bound of floormod into a value range
        TestCase(
            tir.all(fld(x, 10) == 5, tvm.runtime.convert(7) < flm(x, 10)),
            tir.all(tvm.runtime.convert(57) < x, x < 60),
        ),
        TestCase(
            tir.all(fld(x, 10) == 5, tvm.runtime.convert(7) <= flm(x, 10)),
            tir.all(tvm.runtime.convert(57) <= x, x < 60),
        ),
        TestCase(
            tir.all(fld(x, 10) == -5, tvm.runtime.convert(7) < flm(x, 10)),
            tir.all(tvm.runtime.convert(-43) < x, x < -40),
        ),
        TestCase(
            tir.all(fld(x, 10) == -5, tvm.runtime.convert(7) <= flm(x, 10)),
            tir.all(tvm.runtime.convert(-43) <= x, x < -40),
        ),
        TestCase(tvm.te.min(x, 11) < 10, x < 10),
        TestCase(tvm.te.min(x, 8) < 10, tvm.tir.const(1, "bool")),
        TestCase(tvm.te.max(8, x) > 10, tvm.tir.LT(10, x)),
        TestCase(x + 1 < tvm.te.max(8, x), x < 7),
        # NOTE(review): the third TestCase argument below looks like a set of
        # known variable constraints -- confirm against the TestCase helper.
        TestCase(x < 11, tvm.tir.const(1, "bool"), x <= 10),
        TestCase(x <= 10, tvm.tir.const(1, "bool"), x <= 10),
        TestCase(z <= 5, tvm.tir.const(1, "bool"), z <= 5),
        TestCase(x + y <= 10, tvm.tir.const(1, "bool"), [x <= 10, y <= 0]),
        TestCase(x + y >= -10, tvm.tir.const(1, "bool"), [x >= 0, y >= -10]),
        TestCase(z - 5 <= y + 10, tvm.tir.const(1, "bool"), [z <= 5, y >= -10]),
        TestCase(tvm.tir.all(x > -1, z <= x + 5), tvm.tir.const(1, "bool"), [x >= 0, z <= 5]),
        TestCase(x * y <= 0, tvm.tir.const(1, "bool"), [x >= 0, y <= 0]),
        TestCase((x + 1) * (y - 1) < 0, tvm.tir.const(1, "bool"), [x >= 0, y <= 0]),
        TestCase(y * y >= 0, tvm.tir.const(1, "bool"), y <= 0),
        TestCase(x * 6 <= -3, tvm.tir.const(0, "bool"), x >= 0),
        TestCase(tmod(y - 1, 3) == 0, tmod(y + (-1), 3) == 0),
    )
class TestLogical(BaseCompare):
    """Simplification table for boolean And/Or expressions.

    Each TestCase pairs a boolean expression with the form it is expected
    to simplify to (checked by BaseCompare, defined earlier in the file):
    contradictions fold to False, tautologies to True, and nested And/Or
    chains are re-associated to a canonical left-leaning shape.
    """

    x, y, z = te.var("x"), te.var("y"), te.var("z")
    test_case = tvm.testing.parameter(
        TestCase(tvm.tir.And(tvm.tir.EQ(x, y), tvm.tir.NE(x, y)), tvm.tir.const(False, "bool")),
        TestCase(tvm.tir.And(tvm.tir.NE(x, y), tvm.tir.EQ(x, y)), tvm.tir.const(False, "bool")),
        TestCase(tvm.tir.And(x > 1, tvm.tir.Not(x > 1)), tvm.tir.const(False, "bool")),
        TestCase(tvm.tir.And(x <= y, y < x), tvm.tir.const(False, "bool")),
        TestCase(tvm.tir.And(y < x, x <= y), tvm.tir.const(False, "bool")),
        TestCase(tvm.tir.And(x < 1, 0 < x), tvm.tir.const(False, "bool")),
        TestCase(tvm.tir.And(x < 0, 1 < x), tvm.tir.const(False, "bool")),
        TestCase(tvm.tir.And(x < 1, 1 <= x), tvm.tir.const(False, "bool")),
        TestCase(tvm.tir.And(x <= 1, 1 < x), tvm.tir.const(False, "bool")),
        TestCase(tvm.tir.And(1 <= x, x < 1), tvm.tir.const(False, "bool")),
        TestCase(tvm.tir.And(1 < x, x <= 1), tvm.tir.const(False, "bool")),
        TestCase(tvm.tir.And(x <= 1, 2 <= x), tvm.tir.const(False, "bool")),
        TestCase(tvm.tir.And(2 <= x, x <= 1), tvm.tir.const(False, "bool")),
        TestCase(tvm.tir.And(x == 1, x != 2), x == 1),
        TestCase(tvm.tir.Or(tvm.tir.EQ(x, y), tvm.tir.NE(x, y)), tvm.tir.const(True, "bool")),
        TestCase(tvm.tir.Or(tvm.tir.NE(x, y), tvm.tir.EQ(x, y)), tvm.tir.const(True, "bool")),
        TestCase(tvm.tir.Or(x > y, tvm.tir.Not(x > y)), tvm.tir.const(True, "bool")),
        TestCase(tvm.tir.Or(x <= y, y < x), tvm.tir.const(True, "bool")),
        TestCase(tvm.tir.Or(y < x, y >= x), tvm.tir.const(True, "bool")),
        TestCase(tvm.tir.Or(x < 1, 0 < x), tvm.tir.const(True, "bool")),
        TestCase(tvm.tir.Or(0 < x, x < 1), tvm.tir.const(True, "bool")),
        TestCase(tvm.tir.Or(x < 1, 1 <= x), tvm.tir.const(True, "bool")),
        TestCase(tvm.tir.Or(x <= 1, 1 < x), tvm.tir.const(True, "bool")),
        TestCase(tvm.tir.Or(1 <= x, x < 1), tvm.tir.const(True, "bool")),
        TestCase(tvm.tir.Or(1 < x, x <= 1), tvm.tir.const(True, "bool")),
        TestCase(tvm.tir.Or(x <= 1, 2 <= x), tvm.tir.const(True, "bool")),
        TestCase(tvm.tir.Or(2 <= x, x <= 1), tvm.tir.const(True, "bool")),
        TestCase(tvm.tir.Or(x != 1, x == 2), x != 1),
        # Re-association of nested Or/And chains to a canonical shape.
        TestCase(
            tvm.tir.Or(x == 1, tvm.tir.Or(y == 1, z == 1)),
            tvm.tir.Or(tvm.tir.Or(x == 1, y == 1), z == 1),
        ),
        TestCase(
            tvm.tir.And(x == 1, tvm.tir.And(y == 1, z == 1)),
            tvm.tir.And(tvm.tir.And(x == 1, y == 1), z == 1),
        ),
    )
class TestLet(BaseCompare):
    """A Let-bound expression is substituted and folded to a constant."""

    var_x = te.var("x")
    var_y = te.var("y")
    # let x = 1 in (x + 1)  -- evaluates to 2
    bound = tvm.tir.Let(var_x, 1, var_x + 1)
    test_case = tvm.testing.parameter(
        # (let x = 1 in x + 1) + (let x = 1 in x + 1)  ==>  4
        TestCase(bound + bound, 4),
    )
class TestCast(BaseCompare):
    """Casts of constant (or trivially constant) expressions fold away."""

    def _generate_tests():
        var = te.var("x")
        dtypes = ["float32", "float16", "int32", "int8", "bool"]
        for target_dtype in dtypes:
            # x - x and x == x are constant regardless of the source dtype.
            yield TestCase(tvm.tir.Cast(target_dtype, var - var), tvm.tir.const(0, target_dtype))
            yield TestCase(tvm.tir.Cast(target_dtype, var == var), tvm.tir.const(1, target_dtype))
            for source_dtype in dtypes:
                for value in (0, 1, 2, 3):
                    if value > 1 and (source_dtype == "bool" or target_dtype == "bool"):
                        # Values above 1 are not representable as bool.
                        continue
                    yield TestCase(
                        tvm.tir.Cast(target_dtype, tvm.tir.const(value, source_dtype)),
                        tvm.tir.const(value, target_dtype),
                    )

    test_case = tvm.testing.parameter(*_generate_tests())
class TestShiftLeft(BaseCompare):
    """A shift_left intrinsic with constant operands folds to a constant."""

    shifted = tvm.tir.op.call_intrin("int32", "tir.shift_left", 1, 10)
    test_case = tvm.testing.parameter(
        # 1 << 10 == 1024
        TestCase(shifted, tvm.tir.const(1 << 10, "int32")),
    )
class TestDivZero(BaseCompare):
    """Vectorized division/modulo by a zero broadcast must raise TVMError."""

    numerator = tvm.tir.Ramp(1, 1, 2)
    zero_vector = tvm.tir.Broadcast(0, 2)
    test_case = tvm.testing.parameter(
        TestCase(tvm.tir.Div(numerator, zero_vector), tvm.error.TVMError),
        TestCase(tvm.tir.Mod(numerator, zero_vector), tvm.error.TVMError),
        TestCase(tvm.tir.FloorDiv(numerator, zero_vector), tvm.error.TVMError),
        TestCase(tvm.tir.FloorMod(numerator, zero_vector), tvm.error.TVMError),
    )
class TestSubBufferload(BaseCompare):
    """Subtracting a buffer load from itself simplifies to zero."""

    _buffer = tvm.tir.decl_buffer([1], dtype="float32")
    _load = tvm.tir.BufferLoad(_buffer, [0])
    test_case = tvm.testing.parameter(
        TestCase(_load - _load, 0.0),
    )
class TestIfThenElse(BaseCompare):
    """Nested if_then_else expressions are flattened where possible."""

    cond_var = te.var("x", "int32")
    test_case = tvm.testing.parameter(
        # The inner condition is merged into the outer one with And.
        TestCase(
            tvm.tir.if_then_else(cond_var < 5, tvm.tir.if_then_else(cond_var > 1, 1, 0), 0),
            tvm.tir.if_then_else(
                tvm.tir.And(tvm.tir.LT(cond_var, 5), tvm.tir.LT(1, cond_var)), 1, 0
            ),
        ),
        # The outer condition implies the inner one, so the inner collapses.
        TestCase(
            tvm.tir.if_then_else(cond_var > 2, tvm.tir.if_then_else(cond_var > 1, 1, 0), 0),
            tvm.tir.if_then_else(tvm.tir.LT(2, cond_var), 1, 0),
        ),
    )
# Allow running this test file directly (outside of pytest collection).
if __name__ == "__main__":
    tvm.testing.main()
# (removed interleaved dataset-metadata row that corrupted the file boundary)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
@pytest.mark.xfail
def test_loop_dependent_allocate():
    """Lowering a cache_read whose allocation extent depends on an
    undefined loop variable should fail (hence the xfail marker)."""
    size = te.size_var("N")
    data = te.placeholder((2 * size,), "float32", "A")
    result = te.compute((size,), lambda i: data[2 * i] + data[i + 1], name="C")
    sched = te.create_schedule(result.op)
    cached = sched.cache_read(data, "local", [result])
    sched[cached].compute_at(sched[result], sched[result].op.axis[0])
    # IRUseDefAnalysis is expected to reject the allocate statement that
    # references an undefined variable.
    tvm.lower(sched, [data, result])
# Allow running this test file directly; invokes the single test case.
if __name__ == "__main__":
    test_loop_dependent_allocate()
# (removed interleaved dataset-metadata row that corrupted the file boundary)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import tir, script
from tvm.script import tir as T
from tvm.tir import stmt_functor
from tvm.tir.usmp import utils as usmp_utils
from tvm.target import Target
from tvm import WorkspacePoolInfo, PoolInfoProperties
def _replace_stmt_with_buf_var_names(buffer_info_map):
"""helper to replace tir.allocates with buffer names"""
new_buffer_info_map = dict()
for k, v in buffer_info_map.items():
new_buffer_info_map[v.buffer_var.name] = k
return new_buffer_info_map
def _verify_conflicts(main_buf_name, conflicting_buf_names, buffer_info_map):
"""helper to check expected liveness conflicts"""
buf_info = buffer_info_map[main_buf_name]
for conflict in buf_info.conflicts:
assert conflict.name_hint in conflicting_buf_names
def _get_allocates(primfunc):
    """Collect every tvm.tir.Allocate in *primfunc*, keyed by buffer-var name."""
    found = {}

    def _visit(node):
        if isinstance(node, tvm.tir.Allocate):
            found[str(node.buffer_var.name)] = node

    stmt_functor.post_order_visit(primfunc.body, _visit)
    return found
def _assign_poolinfos_to_allocates_in_primfunc(primfunc, pool_infos):
    """Return *primfunc* with every tir.Allocate annotated with *pool_infos*.

    The candidate-memory-pool annotation is what the USMP algorithms later
    read to decide which pools a buffer may be placed in.
    """

    def _annotate(stmt):
        if not isinstance(stmt, tvm.tir.Allocate):
            return None  # leave every other node untouched
        return tvm.tir.Allocate(
            buffer_var=stmt.buffer_var,
            dtype=stmt.dtype,
            extents=stmt.extents,
            condition=stmt.condition,
            body=stmt.body,
            annotations={tvm.tir.usmp.utils.CANDIDATE_MEMORY_POOL_ATTR: pool_infos},
        )

    return primfunc.with_body(stmt_functor.ir_transform(primfunc.body, None, _annotate))
def _assign_poolinfos_to_allocates_in_irmodule(mod, pool_infos):
    """Annotate the allocates of every PrimFunc in *mod* with candidate pools.

    Note: only tir.PrimFunc entries are copied into the returned module.
    """
    annotated = tvm.IRModule()
    for gvar, func in mod.functions.items():
        if isinstance(func, tvm.tir.PrimFunc):
            annotated[gvar] = _assign_poolinfos_to_allocates_in_primfunc(func, pool_infos)
    return annotated
def _assign_targets_to_primfuncs_irmodule(mod, target):
    """Return a module whose PrimFuncs carry a "target" attribute.

    Note: only tir.PrimFunc entries are copied into the returned module.
    """
    retargeted = tvm.IRModule()
    for gvar, func in mod.functions.items():
        if isinstance(func, tvm.tir.PrimFunc):
            retargeted[gvar] = func.with_attr("target", target)
    return retargeted
def _check_max_workspace_size(buffer_pool_allocations, pool_info, size):
max_workspace_size = 0
for buffer_info, pool_allocation in buffer_pool_allocations.items():
if pool_allocation.pool_info == pool_info:
size_candidate = pool_allocation.byte_offset + buffer_info.size_bytes
if size_candidate > max_workspace_size:
max_workspace_size = size_candidate
assert max_workspace_size == size
def test_no_pool_error():
    """The allocator must raise when no candidate pool can fit the buffers."""
    target = Target("c")
    # A 10-byte pool cannot hold three mutually conflicting 10-byte buffers.
    tiny_pool = WorkspacePoolInfo(
        "tiny_workspace",
        [target],
        PoolInfoProperties(size_hint_bytes=10),
    )
    infos = [
        usmp_utils.BufferInfo(name_hint=name, size_bytes=10, pool_candidates=[tiny_pool])
        for name in ("bi_a", "bi_b", "bi_c")
    ]
    # Cyclic conflicts: every buffer is live together with another one.
    infos[0].set_conflicts([infos[1]])
    infos[1].set_conflicts([infos[2]])
    infos[2].set_conflicts([infos[0]])
    algo = tvm.get_global_func("tir.usmp.algo.greedy_by_size")
    with pytest.raises(
        tvm.TVMError, match="TVM USMP Error: the space available in the provided pools exceeded"
    ):
        algo(infos, 0)
@pytest.mark.parametrize("algorithm", ["greedy_by_size", "greedy_by_conflicts", "hill_climb"])
def test_name_based_ordering(algorithm):
    """With identical sizes and conflicts the result must be stable.

    Ties can only be broken by the buffer names, so repeated runs have to
    produce the same offsets every time.
    """

    def _run_once():
        target = Target("c")
        pool = WorkspacePoolInfo(
            "global_workspace",
            [target],
        )
        infos = [
            usmp_utils.BufferInfo(name_hint=name, size_bytes=10, pool_candidates=[pool])
            for name in ("bi_a", "bi_b", "bi_c")
        ]
        bi_a, bi_b, bi_c = infos
        bi_a.set_conflicts([bi_b, bi_c])
        bi_b.set_conflicts([bi_c, bi_a])
        bi_c.set_conflicts([bi_a, bi_b])
        algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
        allocations = algo(infos, 0)
        assert allocations[bi_a].byte_offset == 20
        assert allocations[bi_b].byte_offset == 10
        assert allocations[bi_c].byte_offset == 0

    # Repeat to make sure the tie-breaking is deterministic.
    for _ in range(10):
        _run_once()
@pytest.mark.parametrize(
    ["algorithm", "workspace_size"],
    [("greedy_by_size", 140), ("greedy_by_conflicts", 140), ("hill_climb", 140)],
)
def test_linear(algorithm, workspace_size):
    """Plan a purely linear operator chain.

    Buffers bi_a .. bi_f are produced one after another, so each buffer is
    only ever live together with its immediate predecessor and successor:

        (Op A) -> bi_a -> (Op B) -> bi_b -> ... -> (Op F) -> bi_f
    """
    target = Target("c")
    pool = WorkspacePoolInfo(
        "global_workspace",
        [target],
    )
    sizes = {"bi_a": 10, "bi_b": 20, "bi_c": 100, "bi_d": 40, "bi_e": 50, "bi_f": 50}
    infos = [
        usmp_utils.BufferInfo(name_hint=name, size_bytes=nbytes, pool_candidates=[pool])
        for name, nbytes in sizes.items()
    ]
    # Chain topology: every buffer conflicts only with its neighbours.
    for idx, info in enumerate(infos):
        neighbours = infos[max(idx - 1, 0) : idx] + infos[idx + 1 : idx + 2]
        info.set_conflicts(neighbours)
    algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
    allocations = algo(infos, 0)
    _check_max_workspace_size(allocations, pool, workspace_size)
@pytest.mark.parametrize(
    ["algorithm", "workspace_size"],
    [("greedy_by_size", 190), ("greedy_by_conflicts", 320), ("hill_climb", 190)],
)
def test_fanout(algorithm, workspace_size):
    """
    The test case here represent BufferInfo objects
    that could get generated for a fanout topology
    such as :
    (Op A)
    |
    bi_a ---------
    |            |
    (Op B)     (Op C)
    |            |
    bi_b        bi_c
    |            |
    (Op D)     (Op E)
    |            |
    bi_d        bi_e
    |            |
    (Op F) ------
    |
    bi_f
    |
    (Op G)
    |
    bi_g
    """
    target = Target("c")
    global_workspace_pool = WorkspacePoolInfo(
        "global_workspace",
        targets=[target],
    )
    bi_a = usmp_utils.BufferInfo(
        name_hint="bi_a", size_bytes=10, pool_candidates=[global_workspace_pool]
    )
    bi_b = usmp_utils.BufferInfo(
        name_hint="bi_b", size_bytes=20, pool_candidates=[global_workspace_pool]
    )
    bi_c = usmp_utils.BufferInfo(
        name_hint="bi_c", size_bytes=100, pool_candidates=[global_workspace_pool]
    )
    bi_d = usmp_utils.BufferInfo(
        name_hint="bi_d", size_bytes=40, pool_candidates=[global_workspace_pool]
    )
    bi_e = usmp_utils.BufferInfo(
        name_hint="bi_e", size_bytes=50, pool_candidates=[global_workspace_pool]
    )
    bi_f = usmp_utils.BufferInfo(
        name_hint="bi_f", size_bytes=60, pool_candidates=[global_workspace_pool]
    )
    bi_g = usmp_utils.BufferInfo(
        name_hint="bi_g", size_bytes=70, pool_candidates=[global_workspace_pool]
    )
    # Creating conflicts for the fanout graph sketched in the docstring.
    bi_a.set_conflicts([bi_b, bi_c])
    bi_b.set_conflicts([bi_a, bi_c, bi_e])
    bi_c.set_conflicts([bi_e, bi_a, bi_b, bi_d])
    bi_d.set_conflicts([bi_b, bi_f, bi_c, bi_e])
    bi_e.set_conflicts([bi_c, bi_f, bi_b, bi_d])
    # NOTE(review): bi_f lists *itself* as a conflict, and bi_g's conflict
    # with bi_f is only recorded one way.  The expected workspace sizes
    # above are tuned to this exact conflict set, so confirm the intent
    # before "fixing" either of these.
    bi_f.set_conflicts([bi_d, bi_e, bi_f])
    bi_g.set_conflicts([bi_f])
    buffer_info_arr = [bi_a, bi_b, bi_c, bi_d, bi_e, bi_f, bi_g]
    fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
    buffer_pool_allocations = fusmp_algo(buffer_info_arr, 0)
    _check_max_workspace_size(buffer_pool_allocations, global_workspace_pool, workspace_size)
# fmt: off
# TVMScript fixture for the mobilenet-subgraph test below.  It contains three
# operator PrimFuncs (uint8->int16 cast/subtract, a 7x7 conv2d with
# requantize/clip/cast, and a max_pool2d with cast) plus the `run_model`
# runner that allocates the two intermediate buffers (sid_9, sid_8) and calls
# the operators in sequence.  Comments are kept outside the class body so the
# TVMScript parser sees the functions unchanged.
@tvm.script.ir_module
class MobilenetStructure:
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])

    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast(placeholder_62: T.handle, placeholder_63: T.handle, placeholder_64: T.handle, T_cast_20: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", "tir.noalias": True})
        placeholder_65 = T.match_buffer(placeholder_62, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_66 = T.match_buffer(placeholder_63, [9408], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_67 = T.match_buffer(placeholder_64, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_21 = T.match_buffer(T_cast_20, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_7 = T.decl_buffer([157323], "int16")
        for i0_i1_fused_7 in T.serial(0, 229):
            for i2_7, i3_7 in T.grid(229, 3):
                PaddedInput_7[(((i0_i1_fused_7*687) + (i2_7*3)) + i3_7)] = T.if_then_else(((((2 <= i0_i1_fused_7) and (i0_i1_fused_7 < 226)) and (2 <= i2_7)) and (i2_7 < 226)), placeholder_65[((((i0_i1_fused_7*672) + (i2_7*3)) + i3_7) - 1350)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_7 in T.serial(0, 12544):
            Conv2dOutput_7 = T.decl_buffer([64], "int32")
            for ff_3 in T.serial(0, 64):
                Conv2dOutput_7[ff_3] = 0
                for ry_2, rx_2, rc_7 in T.grid(7, 7, 3):
                    Conv2dOutput_7[ff_3] = (Conv2dOutput_7[ff_3] + (T.cast(PaddedInput_7[(((((T.floordiv(ax0_ax1_fused_ax2_fused_7, 112)*1374) + (ry_2*687)) + (T.floormod(ax0_ax1_fused_ax2_fused_7, 112)*6)) + (rx_2*3)) + rc_7)], "int32")*T.cast(placeholder_66[((((ry_2*1344) + (rx_2*192)) + (rc_7*64)) + ff_3)], "int32")))
            for ax3_inner_7 in T.serial(0, 64):
                T_cast_21[((ax0_ax1_fused_ax2_fused_7*64) + ax3_inner_7)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_7[ax3_inner_7] + placeholder_67[ax3_inner_7]), 1939887962, 31, -9, dtype="int32"), 255), 0), "uint8")

    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d_cast(placeholder_28: T.handle, T_cast_6: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast", "tir.noalias": True})
        placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_7 = T.match_buffer(T_cast_6, [200704], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        tensor_2 = T.decl_buffer([200704], "uint8")
        for ax0_ax1_fused_4 in T.serial(0, 56):
            for ax2_4 in T.serial(0, 56):
                for ax3_init in T.serial(0, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_init)] = T.uint8(0)
                for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)] = T.max(tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)], T.if_then_else(((((ax0_ax1_fused_4*2) + T.floordiv(rv0_rv1_fused_1, 3)) < 112) and (((ax2_4*2) + T.floormod(rv0_rv1_fused_1, 3)) < 112)), placeholder_29[(((((ax0_ax1_fused_4*14336) + (T.floordiv(rv0_rv1_fused_1, 3)*7168)) + (ax2_4*128)) + (T.floormod(rv0_rv1_fused_1, 3)*64)) + ax3_2)], T.uint8(0), dtype="uint8"))
        for ax0_ax1_fused_5 in T.serial(0, 56):
            for ax2_5, ax3_3 in T.grid(56, 64):
                T_cast_7[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)] = T.cast(tensor_2[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)], "int16")

    @T.prim_func
    def run_model(input: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        sid_9 = T.allocate([301056], "int8", "global")
        sid_8 = T.allocate([802816], "int8", "global")
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8, output, dtype="int32"))
    __tvm_meta__ = None
# fmt: on
@pytest.mark.parametrize(
    ["algorithm", "fast_memory_size", "slow_memory_size"],
    [
        ("greedy_by_size", 200704, 1418528),
        ("greedy_by_conflicts", 200704, 1418528),
        ("hill_climb", 200704, 1117462),
    ],
)
def test_mobilenet_subgraph(algorithm, fast_memory_size, slow_memory_size):
    """End-to-end planning of the mobilenet subgraph against two pools."""
    target = Target("c")
    fast_memory_pool = WorkspacePoolInfo(
        "fast_memory",
        [target],
        PoolInfoProperties(size_hint_bytes=200704),
    )
    slow_memory_pool = WorkspacePoolInfo(
        "slow_memory",
        [target],
    )
    tir_mod = _assign_targets_to_primfuncs_irmodule(MobilenetStructure, target)
    tir_mod = _assign_poolinfos_to_allocates_in_irmodule(
        tir_mod, [fast_memory_pool, slow_memory_pool]
    )
    analysis = tvm.tir.usmp.analysis.extract_buffer_info(tir_mod["run_model"], tir_mod)
    assert analysis.memory_pressure == 1117718
    make_array = tvm.get_global_func("tir.usmp.CreateArrayBufferInfo")
    buffer_info_arr = make_array(analysis.buffer_info_stmts)
    algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
    allocations = algo(buffer_info_arr, analysis.memory_pressure)
    by_name = {info.name_hint: info for info in buffer_info_arr}
    # check conflicts
    _verify_conflicts("PaddedInput_7", ["sid_9", "sid_8", "Conv2dOutput_7"], by_name)
    _verify_conflicts("tensor_2", ["sid_8"], by_name)
    _verify_conflicts("sid_9", ["PaddedInput_7"], by_name)
    _verify_conflicts("sid_8", ["PaddedInput_7", "Conv2dOutput_7", "tensor_2"], by_name)
    _verify_conflicts("Conv2dOutput_7", ["sid_8", "PaddedInput_7"], by_name)
    _check_max_workspace_size(allocations, slow_memory_pool, slow_memory_size)
    _check_max_workspace_size(allocations, fast_memory_pool, fast_memory_size)
# fmt: off
@tvm.script.ir_module
class ResnetStructure:
@T.prim_func
def tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast(placeholder: T.handle, placeholder_1: T.handle, T_cast: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast", "tir.noalias": True})
placeholder_2 = T.match_buffer(placeholder, [360000], dtype="uint8")
placeholder_3 = T.match_buffer(placeholder_1, [64], dtype="int32")
T_cast_1 = T.match_buffer(T_cast, [360000], dtype="int16")
# body
for ax0_ax1_fused, ax2, ax3_outer, ax3_inner in T.grid(75, 75, 4, 16):
T_cast_1[ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(T.cast(placeholder_2[ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner], "int32") - 94, 1843157232, 31, 1, dtype="int32") + placeholder_3[ax3_outer * 16 + ax3_inner], 255), 0), "uint8"), "int16")
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1(placeholder_10: T.handle, placeholder_11: T.handle, placeholder_12: T.handle, T_cast_4: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", "tir.noalias": True})
placeholder_13 = T.match_buffer(placeholder_10, [360000], dtype="int16")
placeholder_14 = T.match_buffer(placeholder_11, [36864], dtype="int16")
placeholder_15 = T.match_buffer(placeholder_12, [64], dtype="int32")
T_cast_5 = T.match_buffer(T_cast_4, [360000], dtype="int16")
# body
PaddedInput_1 = T.decl_buffer([379456], "int16")
for i0_i1_fused_1, i2_1, i3_1 in T.grid(77, 77, 64):
PaddedInput_1[i0_i1_fused_1 * 4928 + i2_1 * 64 + i3_1] = T.if_then_else(1 <= i0_i1_fused_1 and i0_i1_fused_1 < 76 and 1 <= i2_1 and i2_1 < 76, placeholder_13[i0_i1_fused_1 * 4800 + i2_1 * 64 + i3_1 - 4864], T.int16(0), dtype="int16")
for ax0_ax1_fused_ax2_fused_1 in T.serial(0, 5625):
Conv2dOutput_1 = T.decl_buffer([64], "int32")
for ff_1 in T.serial(0, 64):
Conv2dOutput_1[ff_1] = 0
for ry, rx, rc_1 in T.grid(3, 3, 64):
Conv2dOutput_1[ff_1] = Conv2dOutput_1[ff_1] + T.cast(PaddedInput_1[T.floordiv(ax0_ax1_fused_ax2_fused_1, 75) * 4928 + ry * 4928 + rx * 64 + T.floormod(ax0_ax1_fused_ax2_fused_1, 75) * 64 + rc_1], "int32") * T.cast(placeholder_14[ry * 12288 + rx * 4096 + rc_1 * 64 + ff_1], "int32")
for ax3_inner_2 in T.serial(0, 64):
T_cast_5[ax0_ax1_fused_ax2_fused_1 * 64 + ax3_inner_2] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_1[ax3_inner_2] + placeholder_15[ax3_inner_2], 1608879842, 31, -7, dtype="int32"), 255), 0), "uint8"), "int16")
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_(placeholder_16: T.handle, placeholder_17: T.handle, placeholder_18: T.handle, T_add: T.handle) -> None:
    # Fused 1x1 conv2d (64 -> 256 channels, emitted in 4 blocks of 64) followed
    # by bias-add, two fixed-point multiplies with an intermediate uint8
    # clip/cast round-trip, and a final add.  Output is int32.
    # function attr dict
    T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_", "tir.noalias": True})
    placeholder_19 = T.match_buffer(placeholder_16, [360000], dtype="int16")  # input activations
    placeholder_20 = T.match_buffer(placeholder_17, [16384], dtype="int16")  # 64x256 weights
    placeholder_21 = T.match_buffer(placeholder_18, [256], dtype="int32")  # per-channel bias
    T_add_1 = T.match_buffer(T_add, [1440000], dtype="int32")  # output
    # body
    # 1x1 conv needs no border padding: PaddedInput_2 is a straight copy.
    PaddedInput_2 = T.decl_buffer([360000], "int16")
    for i0_i1_fused_2, i2_2, i3_2 in T.grid(75, 75, 64):
        PaddedInput_2[i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2] = placeholder_19[i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2]
    for ax0_ax1_fused_ax2_fused_2 in T.serial(0, 5625):
        Conv2dOutput_2 = T.decl_buffer([64], "int32")  # reused for each of the 4 channel blocks
        for ax3_outer_1 in T.serial(0, 4):
            for ff_2 in T.serial(0, 64):
                Conv2dOutput_2[ff_2] = 0
                for rc_2 in T.serial(0, 64):
                    Conv2dOutput_2[ff_2] = Conv2dOutput_2[ff_2] + T.cast(PaddedInput_2[ax0_ax1_fused_ax2_fused_2 * 64 + rc_2], "int32") * T.cast(placeholder_20[rc_2 * 256 + ax3_outer_1 * 64 + ff_2], "int32")
            # Requantize, re-center by +/-132, clip to uint8, requantize again, add 136.
            for ax3_inner_3 in T.serial(0, 64):
                T_add_1[ax0_ax1_fused_ax2_fused_2 * 256 + ax3_outer_1 * 64 + ax3_inner_3] = T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_2[ax3_inner_3] + placeholder_21[ax3_outer_1 * 64 + ax3_inner_3], 1711626602, 31, -8, dtype="int32") + 132, 255), 0), "uint8"), "int32") - 132, 2094289803, 31, -2, dtype="int32") + 136
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_(placeholder_22: T.handle, placeholder_23: T.handle, placeholder_24: T.handle, placeholder_25: T.handle, T_cast_6: T.handle) -> None:
    # Like the sibling 1x1-conv kernel above, but takes a fourth tensor input
    # (placeholder_28, the residual branch) that is added before the final
    # clip-to-uint8.  This forms the element-wise merge of a residual block.
    # function attr dict
    T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_", "tir.noalias": True})
    placeholder_29 = T.match_buffer(placeholder_22, [360000], dtype="int16")  # input activations
    placeholder_27 = T.match_buffer(placeholder_23, [16384], dtype="int16")  # 64x256 weights
    placeholder_26 = T.match_buffer(placeholder_24, [256], dtype="int32")  # per-channel bias
    placeholder_28 = T.match_buffer(placeholder_25, [1440000], dtype="int32")  # residual to add
    T_cast_7 = T.match_buffer(T_cast_6, [1440000], dtype="uint8")  # output
    # body
    # 1x1 conv needs no border padding: PaddedInput_3 is a straight copy.
    PaddedInput_3 = T.decl_buffer([360000], "int16")
    for i0_i1_fused_3, i2_3, i3_3 in T.grid(75, 75, 64):
        PaddedInput_3[i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3] = placeholder_29[i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3]
    for ax0_ax1_fused_ax2_fused_3 in T.serial(0, 5625):
        Conv2dOutput_3 = T.decl_buffer([64], "int32")  # reused for each of the 4 channel blocks
        for ax3_outer_2 in T.serial(0, 4):
            for ff_3 in T.serial(0, 64):
                Conv2dOutput_3[ff_3] = 0
                for rc_3 in T.serial(0, 64):
                    Conv2dOutput_3[ff_3] = Conv2dOutput_3[ff_3] + T.cast(PaddedInput_3[ax0_ax1_fused_ax2_fused_3 * 64 + rc_3], "int32") * T.cast(placeholder_27[rc_3 * 256 + ax3_outer_2 * 64 + ff_3], "int32")
            # Requantize, clip, requantize, add the residual, clip again, cast to uint8.
            for ax3_inner_4 in T.serial(0, 64):
                T_cast_7[ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4] = T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_3[ax3_inner_4] + placeholder_26[ax3_outer_2 * 64 + ax3_inner_4], 1343014664, 31, -8, dtype="int32") + 136, 255), 0), "uint8"), "int32") - 136, 1073903788, 31, 1, dtype="int32") + placeholder_28[ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4], 255), 0), "uint8")
@T.prim_func
def tvmgen_default_run_model(input: T.handle, output: T.handle) -> None:
    # Top-level driver ("runner_function"): allocates the intermediate
    # activation buffers (sid_*) and chains the fused kernels above via
    # call_extern.  The USMP tests below plan memory for these allocations.
    # function attr dict
    T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
    # body
    T.attr("default", "device_id", 0)
    T.attr("default", "device_type", 1)
    sid_2 = T.allocate([720000], "int8", "global")
    sid_6 = T.allocate([5760000], "int8", "global")
    sid_7 = T.allocate([720000], "int8", "global")
    sid_8 = T.allocate([720000], "int8", "global")
    T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast", input, T.lookup_param("p0", dtype="handle"), sid_2, dtype="int32"))
    T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", sid_2, T.lookup_param("p3", dtype="handle"), T.lookup_param("p4", dtype="handle"), sid_8, dtype="int32"))
    T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", sid_8, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), sid_7, dtype="int32"))
    T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_", sid_7, T.lookup_param("p7", dtype="handle"), T.lookup_param("p8", dtype="handle"), sid_6, dtype="int32"))
    # Final kernel consumes both the skip connection (sid_2) and sid_6 and
    # writes the model output.
    T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_", sid_2, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_6, output, dtype="int32"))
@T.prim_func
def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast(placeholder_4: T.handle, placeholder_5: T.handle, placeholder_6: T.handle, T_cast_2: T.handle) -> None:
    # Fused 1x1 conv2d (64 -> 64 channels) + bias-add + fixed-point requantize
    # + clip-to-[0,255] + cast to int16.
    # function attr dict
    T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", "tir.noalias": True})
    placeholder_7 = T.match_buffer(placeholder_4, [360000], dtype="int16")  # input activations
    placeholder_8 = T.match_buffer(placeholder_5, [4096], dtype="int16")  # 64x64 weights
    placeholder_9 = T.match_buffer(placeholder_6, [64], dtype="int32")  # per-channel bias
    T_cast_3 = T.match_buffer(T_cast_2, [360000], dtype="int16")  # output
    # body
    # 1x1 conv needs no border padding: PaddedInput is a straight copy.
    PaddedInput = T.decl_buffer([360000], "int16")
    for i0_i1_fused, i2, i3 in T.grid(75, 75, 64):
        PaddedInput[i0_i1_fused * 4800 + i2 * 64 + i3] = placeholder_7[i0_i1_fused * 4800 + i2 * 64 + i3]
    for ax0_ax1_fused_ax2_fused in T.serial(0, 5625):
        Conv2dOutput = T.decl_buffer([64], "int32")  # per-pixel accumulator
        for ff in T.serial(0, 64):
            Conv2dOutput[ff] = 0
            for rc in T.serial(0, 64):
                Conv2dOutput[ff] = Conv2dOutput[ff] + T.cast(PaddedInput[ax0_ax1_fused_ax2_fused * 64 + rc], "int32") * T.cast(placeholder_8[rc * 64 + ff], "int32")
        for ax3_inner_1 in T.serial(0, 64):
            T_cast_3[ax0_ax1_fused_ax2_fused * 64 + ax3_inner_1] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput[ax3_inner_1] + placeholder_9[ax3_inner_1], 1843106743, 31, -6, dtype="int32"), 255), 0), "uint8"), "int16")
# No extra metadata is attached to the hand-written TVMScript module above.
__tvm_meta__ = None
# Re-enable auto-formatting (pairs with an earlier `# fmt: off`).
# fmt: on
@pytest.mark.parametrize(
    ["algorithm", "workspace_size"],
    [("greedy_by_size", 7920256), ("greedy_by_conflicts", 7200256), ("hill_climb", 7200256)],
)
def test_resnet_subgraph(algorithm, workspace_size):
    """Run each USMP algorithm on the ResNet subgraph and check conflicts and pool size."""
    target = Target("c")
    global_workspace_pool = WorkspacePoolInfo(
        "global_workspace",
        [target],
    )
    tir_mod = _assign_targets_to_primfuncs_irmodule(ResnetStructure, target)
    tir_mod = _assign_poolinfos_to_allocates_in_irmodule(tir_mod, [global_workspace_pool])
    main_func = tir_mod["tvmgen_default_run_model"]
    buffer_info_analysis = tvm.tir.usmp.analysis.extract_buffer_info(main_func, tir_mod)
    assert buffer_info_analysis.memory_pressure == 7200256
    fcreate_array_bi = tvm.get_global_func("tir.usmp.CreateArrayBufferInfo")
    buffer_info_arr = fcreate_array_bi(buffer_info_analysis.buffer_info_stmts)
    fusmp_algo = tvm.get_global_func(f"tir.usmp.algo.{algorithm}")
    buffer_pool_allocations = fusmp_algo(buffer_info_arr, buffer_info_analysis.memory_pressure)
    buffer_info_map_names = {info.name_hint: info for info in buffer_info_arr}
    # check conflicts: each buffer's expected liveness-conflict set, verified
    # in the same order as before (dicts preserve insertion order).
    expected_conflicts = {
        "sid_7": ["PaddedInput_1", "sid_2", "Conv2dOutput_1", "PaddedInput_2"],
        "Conv2dOutput_3": ["PaddedInput_3", "sid_6"],
        "sid_6": ["Conv2dOutput_2", "PaddedInput_2", "sid_2", "PaddedInput_3", "Conv2dOutput_3"],
        "Conv2dOutput": ["sid_8", "sid_2", "PaddedInput"],
        "PaddedInput_3": ["sid_6", "sid_2", "Conv2dOutput_3"],
        "Conv2dOutput_2": ["PaddedInput_2", "sid_2", "sid_6"],
        "PaddedInput_1": ["sid_8", "sid_2", "sid_7", "Conv2dOutput_1"],
        "Conv2dOutput_1": ["sid_7", "PaddedInput_1", "sid_2"],
        "PaddedInput": ["sid_2", "sid_8", "Conv2dOutput"],
        "sid_8": ["PaddedInput", "sid_2", "Conv2dOutput", "PaddedInput_1"],
        "sid_2": [
            "PaddedInput",
            "sid_8",
            "Conv2dOutput",
            "PaddedInput_1",
            "sid_7",
            "Conv2dOutput_1",
            "PaddedInput_2",
            "Conv2dOutput_2",
            "sid_6",
            "PaddedInput_3",
        ],
        "PaddedInput_2": ["sid_7", "sid_2", "Conv2dOutput_2", "sid_6"],
    }
    for buffer_name, conflicting_buffers in expected_conflicts.items():
        _verify_conflicts(buffer_name, conflicting_buffers, buffer_info_map_names)
    _check_max_workspace_size(buffer_pool_allocations, global_workspace_pool, workspace_size)
def test_custom_algo():
    """A user-registered USMP algorithm is picked up only via PassContext config."""
    target = Target("c")
    global_workspace_pool = WorkspacePoolInfo(
        "global_workspace",
        [target],
    )
    tir_mod = _assign_targets_to_primfuncs_irmodule(ResnetStructure, target)
    tir_mod = _assign_poolinfos_to_allocates_in_irmodule(tir_mod, [global_workspace_pool])
    tir_mod = tir_mod.with_attr("executor", tvm.relay.backend.Executor("aot"))
    tir_mod = tir_mod.with_attr("runtime", tvm.relay.backend.Runtime("crt"))
    tir_mod["__tvm_main__"] = tir_mod[
        "tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast"
    ]
    algo_called = False

    @tvm.register_func("tir.usmp.algo.trivial")
    def _trivial_algo(buf_infos, mem_pressure):
        # Places every buffer back-to-back in its first candidate pool,
        # with no overlap analysis; also records that it was invoked.
        nonlocal algo_called
        algo_called = True
        layout = {}
        running_offset = 0
        for info in buf_infos:
            layout[info] = usmp_utils.PoolAllocation(info.pool_candidates[0], running_offset)
            running_offset += info.size_bytes
        return layout

    usmp_pass = tvm.get_global_func("tir.transform.UnifiedStaticMemoryPlanner")
    # Without the config option the custom algorithm must NOT run.
    usmp_pass()(tir_mod)
    assert not algo_called
    with tvm.transform.PassContext(config={"tir.usmp.custom_algorithm": "trivial"}):
        usmp_pass()(tir_mod)
    assert algo_called
    # An unregistered algorithm name must raise a descriptive error.
    with pytest.raises(
        tvm.TVMError, match="The selected custom USMP algorithm : invalid is not defined"
    ):
        with tvm.transform.PassContext(config={"tir.usmp.custom_algorithm": "invalid"}):
            usmp_pass()(tir_mod)
| 33,461 | 44.77565 | 472 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_profiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test Meta Schedule Profiler """
import time
from tvm import meta_schedule as ms
def test_meta_schedule_profiler_context_manager():
    """Nested Profiler.timeit scopes each accumulate their own wall-clock time."""
    with ms.Profiler() as profiler:
        time.sleep(1)
        with ms.Profiler.timeit("Level0"):
            time.sleep(1)
            with ms.Profiler.timeit("Level1"):
                time.sleep(2)
    # Note that the results are in seconds
    result = profiler.get()
    assert len(result) == 3
    # Each scope should be within 0.1s of its nominal sleep total.
    for scope_name, nominal in (("Total", 4), ("Level0", 3), ("Level1", 2)):
        assert nominal - 0.1 <= result[scope_name] <= nominal + 0.1
def test_meta_schedule_no_context():
    """Without an enclosing Profiler, a timeit scope sees no current profiler."""
    with ms.Profiler.timeit("Level0"):
        current_profiler = ms.Profiler.current()
        assert current_profiler is None
if __name__ == "__main__":
    # Allow running this file directly, executing the tests in order.
    for _entry_point in (
        test_meta_schedule_profiler_context_manager,
        test_meta_schedule_no_context,
    ):
        _entry_point()
| 1,590 | 32.851064 | 62 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_prim_func_pass.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
def test_prim_func_pass():
    """A prim_func_pass defined as a class can swap a PrimFunc wholesale."""

    @tvm.tir.transform.prim_func_pass(opt_level=1)
    class TestReplaceFunc:
        """Simple test function to replace one argument to another."""

        def __init__(self, new_func):
            self.new_func = new_func

        def transform_function(self, func, mod, ctx):
            # Ignore the incoming function entirely; return the replacement.
            return self.new_func

    x = te.var("x")
    y = te.var("y")
    buf = tvm.tir.decl_buffer((x,), "float32")
    original_body = tvm.tir.LetStmt(x, 10, tvm.tir.Evaluate(x + 1))
    original = tvm.tir.PrimFunc([x, y, buf], original_body)
    replacement = tvm.tir.PrimFunc([x, y, buf], tvm.tir.Evaluate(0))
    mod = TestReplaceFunc(replacement)(tvm.IRModule({"main": original}))
    assert tvm.ir.structural_equal(mod["main"].body, replacement.body)
def test_cow_pass():
    """Copy-on-write: passes applied to a moved module update objects in place.

    NOTE(review): this test is sensitive to exact reference counts and to
    object hashes; do not reorder or restyle the statements.
    """

    def fapply(f):
        # Inside the pass the PrimFunc must be uniquely owned -- that is what
        # lets the copy-on-write machinery mutate it instead of copying.
        assert tvm.testing.object_use_count(f) == 1
        return f

    pidentity = tvm.tir.transform.Apply(fapply)
    x = te.var("x")
    func = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x)).with_attr("target_bits", 32)
    func_hash = func.__hash__()
    mod = tvm.IRModule({"main": func})
    # Drop the local reference so the module holds the only one.
    del func
    # copy on write
    mod_hash = mod.__hash__()
    mod = tvm.transform.Sequential([pidentity, tvm.tir.transform.NarrowDataType(32)])(mod._move())
    # Hashes unchanged => the module and its function were reused, not copied.
    assert mod_hash == mod.__hash__()
    assert func_hash == mod["main"].__hash__()
if __name__ == "__main__":
    # Preserve the original execution order: COW test first, then replace test.
    for _entry_point in (test_cow_pass, test_prim_func_pass):
        _entry_point()
| 2,238 | 31.449275 | 98 | py |
tvm | tvm-main/tests/python/unittest/test_tir_analysis_calculate_allocated_memory.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import pytest
import tvm
from tvm import tir
from tvm.script import tir as T
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,no-self-argument,line-too-long,chained-comparison,not-callable,too-many-nested-blocks
@tvm.script.ir_module
class Module:
    # Fixture module: two simple element-wise kernels used by the allocation
    # size tests below.  Block "C" is the one the schedules cache-read from.

    @T.prim_func
    def scale_by_two(a: T.Buffer((128,), "int8"), c: T.Buffer((128,), "int8")):
        for i in T.serial(128):
            with T.block("C"):
                c[i] = a[i] * T.int8(2)

    @T.prim_func
    def scale_by_two_three(a: T.Buffer((128,), "int8"), c: T.Buffer((128,), "int8")):
        # Intermediate B lives in VTCM, so this function already allocates
        # 128 bytes of "global.vtcm" before any schedule adds a cache stage.
        B = T.alloc_buffer([128], dtype="int8", scope="global.vtcm")
        for i in T.serial(128):
            with T.block("B"):
                B[i] = a[i] * T.int8(2)
        for i in T.serial(128):
            with T.block("C"):
                c[i] = B[i] * T.int8(3)
# pylint: enable=no-member,invalid-name,unused-variable,no-self-argument,line-too-long,chained-comparison,not-callable,too-many-nested-blocks
# fmt: on
@pytest.mark.parametrize(
    "primFunc,size", [(Module["scale_by_two"], 128), (Module["scale_by_two_three"], 256)]
)
def test_scale_by(primFunc, size):
    """Test calculate allocated bytes per scope"""
    sch = tir.Schedule(
        tvm.IRModule.from_expr(primFunc.with_attr("global_symbol", "main")), debug_mask="all"
    )
    block_c = sch.get_block("C")
    (flat,) = sch.get_loops(block_c)
    # Add a VTCM cache-read stage in front of block C.
    vtcm_cache = sch.cache_read(block_c, 0, storage_scope="global.vtcm")
    sch.compute_at(vtcm_cache, flat)
    lowered = sch.mod
    for transform in (
        tvm.tir.transform.ConvertBlocksToOpaque(),
        tvm.tir.transform.LowerOpaqueBlock(),
    ):
        lowered = transform(lowered)
    sizes = tvm.tir.analysis.calculate_allocated_bytes(lowered["main"])
    assert "main" in sizes, 'Calls with PrimFunc is expected to return with function key as "main"'
    per_scope = sizes["main"]
    assert per_scope.get("global.vtcm", 0) == size
@T.prim_func
def matmul_mix_scope(a: T.handle, b: T.handle, c: T.handle) -> None:
    # 128x128 matmul fixture that mixes memory scopes: the staging copies of
    # A and B live in "global.texture" while the accumulator lives in
    # "global", giving distinct per-scope allocation totals to check.
    A = T.match_buffer(a, [128, 128], scope="global")
    B = T.match_buffer(b, [128, 128], scope="global")
    C = T.match_buffer(c, [128, 128], scope="global")
    A_allocated = T.alloc_buffer([128, 128], dtype="float32", scope="global.texture")
    B_allocated = T.alloc_buffer([128, 128], dtype="float32", scope="global.texture")
    C_allocated = T.alloc_buffer([128, 128], dtype="float32", scope="global")
    for i, j in T.grid(128, 128):
        with T.block("A.allocated"):
            A_allocated[i, j] = A[i, j]
    for i, j in T.grid(128, 128):
        with T.block("B.allocated"):
            B_allocated[i, j] = B[i, j]
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C_allocated[vi, vj] = 0.0
            # NOTE(review): the update reads C[vi, vj] rather than the
            # accumulator C_allocated[vi, vj]; numerically odd for a matmul,
            # but irrelevant here since only allocation sizes are tested.
            C_allocated[vi, vj] = C[vi, vj] + A_allocated[vi, vk] * B_allocated[vj, vk]
    for i, j in T.grid(128, 128):
        with T.block("C"):
            C[i, j] = C_allocated[i, j]
@pytest.mark.parametrize(
    "scope,size", [("global", 65536), ("global.texture", 131072), ("global.texture-nhwc", 0)]
)
def test_matmul_mix_scope(scope, size):
    """Test calculate allocated bytes per scope"""
    lowered = tvm.IRModule({"main": matmul_mix_scope})
    for transform in (
        tvm.tir.transform.LowerInitBlock(),
        tvm.tir.transform.ConvertBlocksToOpaque(),
        tvm.tir.transform.LowerOpaqueBlock(),
    ):
        lowered = transform(lowered)
    sizes = tvm.tir.analysis.calculate_allocated_bytes(lowered["main"])
    assert "main" in sizes, 'Calls with PrimFunc is expected to return with function key as "main"'
    per_scope = sizes["main"]
    assert per_scope.get(scope, 0) == size
def test_full_mod_calculator():
    """calculate_allocated_bytes on a whole IRModule reports every function."""
    sch = tvm.tir.Schedule(Module, debug_mask="all")
    # Add a VTCM cache-read stage to block C of both functions.
    for func_name in ("scale_by_two", "scale_by_two_three"):
        sch.work_on(func_name)
        sch.cache_read(sch.get_block("C"), 0, storage_scope="global.vtcm")
    mod = tvm.tir.transform.ConvertBlocksToOpaque()(sch.mod)
    mod = tvm.tir.transform.LowerOpaqueBlock()(mod)
    sizes = tvm.tir.analysis.calculate_allocated_bytes(mod)
    assert "scale_by_two" in sizes, "Values for scale_by_two not found"
    scale_by_two_sizes = sizes["scale_by_two"]
    assert (
        "global.vtcm" in scale_by_two_sizes
    ), "Expected global.vtcm allocation to be calculated scale_by_two"
    assert scale_by_two_sizes["global.vtcm"] == 128, "Expected the calculated size to be 128"
    # scale_by_two_three has both its own VTCM buffer and the cache stage.
    scale_by_two_three_sizes = sizes["scale_by_two_three"]
    assert (
        "global.vtcm" in scale_by_two_three_sizes
    ), "Expected global.vtcm allocation to be calculated scale_by_two_three"
    assert scale_by_two_three_sizes["global.vtcm"] == 256, "Expected the calculated size to be 256"
# Allow running this test file directly via TVM's test entry point.
if __name__ == "__main__":
    tvm.testing.main()
| 5,741 | 39.723404 | 142 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_make_unpacked_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te, tir
from tvm.script import tir as T, ir as I
import numpy
@pytest.fixture
def mod_without_attrs():
    """An IRModule whose single PrimFunc has no target/global_symbol attrs."""
    builder = tvm.tir.ir_builder.create()
    buf = tvm.tir.decl_buffer(name="A", shape=[1])
    empty_body = builder.get()
    return tvm.IRModule.from_expr(tvm.tir.PrimFunc([buf], empty_body))
@pytest.fixture
def mod(mod_without_attrs):
    """mod_without_attrs annotated with an llvm target and a global symbol."""
    with_target = tvm.tir.transform.Apply(
        lambda f: f.with_attr("target", tvm.target.Target("llvm"))
    )(mod_without_attrs)
    return tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(with_target)
def test_noop_if_not_global_symbol(mod_without_attrs):
    """A PrimFunc without "global_symbol" is internal and must be left untouched."""
    annotate_target = tvm.tir.transform.Apply(
        lambda f: f.with_attr("target", tvm.target.Target("llvm", host="llvm"))
    )
    before = annotate_target(mod_without_attrs)
    after = tvm.tir.transform.MakeUnpackedAPI()(before)
    tvm.ir.assert_structural_equal(before, after)
def test_fails_if_no_target(mod_without_attrs):
    """MakeUnpackedAPI must reject an exposed PrimFunc with no target attribute.

    The pass result was previously bound to an unused local ``f``; the bare
    expression inside ``pytest.raises`` is sufficient, so the binding is gone.
    """
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod_without_attrs)
    with pytest.raises(
        tvm.TVMError,
        match="MakeUnpackedAPI required the function to be annotated with tvm::attr::kTarget",
    ):
        tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
@tvm.testing.parametrize_targets("c", "llvm", "cuda")
def test_device_setup(mod, target, dev):
    """The generated entry point prepends device_id/device_type AttrStmts."""
    target = tvm.target.Target(target, host="llvm")
    annotated = tvm.tir.transform.Apply(lambda f: f.with_attr("target", target))(mod)
    func = tvm.tir.transform.MakeUnpackedAPI()(annotated)["main"]
    assert len(func.params) == 1
    assert func.params[0].name == "A"
    # Outer AttrStmt carries device_id, the nested one device_type.
    for attr_stmt, expected_key, expected_value in (
        (func.body, "device_id", 0),
        (func.body.body, "device_type", dev.device_type),
    ):
        assert attr_stmt.node == "default"
        assert attr_stmt.attr_key == expected_key
        assert attr_stmt.value == expected_value
def test_no_buffers_no_device_setup():
    """A bare pointer parameter (no Buffer) passes through with its name kept."""
    builder = tvm.tir.ir_builder.create()
    ptr = builder.pointer("float32", name="A")
    body = builder.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([ptr], body))
    for annotate in (
        lambda f: f.with_attr("target", tvm.target.Target("llvm")),
        lambda f: f.with_attr("global_symbol", "main"),
    ):
        mod = tvm.tir.transform.Apply(annotate)(mod)
    func = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
    assert len(func.params) == 1
    assert func.params[0].name == "A"
def test_argument_mapping(mod):
    """A single Buffer parameter is lowered to one data pointer named "A"."""
    func = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
    assert [param.name for param in func.params] == ["A"]
def test_argument_mapping_multiple():
    """Two distinct Buffer parameters map to two data pointers, in order."""
    builder = tvm.tir.ir_builder.create()
    buf_a = tvm.tir.decl_buffer(name="A", shape=[1])
    buf_b = tvm.tir.decl_buffer(name="B", shape=[1])
    body = builder.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([buf_a, buf_b], body))
    for annotate in (
        lambda f: f.with_attr("target", tvm.target.Target("llvm")),
        lambda f: f.with_attr("global_symbol", "main"),
    ):
        mod = tvm.tir.transform.Apply(annotate)(mod)
    func = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
    assert [param.name for param in func.params] == ["A", "B"]
def test_argument_mapping_multiple_matching():
    """Passing the same Buffer twice yields two parameters with the same name.

    A second buffer ``B`` used to be declared here but was never referenced
    (only ``A`` is passed, twice); the dead declaration has been removed.
    """
    ib = tvm.tir.ir_builder.create()
    A = tvm.tir.decl_buffer(name="A", shape=[1])
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, A], stmt))
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm")))(mod)
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
    f = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
    assert len(f.params) == 2
    assert f.params[0].name == "A"
    assert f.params[1].name == "A"
def test_body():
    """A buffer's own data var passed as an extra parameter keeps the buffer's name."""
    builder = tvm.tir.ir_builder.create()
    buf_a = tvm.tir.decl_buffer(name="A", shape=[1])
    buf_b = tvm.tir.decl_buffer(name="B", shape=[1])
    # The third parameter is A's underlying data pointer, hence named "A" too.
    a_data = builder.buffer_ptr(buf_a)
    body = builder.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([buf_a, buf_b, a_data], body))
    for annotate in (
        lambda f: f.with_attr("target", tvm.target.Target("llvm")),
        lambda f: f.with_attr("global_symbol", "main"),
    ):
        mod = tvm.tir.transform.Apply(annotate)(mod)
    func = tvm.tir.transform.MakeUnpackedAPI()(mod)["main"]
    assert [param.name for param in func.params] == ["A", "B", "A"]
class TestTargetHostRemoved(tvm.testing.CompareBeforeAfter):
    """After MakeUnpackedAPI, host-side target should be the host

    MakeUnpackedAPI is the last transform that requires both the device
    and the host.  After MakeUnpackedAPI, the target attribute should
    only contain the host-side target.
    """

    transform = tvm.tir.transform.MakeUnpackedAPI()

    def before(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def main(A: T.Buffer(1, "float32")):
                T.func_attr({"global_symbol": "main", "target": T.target("cuda", host="llvm")})
                mod.subroutine(A.data)

            @T.prim_func
            def subroutine(A_data: T.handle("float32")):
                T.func_attr({"target": T.target("cuda")})
                T.evaluate(A_data)

        return mod

    def expected(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def main(A_data: T.handle("float32")) -> T.int32:
                # The exposed function keeps only the host target ("llvm") and
                # gains the device-setup attrs and an int32 success return.
                T.func_attr({"global_symbol": "main", "target": T.target("llvm")})
                T.attr("default", "device_id", 0)
                # device_type 2 matches the cuda device target above.
                T.attr("default", "device_type", 2)
                mod.subroutine(A_data)
                T.ret(T.int32(0))

            @T.prim_func
            def subroutine(A_data: T.handle("float32")):
                # Internal subroutine (no global_symbol): left unmodified.
                T.func_attr({"target": T.target("cuda")})
                T.evaluate(A_data)

        return mod
class TestInternalSubroutineCall(tvm.testing.CompareBeforeAfter):
    """Internal subroutines do not require modification

    A subroutine without the "global_symbol" attribute is an internal
    subroutine, and is not directly exposed to a user of the generated
    `runtime.Module`.
    """

    transform = tvm.tir.transform.MakeUnpackedAPI()

    def before(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def main(A: T.Buffer(1, "float32")):
                T.func_attr({"global_symbol": "main", "target": T.target("llvm", host="llvm")})
                mod.subroutine(A.data)

            @T.prim_func
            def subroutine(A_data: T.handle("float32")):
                T.func_attr({"target": T.target("llvm")})
                T.evaluate(A_data)

        return mod

    def expected(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def main(A_data: T.handle("float32")) -> T.int32:
                # Only the exposed entry point is rewritten: host-only target,
                # device setup attrs, and an int32 success return.
                T.func_attr({"global_symbol": "main", "target": T.target("llvm")})
                T.attr("default", "device_id", 0)
                T.attr("default", "device_type", 1)
                mod.subroutine(A_data)
                T.ret(T.int32(0))

            @T.prim_func
            def subroutine(A_data: T.handle("float32")):
                # Internal subroutine stays byte-for-byte identical.
                T.func_attr({"target": T.target("llvm")})
                T.evaluate(A_data)

        return mod
class TestSubroutineCallToExternallyVisibleSubroutine(tvm.testing.CompareBeforeAfter):
    """Externally-visible subroutines should be updated

    Subroutines that are exposed externally should be updated by
    MakeUnpackedAPI.
    """

    transform = tvm.tir.transform.MakeUnpackedAPI()

    def before(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def main(A: T.Buffer(1, "float32")):
                T.func_attr({"global_symbol": "main", "target": T.target("llvm", host="llvm")})
                mod.subroutine(A.data)

            @T.prim_func
            def subroutine(A_data: T.handle("float32")):
                # Has a global_symbol, so it is part of the module's public API.
                T.func_attr(
                    {"global_symbol": "subroutine", "target": T.target("llvm", host="llvm")}
                )
                T.evaluate(A_data)

        return mod

    def expected(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def main(A_data: T.handle("float32")) -> T.int32:
                T.func_attr({"global_symbol": "main", "target": T.target("llvm")})
                T.attr("default", "device_id", 0)
                T.attr("default", "device_type", 1)
                mod.subroutine(A_data)
                T.ret(T.int32(0))

            @T.prim_func
            def subroutine(A_data: T.handle("float32")) -> T.int32:
                # Exposed subroutine is also rewritten: host-only target and an
                # int32 success return (though no device-setup attrs here).
                T.func_attr({"global_symbol": "subroutine", "target": T.target("llvm")})
                T.evaluate(A_data)
                T.ret(T.int32(0))

        return mod
class TestCallExternallyVisibleSubroutineWithDLTensor(tvm.testing.CompareBeforeAfter):
    """Callsites of externally-visible subroutines may require updates

    The MakeUnpackedAPI transform lowers all buffers into a data
    pointer to a primitive type.  If a subroutine call is currently
    passing a DLTensor produced by `T.tvm_make_stack_array` into the
    subroutine, the callsite should be updated to instead pass the
    data pointer directly.
    """

    transform = tvm.tir.transform.MakeUnpackedAPI()

    def before(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def main(A: T.Buffer(1, "float32")):
                T.func_attr({"global_symbol": "main", "target": T.target("llvm", host="llvm")})
                # Wraps A's data pointer into an on-stack DLTensor before the call.
                mod.subroutine(
                    T.tvm_stack_make_array(
                        A.data,
                        T.tvm_stack_make_shape(1, dtype="handle"),
                        T.reinterpret(T.uint64(0), dtype="handle"),
                        T.uint32(1),
                        T.Cast("float32", 0),
                        0,
                        dtype="handle",
                    )
                )

            @T.prim_func
            def subroutine(A: T.Buffer(1, "float32")):
                T.func_attr(
                    {"global_symbol": "subroutine", "target": T.target("llvm", host="llvm")}
                )
                T.evaluate(A.data)

        return mod

    def expected(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def main(A_data: T.handle("float32")) -> T.int32:
                T.func_attr({"global_symbol": "main", "target": T.target("llvm")})
                T.attr("default", "device_id", 0)
                T.attr("default", "device_type", 1)
                # The DLTensor wrapper is gone: the raw data pointer is passed.
                mod.subroutine(A_data)
                T.ret(T.int32(0))

            @T.prim_func
            def subroutine(A_data: T.handle("float32")) -> T.int32:
                T.func_attr({"global_symbol": "subroutine", "target": T.target("llvm")})
                T.attr("default", "device_id", 0)
                T.attr("default", "device_type", 1)
                T.evaluate(A_data)
                T.ret(T.int32(0))

        return mod
# Allow running this test file directly via TVM's test entry point.
if __name__ == "__main__":
    tvm.testing.main()
| 11,864 | 34.10355 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_arith_solve_linear_equations.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import random
import sys
import pytest
import tvm
from tvm import te, arith, ir, tir, testing
def test_solution_consistency():
    """Fuzz solve_linear_equations and check each transform is self-consistent."""
    seed = random.randrange(sys.maxsize)
    print(
        "\nThis test is intentionally non-deterministic, "
        "if it fails please report it in github issue together with this seed {}\n".format(seed)
    )
    random.seed(seed)

    def _check(num_vars, num_formulas, coef=(-5, 5), bounds=(-20, 20)):
        variables = [te.var("x" + str(i)) for i in range(num_vars)]
        relations = []
        for _ in range(num_formulas):
            lhs = sum([v * random.randint(coef[0], coef[1]) for v in variables])
            lhs += random.randint(coef[0], coef[1])
            rhs = sum([v * random.randint(coef[0], coef[1]) for v in variables])
            rhs += random.randint(coef[0], coef[1])
            if random.random() < 0.7:
                op = tvm.tir.EQ
            else:
                # we also make sure it can correctly handle inequalities
                op = random.choice([tvm.tir.LE, tvm.tir.LT, tvm.tir.GE, tvm.tir.GT])
            relations.append(op(lhs, rhs))

        vranges = {v: tvm.ir.expr.Range(bounds[0], bounds[1] + 1) for v in variables}
        solution = arith.solve_linear_equations(relations, variables, vranges)
        testing.check_int_constraints_trans_consistency(solution)

        # leaving some variables as parameters should also be ok
        for k in [1, 2]:
            if len(variables) > k:
                solution = arith.solve_linear_equations(relations, variables[:-k], vranges)
                param_ranges = {v: vranges[v] for v in variables[-k:]}
                testing.check_int_constraints_trans_consistency(solution, param_ranges)

    # (repeats, kwargs) pairs, run in the original order so the random
    # number stream is consumed identically.
    configurations = (
        (2, dict(num_vars=1, num_formulas=1)),
        (2, dict(num_vars=1, num_formulas=2)),
        (2, dict(num_vars=2, num_formulas=1)),
        (2, dict(num_vars=2, num_formulas=2)),
        (2, dict(num_vars=2, num_formulas=3)),
        (3, dict(num_vars=3, num_formulas=3, coef=(-2, 2))),
        (3, dict(num_vars=3, num_formulas=4, coef=(-2, 2))),
        (3, dict(num_vars=4, num_formulas=3, coef=(-1, 1))),
        (3, dict(num_vars=10, num_formulas=2, coef=(-1, 1), bounds=(0, 4))),
        (3, dict(num_vars=10, num_formulas=3, coef=(0, 1), bounds=(0, 4))),
    )
    for repeats, kwargs in configurations:
        for _ in range(repeats):
            _check(**kwargs)
def test_empty_var_to_solve():
    """When no variables are requested, the solver returns the system unchanged."""
    x, y = te.var("x"), te.var("y")
    equations = [
        tvm.tir.EQ(x + y, 20),
        tvm.tir.EQ(x - y, 10),
    ]
    solution = arith.solve_linear_equations(equations)
    # No variable substitution should have happened in either direction.
    assert len(solution.src_to_dst) == 0
    assert len(solution.dst_to_src) == 0
    assert len(solution.src.variables) == 0
    assert len(solution.src.ranges) == 0
    # The relations come back untouched and src equals dst structurally.
    assert ir.structural_equal(solution.src.relations, equations)
    assert ir.structural_equal(solution.src, solution.dst)
def test_unique_solution():
    """A full-rank system has a unique point solution and no free variables."""
    x, y = te.var("x"), te.var("y")
    equations = [
        tvm.tir.EQ(x + y, 20),
        tvm.tir.EQ(x - y, 10),
    ]
    solution = arith.solve_linear_equations(equations, [x, y])
    # Both variables are fully determined: x = 15, y = 5.
    assert list(solution.dst.variables) == []
    assert ir.structural_equal(solution.src_to_dst[x], 15)
    assert ir.structural_equal(solution.src_to_dst[y], 5)
def test_low_rank():
    """A rank-deficient system keeps one free variable in the destination."""
    x, y, z = te.var("x"), te.var("y"), te.var("z")
    solution = arith.solve_linear_equations(
        [
            tvm.tir.EQ(x + y + z, 15),
            tvm.tir.EQ(x + y, 10),
        ],
        [x, y, z],
        {},  # no variable ranges supplied
    )
    # One degree of freedom remains; x and y are expressed through it.
    [n0] = solution.dst.variables
    expected = {x: n0 + 10, y: -n0, z: 5}
    for var, expr in expected.items():
        assert ir.structural_equal(solution.src_to_dst[var], expr)
def test_infer_range():
    """Ranges of eliminated variables are propagated onto the new variable."""
    x, y = te.var("x"), te.var("y")
    ranges = {
        x: tvm.ir.Range.from_min_extent(-5, 10),
        y: tvm.ir.Range.from_min_extent(0, 10),
    }
    solution = arith.solve_linear_equations([tvm.tir.EQ(x + y, 0)], [x, y], ranges)
    [n0] = solution.dst.variables
    assert ir.structural_equal(solution.src_to_dst[x], n0)
    assert ir.structural_equal(solution.src_to_dst[y], -n0)
    # n0's range is inferred from y's range
    assert ir.structural_equal(solution.dst.ranges[n0].min, -9)
    assert ir.structural_equal(solution.dst.ranges[n0].extent, 10)
    # x's constraint cannot be folded into a Range, so an additional
    # inequality (-5 <= n0) is appended to the system.
    [ineq] = solution.dst.relations
    assert isinstance(ineq, tvm.tir.LE)
    assert ir.structural_equal(ineq.a, -5)
    assert ir.structural_equal(ineq.b, n0)
def test_ill_formed():
    """An inconsistent (over-determined) system reduces to a single False relation."""
    x, y = te.var("x"), te.var("y")
    solution = arith.solve_linear_equations(
        [
            tvm.tir.EQ(x + y, 0),
            tvm.tir.EQ(x - y, 0),
            tvm.tir.EQ(x, 5),
        ],
        [x, y],
        {},
    )
    # No solution exists: no destination variables, just a False relation,
    # and no variable mapping in either direction.
    assert list(solution.dst.variables) == []
    [rel] = solution.dst.relations
    assert ir.structural_equal(rel, False)
    assert len(solution.src_to_dst) == 0
    assert len(solution.dst_to_src) == 0
# Entry point for running this test file as a standalone script.
if __name__ == "__main__":
    tvm.testing.main()
# (dataset metadata row, not Python: | 6,029 | 32.131868 | 96 | py |)
# ---- tvm | tvm-main/tests/python/unittest/test_meta_schedule_feature_extractor_per_store_feature.py ----
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import sys
from typing import Callable, List
import pytest
import tvm
import tvm.testing
from numpy.testing import assert_allclose
from tvm import meta_schedule as ms
from tvm import te, tir
from tvm.script import tir as T
N_FEATURES = 164
# 512x512x512 fp32 matrix multiply (C = A @ B, reduction over k): the base
# workload scheduled by the feature-extraction tests below.
@T.prim_func
def matmul(
    A: T.Buffer((512, 512), "float32"),
    B: T.Buffer((512, 512), "float32"),
    C: T.Buffer((512, 512), "float32"),
) -> None:
    # function attr dict
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    # body
    # with T.block("root")
    for i0, i1, i2 in T.grid(512, 512, 512):
        with T.block("C"):
            # i, j are spatial axes; k is the reduction axis ("SSR").
            i, j, k = T.axis.remap("SSR", [i0, i1, i2])
            T.reads(C[i, j], A[i, k], B[k, j])
            T.writes(C[i, j])
            with T.init():
                C[i, j] = T.float32(0)
            C[i, j] = C[i, j] + A[i, k] * B[k, j]
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
# fmt: off
# from tvm.script import tir as T
# Layout-transform workload lifted from a compiled model; its store is a
# deeply nested if_then_else/Select expression, which exercises the feature
# extractor on large auto-generated predicates (see test_cpu_layout_transform).
@tvm.script.ir_module
class LayoutTransform:
    @T.prim_func
    def main(placeholder: T.Buffer((1, 16, 7, 7, 32), "float32"), placeholder_1: T.Buffer((25088,), "float32"), T_layout_trans: T.Buffer((1, 1, 7, 7, 512), "float32")) -> None:
        # function attr dict
        T.func_attr({"tir.noalias": True, "global_symbol": "main"})
        # body
        # with T.block("root")
        for i0_i1_i2_i3_i4_fused in T.parallel(25088, annotations={"pragma_auto_unroll_max_step":64, "pragma_unroll_explicit":1}):
            with T.block("T_layout_trans_1"):
                ax0 = T.axis.spatial(1, 0)
                ax1 = T.axis.spatial(1, 0)
                ax2 = T.axis.spatial(7, i0_i1_i2_i3_i4_fused // 3584)
                ax3 = T.axis.spatial(7, i0_i1_i2_i3_i4_fused % 3584 // 512)
                ax4 = T.axis.spatial(512, i0_i1_i2_i3_i4_fused % 512)
                T.reads(placeholder[0, (ax4 * 49 + ax2 * 7 + ax3) % 25088 // 1568, (ax2 * 7 + ax3) % 49 // 7, ax3 % 7, (ax4 * 49 + ax2 * 7 + ax3) % 1568 // 49], placeholder_1[(ax4 * 49 + ax2 * 7 + ax3) % 25088])
                T.writes(T_layout_trans[ax0, ax1, ax2, ax3, ax4])
                T_layout_trans[ax0, ax1, ax2, ax3, ax4] = T.if_then_else(ax0 < 1 and ax1 * 512 + ax4 < 512 and ax2 < 7 and ax3 < 7, T.Select(T.float32(0) < T.if_then_else(T.LT(0, 1) and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 < 512 and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 49 // 7 < 7 and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 7 < 7, placeholder[0, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 // 32, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 49 // 7, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 7, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 % 32], T.float32(0), dtype="float32"), T.if_then_else(T.LT(0, 1) and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 < 512 and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 49 // 7 < 7 and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 7 < 7, placeholder[0, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 // 32, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 49 // 7, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 7, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 % 32], T.float32(0), dtype="float32"), T.if_then_else(T.LT(0, 1) and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 < 512 and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 49 // 7 < 7 and ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 7 < 7, placeholder[0, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 // 32, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 49 // 7, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 7, ((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088 % 25088 // 49 % 32], T.float32(0), dtype="float32") * placeholder_1[((ax1 * 512 + ax4) * 49 + ax2 * 7 + ax3) % 25088]), T.float32(0), dtype="float32")
# fmt: on
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
def _make_context(target) -> ms.TuneContext:
    """Build a single-threaded TuneContext for the given target."""
    return ms.TuneContext(target=target, num_threads=1)
def _make_candidate(f_sch: Callable[[], tir.Schedule]) -> ms.MeasureCandidate:
    """Wrap a schedule factory into a MeasureCandidate with no argument info."""
    schedule = f_sch()
    return ms.MeasureCandidate(sch=schedule, args_info=[])
def _feature_names(  # pylint: disable=invalid-name
    buffers_per_store: int = 5,
    arith_intensity_curve_num_samples: int = 10,
) -> List[str]:
    """Return the ordered names of all per-store features.

    The order must match the C++ feature extractor exactly; the total length
    is checked against N_FEATURES (57 + 18 * buffers_per_store + curve + 7).
    """
    pos_kinds = [
        "kPosNone", "kPosInnerSpatial", "kPosMiddleSpatial", "kPosOuterSpatial",
        "kPosInnerReduce", "kPosMiddleReduce", "kPosOuterReduce", "kPosMixed",
    ]
    # Group 1.1: arithmetic-op counters.
    names: List[str] = [
        "float_mad", "float_addsub", "float_mul", "float_divmod",
        "float_cmp", "float_mathfunc", "float_otherfunc",
        "int_mad", "int_addsub", "int_mul", "int_divmod",
        "int_cmp", "int_mathfunc", "int_otherfunc",
        "bool_op", "select_op",
    ]
    # Groups 1.2-1.4: vectorize / unroll / parallel annotations share a layout.
    for group in ("vec", "unroll", "parallel"):
        names += [f"{group}_num", f"{group}_prod", f"{group}_len"]
        names += [f"{group}_type.{kind}" for kind in pos_kinds]
    # Group 1.5: GPU thread-binding extents.
    names += [
        "is_gpu",
        "blockIdx_x_len", "blockIdx_y_len", "blockIdx_z_len",
        "threadIdx_x_len", "threadIdx_y_len", "threadIdx_z_len",
        "vthread_len",
    ]
    # Group 2: per-buffer access features, repeated buffers_per_store times.
    buffer_fields = [
        "acc_type.kRead", "acc_type.kWrite", "acc_type.kReadWrite",
        "bytes", "unique_bytes", "lines", "unique_lines",
        "reuse_type.kLoopMultipleRead", "reuse_type.kSerialMultipleReadWrite",
        "reuse_type.kNoReuse",
        "reuse_dis_iter", "reuse_dis_bytes", "reuse_ct",
        "bytes_d_reuse_ct", "unique_bytes_d_reuse_ct",
        "lines_d_reuse_ct", "unique_lines_d_reuse_ct", "stride",
    ]
    names += [f"B{i}.{field}" for i in range(buffers_per_store) for field in buffer_fields]
    # Group 3: sampled arithmetic-intensity curve.
    names += [f"arith_intensity_curve_{i}" for i in range(arith_intensity_curve_num_samples)]
    # Groups 4 & 5: allocation- and loop-level features.
    names += [
        "alloc_size", "alloc_prod", "alloc_outer_prod", "alloc_inner_prod",
        "outer_prod", "num_loops", "auto_unroll_max_step",
    ]
    # 57 + 18 * 5 + 10 + 4 + 3
    assert len(names) == N_FEATURES
    return names
def _zip_feature(feature, names):
    """Pair each feature value with its name, validating the expected length."""
    assert feature.ndim == 1
    assert feature.shape[0] == N_FEATURES
    assert len(names) == N_FEATURES
    return [(name, value) for name, value in zip(names, feature)]
def _print_feature(feature, st, ed):  # pylint: disable=invalid-name
    """Debug helper: print named features in the half-open index range [st, ed)."""
    named = _zip_feature(feature, _feature_names())
    for name, value in named[st:ed]:
        print("\t", name, value)
def test_cpu_matmul():
    """Validate every feature group for a tiled/vectorized/parallel CPU matmul.

    Feature values are log-transformed by the extractor, hence the
    non-integer expectations below.
    """

    def _create_schedule():
        sch = tir.Schedule(matmul, debug_mask="all")
        block = sch.get_block("C")
        i, j, k = sch.get_loops(block)
        i_o, i_i = sch.split(i, factors=[None, 16])  # outer: 32
        j_o, j_i = sch.split(j, factors=[None, 8])  # outer: 64
        sch.reorder(i_o, j_o, k, j_i, i_i)
        sch.vectorize(j_i)
        sch.parallel(i_o)
        sch.parallel(j_o)
        sch.unroll(k)
        return sch

    extractor = ms.feature_extractor.PerStoreFeature()
    (feature,) = extractor.extract_from(
        _make_context(tvm.target.Target("llvm")),
        candidates=[_make_candidate(_create_schedule)],
    )
    feature = feature.numpy()
    assert feature.shape == (1, N_FEATURES)
    f = feature[0]

    def _expect(lo, hi, desired):
        assert_allclose(actual=f[lo:hi], desired=desired, rtol=1e-5, atol=1e-5)

    # Group 1.1: arith (float ops / int ops / bool+select ops)
    _expect(0, 16, [0, 27, 27, 0, 0, 0, 0, 0, 29, 29, 0, 0, 0, 0, 0, 0])
    # Group 1.2: vectorize
    _expect(16, 27, [1.0, 3.169924, 3.169924, 0, 0, 0, 0, 0, 0, 0, 1])
    # Group 1.3: unroll
    _expect(27, 38, [1.0, 9.002815, 9.002815, 0, 0, 0, 0, 0, 0, 0, 1])
    # Group 1.4: parallel
    _expect(38, 49, [1.58496, 11.0007, 6.022368, 0, 0, 0, 0, 0, 0, 0, 1])
    # Group 1.5: is_gpu, blockIdx.x/y/z, threadIdx.x/y/z, vthread
    _expect(49, 57, [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    # Group 2.1: Buffer A
    _expect(
        57, 75,
        [1, 0, 0, 29, 20, 27, 14, 1, 0, 0, 4.087463, 7.0552826, 3.169925,
         26, 17, 24, 11.0007038, 9.002815],
    )
    # Group 2.2: Buffer C
    _expect(
        75, 93,
        [0.0, 0.0, 1.0, 29.0, 20.000001907348633, 27.0, 14.00008773803711,
         1.0, 0.0, 0.0, 7.011227130889893, 9.250298500061035,
         9.002815246582031, 20.000001907348633, 11.000703811645508,
         18.0000057220459, 5.044394016265869, 9.002815246582031],
    )
    # Group 2.3: Buffer B
    _expect(
        93, 111,
        [1.0, 0.0, 0.0, 29.0, 20.000001907348633, 19.000001907348633,
         14.00008773803711, 1.0, 0.0, 0.0, 1.0, 3.700439691543579,
         4.087462902069092, 25.0, 16.000022888183594, 15.000043869018555,
         10.001408194392809, 0.0],
    )
    # Groups 2.4 / 2.5: dummy padding (only three buffers are accessed)
    _expect(111, 129, [0.0] * 18)
    _expect(129, 147, [0.0] * 18)
    # Group 3: Arithmetic intensity
    # arithmetic intensity = flops/bytes touched = 2*512*512*512/(3 * 4 * 512*512)
    #            add and multiply ^       3 arrays ^   ^ 4 bytes per f32
    # = 85.3 but log2 is used so values should be around 6.4
    _expect(
        147, 157,
        [3.812599, 4.464822, 4.912349, 5.253426, 5.529086, 5.76043,
         5.959752, 6.134849, 6.290977, 6.431846],
    )
    # Groups 4 & 5: allocation and loop features
    _expect(
        157, 164,
        [20.000001907348633, 18.0000057220459, 1.0, 27.0, 27.0,
         2.5849626064300537, 0.0],
    )
def test_cpu_fusion():
    """Per-store features for a fused elementwise chain (B <- A, then C <- B)."""

    # pylint: disable=all
    @T.prim_func
    def func(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, [64, 32], dtype="float32")
        B = T.match_buffer(b, [64, 32], dtype="float32")
        C = T.match_buffer(c, [64, 32], dtype="float32")
        for i, j in T.grid(64, 32):  # type: ignore
            with T.block():
                T.reads([A[i, j], B[i, j]])  # type: ignore
                T.writes([B[i, j], C[i, j]])  # type: ignore
                with T.block("B"):
                    T.reads([A[i, j]])  # type: ignore
                    T.writes([B[i, j]])  # type: ignore
                    B[i, j] = A[i, j]  # type: ignore
                with T.block("C"):
                    T.reads([B[i, j]])  # type: ignore
                    T.writes([C[i, j]])  # type: ignore
                    C[i, j] = B[i, j]  # type: ignore
    # pylint: enable=all

    extractor = ms.feature_extractor.PerStoreFeature()
    (feature,) = extractor.extract_from(
        _make_context(tvm.target.Target("llvm")),
        candidates=[_make_candidate(lambda: tir.Schedule(func, debug_mask="all"))],
    )
    feature = feature.numpy()
    # One feature row per BufferStore: one for B, one for C.
    assert feature.shape == (2, N_FEATURES)

    def _expect(row, lo, hi, desired):
        assert_allclose(actual=row[lo:hi], desired=desired, rtol=1e-5, atol=1e-5)

    no_annotation = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]

    ## Features for BufferStore(B)
    f = feature[0]
    # Group 1.1: arith (no arithmetic at all, just a copy)
    _expect(f, 0, 16, [0.0] * 16)
    # Groups 1.2-1.4: vectorize / unroll / parallel all absent
    _expect(f, 16, 27, no_annotation)
    _expect(f, 27, 38, no_annotation)
    _expect(f, 38, 49, no_annotation)
    # Group 1.5: is_gpu, blockIdx.x/y/z, threadIdx.x/y/z, vthread
    _expect(f, 49, 57, [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    # Group 2.1: Buffer A
    _expect(
        f, 57, 75,
        [1.0, 0.0, 0.0, 13.000176429748535, 13.000176429748535,
         7.011227130889893, 7.011227130889893, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
         14.00008773803711, 14.00008773803711, 8.005624771118164,
         8.005624771118164, 1.0],
    )
    # Group 2.2: Buffer B
    _expect(
        f, 75, 93,
        [0.0, 1.0, 0.0, 13.000176429748535, 13.000176429748535,
         7.011227130889893, 7.011227130889893, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
         14.00008773803711, 14.00008773803711, 8.005624771118164,
         8.005624771118164, 1.0],
    )
    # Groups 2.3-2.5: dummy padding
    _expect(f, 93, 111, [0.0] * 18)
    _expect(f, 111, 129, [0.0] * 18)
    _expect(f, 129, 147, [0.0] * 18)
    # Group 3: arithmetic intensity curve
    _expect(f, 147, 157, [0.0] * 10)
    # Groups 4 & 5
    _expect(
        f, 157, 164,
        [13.000176, 11.000703811645508, 1.0, 11.000703811645508,
         11.000703811645508, 1.5849624872207642, 0.0],
    )

    ## Features for BufferStore(C)
    f = feature[1]
    _expect(f, 0, 16, [0.0] * 16)
    _expect(f, 16, 27, no_annotation)
    _expect(f, 27, 38, no_annotation)
    _expect(f, 38, 49, no_annotation)
    _expect(f, 49, 57, [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    # Group 2.1: Buffer B (written above, re-read here: serial read/write reuse)
    _expect(
        f, 57, 75,
        [1.0, 0.0, 0.0, 13.000176429748535, 13.000176429748535,
         7.011227130889893, 7.011227130889893, 0.0, 1.0, 0.0, 1.0,
         4.087462902069092, 1.0, 13.000176429748535, 13.000176429748535,
         7.011227130889893, 7.011227130889893, 1.0],
    )
    # Group 2.2: Buffer C
    _expect(
        f, 75, 93,
        [0.0, 1.0, 0.0, 13.000176429748535, 13.000176429748535,
         7.011227130889893, 7.011227130889893, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0,
         14.00008773803711, 14.00008773803711, 8.005624771118164,
         8.005624771118164, 1.0],
    )
    # Groups 2.3-2.5: dummy padding
    _expect(f, 93, 111, [0.0] * 18)
    _expect(f, 111, 129, [0.0] * 18)
    _expect(f, 129, 147, [0.0] * 18)
    # Group 3: arithmetic intensity curve
    _expect(f, 147, 157, [0.0] * 10)
    # Groups 4 & 5
    _expect(
        f, 157, 164,
        [13.000176429748535, 11.000703811645508, 1.0, 11.000703811645508,
         11.000703811645508, 1.5849624872207642, 0.0],
    )
def test_empty_feature():
    """A trivial constant-fill workload yields an empty (0-row) feature matrix."""

    @T.prim_func
    def full(T_full: T.Buffer((T.int64(2), T.int64(3)), "float32")):
        for ax0, ax1 in T.grid(T.int64(2), T.int64(3)):
            with T.block("T_full"):
                v_ax0, v_ax1 = T.axis.remap("SS", [ax0, ax1])
                T.reads()
                T.writes(T_full[v_ax0, v_ax1])
                T_full[v_ax0, v_ax1] = T.float32(1)

    extractor = ms.feature_extractor.PerStoreFeature()
    (feature,) = extractor.extract_from(
        _make_context(tvm.target.Target("llvm")),
        candidates=[_make_candidate(lambda: tir.Schedule(full, debug_mask="all"))],
    )
    # No feature rows are produced for this workload.
    assert feature.numpy().shape == (0, N_FEATURES)
def test_gpu():
    """Validate all feature groups for a GPU matmul with shared-memory caching.

    The schedule produces four BufferStores, checked in extraction order:
    A -> A.shared, B -> B.shared, the C.local compute, and C.local -> C.
    """

    def _create_schedule():
        sch = tir.Schedule(matmul, debug_mask="all")
        c = sch.get_block("C")
        c_local = sch.cache_write(c, 0, "local")
        i, j, k = sch.get_loops(c)
        # pylint: disable=invalid-name
        i0, i1, i2, i3, i4 = sch.split(i, factors=[None, 1, 16, 32, 1])  # outer: 1
        j0, j1, j2, j3, j4 = sch.split(j, factors=[None, 4, 1, 1, 16])  # outer: 8
        k0, k1, k2 = sch.split(k, factors=[None, 1, 2])  # outer: 256
        # pylint: enable=invalid-name
        # fmt: off
        sch.reorder(
            i0, j0,  # S
            i1, j1,  # S
            i2, j2,  # S
            k0,  # R
            k1,  # R
            i3, j3,  # S
            k2,  # R
            i4, j4,  # S
        )
        # fmt: on
        # thread binding
        i0_j0 = sch.fuse(i0, j0)
        i1_j1 = sch.fuse(i1, j1)
        i2_j2 = sch.fuse(i2, j2)
        sch.bind(i0_j0, "blockIdx.x")
        sch.bind(i1_j1, "vthread.x")
        sch.bind(i2_j2, "threadIdx.x")
        # fusion
        sch.reverse_compute_at(c_local, i2_j2)
        # cache read 'A'
        a_shared = sch.cache_read(c, 1, "shared")
        sch.compute_at(a_shared, k0)
        _, _, _, _, a_i, a_j = sch.get_loops(a_shared)
        a_ij = sch.fuse(a_i, a_j)
        _, a_j = sch.split(a_ij, factors=[None, 16])  # outer: 64
        sch.bind(a_j, "threadIdx.x")
        # cache read 'B'
        b_shared = sch.cache_read(c, 2, "shared")
        sch.compute_at(b_shared, k0)
        _, _, _, _, b_i, b_j = sch.get_loops(b_shared)
        b_ij = sch.fuse(b_i, b_j)
        _, b_j = sch.split(b_ij, factors=[None, 16])  # outer: 8
        sch.bind(b_j, "threadIdx.x")
        # auto unroll
        sch.annotate(i0_j0, "pragma_auto_unroll_max_step", tir.IntImm("int32", 1024))
        sch.annotate(i0_j0, "pragma_unroll_explicit", tir.IntImm("int32", 1))
        return sch

    extractor = ms.feature_extractor.PerStoreFeature()
    (feature,) = extractor.extract_from(
        _make_context(tvm.target.Target("cuda")),
        candidates=[_make_candidate(_create_schedule)],
    )
    feature = feature.numpy()
    assert feature.shape == (4, N_FEATURES)

    def _expect(row, lo, hi, desired):
        assert_allclose(actual=row[lo:hi], desired=desired, rtol=1e-5, atol=1e-5)

    # Shared per-row expectations.
    no_annotation = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    # is_gpu, blockIdx.x/y/z, threadIdx.x/y/z, vthread
    gpu_env = [1.0, 3.169925001442312, 1.0, 1.0, 4.087462841250339, 1.0, 1.0, 2.321928094887362]

    ### Check feature[0]: BufferStore(A_shared) <= A[...]
    f = feature[0]
    # Group 1.1: arith (integer index arithmetic only)
    _expect(
        f, 0, 16,
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         24.000000085991324, 24.000000085991324, 24.000000085991324,
         0.0, 0.0, 0.0, 0.0, 0.0],
    )
    # Groups 1.2-1.4: vectorize / unroll / parallel
    _expect(f, 16, 27, no_annotation)
    _expect(f, 27, 38, no_annotation)
    _expect(f, 38, 49, no_annotation)
    # Group 1.5: GPU thread environment
    _expect(f, 49, 57, gpu_env)
    # Group 2.1: Buffer A
    _expect(
        f, 57, 75,
        [1.0, 0.0, 0.0, 25.000000042995662, 20.000001375860553,
         23.00000017198264, 14.000088052430122, 1.0, 0.0, 0.0,
         18.00000550343433, 20.00562591970089, 2.321928094887362,
         23.00000017198264, 18.00000550343433, 21.000000687930438,
         12.0003521774803, 12.0003521774803],
    )
    # Group 2.2: Buffer A.shared
    _expect(
        f, 75, 93,
        [0.0, 1.0, 0.0, 25.000000042995662, 12.0003521774803,
         23.00000017198264, 9.002815015607053, 1.0, 0.0, 0.0,
         6.022367813028454, 11.98049663618346, 8.005624549193879,
         17.000011006847668, 4.087462841250339, 15.000044026886828,
         1.584962500721156, 4.087462841250339],
    )
    # Groups 2.3-2.5: dummy padding
    _expect(f, 93, 111, [0.0] * 18)
    _expect(f, 111, 129, [0.0] * 18)
    _expect(f, 129, 147, [0.0] * 18)
    # Group 3: arithmetic intensity curve
    _expect(f, 147, 157, [0.0] * 10)
    # Groups 4 & 5
    _expect(
        f, 157, 164,
        [12.0003521774803, 27.000000010748916, 17.000011006847668,
         6.022367813028454, 23.00000017198264, 2.584962500721156, 10.001408],
    )

    ### Check feature[1]: BufferStore(B_shared) <= B[...]
    f = feature[1]
    # Group 1.1: arith
    _expect(
        f, 0, 16,
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         21.584962959341485, 21.584962959341485, 21.000000687930438,
         0.0, 0.0, 0.0, 0.0, 0.0],
    )
    _expect(f, 16, 27, no_annotation)
    _expect(f, 27, 38, no_annotation)
    _expect(f, 38, 49, no_annotation)
    _expect(f, 49, 57, gpu_env)
    # Group 2.1: Buffer B
    _expect(
        f, 57, 75,
        [1.0, 0.0, 0.0, 22.00000034396526, 20.000001375860553,
         20.000001375860553, 14.000088052430122, 1.0, 0.0, 0.0,
         15.000044026886828, 20.17555076886471, 2.321928094887362,
         20.000001375860553, 18.00000550343433, 18.00000550343433,
         12.0003521774803, 4.087462841250339],
    )
    # Group 2.2: Buffer B.shared
    _expect(
        f, 75, 93,
        [0.0, 1.0, 0.0, 22.00000034396526, 9.002815015607053,
         20.000001375860553, 3.169925001442312, 1.0, 0.0, 0.0,
         3.169925001442312, 9.61654884377899, 8.005624549193879,
         14.000088052430122, 1.584962500721156, 12.0003521774803,
         0.044394119358453436, 4.087462841250339],
    )
    _expect(f, 93, 111, [0.0] * 18)
    _expect(f, 111, 129, [0.0] * 18)
    _expect(f, 129, 147, [0.0] * 18)
    _expect(f, 147, 157, [0.0] * 10)
    _expect(
        f, 157, 164,
        [9.002815015607053, 24.000000085991324, 17.000011006847668,
         3.169925001442312, 20.000001375860553, 2.584962500721156, 10.001408],
    )

    ### Check feature[2]: BufferStore(C_local) <= C_local[...] + A_shared[...] * B_shared[...]
    f = feature[2]
    # Group 1.1: arith (the actual fp multiply-add work)
    _expect(
        f, 0, 16,
        [0.0, 27.000000010748916, 27.000000010748916, 0.0, 0.0, 0.0, 0.0,
         0.0, 28.000000005374456, 28.000000005374456, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0],
    )
    _expect(f, 16, 27, no_annotation)
    _expect(f, 27, 38, no_annotation)
    _expect(f, 38, 49, no_annotation)
    _expect(f, 49, 57, gpu_env)
    # Group 2.1: Buffer B.shared
    _expect(
        f, 57, 75,
        [1.0, 0.0, 0.0, 29.00000000268723, 9.002815015607053,
         23.00000017198264, 3.169925001442312, 1.0, 0.0, 0.0,
         5.044394119358453, 7.651051691178929, 5.044394119358453,
         24.000000085991324, 4.087462841250339, 18.00000550343433,
         0.32192809488736235, 1.0],
    )
    # Group 2.2: Buffer C.local
    _expect(
        f, 75, 93,
        [0.0, 0.0, 1.0, 29.00000000268723, 11.000704269011246,
         23.00000017198264, 5.044394119358453, 1.0, 0.0, 0.0,
         4.087462841250339, 7.05528243550119, 1.584962500721156,
         28.000000005374456, 10.001408194392809, 22.00000034396526,
         4.087462841250339, 1.0],
    )
    # Group 2.3: Buffer A.shared
    _expect(
        f, 93, 111,
        [1.0, 0.0, 0.0, 29.00000000268723, 12.0003521774803,
         19.00000275171979, 9.002815015607053, 1.0, 0.0, 0.0, 1.0,
         3.700439718141092, 4.087462841250339, 25.000000042995662,
         8.005624549193879, 15.000044026886828, 5.044394119358453, 0.0],
    )
    _expect(f, 111, 129, [0.0] * 18)
    _expect(f, 129, 147, [0.0] * 18)
    # Group 3: Arithmetic intensity
    # Arithmetic intensity is high here because of repeated use of a shared
    # buffer. Multiple accesses to the same memory location are counted as a
    # single byte, skewing these numbers towards higher intensity.
    _expect(
        f, 147, 157,
        [11.98533, 12.977811, 13.562714, 13.977722, 14.299632,
         14.562654, 14.785038, 14.977677, 15.147597, 15.299596],
    )
    _expect(
        f, 157, 164,
        [11.000704269011246, 18.00000550343433, 9.002815015607053,
         18.00000550343433, 27.000000010748916, 3.0, 10.001408],
    )

    ### Check feature[3]: BufferStore(C) <= C_local[...]
    f = feature[3]
    # Group 1.1: arith (pure copy back to global memory)
    _expect(f, 0, 16, [0.0] * 16)
    _expect(f, 16, 27, no_annotation)
    _expect(f, 27, 38, no_annotation)
    _expect(f, 38, 49, no_annotation)
    _expect(f, 49, 57, gpu_env)
    # Group 2.1: Buffer C
    _expect(
        f, 57, 75,
        [0.0, 1.0, 0.0, 20.000001375860553, 20.000001375860553,
         14.000088052430122, 14.000088052430122, 0.0, 0.0, 1.0, 0.0, 0.0,
         0.0, 21.000000687930438, 21.000000687930438, 15.000044026886828,
         15.000044026886828, 1.0],
    )
    # Group 2.2: Buffer C.local
    _expect(
        f, 75, 93,
        [1.0, 0.0, 0.0, 20.000001375860553, 11.000704269011246,
         14.000088052430122, 5.044394119358453, 1.0, 0.0, 0.0,
         9.002815015607053, 12.0003521774803, 4.087462841250339,
         16.00002201361136, 7.011227255423254, 10.001408194392809,
         1.584962500721156, 1.0],
    )
    _expect(f, 93, 111, [0.0] * 18)
    _expect(f, 111, 129, [0.0] * 18)
    _expect(f, 129, 147, [0.0] * 18)
    _expect(f, 147, 157, [0.0] * 10)
    _expect(
        f, 157, 164,
        [20.000001375860553, 18.00000550343433, 1.0, 18.00000550343433,
         18.00000550343433, 2.584962500721156, 10.001408],
    )
def test_cpu_layout_transform():
    """Smoke test: feature extraction must not crash on the LayoutTransform module."""
    extractor = ms.feature_extractor.PerStoreFeature()
    context = _make_context(tvm.target.Target("llvm"))
    candidate = _make_candidate(lambda: tir.Schedule(LayoutTransform))
    (feature,) = extractor.extract_from(context, candidates=[candidate])
# Workload with a zero-trip loop (range(0, -1) never executes): the store
# should therefore touch zero unique bytes (checked in test_negative_extent).
@T.prim_func
def negative_extent(A: T.Buffer((1,), "float32")):
    for j in range(0, -1):
        A[j] = A[j] + 1.0
def test_negative_extent():
    """A loop with negative extent must report zero unique bytes for its buffer."""
    extractor = ms.feature_extractor.PerStoreFeature()
    (features,) = extractor.extract_from(
        _make_context(tvm.target.Target("llvm")),
        candidates=[_make_candidate(lambda: tir.Schedule(negative_extent))],
    )
    row = list(features.numpy()[0, :])
    named_features = dict(zip(_feature_names(), row))
    assert named_features["B0.unique_bytes"] == 0
# Entry point for running this test file as a standalone script.
if __name__ == "__main__":
    tvm.testing.main()
# (dataset metadata row, not Python: | 43,065 | 25.30788 | 1,840 | py |)
# ---- tvm | tvm-main/tests/python/unittest/test_te_tag.py ----
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import tvm
from tvm import te
from tvm import te
@tvm.te.tag_scope(tag="conv")
def compute_conv(data, weight):
    """Valid (no padding, stride 1) NCHW conv2d as a te.compute tagged 'conv'."""
    batch, in_chan, height, width = data.shape
    out_chan, in_chan, k_height, k_width = weight.shape
    out_height = height - k_height + 1
    out_width = width - k_width + 1
    # Reduce over the input channel and the kernel window.
    ic = te.reduce_axis((0, in_chan), name="ic")
    dh = te.reduce_axis((0, k_height), name="dh")
    dw = te.reduce_axis((0, k_width), name="dw")
    return te.compute(
        (batch, out_chan, out_height, out_width),
        lambda b, oc, y, x: te.sum(
            data[b, ic, y + dh, x + dw] * weight[oc, ic, dh, dw], axis=[ic, dh, dw]
        ),
    )
def test_with():
    """tag_scope used as a context manager: the tag and the custom attrs
    must land on the op and survive a JSON save/load round trip."""
    n = te.size_var("n")
    m = te.size_var("m")
    l = te.size_var("l")
    A = te.placeholder((n, l), name="A")
    B = te.placeholder((m, l), name="B")
    with tvm.te.tag_scope(tag="gemm"):
        k = te.reduce_axis((0, l), name="k")
        C = te.compute(
            (n, m),
            lambda i, j: te.sum(A[i, k] * B[j, k], axis=k),
            attrs={"hello": 1, "arr": [10, 12]},
        )
    assert C.op.tag == "gemm"
    assert "hello" in C.op.attrs
    assert "xx" not in C.op.attrs
    assert C.op.attrs["hello"].value == 1
    # Attrs must also survive serialization.
    CC = tvm.ir.load_json(tvm.ir.save_json(C))
    assert CC.op.attrs["hello"].value == 1
    assert CC.op.attrs["arr"][0].value == 10
    # str format happened to be json compatible
    assert json.loads(str(CC.op.attrs))["arr"][1] == 12
def test_decorator():
    """tag_scope used as a decorator stamps the op tag without adding attrs."""
    n, c, h, w, kh, kw = (te.size_var(name) for name in ("n", "c", "h", "w", "kh", "kw"))
    data = te.placeholder((n, c, h, w), name="A")
    kernel = te.placeholder((c, c, kh, kw), name="B")
    result = compute_conv(data, kernel)
    assert result.op.tag == "conv"
    assert len(result.op.attrs) == 0
def test_nested():
    """Calling a tagged compute inside another tag_scope must raise ValueError."""
    n, c, h, w, kh, kw = (te.size_var(name) for name in ("n", "c", "h", "w", "kh", "kw"))
    data = te.placeholder((n, c, h, w), name="A")
    kernel = te.placeholder((c, c, kh, kw), name="B")
    try:
        with te.tag_scope(tag="conv"):
            compute_conv(data, kernel)
        # Reaching this line means the nested scope was (wrongly) accepted.
        assert False
    except ValueError:
        pass
# Run every test when invoked directly as a script.
if __name__ == "__main__":
    test_with()
    test_decorator()
    test_nested()
| 2,995 | 27.533333 | 83 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_thread_sync.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
def run_passes(func: tvm.tir.PrimFunc):
    """Lower `func` just far enough that ThreadSync("shared") can run on it.

    Flattens storage, attaches a CUDA target, splits host/device, then applies
    the shared-memory ThreadSync pass. Returns the resulting IRModule.
    """
    cuda_target = tvm.target.Target("cuda", host="llvm")
    attach_attrs = tvm.tir.transform.Apply(
        lambda f: f.with_attr({"global_symbol": "test", "target": cuda_target})
    )
    # Pass order matters: attrs must exist before host/device splitting.
    pipeline = [
        tvm.tir.transform.StorageFlatten(64),
        attach_attrs,
        tvm.tir.transform.AnnotateDeviceRegions(),
        tvm.tir.transform.SplitHostDevice(),
        tvm.tir.transform.ThreadSync("shared"),
    ]
    mod = tvm.IRModule.from_expr(func)
    for transform in pipeline:
        mod = transform(mod)
    return mod
@tvm.testing.requires_cuda
def test_thread_storage_sync():
    """A shared-scope intermediate shared across a thread axis must get a
    tir.tvm_storage_sync inserted into the device kernel."""
    m = te.size_var("m")
    l = te.size_var("l")
    A = te.placeholder((m, l), name="A")
    A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
    A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")
    s = te.create_schedule(A2.op)
    xo, xi = s[A2].split(A2.op.axis[0], factor=8)
    s[A2].bind(xo, te.thread_axis("blockIdx.x"))
    # A1 is staged in shared memory inside the blockIdx.x loop of A2.
    s[A1].compute_at(s[A2], xo)
    s[A1].set_scope("shared")
    bounds = tvm.te.schedule.InferBound(s)
    assert isinstance(bounds, tvm.container.Map)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    func = tvm.te.schedule.SchedulePostProcToPrimFunc([A, A2], stmt, None)
    mod = run_passes(func)
    f = mod["test_kernel"]
    body_list = tvm.tir.stmt_list(f.body.body.body)
    # The second statement of the kernel body is expected to be the sync call.
    assert body_list[1].value.op.same_as(tvm.ir.Op.get("tir.tvm_storage_sync"))
@tvm.testing.requires_cuda
def test_sync_else_branch():
    """Shared-buffer accesses under matching if/else arms still need a sync."""
    def ir(A, B):
        # Build with ir_builder: the first loop writes either a local or a
        # shared buffer depending on sign; the second loop reads them back.
        ib = tvm.tir.ir_builder.create()
        Aptr = ib.buffer_ptr(A)
        Bptr = ib.buffer_ptr(B)
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(tx, "thread_extent", 1)
        local = ib.allocate(A.dtype, (8,), name="buf_local", scope="local")
        shared = ib.allocate(A.dtype, (8,), name="buf_shared", scope="shared")
        with ib.for_range(0, 8) as i:
            with ib.if_scope(Aptr[i] < 0):
                local[i] = Aptr[i]
            with ib.else_scope():
                shared[i] = Aptr[i]
        with ib.for_range(0, 8) as i:
            with ib.if_scope(Aptr[i] < 0):
                Bptr[i] = local[i]
            with ib.else_scope():
                Bptr[i] = shared[i]
        return ib.get()
    A = tvm.tir.decl_buffer((8,), "float32")
    B = tvm.tir.decl_buffer((8,), "float32")
    stmt = ir(A, B)
    func = tvm.te.schedule.SchedulePostProcToPrimFunc([A, B], stmt, None)
    mod = run_passes(func)
    # A sync must appear somewhere in the lowered module.
    assert "T.tvm_storage_sync" in str(mod)
@tvm.testing.requires_cuda
def test_sync_read_thread_id_independent_location():
    """Reads of shared memory at a thread-id-independent location, written
    only by thread 0, must still be protected by a storage sync."""
    @T.prim_func
    def func(p0_arg: T.Buffer((1, 2, 1, 1), "float32"), p1: T.Buffer(2, "float32")) -> None:
        threadIdx_x = T.env_thread("threadIdx.x")
        blockIdx_x = T.env_thread("blockIdx.x")
        p0 = T.Buffer([2], dtype="float32", data=p0_arg.data)
        result_local = T.alloc_buffer([1], dtype="float32", scope="local")
        temp_shared = T.alloc_buffer([1], dtype="float32", scope="shared")
        T.launch_thread(blockIdx_x, 8)
        T.launch_thread(threadIdx_x, 4)
        result_local[0] = T.float32(0)
        # Only thread 0 writes temp_shared; every thread then reads it.
        if threadIdx_x < 1:
            temp_shared[0] = p0[0]
        result_local[0] = result_local[0] + temp_shared[0] * p1[0]
        if threadIdx_x < 1:
            temp_shared[0] = p0[1]
        result_local[0] = result_local[0] + temp_shared[0] * p1[1]
    mod = run_passes(func)
    assert "T.tvm_storage_sync" in str(mod)
# Run every test when invoked directly as a script.
if __name__ == "__main__":
    test_thread_storage_sync()
    test_sync_else_branch()
    test_sync_read_thread_id_independent_location()
| 4,403 | 33.952381 | 92 | py |
tvm | tvm-main/tests/python/unittest/test_target_parser_mprofile.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Tests to verify Python interactions with Target Parsing
"""
import pytest
from tvm.target import Target
@pytest.mark.parametrize(["cpu_target"], [["c"], ["llvm"]])
def test_target_parser_mprofile(cpu_target):
    """Cortex-M55 targets get the arm_cpu/cpu keys plus DSP and MVE features."""
    target = Target(f"{cpu_target} -mcpu=cortex-m55")
    assert tuple(target.keys) == ("arm_cpu", "cpu")
    assert target.features
    assert target.features.has_dsp and target.features.has_mve
@pytest.mark.parametrize(["cpu_target"], [["c"], ["llvm"]])
def test_target_parser_mprofile_no_mve(cpu_target):
    """Cortex-M7 has the DSP extension but no MVE."""
    target = Target(f"{cpu_target} -mcpu=cortex-m7")
    assert tuple(target.keys) == ("arm_cpu", "cpu")
    assert target.features
    assert target.features.has_dsp
    assert not target.features.has_mve
@pytest.mark.parametrize(["cpu_target"], [["c"], ["llvm"]])
def test_target_parser_mprofile_no_dsp(cpu_target):
    """Cortex-M3 has neither the DSP extension nor MVE."""
    target = Target(f"{cpu_target} -mcpu=cortex-m3")
    assert tuple(target.keys) == ("arm_cpu", "cpu")
    assert target.features
    assert not (target.features.has_dsp or target.features.has_mve)
@pytest.mark.parametrize(["cpu_target"], [["llvm"]])
def test_target_parser_mprofile_mattr(cpu_target):
    """-mattr overrides (+nomve) must disable MVE; unknown attrs are tolerated."""
    target = Target(f"{cpu_target} -mcpu=cortex-m55 -mattr=+nomve,+woof")
    assert tuple(target.keys) == ("arm_cpu", "cpu")
    assert target.features
    assert target.features.has_dsp
    assert not target.features.has_mve
if __name__ == "__main__":
    # Fixed: `tvm` was never imported at module scope (only `pytest` and
    # `tvm.target.Target` are), so `tvm.testing.main()` raised a NameError
    # when this file was run as a script. Import the testing entry point
    # locally before invoking it.
    import tvm.testing

    tvm.testing.main()
| 2,650 | 35.819444 | 80 | py |
tvm | tvm-main/tests/python/unittest/test_node_reflection.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
import sys
import pytest
from tvm import te
import numpy as np
def test_const_saveload_json():
    """An expression tree of constants survives a save_json/load_json round trip."""
    one = tvm.tir.const(1, "int32")
    ten = tvm.tir.const(10, "int32")
    expr = one + ten
    expr = expr + expr
    reloaded = tvm.ir.load_json(tvm.ir.save_json(expr))
    tvm.ir.assert_structural_equal(reloaded, expr, map_free_vars=True)
def _test_infinity_value(value, dtype):
    """Round-trip a single constant through JSON and check structural equality."""
    const = tvm.tir.const(value, dtype)
    reloaded = tvm.ir.load_json(tvm.ir.save_json(const))
    tvm.ir.assert_structural_equal(const, reloaded)
def test_infinity_value():
    """Positive and negative infinity serialize for both float widths."""
    for dtype in ("float64", "float32"):
        for value in (float("inf"), float("-inf")):
            _test_infinity_value(value, dtype)
def _test_minmax_value(value):
    """JSON round trip must preserve a min/max sentinel value."""
    round_tripped = tvm.ir.load_json(tvm.ir.save_json(value))
    tvm.ir.assert_structural_equal(value, round_tripped)
def test_minmax_value():
    """min_value/max_value float32 constants survive serialization."""
    for extreme in (tvm.tir.min_value("float32"), tvm.tir.max_value("float32")):
        _test_minmax_value(extreme)
def test_make_smap():
    """A string-keyed map of expressions survives a JSON round trip,
    including sharing of subexpressions."""
    # save load json
    x = tvm.tir.const(1, "int32")
    y = tvm.tir.const(10, "int32")
    z = tvm.tir.Add(x, y)
    smap = tvm.runtime.convert({"z": z, "x": x})
    json_str = tvm.ir.save_json(tvm.runtime.convert([smap]))
    arr = tvm.ir.load_json(json_str)
    assert len(arr) == 1
    # The shared subexpression must still be shared after deserialization.
    assert arr[0]["z"].a == arr[0]["x"]
    tvm.ir.assert_structural_equal(arr, [smap], map_free_vars=True)
def test_make_node():
    """tvm.ir.make_node constructs nodes from a registered type key and kwargs."""
    x = tvm.ir.make_node("IntImm", dtype="int32", value=10, span=None)
    assert isinstance(x, tvm.tir.IntImm)
    assert x.value == 10
    A = te.placeholder((10,), name="A")
    AA = tvm.ir.make_node(
        "Tensor", shape=A.shape, dtype=A.dtype, op=A.op, value_index=A.value_index
    )
    assert AA.op == A.op
    assert AA.value_index == A.value_index
    # A tvm.runtime.String dtype must also be accepted; the result is unused —
    # not raising is the check here.
    y = tvm.ir.make_node("IntImm", dtype=tvm.runtime.String("int32"), value=10, span=None)
def test_make_sum():
    """A reduction's combiner survives JSON serialization."""
    data = te.placeholder((2, 10), name="A")
    axis = te.reduce_axis((0, 10), "k")
    reduced = te.compute((2,), lambda i: te.sum(data[i, axis], axis=axis), name="B")
    reloaded = tvm.ir.load_json(tvm.ir.save_json(reduced))
    for tensor in (reduced, reloaded):
        assert tensor.op.body[0].combiner is not None
def test_env_func():
    """EnvFunc: lookup by global name, JSON round trip, and use as an attr value."""
    @tvm.register_func("test.env_func")
    def test(x):
        return x + 1
    f = tvm.get_global_func("test.env_func")
    x = tvm.ir.EnvFunc.get("test.env_func")
    assert x.name == "test.env_func"
    # An EnvFunc serializes by name and remains callable after reload.
    json_str = tvm.ir.save_json([x])
    y = tvm.ir.load_json(json_str)[0]
    assert y.name == x.name
    assert y(1) == 2
    assert y.func(1) == 2
    # It can also be stored inside node attributes and survive JSON.
    x = tvm.ir.make_node("attrs.TestAttrs", name="xx", padding=(3, 4), func=y)
    assert x.name == "xx"
    assert x.padding[0].value == 3
    assert x.padding[1].value == 4
    assert x.axis == 10
    x = tvm.ir.load_json(tvm.ir.save_json(x))
    assert isinstance(x.func, tvm.ir.EnvFunc)
    assert x.func(10) == 11
def test_string():
    """String round trips: non-printable content is stored via b64,
    printable content via repr — both must reload structurally equal."""
    for raw in ("xy\x01z", "xyz"):
        original = tvm.runtime.String(raw)
        reloaded = tvm.ir.load_json(tvm.ir.save_json(original))
        tvm.ir.assert_structural_equal(original, reloaded)
def test_pass_config():
    """PassContext carries typed config entries with defaults and schema checks."""
    cfg = tvm.transform.PassContext(
        opt_level=1,
        config={
            "tir.UnrollLoop": {
                "auto_max_step": 10,
            }
        },
    )
    # Fixed: this was previously the bare expression `cfg.opt_level == 1`,
    # a no-op comparison whose result was discarded, so opt_level was never
    # actually verified.
    assert cfg.opt_level == 1
    assert cfg.config["tir.UnrollLoop"].auto_max_step == 10
    # Unspecified options fall back to their registered defaults.
    assert cfg.config["tir.UnrollLoop"].explicit_unroll == True
    # schema checking for specific config key
    with pytest.raises(AttributeError):
        cfg = tvm.transform.PassContext(config={"tir.UnrollLoop": {"invalid": 1}})
    # schema check for un-registered config
    with pytest.raises(AttributeError):
        cfg = tvm.transform.PassContext(config={"inavlid-opt": True})
    # schema check for wrong type
    with pytest.raises(AttributeError):
        cfg = tvm.transform.PassContext(config={"tir.UnrollLoop": 1})
def test_dict():
    """Instances of TVM objects expose every attribute their class defines."""
    const_node = tvm.tir.const(1)  # a class that has Python-defined methods
    class_attrs = set(dir(type(const_node)))
    instance_attrs = set(dir(const_node))
    assert class_attrs.issubset(instance_attrs)
def test_ndarray():
    """NDArray contents survive a JSON save/load round trip."""
    arr = tvm.nd.array(np.random.rand(4), device=tvm.cpu(0))
    reloaded = tvm.ir.load_json(tvm.ir.save_json(arr))
    tvm.ir.assert_structural_equal(arr, reloaded)
    np.testing.assert_array_equal(arr.numpy(), reloaded.numpy())
def test_ndarray_dict():
    """A dict of NDArrays round trips through JSON structurally intact."""
    dev = tvm.cpu(0)
    original = {key: tvm.nd.array(np.random.rand(4), device=dev) for key in ("key1", "key2")}
    reloaded = tvm.ir.load_json(tvm.ir.save_json(original))
    tvm.ir.assert_structural_equal(original, reloaded)
def test_alloc_const():
    """AllocateConst nodes keep their backing NDArray through a JSON round trip."""
    dev = tvm.cpu(0)
    dtype = "float32"
    shape = (16,)
    buf = tvm.tir.decl_buffer(shape, dtype)
    np_data = np.random.rand(*shape).astype(dtype)
    data = tvm.nd.array(np_data, device=dev)
    body = tvm.tir.Evaluate(0)
    alloc_const = tvm.tir.AllocateConst(buf.data, dtype, shape, data, body)
    alloc_const2 = tvm.ir.load_json(tvm.ir.save_json(alloc_const))
    tvm.ir.assert_structural_equal(alloc_const, alloc_const2)
    # The constant payload itself must match element-for-element.
    np.testing.assert_array_equal(np_data, alloc_const2.data.numpy())
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 6,316 | 30.585 | 90 | py |
tvm | tvm-main/tests/python/unittest/test_auto_scheduler_cost_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test cost models"""
import tempfile
import numpy as np
import tvm
from tvm import auto_scheduler
from tvm.testing.auto_scheduler import matmul_auto_scheduler_test
def get_sample_records(number):
    """Build `number` random (MeasureInput, MeasureResult) pairs for a matmul task."""
    size = 128
    task = auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test, args=(size, size, size), target="llvm"
    )
    policy = auto_scheduler.SketchPolicy(task, verbose=0)
    states = policy.sample_initial_population()[:number]
    inputs = [auto_scheduler.MeasureInput(task, state) for state in states]
    # Fake measurement results with a random cost in [0.5, 1.0).
    results = [
        auto_scheduler.MeasureResult([np.random.uniform(0.5, 1.0)], 0, "", 0.1, 0)
        for _ in inputs
    ]
    return task, inputs, results
def test_random_model():
    """RandomModel.predict must return one score per queried state."""
    task, inputs, results = get_sample_records(50)
    model = auto_scheduler.RandomModel()
    model.update(inputs, results)
    states = [inp.state for inp in inputs]
    assert len(model.predict(task, states)) == len(inputs)
def test_xgb_model():
    """Train XGBModel on random records; check prediction count, regression
    quality, record-file updates, and save/load round trip."""
    task, inputs, results = get_sample_records(50)
    model = auto_scheduler.XGBModel(num_warmup_sample=-1)
    model.update(inputs, results)
    preds = model.predict(task, [x.state for x in inputs])
    assert len(preds) == len(inputs)
    # Labels are throughputs normalized to the fastest measurement.
    costs = [np.mean([x.value for x in res.costs]) for res in results]
    throughputs = np.min(costs) / costs
    # test regression quality
    rmse = np.sqrt(np.mean([np.square(pred - label) for pred, label in zip(preds, throughputs)]))
    assert rmse <= 0.3
    # test loading a record file
    tmpdir = tvm.contrib.utils.tempdir()
    tmpfile = tmpdir.relpath("test1")
    auto_scheduler.save_records(tmpfile, inputs, results)
    model.update_from_file(tmpfile)
    # test model serialization
    tmpfile = tmpdir.relpath("test2")
    model.save(tmpfile)
    model.load(tmpfile)
# Run every test when invoked directly as a script.
if __name__ == "__main__":
    test_random_model()
    test_xgb_model()
| 2,714 | 30.941176 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_autotvm_graph_tuner_core.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# NOTE: We name this test file to start with test_graph_tuner
# to make it execute after zero_rank tensor test cases. This
# helps avoid topi arithmetic operator overloading issue:
# https://github.com/apache/tvm/issues/3240.
# TODO: restore the file name after this issue is resolved.
import os
import copy
import numpy as np
import tvm
from tvm import te
import tvm.relay.testing
from tvm import autotvm
from tvm import relay
from tvm.autotvm.task import ConfigEntity
from tvm.autotvm.measure import MeasureResult, MeasureInput
from tvm.autotvm.graph_tuner import DPTuner, PBQPTuner
def _create_args(dshape, kshape, strides, padding, dilation, layout, out_layout, dtype, out_dtype):
    """Serialize conv2d autotvm task arguments for the given shapes/layouts.

    Fixed: the ``out_layout`` argument was previously ignored and ``layout``
    was passed twice to serialize_args. Behavior is unchanged for existing
    callers, which all pass identical input and output layouts.
    """
    data = tvm.te.placeholder(dshape, dtype=dtype)
    kernel = tvm.te.placeholder(kshape, dtype=dtype)
    return autotvm.task.serialize_args(
        [data, kernel, strides, padding, dilation, layout, out_layout, out_dtype]
    )
def _create_data(target, dshape, dtype, layout):
    """Build a 3-conv relay network plus synthetic tuning records.

    Returns (net, records, ltf_records, ltf_keys, tasks): the relay function,
    one (MeasureInput, MeasureResult) per conv task, one layout-transform
    record, a list of layout-transform workload keys, and the extracted tasks.
    """
    # Network: conv0 -> conv1 -> conv2, with conv1 + conv2 added at the end.
    data = relay.var("data", shape=dshape, dtype=dtype)
    w0 = relay.var("w0_weight")
    conv0 = relay.nn.conv2d(data, w0, channels=16, kernel_size=(3, 3), padding=(1, 1))
    w1 = relay.var("w1_weight")
    conv1 = relay.nn.conv2d(conv0, w1, channels=32, kernel_size=(1, 1))
    w2 = relay.var("w2_weight")
    conv2 = relay.nn.conv2d(conv1, w2, channels=32, kernel_size=(3, 3), padding=(1, 1))
    out = relay.add(conv1, conv2)
    net = relay.Function(relay.analysis.free_vars(out), out)
    mod, params = relay.testing.create_workload(net)
    tasks = autotvm.task.extract_from_program(
        mod["main"], target=target, params=params, ops=(relay.op.get("nn.conv2d"),)
    )
    # Serialized arguments for each of the three conv layers.
    new_args = [
        _create_args(
            (1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype
        ),
        _create_args(
            (1, 16, 8, 8),
            (32, 16, 1, 1),
            (1, 1),
            (0, 0, 0, 0),
            (1, 1),
            layout,
            layout,
            dtype,
            dtype,
        ),
        _create_args(
            (1, 32, 8, 8),
            (32, 32, 3, 3),
            (1, 1),
            (1, 1, 1, 1),
            (1, 1),
            layout,
            layout,
            dtype,
            dtype,
        ),
    ]
    # Synthetic per-layer costs and hand-written tiling configs.
    costs = [0.04, 0.012, 0.03]
    config_list = []
    cfg_dict = {
        "index": -1,
        "code_hash": None,
        "entity": [
            ["tile_ic", "sp", [3, 1]],
            ["tile_oc", "sp", [4, 4]],
            ["tile_ow", "sp", [4, 2]],
            ["unroll_kw", "ot", True],
        ],
    }
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    cfg_dict = {
        "index": -1,
        "code_hash": None,
        "entity": [
            ["tile_ic", "sp", [2, 8]],
            ["tile_oc", "sp", [1, 32]],
            ["tile_oh", "ot", 1],
            ["tile_ow", "sp", [4, 2]],
        ],
    }
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    cfg_dict = {
        "index": -1,
        "code_hash": None,
        "entity": [
            ["tile_ic", "sp", [8, 4]],
            ["tile_oc", "sp", [4, 8]],
            ["tile_ow", "sp", [2, 4]],
            ["unroll_kw", "ot", False],
        ],
    }
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    records = []
    for args, cost, config, task in zip(new_args, costs, config_list, tasks):
        task.args = args
        ms_input = MeasureInput(target=target, task=task, config=config)
        ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
        records.append((ms_input, ms_output))
    # One measured layout-transform record used to infer the others.
    ltf_records = []
    ltf_arg = [te.placeholder((1, 64, 16, 16, 8), dtype=dtype), "NCHW8c", "NCHW512c"]
    ltf_task = autotvm.task.create("layout_transform", ltf_arg, target)
    ms_input = MeasureInput(target=target, task=ltf_task, config=None)
    ms_output = MeasureResult(costs=(1.91224744e-05,), error_no=0, all_cost=-1, timestamp=-1)
    ltf_records.append((ms_input, ms_output))
    # Workload keys for the layout transforms the graph tuner may insert.
    ltf_keys = []
    ltf_arg = [te.placeholder((1, 4, 8, 8, 4), dtype=dtype), "NCHW4c", "NCHW8c"]
    ltf_wkl = autotvm.task.args_to_workload(ltf_arg, "layout_transform")
    ltf_keys.append(ltf_wkl)
    ltf_arg = [te.placeholder((1, 1, 8, 8, 32), dtype=dtype), "NCHW32c", "NCHW4c"]
    ltf_wkl = autotvm.task.args_to_workload(ltf_arg, "layout_transform")
    ltf_keys.append(ltf_wkl)
    ltf_arg = [te.placeholder((1, 4, 8, 8, 8), dtype=dtype), "NCHW8c", "NCHW32c"]
    ltf_wkl = autotvm.task.args_to_workload(ltf_arg, "layout_transform")
    ltf_keys.append(ltf_wkl)
    return net, records, ltf_records, ltf_keys, tasks
def test_graph_tuner_layout_transform():
    """With infer_layout=True, every inferred layout-transform time must equal
    flops * (average measured time per element) from the seed records."""
    log_file = "%s/test_tuner.log" % (os.getcwd())
    target = "llvm"
    dshape = (1, 3, 8, 8)
    dtype = "float32"
    layout = "NCHW"
    conv2d = relay.op.get("nn.conv2d")
    target_ops = [conv2d]
    g, records, ltf_records, ltf_keys, _ = _create_data(target, dshape, dtype, layout)
    executor = DPTuner(g, {"data": dshape}, records, target_ops, target=target, log_file=log_file)
    executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
    out = executor._layout_transform_perf_records
    # Average per-element cost over the seed records.
    num_flops = 0
    total_time = 0
    for record in ltf_records:
        ltf_wkl = record[0].task.workload
        input_shape = ltf_wkl[1][1]
        flops = np.prod(input_shape)
        num_flops += flops
        total_time += record[1].costs[0]
    avg_time = total_time / num_flops
    for ltf_workload in out:
        input_shape = ltf_workload[1][1]
        flops = 1
        for i in input_shape:
            flops *= i
        expected_time = flops * avg_time
        out_time = out[ltf_workload][1].costs[0]
        assert (
            expected_time == out_time
        ), "Inferred layout transformation time mismatch for %s: " "expecting %f but got %f" % (
            str(ltf_workload),
            expected_time,
            out_time,
        )
def test_graph_tuner_layout_transform_runner():
    """Same inference check as test_graph_tuner_layout_transform, but with an
    explicit LocalRunner passed to benchmark_layout_transform."""
    log_file = "%s/test_tuner.log" % (os.getcwd())
    target = "llvm"
    dshape = (1, 3, 8, 8)
    dtype = "float32"
    layout = "NCHW"
    conv2d = relay.op.get("nn.conv2d")
    target_ops = [conv2d]
    g, records, ltf_records, ltf_keys, _ = _create_data(target, dshape, dtype, layout)
    executor = DPTuner(g, {"data": dshape}, records, target_ops, target=target, log_file=log_file)
    runner = autotvm.LocalRunner(number=100, repeat=1, timeout=10)
    executor.benchmark_layout_transform(
        layout_records=ltf_records, infer_layout=True, runner=runner
    )
    out = executor._layout_transform_perf_records
    # Average per-element cost over the seed records.
    num_flops = 0
    total_time = 0
    for record in ltf_records:
        ltf_wkl = record[0].task.workload
        input_shape = ltf_wkl[1][1]
        flops = np.prod(input_shape)
        num_flops += flops
        total_time += record[1].costs[0]
    avg_time = total_time / num_flops
    for ltf_workload in out:
        input_shape = ltf_workload[1][1]
        flops = 1
        for i in input_shape:
            flops *= i
        expected_time = flops * avg_time
        out_time = out[ltf_workload][1].costs[0]
        assert (
            expected_time == out_time
        ), "Inferred layout transformation time mismatch for %s: " "expecting %f but got %f" % (
            str(ltf_workload),
            expected_time,
            out_time,
        )
def test_DPTuner_run():
    """DPTuner must pick the globally-best configs (the cheaper duplicates
    appended below) and write its log file."""
    log_file = "%s/test_tuner.log" % (os.getcwd())
    target = "llvm"
    dtype = "float32"
    layout = "NCHW"
    dshape = (1, 3, 8, 8)
    conv2d = relay.op.get("nn.conv2d")
    target_ops = [conv2d]
    g, records, ltf_records, ltf_keys, tasks = _create_data(target, dshape, dtype, layout)
    mod = tvm.IRModule()
    mod["main"] = g
    # A second, cheaper set of configs for the same three tasks.
    costs = [0.02, 0.02, 0.045]
    config_list = []
    cfg_dict = {
        "index": -1,
        "code_hash": None,
        "entity": [
            ["tile_ic", "sp", [1, 3]],
            ["tile_oc", "sp", [2, 8]],
            ["tile_ow", "sp", [4, 2]],
            ["unroll_kw", "ot", True],
        ],
    }
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    cfg_dict = {
        "index": -1,
        "code_hash": None,
        "entity": [
            ["tile_ic", "sp", [4, 4]],
            ["tile_oc", "sp", [2, 16]],
            ["tile_oh", "ot", 1],
            ["tile_ow", "sp", [4, 2]],
        ],
    }
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    cfg_dict = {
        "index": -1,
        "code_hash": None,
        "entity": [
            ["tile_ic", "sp", [16, 2]],
            ["tile_oc", "sp", [8, 4]],
            ["tile_ow", "sp", [2, 4]],
            ["unroll_kw", "ot", False],
        ],
    }
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    for cost, config, task in zip(costs, config_list, tasks):
        ms_input = MeasureInput(target=target, task=task, config=config)
        ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
        records.append((ms_input, ms_output))
    executor = DPTuner(mod, {"data": dshape}, records, target_ops, target, log_file=log_file)
    executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
    executor.run()
    out = [record[0].config for record in executor.get_optimal_records()]
    expected_out = [records[3][0].config, records[1][0].config, records[2][0].config]
    assert expected_out == out, "Output mismatch: expecting %s but got %s" % (
        str(expected_out),
        str(out),
    )
    assert os.path.isfile(log_file), "No log file with name %s exists." % log_file
def test_PBQPTuner_run():
    """PBQPTuner must pick the same optimal configs as DPTuner on this graph."""
    target = "llvm"
    dtype = "float32"
    layout = "NCHW"
    dshape = (1, 3, 8, 8)
    conv2d = relay.op.get("nn.conv2d")
    target_ops = [conv2d]
    g, records, ltf_records, ltf_keys, tasks = _create_data(target, dshape, dtype, layout)
    # A second, cheaper set of configs for the same three tasks.
    costs = [0.02, 0.02, 0.045]
    config_list = []
    cfg_dict = {
        "index": -1,
        "code_hash": None,
        "entity": [
            ["tile_ic", "sp", [1, 3]],
            ["tile_oc", "sp", [2, 8]],
            ["tile_ow", "sp", [4, 2]],
            ["unroll_kw", "ot", True],
        ],
    }
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    cfg_dict = {
        "index": -1,
        "code_hash": None,
        "entity": [
            ["tile_ic", "sp", [4, 4]],
            ["tile_oc", "sp", [2, 16]],
            ["tile_oh", "ot", 1],
            ["tile_ow", "sp", [4, 2]],
        ],
    }
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    cfg_dict = {
        "index": -1,
        "code_hash": None,
        "entity": [
            ["tile_ic", "sp", [16, 2]],
            ["tile_oc", "sp", [8, 4]],
            ["tile_ow", "sp", [2, 4]],
            ["unroll_kw", "ot", False],
        ],
    }
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    for cost, config, task in zip(costs, config_list, tasks):
        ms_input = MeasureInput(target=target, task=task, config=config)
        ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
        records.append((ms_input, ms_output))
    executor = PBQPTuner(g, {"data": dshape}, records, target_ops, target)
    executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
    executor.run()
    out = [record[0].config for record in executor.get_optimal_records()]
    expected_out = [records[3][0].config, records[1][0].config, records[2][0].config]
    assert expected_out == out, "Output mismatch: expecting %s but got %s" % (
        str(expected_out),
        str(out),
    )
def test_many_sub_graphs():
    """Graph tuning across a network split into several conv sub-graphs by
    transpose ops: both DPTuner and PBQPTuner must choose the cheaper of the
    two record sets for every task."""
    target = "llvm"
    dtype = "float32"
    dshape = (1, 8, 8, 3)
    layout = "NCHW"
    conv2d = relay.op.get("nn.conv2d")
    target_ops = [conv2d]
    # NHWC input with explicit transposes around every NCHW conv, so each conv
    # lands in its own sub-graph for the graph tuner.
    data = relay.var("data", shape=dshape, dtype=dtype)
    t0 = relay.transpose(data, (0, 3, 1, 2))
    w0 = relay.var("w0_weight")
    conv0 = relay.nn.conv2d(t0, w0, channels=16, kernel_size=(3, 3), padding=(1, 1))
    t1 = relay.transpose(conv0, (0, 2, 3, 1))
    w1 = relay.var("w1_weight")
    t2 = relay.transpose(t1, (0, 3, 1, 2))
    conv1 = relay.nn.conv2d(t2, w1, channels=32, kernel_size=(1, 1))
    t3 = relay.transpose(conv1, (0, 2, 3, 1))
    w2 = relay.var("w2_weight")
    t4 = relay.transpose(t3, (0, 3, 1, 2))
    conv2 = relay.nn.conv2d(t4, w2, channels=32, kernel_size=(3, 3), padding=(1, 1))
    t5 = relay.transpose(conv2, (0, 2, 3, 1))
    out = relay.add(t3, t5)
    net = relay.Function(relay.analysis.free_vars(out), out)
    net, params = relay.testing.create_workload(net)
    tasks = autotvm.task.extract_from_program(
        net["main"], target=target, params=params, ops=(conv2d,)
    )
    new_args = [
        _create_args(
            (1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype
        ),
        _create_args(
            (1, 16, 8, 8),
            (32, 16, 1, 1),
            (1, 1),
            (0, 0, 0, 0),
            (1, 1),
            layout,
            layout,
            dtype,
            dtype,
        ),
        _create_args(
            (1, 32, 8, 8),
            (32, 32, 3, 3),
            (1, 1),
            (1, 1, 1, 1),
            (1, 1),
            layout,
            layout,
            dtype,
            dtype,
        ),
    ]
    # First three costs/configs are the expensive set, last three the cheap set.
    costs = [0.04, 0.012, 0.03, 0.02, 0.02, 0.045]
    config_list = []
    cfg_dict = {
        "index": -1,
        "code_hash": None,
        "entity": [
            ["tile_ic", "sp", [3, 1]],
            ["tile_oc", "sp", [4, 4]],
            ["tile_ow", "sp", [4, 2]],
            ["unroll_kw", "ot", True],
        ],
    }
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    cfg_dict = {
        "index": -1,
        "code_hash": None,
        "entity": [
            ["tile_ic", "sp", [2, 8]],
            ["tile_oc", "sp", [1, 32]],
            ["tile_oh", "ot", 1],
            ["tile_ow", "sp", [4, 2]],
        ],
    }
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    cfg_dict = {
        "index": -1,
        "code_hash": None,
        "entity": [
            ["tile_ic", "sp", [8, 4]],
            ["tile_oc", "sp", [4, 8]],
            ["tile_ow", "sp", [2, 4]],
            ["unroll_kw", "ot", False],
        ],
    }
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    cfg_dict = {
        "index": -1,
        "code_hash": None,
        "entity": [
            ["tile_ic", "sp", [1, 3]],
            ["tile_oc", "sp", [2, 8]],
            ["tile_ow", "sp", [4, 2]],
            ["unroll_kw", "ot", True],
        ],
    }
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    cfg_dict = {
        "index": -1,
        "code_hash": None,
        "entity": [
            ["tile_ic", "sp", [4, 4]],
            ["tile_oc", "sp", [2, 16]],
            ["tile_oh", "ot", 1],
            ["tile_ow", "sp", [4, 2]],
        ],
    }
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    cfg_dict = {
        "index": -1,
        "code_hash": None,
        "entity": [
            ["tile_ic", "sp", [16, 2]],
            ["tile_oc", "sp", [8, 4]],
            ["tile_ow", "sp", [2, 4]],
            ["unroll_kw", "ot", False],
        ],
    }
    config_list.append(ConfigEntity.from_json_dict(cfg_dict))
    records = []
    # Duplicate args/tasks so each task gets both an expensive and a cheap record.
    new_args = new_args + new_args
    tasks = tasks + tasks
    for args, cost, config, task in zip(new_args, costs, config_list, tasks):
        task.args = args
        ms_input = MeasureInput(target=target, task=task, config=config)
        ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
        records.append((ms_input, ms_output))
    ltf_records = []
    ltf_arg = [te.placeholder((1, 64, 16, 16, 8), dtype=dtype), "NCHW8c", "NCHW512c"]
    ltf_task = autotvm.task.create("layout_transform", ltf_arg, target)
    ms_input = MeasureInput(target=target, task=ltf_task, config=None)
    ms_output = MeasureResult(costs=(1.91224744e-05,), error_no=0, all_cost=-1, timestamp=-1)
    ltf_records.append((ms_input, ms_output))
    executor = DPTuner(net, {"data": dshape}, records, target_ops, target)
    executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
    executor.run()
    out = [record[0].config for record in executor.get_optimal_records()]
    expected_out = [records[3][0].config, records[1][0].config, records[2][0].config]
    assert expected_out == out, "Output mismatch: expecting %s but got %s" % (
        str(expected_out),
        str(out),
    )
    executor = PBQPTuner(net, {"data": dshape}, records, target_ops, target)
    executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
    executor.run()
    out = [record[0].config for record in executor.get_optimal_records()]
    expected_out = [records[3][0].config, records[1][0].config, records[2][0].config]
    assert expected_out == out, "Output mismatch: expecting %s but got %s" % (
        str(expected_out),
        str(out),
    )
def test_tuple():
    """Graph-tune a two-branch conv2d network joined by a tuple (concatenate).

    Two conv2d ops share the same input and are concatenated; fake tuning
    records (two configs per task) are built, then both DPTuner and PBQPTuner
    must pick the expected config for each task.
    """
    target = "llvm"
    dtype = "float32"
    dshape = (1, 5, 32, 32)
    layout = "NCHW"
    conv2d = relay.op.get("nn.conv2d")
    target_ops = [conv2d]

    # Two convolutions over the same data, joined by a tuple node.
    data = relay.var("data", shape=dshape, dtype=dtype)
    w0 = relay.var("w0_weight")
    conv0 = relay.nn.conv2d(data, w0, channels=2, kernel_size=(3, 3), padding=(1, 1))
    w1 = relay.var("w1_weight")
    conv1 = relay.nn.conv2d(data, w1, channels=3, kernel_size=(3, 3), padding=(1, 1))
    out = relay.concatenate([conv0, conv1], axis=1)
    net = relay.Function(relay.analysis.free_vars(out), out)
    net, params = relay.testing.create_workload(net)
    tasks = autotvm.task.extract_from_program(
        net["main"], target=target, params=params, ops=(conv2d,)
    )
    new_args = [
        _create_args(
            (1, 5, 32, 32), (2, 5, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype
        ),
        _create_args(
            (1, 5, 32, 32), (3, 5, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype
        ),
    ]
    costs = [0.01, 0.012, 0.03, 0.04]
    # Build the ConfigEntity objects from entity specs in one loop instead of
    # four hand-written dict literals sharing the same index/code_hash fields.
    entities = [
        [
            ["tile_ic", "sp", [1, 5]],
            ["tile_oc", "sp", [1, 2]],
            ["tile_ow", "sp", [4, 8]],
            ["unroll_kw", "ot", True],
        ],
        [
            ["tile_ic", "sp", [1, 5]],
            ["tile_oc", "sp", [1, 3]],
            ["tile_ow", "sp", [2, 16]],
            ["unroll_kw", "ot", False],
        ],
        [
            ["tile_ic", "sp", [1, 5]],
            ["tile_oc", "sp", [2, 1]],
            ["tile_ow", "sp", [4, 8]],
            ["unroll_kw", "ot", True],
        ],
        [
            ["tile_ic", "sp", [1, 5]],
            ["tile_oc", "sp", [3, 1]],
            ["tile_ow", "sp", [2, 16]],
            ["unroll_kw", "ot", False],
        ],
    ]
    config_list = [
        ConfigEntity.from_json_dict({"index": -1, "code_hash": None, "entity": entity})
        for entity in entities
    ]

    # Each task appears twice (once per candidate config); task.args is simply
    # overwritten on the second pass, matching the original new_args/tasks
    # duplication.
    records = []
    for args, cost, config, task in zip(new_args + new_args, costs, config_list, tasks + tasks):
        task.args = args
        ms_input = MeasureInput(target=target, task=task, config=config)
        ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
        records.append((ms_input, ms_output))

    # Single layout-transform benchmark record reused for every transform.
    ltf_records = []
    ltf_arg = [te.placeholder((1, 64, 16, 16, 8), dtype=dtype), "NCHW8c", "NCHW512c"]
    ltf_task = autotvm.task.create("layout_transform", ltf_arg, target)
    ms_input = MeasureInput(target=target, task=ltf_task, config=None)
    ms_output = MeasureResult(costs=(1.91224744e-05,), error_no=0, all_cost=-1, timestamp=-1)
    ltf_records.append((ms_input, ms_output))

    expected_out = [records[2][0].config, records[1][0].config]

    def _run_and_check(tuner_cls):
        # Both tuners get identical setup and verification; only the class
        # under test differs.
        executor = tuner_cls(net, {"data": dshape}, records, target_ops, target)
        executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
        executor.run()
        out = [record[0].config for record in executor.get_optimal_records()]
        assert expected_out == out, "Output mismatch: expecting %s but got %s" % (
            str(expected_out),
            str(out),
        )

    _run_and_check(DPTuner)
    _run_and_check(PBQPTuner)
def test_triangle_block():
    """Graph-tune a triangle-shaped conv2d network.

    conv0 feeds both conv1 and the final concatenate, conv2 branches directly
    off the input; all three outputs are concatenated. Fake tuning records
    (two configs per task) are built, then both DPTuner and PBQPTuner must
    pick the expected config for each task.
    """
    target = "llvm"
    dtype = "float32"
    dshape = (1, 3, 8, 8)
    layout = "NCHW"
    conv2d = relay.op.get("nn.conv2d")
    target_ops = [conv2d]

    # Triangle topology: data -> conv0 -> conv1, data -> conv2,
    # concatenate(conv0, conv1, conv2).
    data = relay.var("data", shape=dshape, dtype=dtype)
    w0 = relay.var("w0_weight")
    conv0 = relay.nn.conv2d(data, w0, channels=16, kernel_size=(3, 3), padding=(1, 1))
    w1 = relay.var("w1_weight")
    conv1 = relay.nn.conv2d(conv0, w1, channels=32, kernel_size=(1, 1))
    w2 = relay.var("w2_weight")
    conv2 = relay.nn.conv2d(data, w2, channels=32, kernel_size=(3, 3), padding=(1, 1))
    out = relay.concatenate([conv0, conv1, conv2], axis=1)
    net = relay.Function(relay.analysis.free_vars(out), out)
    net, params = relay.testing.create_workload(net)
    tasks = autotvm.task.extract_from_program(
        net["main"], target=target, params=params, ops=(conv2d,)
    )
    new_args = [
        _create_args(
            (1, 3, 8, 8), (16, 3, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype
        ),
        _create_args(
            (1, 16, 8, 8),
            (32, 16, 1, 1),
            (1, 1),
            (0, 0, 0, 0),
            (1, 1),
            layout,
            layout,
            dtype,
            dtype,
        ),
        _create_args(
            (1, 3, 8, 8), (32, 3, 3, 3), (1, 1), (1, 1, 1, 1), (1, 1), layout, layout, dtype, dtype
        ),
    ]
    costs = [0.04, 0.012, 0.03, 0.02, 0.02, 0.045]
    # Build the ConfigEntity objects from entity specs in one loop instead of
    # six hand-written dict literals sharing the same index/code_hash fields.
    entities = [
        [
            ["tile_ic", "sp", [3, 1]],
            ["tile_oc", "sp", [4, 4]],
            ["tile_ow", "sp", [4, 2]],
            ["unroll_kw", "ot", True],
        ],
        [
            ["tile_ic", "sp", [2, 8]],
            ["tile_oc", "sp", [1, 32]],
            ["tile_oh", "ot", 1],
            ["tile_ow", "sp", [4, 2]],
        ],
        [
            ["tile_ic", "sp", [8, 4]],
            ["tile_oc", "sp", [4, 8]],
            ["tile_ow", "sp", [2, 4]],
            ["unroll_kw", "ot", False],
        ],
        [
            ["tile_ic", "sp", [1, 3]],
            ["tile_oc", "sp", [2, 8]],
            ["tile_ow", "sp", [4, 2]],
            ["unroll_kw", "ot", True],
        ],
        [
            ["tile_ic", "sp", [4, 4]],
            ["tile_oc", "sp", [2, 16]],
            ["tile_oh", "ot", 1],
            ["tile_ow", "sp", [4, 2]],
        ],
        [
            ["tile_ic", "sp", [16, 2]],
            ["tile_oc", "sp", [8, 4]],
            ["tile_ow", "sp", [2, 4]],
            ["unroll_kw", "ot", False],
        ],
    ]
    config_list = [
        ConfigEntity.from_json_dict({"index": -1, "code_hash": None, "entity": entity})
        for entity in entities
    ]

    # Each task appears twice (once per candidate config); task.args is simply
    # overwritten on the second pass, matching the original new_args/tasks
    # duplication.
    records = []
    for args, cost, config, task in zip(new_args + new_args, costs, config_list, tasks + tasks):
        task.args = args
        ms_input = MeasureInput(target=target, task=task, config=config)
        ms_output = MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
        records.append((ms_input, ms_output))

    # Single layout-transform benchmark record reused for every transform.
    ltf_records = []
    ltf_arg = [te.placeholder((1, 64, 16, 16, 8), dtype=dtype), "NCHW8c", "NCHW512c"]
    ltf_task = autotvm.task.create("layout_transform", ltf_arg, target)
    ms_input = MeasureInput(target=target, task=ltf_task, config=None)
    ms_output = MeasureResult(costs=(1.91224744e-05,), error_no=0, all_cost=-1, timestamp=-1)
    ltf_records.append((ms_input, ms_output))

    expected_out = [records[3][0].config, records[1][0].config, records[2][0].config]

    def _run_and_check(tuner_cls):
        # Both tuners get identical setup and verification; only the class
        # under test differs.
        executor = tuner_cls(net, {"data": dshape}, records, target_ops, target)
        executor.benchmark_layout_transform(layout_records=ltf_records, infer_layout=True)
        executor.run()
        out = [record[0].config for record in executor.get_optimal_records()]
        assert expected_out == out, "Output mismatch: expecting %s but got %s" % (
            str(expected_out),
            str(out),
        )

    _run_and_check(DPTuner)
    _run_and_check(PBQPTuner)
# Entry point: run all graph-tuner tests when executed directly as a script.
if __name__ == "__main__":
    test_graph_tuner_layout_transform()
    test_DPTuner_run()
    test_PBQPTuner_run()
    test_many_sub_graphs()
    test_tuple()
    test_triangle_block()
| 27,234 | 33.650127 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_rfactor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import te, tir, topi
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
# 128x128 matmul (C = A * B^T) whose 128-way reduction axis is pre-split into
# nested 4 x 8 x 4 loops (vk = i2_outer*32 + i2_inner_outer*4 + i2_inner_inner).
@T.prim_func
def transformed_matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128, 128], dtype="float32")
    C = T.match_buffer(c, [128, 128], dtype="float32")
    for i0, i1, i2_outer, i2_inner_outer, i2_inner_inner in T.grid(128, 128, 4, 8, 4):
        with T.block("update"):
            vi, vj = T.axis.remap("SS", [i0, i1])
            vk = T.axis.R(128, i2_outer * 32 + i2_inner_outer * 4 + i2_inner_inner)
            T.reads([A[vi, vk], B[vj, vk]])
            T.writes([C[vi, vj]])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk])
# Same split-reduction matmul as transformed_matmul, but the update value is
# bound to a let variable (v_C) before being stored into C.
@T.prim_func
def transformed_matmul_with_let(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128, 128], dtype="float32")
    C = T.match_buffer(c, [128, 128], dtype="float32")
    for i0, i1, i2_outer, i2_inner_outer, i2_inner_inner in T.grid(128, 128, 4, 8, 4):
        with T.block("update"):
            vi, vj = T.axis.remap("SS", [i0, i1])
            vk = T.axis.R(128, i2_outer * 32 + i2_inner_outer * 4 + i2_inner_inner)
            T.reads([A[vi, vk], B[vj, vk]])
            T.writes([C[vi, vj]])
            with T.init():
                C[vi, vj] = 0.0
            v_C: T.float32 = C[vi, vj] + (A[vi, vk] * B[vj, vk])
            C[vi, vj] = v_C
# Expected IR after rfactor-ing the i2_inner_inner axis of transformed_matmul:
# partial sums go to C_rf[4, 128, 128], then a second reduction folds the
# factor axis back into C.
@T.prim_func
def matmul_rfactor(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128, 128], dtype="float32")
    C = T.match_buffer(c, [128, 128], dtype="float32")
    C_rf = T.alloc_buffer([4, 128, 128], dtype="float32")
    for i0, i1, i2_outer, i2_inner_outer, i2_inner_inner in T.grid(128, 128, 4, 8, 4):
        with T.block("update_rf"):
            vi2_inner_inner = T.axis.S(4, i2_inner_inner)
            vi = T.axis.S(128, i0)
            vj = T.axis.S(128, i1)
            vi2_outer = T.axis.R(4, i2_outer)
            vi2_inner_outer = T.axis.R(8, i2_inner_outer)
            with T.init():
                C_rf[vi2_inner_inner, vi, vj] = 0.0
            C_rf[vi2_inner_inner, vi, vj] = C_rf[vi2_inner_inner, vi, vj] + (
                A[vi, (((vi2_outer * 32) + (vi2_inner_outer * 4)) + vi2_inner_inner)]
                * B[vj, (((vi2_outer * 32) + (vi2_inner_outer * 4)) + vi2_inner_inner)]
            )
    for i0_1, i1_1, i2_inner_inner_1 in T.grid(128, 128, 4):
        with T.block("update"):
            vi2_inner_inner_1, vi_1, vj_1 = T.axis.remap("RSS", [i2_inner_inner_1, i0_1, i1_1])
            with T.init():
                C[vi_1, vj_1] = 0.0
            C[vi_1, vj_1] = C[vi_1, vj_1] + C_rf[vi2_inner_inner_1, vi_1, vj_1]
# The reduction block writes only a 128x128 sub-region of the 256x256
# intermediate C, while D reads all of C — presumably a negative case where
# the stage-pipeline requirement for rfactor is violated (TODO confirm in the
# corresponding test).
@T.prim_func
def matmul_not_stage_pipeline(a: T.handle, b: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, [256, 256])
    B = T.match_buffer(b, [256, 256])
    D = T.match_buffer(d, [256, 256])
    C = T.alloc_buffer([256, 256])
    for i, j, k in T.grid(128, 128, 128):
        with T.block("C"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
    for i, j in T.grid(256, 256):
        with T.block("D"):
            vi, vj = T.axis.remap("SS", [i, j])
            D[vi, vj] = C[vi, vj]
# Init writes C[vi, vj] but the update writes C[vj, vi] — the init and update
# access different buffer positions, so this reduction is malformed.
@T.prim_func
def matmul_not_same_buffer_access(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128, 128))
    C = T.match_buffer(c, (128, 128))
    for i, j, k in T.grid(128, 128, 128):
        with T.block("C"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vj, vi] = C[vj, vi] + A[vi, vk] * B[vk, vj]
# One loop nest whose innermost body contains two sibling reduction blocks
# (C and D) — the loops are shared by multiple children.
@T.prim_func
def matmul_loop_multiple_children(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    D = T.match_buffer(d, [128, 128])
    for k, i, j in T.grid(128, 128, 128):
        with T.block("C"):
            ck, ci, cj = T.axis.remap("RSS", [k, i, j])
            with T.init():
                C[ci, cj] = 0.0
            C[ci, cj] = C[ci, cj] + A[ci, ck] * B[ck, cj]
        with T.block("D"):
            dk, di, dj = T.axis.remap("RSS", [k, i, j])
            with T.init():
                D[di, dj] = 0.0
            D[di, dj] = D[di, dj] + B[di, dk] * A[dk, dj]
# Per-batch sum of squares: C[b] = sum over i, j of A[b, i, j]^2.
@T.prim_func
def square_sum(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    C = T.match_buffer(c, [16])
    for b0, i0, j0 in T.grid(16, 256, 256):
        with T.block("C"):
            b, i, j = T.axis.remap("SRR", [b0, i0, j0])
            with T.init():
                C[b] = 0.0
            C[b] = C[b] + A[b, i, j] * A[b, i, j]
# Expected IR after rfactor-ing the j axis of square_sum: partial sums in
# C_rf[16, 256], then a reduction over the factor axis into C.
@T.prim_func
def square_sum_rfactor(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    C = T.match_buffer(c, [16])
    C_rf = T.alloc_buffer([16, 256])
    for i0, i1, i2 in T.grid(16, 256, 256):
        with T.block("C_rf"):
            vi2, b, i = T.axis.remap("SSR", [i2, i0, i1])
            with T.init():
                C_rf[b, vi2] = 0.0
            C_rf[b, vi2] = C_rf[b, vi2] + (A[b, i, vi2] * A[b, i, vi2])
    for i0_1, i2_1 in T.grid(16, 256):
        with T.block("C"):
            vi2_1, b_1 = T.axis.remap("RS", [i2_1, i0_1])
            with T.init():
                C[b_1] = 0.0
            C[b_1] = C[b_1] + C_rf[b_1, vi2_1]
# Sum of squares with the two 256-way reduction loops fused into one 65536
# iterator (plus an extent-1 inner loop), followed by an elementwise sqrt.
@T.prim_func
def transformed_square_sum_square_root(a: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    D = T.match_buffer(d, [16])
    C = T.alloc_buffer([16])
    for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 65536, 1):
        with T.block("C"):
            b = T.axis.S(16, i0)
            i = T.axis.R(256, T.floordiv(i1_i2_fused_outer, 256))
            j = T.axis.R(256, T.floormod(i1_i2_fused_outer, 256))
            T.reads([A[b, i, j]])
            T.writes([C[b]])
            with T.init():
                C[b] = 0.0
            C[b] = C[b] + (A[b, i, j] * A[b, i, j])
    for i0_1 in T.serial(0, 16):
        with T.block("D"):
            b_1 = T.axis.S(16, i0_1)
            T.reads([C[b_1]])
            T.writes([D[b_1]])
            D[b_1] = T.sqrt(C[b_1], dtype="float32")
# Expected IR after rfactor-ing the extent-1 i1_i2_fused_inner axis of
# transformed_square_sum_square_root: C_rf[1, 16] partial sums, then a
# (trivial, single-iteration) reduction into C, then the sqrt stage.
@T.prim_func
def square_sum_square_root_rfactor(a: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    D = T.match_buffer(d, [16])
    C = T.alloc_buffer([16])
    C_rf = T.alloc_buffer([1, 16])
    for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 65536, 1):
        with T.block("C_rf"):
            vi1_i2_fused_inner, b = T.axis.remap("SS", [i1_i2_fused_inner, i0])
            i = T.axis.R(256, T.floordiv(i1_i2_fused_outer, 256))
            j = T.axis.R(256, T.floormod(i1_i2_fused_outer, 256))
            with T.init():
                C_rf[vi1_i2_fused_inner, b] = 0.0
            C_rf[vi1_i2_fused_inner, b] = C_rf[vi1_i2_fused_inner, b] + (A[b, i, j] * A[b, i, j])
    for i0_1, i1_i2_fused_inner_1 in T.grid(16, 1):
        with T.block("C"):
            vi1_i2_fused_inner_1, b_1 = T.axis.remap("RS", [i1_i2_fused_inner_1, i0_1])
            with T.init():
                C[b_1] = 0.0
            C[b_1] = C[b_1] + C_rf[vi1_i2_fused_inner_1, b_1]
    for i0_2 in T.serial(0, 16):
        with T.block("D"):
            b_2 = T.axis.S(16, i0_2)
            D[b_2] = T.sqrt(C[b_2], dtype="float32")
# Variant of transformed_square_sum_square_root where the extent-1 loop is the
# innermost of the grid (16, 65536, 1); no explicit reads/writes annotations.
@T.prim_func
def transformed_square_sum_square_root_factor_one_1(a: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    D = T.match_buffer(d, [16])
    C = T.alloc_buffer([16])
    for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 65536, 1):
        with T.block("C"):
            b = T.axis.S(16, i0)
            i = T.axis.R(256, T.floordiv(i1_i2_fused_outer, 256))
            j = T.axis.R(256, T.floormod(i1_i2_fused_outer, 256))
            with T.init():
                C[b] = 0.0
            C[b] = C[b] + (A[b, i, j] * A[b, i, j])
    for i0_1 in T.serial(0, 16):
        with T.block("D"):
            b_1 = T.axis.S(16, i0_1)
            D[b_1] = T.sqrt(C[b_1], dtype="float32")
# Expected IR after rfactor-ing the extent-1 innermost axis of
# transformed_square_sum_square_root_factor_one_1; the factor dim of C_rf is
# placed first ([1, 16]).
@T.prim_func
def square_sum_square_root_factor_one_1_rfactor(
    A: T.Buffer((16, 256, 256), "float32"), D: T.Buffer((16,), "float32")
) -> None:
    C = T.alloc_buffer([16], dtype="float32")
    C_rf = T.alloc_buffer([1, 16], dtype="float32")
    for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 65536, 1):
        with T.block("C_rf"):
            b = T.axis.spatial(16, i0)
            i = T.axis.reduce(256, i1_i2_fused_outer // 256)
            j = T.axis.reduce(256, i1_i2_fused_outer % 256)
            vi1_i2_fused_inner = T.axis.spatial(1, i1_i2_fused_inner)
            with T.init():
                C_rf[vi1_i2_fused_inner, b] = T.float32(0)
            C_rf[vi1_i2_fused_inner, b] = C_rf[vi1_i2_fused_inner, b] + A[b, i, j] * A[b, i, j]
    for i0, i1_i2_fused_inner in T.grid(16, 1):
        with T.block("C"):
            b, vi1_i2_fused_inner = T.axis.remap("SR", [i0, i1_i2_fused_inner])
            with T.init():
                C[b] = T.float32(0)
            C[b] = C[b] + C_rf[vi1_i2_fused_inner, b]
    for i0_1 in T.serial(16):
        with T.block("D"):
            b_1 = T.axis.spatial(16, i0_1)
            D[b_1] = T.sqrt(C[b_1], dtype="float32")
# Variant where the extent-1 loop is the middle of the grid (16, 1, 65536)
# and the 65536 fused reduction iterator is innermost.
@T.prim_func
def transformed_square_sum_square_root_factor_one_2(a: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    D = T.match_buffer(d, [16])
    C = T.alloc_buffer([16])
    for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 1, 65536):
        with T.block("C"):
            b = T.axis.S(16, i0)
            i = T.axis.R(256, T.floordiv(i1_i2_fused_inner, 256))
            j = T.axis.R(256, T.floormod(i1_i2_fused_inner, 256))
            with T.init():
                C[b] = 0.0
            C[b] = C[b] + (A[b, i, j] * A[b, i, j])
    for i0_1 in T.serial(0, 16):
        with T.block("D"):
            b_1 = T.axis.S(16, i0_1)
            D[b_1] = T.sqrt(C[b_1], dtype="float32")
# Expected IR after rfactor-ing the extent-1 middle axis of
# transformed_square_sum_square_root_factor_one_2; the factor dim of C_rf is
# placed last ([16, 1]).
@T.prim_func
def square_sum_square_root_factor_one_2_rfactor(
    A: T.Buffer((16, 256, 256), "float32"), D: T.Buffer((16,), "float32")
) -> None:
    C = T.alloc_buffer([16], dtype="float32")
    C_rf = T.alloc_buffer([16, 1], dtype="float32")
    for i0, i1_i2_fused_outer, i1_i2_fused_inner in T.grid(16, 1, 65536):
        with T.block("C_rf"):
            b = T.axis.spatial(16, i0)
            i = T.axis.reduce(256, i1_i2_fused_inner // 256)
            j = T.axis.reduce(256, i1_i2_fused_inner % 256)
            vi1_i2_fused_outer = T.axis.spatial(1, i1_i2_fused_outer)
            with T.init():
                C_rf[b, vi1_i2_fused_outer] = T.float32(0)
            C_rf[b, vi1_i2_fused_outer] = C_rf[b, vi1_i2_fused_outer] + A[b, i, j] * A[b, i, j]
    for i0, i1_i2_fused_outer in T.grid(16, 1):
        with T.block("C"):
            b, vi1_i2_fused_outer = T.axis.remap("SR", [i0, i1_i2_fused_outer])
            with T.init():
                C[b] = T.float32(0)
            C[b] = C[b] + C_rf[b, vi1_i2_fused_outer]
    for i0_1 in T.serial(16):
        with T.block("D"):
            b_1 = T.axis.spatial(16, i0_1)
            D[b_1] = T.sqrt(C[b_1], dtype="float32")
# square_sum with a block annotation ("test_annotation") attached to the
# reduction block.
@T.prim_func
def square_sum_with_annotation(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    C = T.match_buffer(c, [16])
    for b0, i0, j0 in T.grid(16, 256, 256):
        with T.block("C"):
            T.block_attr({"test_annotation": 1})
            b, i, j = T.axis.remap("SRR", [b0, i0, j0])
            with T.init():
                C[b] = 0.0
            C[b] = C[b] + A[b, i, j] * A[b, i, j]
# Expected IR after rfactor on square_sum_with_annotation: the block
# annotation is carried onto both the rfactor block and the final reduction.
@T.prim_func
def square_sum_with_annotation_rfactor(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [16, 256, 256])
    C = T.match_buffer(c, [16])
    C_rf = T.alloc_buffer([16, 256])
    for i0, i1, i2 in T.grid(16, 256, 256):
        with T.block("C_rf"):
            T.block_attr({"test_annotation": 1})
            vi2, b, i = T.axis.remap("SSR", [i2, i0, i1])
            with T.init():
                C_rf[b, vi2] = 0.0
            C_rf[b, vi2] = C_rf[b, vi2] + (A[b, i, vi2] * A[b, i, vi2])
    for i0_1, i2_1 in T.grid(16, 256):
        with T.block("C"):
            T.block_attr({"test_annotation": 1})
            vi2_1, b_1 = T.axis.remap("RS", [i2_1, i0_1])
            with T.init():
                C[b_1] = 0.0
            C[b_1] = C[b_1] + C_rf[b_1, vi2_1]
# Purely elementwise B = A * 2 — contains no reduction axis at all.
@T.prim_func
def element_wise(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128, 128))
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
# Row-wise sum: B[i] = sum over k of A[i, k].
@T.prim_func
def rowsum(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i, k in T.grid(128, 128):
        with T.block("B"):
            vi, vk = T.axis.remap("SR", [i, k])
            with T.init():
                B[vi] = 0.0
            B[vi] = B[vi] + A[vi, vk]
# Rowsum whose reduction binding vk = floordiv(k*k, 2) is quadratic in the
# loop variable, i.e. not a quasi-affine mapping.
@T.prim_func
def rowsum_not_quasi_affine(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i, k in T.grid(128, 16):
        with T.block("B"):
            vi = T.axis.S(128, i)
            vk = T.axis.R(128, T.floordiv(k * k, 2))
            with T.init():
                B[vi] = 0.0
            B[vi] = B[vi] + A[vi, vk]
# The output B[vi, vk] is indexed by the reduction variable vk, so the block
# does not actually reduce over vk (the reduction is not dominant).
@T.prim_func
def rowsum_not_dominant(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128, 128))
    for i, k in T.grid(128, 128):
        with T.block("B"):
            vi, vk = T.axis.remap("SR", [i, k])
            with T.init():
                B[vi, vk] = 0.0
            B[vi, vk] = B[vi, vk] + A[vi, vk]
# Rowsum whose reduction loop k is annotated parallel instead of serial.
@T.prim_func
def rowsum_not_serial(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i in T.serial(0, 128):
        for k in T.parallel(0, 128):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i, k])
                with T.init():
                    B[vi] = 0.0
                B[vi] = B[vi] + A[vi, vk]
# Rowsum whose init value is 1.0 — not the identity element of addition, so
# the (init, update) pair is not a recognized reduction pattern.
@T.prim_func
def rowsum_wrong_reduce_pattern1(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i, k in T.grid(128, 128):
        with T.block("B"):
            vi, vk = T.axis.remap("SR", [i, k])
            with T.init():
                B[vi] = 1.0
            B[vi] = B[vi] + A[vi, vk]
# The update uses subtraction (B - A), which is not a commutative reduction
# combiner.
@T.prim_func
def rowsum_wrong_reduce_pattern2(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i, k in T.grid(128, 128):
        with T.block("B"):
            vi, vk = T.axis.remap("SR", [i, k])
            with T.init():
                B[vi] = 0.0
            B[vi] = B[vi] - A[vi, vk]
# The init body is a LetStmt wrapping the store rather than a plain
# BufferStore.
@T.prim_func
def rowsum_init_not_bufferstore(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for i, k in T.grid(128, 128):
        with T.block("B"):
            vi, vk = T.axis.remap("SR", [i, k])
            with T.init():
                v_init: T.float32 = T.float32(0)
                B[vi] = v_init
            B[vi] = B[vi] + A[vi, vk]
# Rowsum after loop transformations: i split into (io, ii), ii fused with the
# outer reduction split ko, and the inner reduction split ki kept separate.
@T.prim_func
def rowsum_transformed(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128,))
    for io, ii_ko_fused, ki in T.grid(32, 128, 4):
        with T.block("B"):
            vi = T.axis.S(128, io * 4 + T.floordiv(ii_ko_fused, 32))
            vk = T.axis.R(128, T.floormod(ii_ko_fused, 32) * 4 + ki)
            with T.init():
                B[vi] = 0.0
            B[vi] = B[vi] + A[vi, vk]
# Reduction into a zero-dimensional (scalar) buffer: B[()] = sum of A.
@T.prim_func
def rowsum_zero_dim(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128])
    B = T.match_buffer(b, [])
    for k0 in range(128):
        with T.block("B"):
            k = T.axis.R(128, k0)
            with T.init():
                B[()] = 0.0
            B[()] = B[()] + A[k]
# Expected IR after rfactor on rowsum_zero_dim: the rfactor block degenerates
# to a copy (B_rf[vi0] = A[vi0]) since each factor holds one element, followed
# by the cross-factor reduction into the scalar B.
@T.prim_func
def rowsum_zero_dim_rfactor(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128])
    B = T.match_buffer(b, [])
    B_rf = T.alloc_buffer([128])
    for i in range(128):
        with T.block("B_rf"):
            vi0 = T.axis.S(128, i)
            B_rf[vi0] = A[vi0]
    for i in range(128):
        with T.block("B"):
            vi0_1 = T.axis.R(128, i)
            with T.init():
                B[()] = 0.0
            B[()] = B[()] + B_rf[vi0_1]
# Rowsum whose reduction axis is split into 13 x 10 with a T.where predicate
# masking the out-of-range tail (k_0 * 10 + k_1 < 128).
@T.prim_func
def rowsum_predicate(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i, k_0, k_1 in T.grid(128, 13, 10):
        with T.block("B"):
            T.where(k_0 * 10 + k_1 < 128)
            vi = T.axis.S(128, i)
            vk = T.axis.R(128, k_0 * 10 + k_1)
            with T.init():
                B[vi] = 0.0
            B[vi] = B[vi] + A[vi, vk]
# Expected IR after rfactor-ing k_0 of rowsum_predicate: the predicate stays
# on the rfactor block; the final reduction over the 13 factors needs none.
@T.prim_func
def rowsum_predicate_rfactor(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    B_rf = T.alloc_buffer([128, 13], dtype="float32")
    for i, k_0, k_1 in T.grid(128, 13, 10):
        with T.block("B_rf"):
            vk_0, vi, vk_1 = T.axis.remap("SSR", [k_0, i, k_1])
            T.where(k_0 * 10 + k_1 < 128)
            with T.init():
                B_rf[vi, vk_0] = T.float32(0)
            B_rf[vi, vk_0] = B_rf[vi, vk_0] + A[vi, vk_0 * 10 + vk_1]
    for i, k_0 in T.grid(128, 13):
        with T.block("B"):
            vk_0, vi = T.axis.remap("RS", [k_0, i])
            with T.init():
                B[vi] = T.float32(0)
            B[vi] = B[vi] + B_rf[vi, vk_0]
# Four chained reduction blocks (C -> D -> E -> F) nested under shared i/j
# loops; each reduces a split 16-way k axis of A and adds the previous stage.
@T.prim_func
def multiple_reduction_blocks(a: T.handle, f: T.handle) -> None:
    A = T.match_buffer(a, (16, 16, 16))
    C = T.alloc_buffer((16, 16))
    D = T.alloc_buffer((16, 16))
    E = T.alloc_buffer((16, 16))
    F = T.match_buffer(f, (16, 16))
    for i in T.serial(0, 16):
        for j1 in T.serial(0, 16):
            for k1o, k1i in T.grid(4, 4):
                with T.block("C"):
                    ci, cj = T.axis.remap("SS", [i, j1])
                    ck = T.axis.R(16, k1o * 4 + k1i)
                    with T.init():
                        C[ci, cj] = 0.0
                    C[ci, cj] = C[ci, cj] + A[ci, cj, ck]
            for k2o, k2i in T.grid(4, 4):
                with T.block("D"):
                    di, dj = T.axis.remap("SS", [i, j1])
                    dk = T.axis.R(16, k2o * 4 + k2i)
                    with T.init():
                        D[di, dj] = 0.0
                    D[di, dj] = D[di, dj] + A[di, dj, dk] + C[di, dj]
        for j2 in T.serial(0, 16):
            for k3o, k3i in T.grid(4, 4):
                with T.block("E"):
                    ei, ej = T.axis.remap("SS", [i, j2])
                    ek = T.axis.R(16, k3o * 4 + k3i)
                    with T.init():
                        E[ei, ej] = 0.0
                    E[ei, ej] = E[ei, ej] + A[ei, ej, ek] + D[ei, ej]
            for k4o, k4i in T.grid(4, 4):
                with T.block("F"):
                    fi, fj = T.axis.remap("SS", [i, j2])
                    fk = T.axis.R(16, k4o * 4 + k4i)
                    with T.init():
                        F[fi, fj] = 0.0
                    F[fi, fj] = F[fi, fj] + A[fi, fj, fk] + E[fi, fj]
# Expected IR after rfactor-ing k1o of block C in multiple_reduction_blocks:
# only C gains a C_rf stage plus a cross-factor reduction; D/E/F are kept
# as-is under the regenerated loop nest.
@T.prim_func
def multiple_reduction_blocks_rfactor(a: T.handle, f: T.handle) -> None:
    A = T.match_buffer(a, [16, 16, 16])
    C = T.alloc_buffer([16, 16])
    D = T.alloc_buffer([16, 16])
    E = T.alloc_buffer([16, 16])
    F = T.match_buffer(f, [16, 16])
    C_rf = T.alloc_buffer([16, 16, 4])
    for i, j1, k1o, k1i in T.grid(16, 16, 4, 4):
        with T.block("C_rf"):
            vk1o, ci, cj, vk1i = T.axis.remap("SSSR", [k1o, i, j1, k1i])
            with T.init():
                C_rf[ci, cj, vk1o] = 0.0
            C_rf[ci, cj, vk1o] = C_rf[ci, cj, vk1o] + A[ci, cj, ((vk1o * 4) + vk1i)]
    for i_1 in T.serial(0, 16):
        for j1_1 in T.serial(0, 16):
            for k1o_1 in T.serial(0, 4):
                with T.block("C"):
                    vk1o_1, ci_1, cj_1 = T.axis.remap("RSS", [k1o_1, i_1, j1_1])
                    with T.init():
                        C[ci_1, cj_1] = 0.0
                    C[ci_1, cj_1] = C[ci_1, cj_1] + C_rf[ci_1, cj_1, vk1o_1]
            for k2o, k2i in T.grid(4, 4):
                with T.block("D"):
                    di, dj = T.axis.remap("SS", [i_1, j1_1])
                    dk = T.axis.R(16, k2o * 4 + k2i)
                    with T.init():
                        D[di, dj] = 0.0
                    D[di, dj] = (D[di, dj] + A[di, dj, dk]) + C[di, dj]
        for j2 in T.serial(0, 16):
            for k3o, k3i in T.grid(4, 4):
                with T.block("E"):
                    ei, ej = T.axis.remap("SS", [i_1, j2])
                    ek = T.axis.R(16, k3o * 4 + k3i)
                    with T.init():
                        E[ei, ej] = 0.0
                    E[ei, ej] = (E[ei, ej] + A[ei, ej, ek]) + D[ei, ej]
            for k4o, k4i in T.grid(4, 4):
                with T.block("F"):
                    fi, fj = T.axis.remap("SS", [i_1, j2])
                    fk = T.axis.R(16, k4o * 4 + k4i)
                    with T.init():
                        F[fi, fj] = 0.0
                    F[fi, fj] = (F[fi, fj] + A[fi, fj, fk]) + E[fi, fj]
# 7x7 global pooling-style accumulation: reduces the fused 49-element spatial
# window of A into B[1, 512, 1, 1].
@T.prim_func
def rfactor_spatial_only(
    A: T.Buffer((1, 512, 7, 7), "float32"),
    B: T.Buffer((1, 512, 1, 1), "float32"),
) -> None:
    for _i0, i1, _i2, _i3, i4, _i5 in T.grid(1, 512, 1, 1, 49, 1):
        with T.block("acc"):
            ax0 = T.axis.spatial(1, 0)
            ax1 = T.axis.spatial(512, i1)
            ax2 = T.axis.spatial(1, 0)
            ax3 = T.axis.spatial(1, 0)
            rv0 = T.axis.reduce(7, i4 // 7)
            rv1 = T.axis.reduce(7, i4 % 7)
            T.reads(A[ax0, ax1, ax2 * 7 + rv0, ax3 * 7 + rv1])
            T.writes(B[ax0, ax1, ax2, ax3])
            with T.init():
                B[ax0, ax1, ax2, ax3] = T.float32(0)
            B[ax0, ax1, ax2, ax3] = (
                B[ax0, ax1, ax2, ax3] + A[ax0, ax1, ax2 * 7 + rv0, ax3 * 7 + rv1]
            )
# Expected IR after rfactor-ing i4 of rfactor_spatial_only: the rfactor block
# becomes a pure copy (all remaining axes spatial), then the 49 factors are
# reduced into B.
@T.prim_func
def rfactor_spatial_only_after(
    A: T.Buffer((1, 512, 7, 7), "float32"),
    B: T.Buffer((1, 512, 1, 1), "float32"),
) -> None:
    # body
    # with T.block("root")
    B_rf = T.alloc_buffer([1, 512, 1, 1, 49], dtype="float32")
    for _i0, i1, _i2, _i3, i4, _i5 in T.grid(1, 512, 1, 1, 49, 1):
        with T.block("acc_rf"):
            vi4 = T.axis.spatial(49, i4)
            ax0 = T.axis.spatial(1, 0)
            ax1 = T.axis.spatial(512, i1)
            ax2 = T.axis.spatial(1, 0)
            ax3 = T.axis.spatial(1, 0)
            B_rf[ax0, ax1, ax2, ax3, vi4] = A[ax0, ax1, ax2 * 7 + vi4 // 7, ax3 * 7 + vi4 % 7]
    for _i0, i1, _i2, _i3, i4, _i5 in T.grid(1, 512, 1, 1, 49, 1):
        with T.block("acc"):
            vi4 = T.axis.reduce(49, i4)
            ax0 = T.axis.spatial(1, 0)
            ax1 = T.axis.spatial(512, i1)
            ax2 = T.axis.spatial(1, 0)
            ax3 = T.axis.spatial(1, 0)
            with T.init():
                B[ax0, ax1, ax2, ax3] = T.float32(0)
            B[ax0, ax1, ax2, ax3] = B[ax0, ax1, ax2, ax3] + B_rf[ax0, ax1, ax2, ax3, vi4]
# Multi-output argmax reduction with the 128-way reduce axis split 4 x 32:
# tracks (index, value) pairs via Select, keeping the max value and its index.
@T.prim_func
def argmax_split(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
# Argmin reduction where both the init stores and the update stores are in the
# opposite order (v1 before v0) relative to the let bindings.
@T.prim_func
def argmin_split_init_update_reordered(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmin_v0: T.Buffer((128,), "int32"),
    argmin_v1: T.Buffer((128,), "float32"),
) -> None:
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmin"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmin_v0[i], argmin_v1[i])
            with T.init():
                argmin_v1[i] = T.max_value("float32")
                argmin_v0[i] = -1
            v_argmin_v0: T.int32 = T.Select(argmin_v1[i] <= val[i, k], argmin_v0[i], idx[i, k])
            v_argmin_v1: T.float32 = T.Select(argmin_v1[i] <= val[i, k], argmin_v1[i], val[i, k])
            argmin_v1[i] = v_argmin_v1
            argmin_v0[i] = v_argmin_v0
# Argmax variant whose two output buffers have mismatched shapes
# (argmax_v0 is (256,) while argmax_v1 is (128,)).
@T.prim_func
def argmax_split_different_shape(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((256,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
# Argmax variant where the two outputs are stored at different indices
# (argmax_v0[i] vs argmax_v1[i + 1]).
@T.prim_func
def argmax_split_different_indices(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i + 1] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i + 1] = v_argmax_v1
# Argmax variant where one of the init stores is wrapped in a LetStmt
# (v1_init) instead of being a plain BufferStore.
@T.prim_func
def argmax_split_init_not_bufferstore(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                v1_init: T.float32 = T.min_value("float32")
                argmax_v1[i] = v1_init
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
# Argmax variant whose init stores to argmax_v0 twice and never initializes
# argmax_v1 — a duplicated init buffer.
@T.prim_func
def argmax_split_init_buffer_duplicate(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v0[i] = -1
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
# Argmax variant with two init stores but only one let binding — the v1
# update stores its Select expression directly, without a LetStmt.
@T.prim_func
def argmax_split_letstmt_fewer_than_init(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
@T.prim_func
def argmax_split_letstmt_more_than_init(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    # Invalid fixture: two LetStmts in the update body but only one init
    # statement (argmax_v1 is never initialized).
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_split_let_body_neither_seqstmt_nor_bufferstore(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    # Invalid fixture: the innermost let body is T.evaluate(0), which is
    # neither a SeqStmt nor a BufferStore, so rfactor cannot match the pattern.
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            T.evaluate(0)
@T.prim_func
def argmax_split_init_update_inconsistent_bufferstore_number(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    # Invalid fixture: three BufferStores in the update body but only two init
    # statements; the store counts must match for rfactor to succeed.
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
            argmax_v1[i] = v_argmax_v1  # extra, redundant store
@T.prim_func
def argmax_split_body_seq_not_bufferstore(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    # Invalid fixture: the update SeqStmt contains T.evaluate(0), i.e. not all
    # of its statements are BufferStores.
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            T.evaluate(0)
@T.prim_func
def argmax_split_body_bufferstore_value_not_var(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    # Invalid fixture: argmax_v0 is stored from a raw Select expression instead
    # of the let-bound variable v_argmax_v0.
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_split_body_bufferstore_value_unbound_var(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    # Invalid fixture: argmax_v0 is stored from v_unbound, a variable that is
    # not bound by any of the block's LetStmts.
    v_unbound = T.int32()
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_unbound
            argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_split_one_let_var_used_multi_times(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "int32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "int32"),
) -> None:
    # Invalid fixture: the same let variable (v_argmax_v0) is stored into both
    # output buffers, so the let-var <-> buffer mapping is not one-to-one.
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("int32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v0
@T.prim_func
def argmax_split_body_one_buffer_updated_multi_times(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "int32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "int32"),
) -> None:
    # Invalid fixture: argmax_v0 is updated twice while argmax_v1 is never
    # stored in the update body.
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("int32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v0[i] = v_argmax_v1
@T.prim_func
def argmax_split_init_buffer_not_match(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v0_1: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    # Invalid fixture: the init block initializes argmax_v0_1, while the update
    # body stores into argmax_v0 -- the init/update buffers do not match.
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1_0 * 32 + i1_1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v0_1[i], argmax_v1[i])
            with T.init():
                argmax_v0_1[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_split_rfactor(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    # Expected IR after rfactor(ki, factor_axis=1) on argmax_split: a per-vi1_1
    # partial argmax ("argmax_rf") followed by the final cross-vi1_1 reduction.
    argmax_v0_rf = T.alloc_buffer([128, 32], dtype="int32")
    argmax_v1_rf = T.alloc_buffer([128, 32], dtype="float32")
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmax_rf"):
            vi1_1, i, vi1_0 = T.axis.remap("SSR", [i1_1, i0, i1_0])
            T.reads(idx[i, vi1_0 * 32 + vi1_1], val[i, vi1_0 * 32 + vi1_1])
            T.writes(argmax_v0_rf[i, vi1_1], argmax_v1_rf[i, vi1_1])
            with T.init():
                argmax_v0_rf[i, vi1_1] = -1
                argmax_v1_rf[i, vi1_1] = T.min_value("float32")
            v_argmax_v0_rf: T.int32 = T.Select(
                argmax_v1_rf[i, vi1_1] >= val[i, vi1_0 * 32 + vi1_1],
                argmax_v0_rf[i, vi1_1],
                idx[i, vi1_0 * 32 + vi1_1],
            )
            v_argmax_v1_rf: T.float32 = T.Select(
                argmax_v1_rf[i, vi1_1] >= val[i, vi1_0 * 32 + vi1_1],
                argmax_v1_rf[i, vi1_1],
                val[i, vi1_0 * 32 + vi1_1],
            )
            argmax_v0_rf[i, vi1_1] = v_argmax_v0_rf
            argmax_v1_rf[i, vi1_1] = v_argmax_v1_rf
    for i0, i1_1 in T.grid(128, 32):
        with T.block("argmax"):
            vi1_1, i = T.axis.remap("RS", [i1_1, i0])
            T.reads(argmax_v0_rf[i, vi1_1], argmax_v1_rf[i, vi1_1])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(
                argmax_v1[i] >= argmax_v1_rf[i, vi1_1], argmax_v0[i], argmax_v0_rf[i, vi1_1]
            )
            v_argmax_v1: T.float32 = T.Select(
                argmax_v1[i] >= argmax_v1_rf[i, vi1_1], argmax_v1[i], argmax_v1_rf[i, vi1_1]
            )
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmin_split_rfactor(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmin_v0: T.Buffer((128,), "int32"),
    argmin_v1: T.Buffer((128,), "float32"),
) -> None:
    # Expected IR after rfactor(ki, factor_axis=1) on the argmin schedule;
    # mirrors argmax_split_rfactor with <=/max_value instead of >=/min_value.
    argmin_v0_rf = T.alloc_buffer([128, 32], dtype="int32")
    argmin_v1_rf = T.alloc_buffer([128, 32], dtype="float32")
    for i0, i1_0, i1_1 in T.grid(128, 4, 32):
        with T.block("argmin_rf"):
            vi1_1, i, vi1_0 = T.axis.remap("SSR", [i1_1, i0, i1_0])
            T.reads(idx[i, vi1_0 * 32 + vi1_1], val[i, vi1_0 * 32 + vi1_1])
            T.writes(argmin_v0_rf[i, vi1_1], argmin_v1_rf[i, vi1_1])
            with T.init():
                argmin_v0_rf[i, vi1_1] = -1
                argmin_v1_rf[i, vi1_1] = T.max_value("float32")
            v_argmin_v0_rf: T.int32 = T.Select(
                argmin_v1_rf[i, vi1_1] <= val[i, vi1_0 * 32 + vi1_1],
                argmin_v0_rf[i, vi1_1],
                idx[i, vi1_0 * 32 + vi1_1],
            )
            v_argmin_v1_rf: T.float32 = T.Select(
                argmin_v1_rf[i, vi1_1] <= val[i, vi1_0 * 32 + vi1_1],
                argmin_v1_rf[i, vi1_1],
                val[i, vi1_0 * 32 + vi1_1],
            )
            argmin_v0_rf[i, vi1_1] = v_argmin_v0_rf
            argmin_v1_rf[i, vi1_1] = v_argmin_v1_rf
    for i0, i1_1 in T.grid(128, 32):
        with T.block("argmin"):
            vi1_1, i = T.axis.remap("RS", [i1_1, i0])
            T.reads(argmin_v0_rf[i, vi1_1], argmin_v1_rf[i, vi1_1])
            T.writes(argmin_v0[i], argmin_v1[i])
            with T.init():
                argmin_v0[i] = -1
                argmin_v1[i] = T.max_value("float32")
            v_argmin_v0: T.int32 = T.Select(
                argmin_v1[i] <= argmin_v1_rf[i, vi1_1], argmin_v0[i], argmin_v0_rf[i, vi1_1]
            )
            v_argmin_v1: T.float32 = T.Select(
                argmin_v1[i] <= argmin_v1_rf[i, vi1_1], argmin_v1[i], argmin_v1_rf[i, vi1_1]
            )
            argmin_v0[i] = v_argmin_v0
            argmin_v1[i] = v_argmin_v1
@T.prim_func
def argmax_topi_rfactor(
    placeholder: T.Buffer((1, 32), "int32"), placeholder_red: T.Buffer(1, "int32")
) -> None:
    # Expected IR for test_reduction_rfactor_topi_argmax: topi.argmax lowered,
    # reduction loop split by 8, then rfactored with factor_axis=1.  Note the
    # tie-breaking term (== value and smaller index) in the v0 Select.
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    placeholder_red_temp_v0 = T.alloc_buffer([1], dtype="int32")
    placeholder_red_temp_v1 = T.alloc_buffer([1], dtype="int32")
    placeholder_red_temp_v0_rf = T.alloc_buffer([1, 8], dtype="int32")
    placeholder_red_temp_v1_rf = T.alloc_buffer([1, 8], dtype="int32")
    for i0, i1_0, i1_1 in T.grid(1, 4, 8):
        with T.block("placeholder_red_temp_rf"):
            vi1_1, ax0, vi1_0 = T.axis.remap("SSR", [i1_1, i0, i1_0])
            T.reads(placeholder[ax0, vi1_0 * 8 + vi1_1])
            T.writes(placeholder_red_temp_v0_rf[ax0, vi1_1], placeholder_red_temp_v1_rf[ax0, vi1_1])
            with T.init():
                placeholder_red_temp_v0_rf[ax0, vi1_1] = -1
                placeholder_red_temp_v1_rf[ax0, vi1_1] = -2147483648
            v_placeholder_red_temp_v0_rf: T.int32 = T.Select(
                placeholder_red_temp_v1_rf[ax0, vi1_1] > placeholder[ax0, vi1_0 * 8 + vi1_1]
                or placeholder_red_temp_v1_rf[ax0, vi1_1] == placeholder[ax0, vi1_0 * 8 + vi1_1]
                and placeholder_red_temp_v0_rf[ax0, vi1_1] < vi1_0 * 8 + vi1_1,
                placeholder_red_temp_v0_rf[ax0, vi1_1],
                vi1_0 * 8 + vi1_1,
            )
            v_placeholder_red_temp_v1_rf: T.int32 = T.Select(
                placeholder_red_temp_v1_rf[ax0, vi1_1] > placeholder[ax0, vi1_0 * 8 + vi1_1],
                placeholder_red_temp_v1_rf[ax0, vi1_1],
                placeholder[ax0, vi1_0 * 8 + vi1_1],
            )
            placeholder_red_temp_v0_rf[ax0, vi1_1] = v_placeholder_red_temp_v0_rf
            placeholder_red_temp_v1_rf[ax0, vi1_1] = v_placeholder_red_temp_v1_rf
    for i0, i1_1 in T.grid(1, 8):
        with T.block("placeholder_red_temp"):
            vi1_1, ax0 = T.axis.remap("RS", [i1_1, i0])
            T.reads(placeholder_red_temp_v0_rf[ax0, vi1_1], placeholder_red_temp_v1_rf[ax0, vi1_1])
            T.writes(placeholder_red_temp_v0[ax0], placeholder_red_temp_v1[ax0])
            with T.init():
                placeholder_red_temp_v0[ax0] = -1
                placeholder_red_temp_v1[ax0] = -2147483648
            v_placeholder_red_temp_v0: T.int32 = T.Select(
                placeholder_red_temp_v1[ax0] > placeholder_red_temp_v1_rf[ax0, vi1_1]
                or placeholder_red_temp_v1[ax0] == placeholder_red_temp_v1_rf[ax0, vi1_1]
                and placeholder_red_temp_v0[ax0] < placeholder_red_temp_v0_rf[ax0, vi1_1],
                placeholder_red_temp_v0[ax0],
                placeholder_red_temp_v0_rf[ax0, vi1_1],
            )
            v_placeholder_red_temp_v1: T.int32 = T.Select(
                placeholder_red_temp_v1[ax0] > placeholder_red_temp_v1_rf[ax0, vi1_1],
                placeholder_red_temp_v1[ax0],
                placeholder_red_temp_v1_rf[ax0, vi1_1],
            )
            placeholder_red_temp_v0[ax0] = v_placeholder_red_temp_v0
            placeholder_red_temp_v1[ax0] = v_placeholder_red_temp_v1
    for i0 in T.serial(1):
        with T.block("placeholder_red"):
            ax0 = T.axis.spatial(1, i0)
            T.reads(placeholder_red_temp_v0[ax0])
            T.writes(placeholder_red[ax0])
            placeholder_red[ax0] = placeholder_red_temp_v0[ax0]
@T.prim_func
def argmin_topi_rfactor(
    placeholder: T.Buffer((1, 32), "int32"), placeholder_red: T.Buffer(1, "int32")
) -> None:
    # Expected IR for test_reduction_rfactor_topi_argmin; mirrors
    # argmax_topi_rfactor with </2147483647 instead of >/-2147483648.
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    placeholder_red_temp_v0 = T.alloc_buffer([1], dtype="int32")
    placeholder_red_temp_v1 = T.alloc_buffer([1], dtype="int32")
    placeholder_red_temp_v0_rf = T.alloc_buffer([1, 8], dtype="int32")
    placeholder_red_temp_v1_rf = T.alloc_buffer([1, 8], dtype="int32")
    for i0, i1_0, i1_1 in T.grid(1, 4, 8):
        with T.block("placeholder_red_temp_rf"):
            vi1_1, ax0, vi1_0 = T.axis.remap("SSR", [i1_1, i0, i1_0])
            T.reads(placeholder[ax0, vi1_0 * 8 + vi1_1])
            T.writes(placeholder_red_temp_v0_rf[ax0, vi1_1], placeholder_red_temp_v1_rf[ax0, vi1_1])
            with T.init():
                placeholder_red_temp_v0_rf[ax0, vi1_1] = -1
                placeholder_red_temp_v1_rf[ax0, vi1_1] = 2147483647
            v_placeholder_red_temp_v0_rf: T.int32 = T.Select(
                placeholder_red_temp_v1_rf[ax0, vi1_1] < placeholder[ax0, vi1_0 * 8 + vi1_1]
                or placeholder_red_temp_v1_rf[ax0, vi1_1] == placeholder[ax0, vi1_0 * 8 + vi1_1]
                and placeholder_red_temp_v0_rf[ax0, vi1_1] < vi1_0 * 8 + vi1_1,
                placeholder_red_temp_v0_rf[ax0, vi1_1],
                vi1_0 * 8 + vi1_1,
            )
            v_placeholder_red_temp_v1_rf: T.int32 = T.Select(
                placeholder_red_temp_v1_rf[ax0, vi1_1] < placeholder[ax0, vi1_0 * 8 + vi1_1],
                placeholder_red_temp_v1_rf[ax0, vi1_1],
                placeholder[ax0, vi1_0 * 8 + vi1_1],
            )
            placeholder_red_temp_v0_rf[ax0, vi1_1] = v_placeholder_red_temp_v0_rf
            placeholder_red_temp_v1_rf[ax0, vi1_1] = v_placeholder_red_temp_v1_rf
    for i0, i1_1 in T.grid(1, 8):
        with T.block("placeholder_red_temp"):
            vi1_1, ax0 = T.axis.remap("RS", [i1_1, i0])
            T.reads(placeholder_red_temp_v0_rf[ax0, vi1_1], placeholder_red_temp_v1_rf[ax0, vi1_1])
            T.writes(placeholder_red_temp_v0[ax0], placeholder_red_temp_v1[ax0])
            with T.init():
                placeholder_red_temp_v0[ax0] = -1
                placeholder_red_temp_v1[ax0] = 2147483647
            v_placeholder_red_temp_v0: T.int32 = T.Select(
                placeholder_red_temp_v1[ax0] < placeholder_red_temp_v1_rf[ax0, vi1_1]
                or placeholder_red_temp_v1[ax0] == placeholder_red_temp_v1_rf[ax0, vi1_1]
                and placeholder_red_temp_v0[ax0] < placeholder_red_temp_v0_rf[ax0, vi1_1],
                placeholder_red_temp_v0[ax0],
                placeholder_red_temp_v0_rf[ax0, vi1_1],
            )
            v_placeholder_red_temp_v1: T.int32 = T.Select(
                placeholder_red_temp_v1[ax0] < placeholder_red_temp_v1_rf[ax0, vi1_1],
                placeholder_red_temp_v1[ax0],
                placeholder_red_temp_v1_rf[ax0, vi1_1],
            )
            placeholder_red_temp_v0[ax0] = v_placeholder_red_temp_v0
            placeholder_red_temp_v1[ax0] = v_placeholder_red_temp_v1
    for i0 in T.serial(1):
        with T.block("placeholder_red"):
            ax0 = T.axis.spatial(1, i0)
            T.reads(placeholder_red_temp_v0[ax0])
            T.writes(placeholder_red[ax0])
            placeholder_red[ax0] = placeholder_red_temp_v0[ax0]
# pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
def test_reduction_rfactor_matmul():
    """rfactor on the innermost reduction loop of the transformed matmul."""
    sch = tir.Schedule(transformed_matmul, debug_mask="all")
    update_block = sch.get_block("update")
    kii = sch.get_loops(update_block)[-1]
    rf_block = sch.rfactor(kii, 0)
    tvm.ir.assert_structural_equal(sch.mod["main"], matmul_rfactor)
    assert sch.get(sch.get_block("update_rf")).same_as(sch.get(rf_block))
    assert sch.get(sch.get_block("update")).same_as(sch.get(update_block))
    verify_trace_roundtrip(sch, mod=transformed_matmul)
def test_reduction_rfactor_matmul_with_let():
    """Same as test_reduction_rfactor_matmul, starting from the let-based form."""
    sch = tir.Schedule(transformed_matmul_with_let, debug_mask="all")
    update_block = sch.get_block("update")
    kii = sch.get_loops(update_block)[-1]
    rf_block = sch.rfactor(kii, 0)
    tvm.ir.assert_structural_equal(sch.mod["main"], matmul_rfactor)
    assert sch.get(sch.get_block("update_rf")).same_as(sch.get(rf_block))
    assert sch.get(sch.get_block("update")).same_as(sch.get(update_block))
    verify_trace_roundtrip(sch, mod=transformed_matmul_with_let)
def test_reduction_rfactor_square_sum():
    """rfactor the reduction loop of square_sum with factor_axis=1."""
    sch = tir.Schedule(square_sum, debug_mask="all")
    block_c = sch.get_block("C")
    j = sch.get_loops(block_c)[-1]
    rf_block = sch.rfactor(j, 1)
    tvm.ir.assert_structural_equal(sch.mod["main"], square_sum_rfactor)
    assert sch.get(sch.get_block("C_rf")).same_as(sch.get(rf_block))
    assert sch.get(sch.get_block("C")).same_as(sch.get(block_c))
    verify_trace_roundtrip(sch, mod=square_sum)
def test_reduction_rfactor_square_sum_square_root():
    """rfactor works when the reduction feeds a follow-up (sqrt) stage."""
    sch = tir.Schedule(transformed_square_sum_square_root, debug_mask="all")
    block_c = sch.get_block("C")
    f_i = sch.get_loops(block_c)[-1]
    rf_block = sch.rfactor(f_i, 0)
    tvm.ir.assert_structural_equal(sch.mod["main"], square_sum_square_root_rfactor)
    assert sch.get(sch.get_block("C_rf")).same_as(sch.get(rf_block))
    assert sch.get(sch.get_block("C")).same_as(sch.get(block_c))
    verify_trace_roundtrip(sch, mod=transformed_square_sum_square_root)
# --- Negative cases: each schedule below violates one precondition of
# --- rfactor (named in the test) and must raise a ScheduleError.
def test_reduction_rfactor_loop_multiple_children():
    s = tir.Schedule(matmul_loop_multiple_children, debug_mask="all")
    k, _, _ = s.get_loops(s.get_block("C"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)
def test_reduction_rfactor_not_stage_pipeline():
    s = tir.Schedule(matmul_not_stage_pipeline, debug_mask="all")
    _, _, k = s.get_loops(s.get_block("C"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)
def test_reduction_rfactor_not_reduction_block1():
    s = tir.Schedule(element_wise, debug_mask="all")
    i, _ = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(i, 0)
def test_reduction_rfactor_not_reduction_block2():
    s = tir.Schedule(rowsum_not_quasi_affine, debug_mask="all")
    _, k = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)
def test_reduction_rfactor_not_reduction_block3():
    s = tir.Schedule(rowsum_not_dominant, debug_mask="all")
    _, k = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)
def test_reduction_rfactor_not_serial_loop():
    s = tir.Schedule(rowsum_not_serial, debug_mask="all")
    _, k = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)
def test_reduction_rfactor_not_same_buffer_access():
    s = tir.Schedule(matmul_not_same_buffer_access, debug_mask="all")
    _, _, k = s.get_loops(s.get_block("C"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)
def test_reduction_rfactor_factor_axis_range_fail():
    # factor_axis values outside the valid range must be rejected.
    s = tir.Schedule(transformed_matmul, debug_mask="all")
    _, _, _, _, kii = s.get_loops(s.get_block("update"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(kii, 3)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(kii, -4)
def test_reduction_rfactor_factor_axis_range():
    # A negative factor_axis counts from the end; -3 is equivalent to 0 here,
    # so the result matches the canonical matmul_rfactor IR.
    s = tir.Schedule(transformed_matmul, debug_mask="all")
    update = s.get_block("update")
    _, _, _, _, kii = s.get_loops(update)
    rf_block = s.rfactor(kii, -3)
    tvm.ir.assert_structural_equal(s.mod["main"], matmul_rfactor)
    assert s.get(rf_block).same_as(s.get(s.get_block("update_rf")))
    assert s.get(update).same_as(s.get(s.get_block("update")))
    verify_trace_roundtrip(s, mod=transformed_matmul)
# --- Negative cases: malformed reduce patterns, non-BufferStore init, and
# --- wrong loop choices must all raise a ScheduleError.
def test_reduction_rfactor_wrong_reduce_pattern1():
    s = tir.Schedule(rowsum_wrong_reduce_pattern1, debug_mask="all")
    _, k = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)
def test_reduction_rfactor_wrong_reduce_pattern2():
    s = tir.Schedule(rowsum_wrong_reduce_pattern2, debug_mask="all")
    _, k = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)
def test_reduction_rfactor_init_not_bufferstore():
    s = tir.Schedule(rowsum_init_not_bufferstore, debug_mask="all")
    _, k = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k, 0)
def test_reduction_rfactor_wrong_loops1():
    # Choosing a spatial loop (i) instead of a reduction loop must fail.
    s = tir.Schedule(rowsum, debug_mask="all")
    i, _ = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(i, 0)
def test_reduction_rfactor_wrong_loops2():
    s = tir.Schedule(rowsum_transformed, debug_mask="all")
    _, _, k_i = s.get_loops(s.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k_i, 0)
def test_reduction_rfactor_zero_dim():
    """rfactor works on a zero-dimensional (scalar-output) reduction."""
    sch = tir.Schedule(rowsum_zero_dim, debug_mask="all")
    block_b = sch.get_block("B")
    [k] = sch.get_loops(block_b)
    rf_block = sch.rfactor(k, 0)
    tvm.ir.assert_structural_equal(sch.mod["main"], rowsum_zero_dim_rfactor)
    assert sch.get(sch.get_block("B_rf")).same_as(sch.get(rf_block))
    assert sch.get(sch.get_block("B")).same_as(sch.get(block_b))
    verify_trace_roundtrip(sch, mod=rowsum_zero_dim)
def test_reduction_rfactor_outermost_loop_multiple_children_fail():  # pylint: disable=invalid-name
    # None of D/E/F's reduction loops can be rfactored: their shared outer
    # loop has multiple child blocks, which rfactor forbids.
    s = tir.Schedule(multiple_reduction_blocks, debug_mask="all")
    _, _, k2o, k2i = s.get_loops(s.get_block("D"))
    _, _, k3o, k3i = s.get_loops(s.get_block("E"))
    _, _, k4o, k4i = s.get_loops(s.get_block("F"))
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k2o, 0)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k2i, 0)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k3o, 0)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k3i, 0)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k4o, 0)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(k4i, 0)
def test_reduction_rfactor_outermost_loop_multiple_children():  # pylint: disable=invalid-name
    """Block C's own reduction loop can still be rfactored in this schedule."""
    sch = tir.Schedule(multiple_reduction_blocks, debug_mask="all")
    block_c = sch.get_block("C")
    k1o = sch.get_loops(block_c)[2]
    rf_block = sch.rfactor(k1o, 2)
    tvm.ir.assert_structural_equal(sch.mod["main"], multiple_reduction_blocks_rfactor)
    assert sch.get(sch.get_block("C_rf")).same_as(sch.get(rf_block))
    assert sch.get(sch.get_block("C")).same_as(sch.get(block_c))
    verify_trace_roundtrip(sch, mod=multiple_reduction_blocks)
def test_reduction_rfactor_predicate():  # pylint: disable=invalid-name
    """rfactor on a block guarded by a non-trivial predicate must fail."""
    s = tir.Schedule(rowsum_predicate, debug_mask="all")
    B = s.get_block("B")
    _, ko, _ = s.get_loops(B)
    # TODO: should be a tvm.tir.ScheduleError
    with pytest.raises(tvm.TVMError):
        # Result is irrelevant -- the call itself must raise, so the previous
        # unused `rf_block = ...` assignment has been dropped.
        s.rfactor(ko, 1)
def test_reduction_rfactor_with_annotation():
    """Block annotations are preserved across rfactor."""
    sch = tir.Schedule(square_sum_with_annotation, debug_mask="all")
    block_c = sch.get_block("C")
    j = sch.get_loops(block_c)[-1]
    rf_block = sch.rfactor(j, 1)
    tvm.ir.assert_structural_equal(sch.mod["main"], square_sum_with_annotation_rfactor)
    assert sch.get(sch.get_block("C_rf")).same_as(sch.get(rf_block))
    assert sch.get(sch.get_block("C")).same_as(sch.get(block_c))
    verify_trace_roundtrip(sch, mod=square_sum_with_annotation)
def test_reduction_rfactor_spatial_only():
    """rfactor on the fifth loop of the spatial-only accumulation block."""
    sch = tir.Schedule(rfactor_spatial_only, debug_mask="all")
    acc_block = sch.get_block(name="acc", func_name="main")
    loop = sch.get_loops(acc_block)[4]
    rf_block = sch.rfactor(loop=loop, factor_axis=4)
    tvm.ir.assert_structural_equal(sch.mod["main"], rfactor_spatial_only_after)
    assert sch.get(sch.get_block("acc_rf")).same_as(sch.get(rf_block))
    assert sch.get(sch.get_block("acc")).same_as(sch.get(acc_block))
    verify_trace_roundtrip(sch, mod=rfactor_spatial_only)
def test_reduction_rfactor_argmax():
    """rfactor a two-output (argmax) reduction block."""
    sch = tir.Schedule(argmax_split, debug_mask="all")
    argmax_block = sch.get_block("argmax")
    ki = sch.get_loops(argmax_block)[-1]
    rf_block = sch.rfactor(ki, 1)
    tvm.ir.assert_structural_equal(sch.mod["main"], argmax_split_rfactor)
    assert sch.get(sch.get_block("argmax_rf")).same_as(sch.get(rf_block))
    assert sch.get(sch.get_block("argmax")).same_as(sch.get(argmax_block))
    verify_trace_roundtrip(sch, mod=argmax_split)
def test_reduction_rfactor_argmin_init_update_reordeded():
    # The fixture's init/update statement order differs from the canonical
    # argmin form; rfactor should still produce the same expected IR.
    s = tir.Schedule(argmin_split_init_update_reordered, debug_mask="all")
    argmin = s.get_block("argmin")
    _, _, ki = s.get_loops(argmin)
    rf_block = s.rfactor(ki, 1)
    tvm.ir.assert_structural_equal(s.mod["main"], argmin_split_rfactor)
    assert s.get(rf_block).same_as(s.get(s.get_block("argmin_rf")))
    assert s.get(argmin).same_as(s.get(s.get_block("argmin")))
    verify_trace_roundtrip(s, mod=argmin_split_init_update_reordered)
# --- Negative argmax cases: each fixture below (named after its defect)
# --- violates one structural requirement of multi-output rfactor and must
# --- raise a ScheduleError.
def test_reduction_rfactor_argmax_reduction_buffer_different_shape():
    s = tir.Schedule(argmax_split_different_shape, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_different_access_indices():
    s = tir.Schedule(argmax_split_different_indices, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_init_not_bufferstore():
    s = tir.Schedule(argmax_split_init_not_bufferstore, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_init_buffer_duplicate():
    s = tir.Schedule(argmax_split_init_buffer_duplicate, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_letstmt_fewer_than_init():
    s = tir.Schedule(argmax_split_letstmt_fewer_than_init, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_letstmt_more_than_init():
    s = tir.Schedule(argmax_split_letstmt_more_than_init, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_let_body_neither_seqstmt_nor_bufferstore():
    s = tir.Schedule(argmax_split_let_body_neither_seqstmt_nor_bufferstore, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_init_update_inconsistent_bufferstore_number():
    s = tir.Schedule(argmax_split_init_update_inconsistent_bufferstore_number, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_body_seq_not_bufferstore():
    s = tir.Schedule(argmax_split_body_seq_not_bufferstore, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_body_bufferstore_value_not_var():
    s = tir.Schedule(argmax_split_body_bufferstore_value_not_var, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_body_bufferstore_value_unbound_var():
    s = tir.Schedule(argmax_split_body_bufferstore_value_unbound_var, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_one_let_var_used_multi_times():
    s = tir.Schedule(argmax_split_one_let_var_used_multi_times, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_body_one_buffer_updated_multi_times():
    s = tir.Schedule(argmax_split_body_one_buffer_updated_multi_times, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_argmax_init_buffer_not_match():
    s = tir.Schedule(argmax_split_init_buffer_not_match, debug_mask="all")
    argmax = s.get_block("argmax")
    _, _, ki = s.get_loops(argmax)
    with pytest.raises(tvm.tir.ScheduleError):
        s.rfactor(ki, 1)
def test_reduction_rfactor_topi_argmax():
    """End-to-end: lower topi.argmax, split its reduction loop, then rfactor."""
    data = te.placeholder((1, 32), dtype="int32")
    reduced = topi.argmax(data, axis=1)
    argmax_topi = te.create_prim_func([data, reduced])
    sch = tir.Schedule(argmax_topi, debug_mask="all")
    argmax_block = sch.get_block("placeholder_red_temp")
    k = sch.get_loops(argmax_block)[-1]
    _, ki = sch.split(k, [None, 8])
    rf_block = sch.rfactor(ki, 1)
    tvm.ir.assert_structural_equal(sch.mod["main"], argmax_topi_rfactor)
    assert sch.get(sch.get_block("placeholder_red_temp_rf")).same_as(sch.get(rf_block))
    assert sch.get(sch.get_block("placeholder_red_temp")).same_as(sch.get(argmax_block))
    verify_trace_roundtrip(sch, mod=argmax_topi)
def test_reduction_rfactor_topi_argmin():
    # End-to-end: lower topi.argmin, split its reduction loop by 8, then
    # rfactor and compare against the hand-written expected IR.
    A = te.placeholder((1, 32), dtype="int32")
    B = topi.argmin(A, axis=1)
    argmin_topi = te.create_prim_func([A, B])
    s = tir.Schedule(argmin_topi, debug_mask="all")
    argmin = s.get_block("placeholder_red_temp")
    _, k = s.get_loops(argmin)
    _, ki = s.split(k, [None, 8])
    rf_block = s.rfactor(ki, 1)
    tvm.ir.assert_structural_equal(s.mod["main"], argmin_topi_rfactor)
    assert s.get(rf_block).same_as(s.get(s.get_block("placeholder_red_temp_rf")))
    assert s.get(argmin).same_as(s.get(s.get_block("placeholder_red_temp")))
    verify_trace_roundtrip(s, mod=argmin_topi)
if __name__ == "__main__":
tvm.testing.main()
| 64,661 | 38.308207 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_analysis_identify_memcpy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import pytest
import tvm
import tvm.testing
from tvm.tir import BufferRegion, StringImm
from tvm.script import tir as T
identify_memcpy = tvm.tir.analysis._ffi_api._identify_memcpy
class BaseTest:
    """Utility class for defining unit tests for memcpy"""
    def __init_subclass__(cls):
        # Normalize each subclass's raw `func` definition into a PrimFunc
        # fixture and expose `expected` as a pytest fixture.
        cls.func = tvm.testing.CompareBeforeAfter._normalize_before(cls.func)
        cls.expected = pytest.fixture(cls.expected)
    def test_identify_memcpy(self, func, expected):
        # `expected` is either a regex fragment matched against the analysis
        # failure message, or a (src, dst) pair of BufferRegions; a bare
        # entry is normalized to a one-element list, one entry per loop.
        results = identify_memcpy(func.body)
        if isinstance(expected, str) or (
            isinstance(expected, tuple) and isinstance(expected[0], BufferRegion)
        ):
            expected = [expected]
        assert len(expected) == len(results)
        for expected, result in zip(expected, results):
            if isinstance(expected, str):
                # Failure case: the analysis returns a StringImm explaining
                # why the loop is not a memcpy.
                assert isinstance(result, StringImm)
                assert re.search(expected, result.value)
            else:
                tvm.ir.assert_structural_equal(result, expected)
class Test1D(BaseTest):
    """Simplest test case"""
    def func(A: T.Buffer[1024, "float32"], B: T.Buffer[1024, "float32"]):
        for i in T.serial(1024):
            B[i] = A[i]
    def expected(self, func):
        # A straight element-wise copy is identified as memcpy over the
        # full extent of both buffers.
        A, B = func.buffer_map.values()
        return A[0:1024], B[0:1024]
class Test1DCompute(BaseTest):
    """Like Test1D, but a computation prevents this being a memcpy"""
    def func(A: T.Buffer[1024, "float32"], B: T.Buffer[1024, "float32"]):
        for i in T.serial(1024):
            B[i] = A[i] + 1.0
    def expected(self, func):
        # The `+ 1.0` makes the stored value an Add, not a bare BufferLoad.
        return "Expected BufferStore's value to be BufferLoad"
class Test1DConditional(BaseTest):
    """Like Test1D, but a conditional prevents this being a memcpy"""
    def func(A: T.Buffer[1024, "float32"], B: T.Buffer[1024, "float32"]):
        for i in T.serial(1024):
            if i < 1024:
                B[i] = A[i]
    def expected(self, func):
        # The IfThenElse wrapper means the innermost loop body is not a
        # plain BufferStore, so no memcpy is identified.  (The previous
        # unused `A, B = func.buffer_map.values()` unpacking was removed.)
        return "Expected innermost loop to have BufferStore body"
class Test1DStridedInput(BaseTest):
    """Like Test1D, but strided input prevents this being a memcpy"""
    def func(A: T.Buffer[2048, "float32"], B: T.Buffer[1024, "float32"]):
        for i in T.serial(1024):
            B[i] = A[i * 2]
    def expected(self, func):
        # 1024 iterations touch 2048 distinct source indices.
        return "Mismatch between loop iterations (.*) and number of src indices"
class Test1DStridedOutput(BaseTest):
    """Like Test1D, but strided output prevents this being a memcpy"""
    def func(A: T.Buffer[1024, "float32"], B: T.Buffer[2048, "float32"]):
        for i in T.serial(1024):
            B[i * 2] = A[i]
    def expected(self, func):
        # 1024 iterations touch 2048 distinct destination indices.
        return "Mismatch between loop iterations (.*) and number of dst indices"
class Test1DInput2DOutputFusedLoop(BaseTest):
    """Like Test1D, but the output is written as a 2-d buffer"""
    def func(A: T.Buffer[1024, "float32"], B: T.Buffer[(32, 32), "float32"]):
        for i in T.serial(1024):
            B[i // 32, i % 32] = A[i]
    def expected(self, func):
        # The i//32, i%32 indexing still covers B exactly once, so this is
        # recognized as a memcpy spanning both full buffers.
        A, B = func.buffer_map.values()
        return A[0:1024], B[0:32, 0:32]
class Test2DInput1DOutputFusedLoop(BaseTest):
    """Like Test1D, but the input is written as a 2-d buffer"""
    def func(A: T.Buffer[(32, 32), "float32"], B: T.Buffer[1024, "float32"]):
        for i in T.serial(1024):
            B[i] = A[i // 32, i % 32]
    def expected(self, func):
        # Still a memcpy: the whole 2-d source covers the flat 1-d destination.
        A, B = func.buffer_map.values()
        return A[0:32, 0:32], B[0:1024]
class Test1DInput1DOutputNestedLoop(BaseTest):
    """Like Test1D, but the iterator is written as a nested loop
    In test cases with more than one loop, each loop is checked to see
    if could be written as a memcpy.  The C++ utility function
    operates on individual loops, but for unit testing in Python, it
    is more convenient to return the results for all loops.
    """
    def func(A: T.Buffer[1024, "float32"], B: T.Buffer[1024, "float32"]):
        for i, j in T.grid(32, 32):
            B[i * 32 + j] = A[i * 32 + j]
    def expected(self, func):
        A, B = func.buffer_map.values()
        # Outer loop variable; used to express the per-iteration regions
        # touched by the inner loop.
        i = func.body.loop_var
        return [
            (A[0:1024], B[0:1024]),
            (A[i * 32 : i * 32 + 32], B[i * 32 : i * 32 + 32]),
        ]
class Test1DInput1DOutputNestedLoopEquivalentExpressions(BaseTest):
    """Like Test1DInput1DOutputNestedLoop, but with equivalent indices
    If the expressions are not identical, the loops may still be
    recognizable as a memcpy, so long as the expressions are
    equivalent.
    """
    def func(A: T.Buffer[1024, "float32"], B: T.Buffer[1024, "float32"]):
        for i, j in T.grid(32, 32):
            # Note: `A[j + i * 32]` vs `B[i * 32 + j]` — same value,
            # different expression tree.
            B[i * 32 + j] = A[j + i * 32]
    def expected(self, func):
        A, B = func.buffer_map.values()
        i = func.body.loop_var
        return [
            (A[0:1024], B[0:1024]),
            (A[i * 32 : i * 32 + 32], B[i * 32 : i * 32 + 32]),
        ]
class Test1DInput2DOutputNestedLoop(BaseTest):
    """Like Test1DInput1DOutputNestedLoop, but with a 2-d output buffer"""
    def func(A: T.Buffer[1024, "float32"], B: T.Buffer[(32, 32), "float32"]):
        for i, j in T.grid(32, 32):
            B[i, j] = A[i * 32 + j]
    def expected(self, func):
        A, B = func.buffer_map.values()
        # Outer loop variable for the per-row inner-loop regions.
        i = func.body.loop_var
        return [
            (A[0:1024], B[0:32, 0:32]),
            (A[i * 32 : i * 32 + 32], B[i, 0:32]),
        ]
class Test2DInput1DOutputNestedLoop(BaseTest):
    """Like Test1DInput1DOutputNestedLoop, but with a 2-d input buffer"""
    def func(A: T.Buffer[(32, 32), "float32"], B: T.Buffer[1024, "float32"]):
        for i, j in T.grid(32, 32):
            B[i * 32 + j] = A[i, j]
    def expected(self, func):
        A, B = func.buffer_map.values()
        # Outer loop variable for the per-row inner-loop regions.
        i = func.body.loop_var
        return [
            (A[0:32, 0:32], B[0:1024]),
            (A[i, 0:32], B[i * 32 : i * 32 + 32]),
        ]
class Test2DInput2DOutputNestedLoop(BaseTest):
    """Like Test1DInput1DOutputNestedLoop, but with 2-d input/output buffers"""
    def func(A: T.Buffer[(32, 32), "float32"], B: T.Buffer[(32, 32), "float32"]):
        for i, j in T.grid(32, 32):
            B[i, j] = A[i, j]
    def expected(self, func):
        A, B = func.buffer_map.values()
        # Outer loop variable for the per-row inner-loop regions.
        i = func.body.loop_var
        return [
            (A[0:32, 0:32], B[0:32, 0:32]),
            (A[i, 0:32], B[i, 0:32]),
        ]
class Test2DInput2DOutputTransposeOutput(BaseTest):
    """Test2DInput2DOutputNestedLoop, but with a transposed output
    This is not recognized as a memcpy, because it results in a transpose.
    """
    def func(A: T.Buffer[(32, 32), "float32"], B: T.Buffer[(32, 32), "float32"]):
        for i, j in T.grid(32, 32):
            B[j, i] = A[i, j]
    def expected(self, func):
        # One error-message regex per loop level (outer, then inner).
        return [
            "different source",
            "Mismatch .* number of dst indices touched",
        ]
class Test2DInput2DOutputTransposeInput(BaseTest):
    """Test2DInput2DOutputNestedLoop, but with a transposed input
    This is not recognized as a memcpy, because it results in a transpose.
    """
    def func(A: T.Buffer[(32, 32), "float32"], B: T.Buffer[(32, 32), "float32"]):
        for i, j in T.grid(32, 32):
            B[i, j] = A[j, i]
    def expected(self, func):
        # One error-message regex per loop level (outer, then inner).
        return [
            "different source",
            "Mismatch .* number of src indices touched",
        ]
class Test2DInput2DOutputTransposeBoth(BaseTest):
    """Test2DInput2DOutputNestedLoop, but with a transposed input
    The inner loop is not recognized as a memcpy, because it has
    strided access of both the input and output buffers.  However, the
    outer loop is still recognized as a memcpy, because the full
    region has been copied over, even though it occurs out of order.
    """
    def func(A: T.Buffer[(32, 32), "float32"], B: T.Buffer[(32, 32), "float32"]):
        for i, j in T.grid(32, 32):
            B[j, i] = A[j, i]
    def expected(self, func):
        A, B = func.buffer_map.values()
        # Outer loop: full-region memcpy; inner loop: error-message regex.
        return [
            (A[0:32, 0:32], B[0:32, 0:32]),
            "Mismatch .* number of src indices touched",
        ]
class TestCacheRead(BaseTest):
    """Like Test2DInput2DOutputNestedLoop, but with a 1-d
    The inner loop is a memcpy of a single row at a time.  This
    pattern would appear when B is a read cache of A.
    """
    def func(A: T.Buffer[(32, 32), "float32"], B: T.Buffer[32, "float32"]):
        for i, j in T.grid(32, 32):
            B[j] = A[i, j]
    def expected(self, func):
        A, B = func.buffer_map.values()
        i = func.body.loop_var
        # Outer loop overwrites B on every iteration (not bijective);
        # inner loop copies one row of A into B.
        return [
            "does not form a bijective transform",
            (A[i, 0:32], B[0:32]),
        ]
class TestCacheWrite(BaseTest):
    """Like Test2DInput2DOutputNestedLoop, but with a 1-d
    The inner loop is a memcpy of a single row at a time.  This
    pattern would appear when A is a write cache of B.
    """
    def func(A: T.Buffer[32, "float32"], B: T.Buffer[(32, 32), "float32"]):
        for i, j in T.grid(32, 32):
            B[i, j] = A[j]
    def expected(self, func):
        A, B = func.buffer_map.values()
        i = func.body.loop_var
        # Outer loop re-reads A on every iteration (not bijective);
        # inner loop copies A into one row of B.
        return [
            "does not form a bijective transform",
            (A[0:32], B[i, 0:32]),
        ]
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 10,187 | 30.347692 | 81 | py |
tvm | tvm-main/tests/python/unittest/test_rpc_base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm.rpc import base
import pytest
import random
@pytest.mark.parametrize("device_key", ["16e995b6", "127.0.0.1:5555"])
def test_rpc_base_random_key(device_key):
    """Round-trip random_key/split_random_key, including conflict avoidance."""
    random.seed(0)
    first_key = base.random_key(device_key)
    assert first_key.startswith(device_key)
    recovered, _ = base.split_random_key(first_key)
    assert recovered == device_key

    # Re-seed with the same seed, but hand the previous key in as the
    # conflict map so a fresh, distinct key must be generated.
    random.seed(0)
    second_key = base.random_key(device_key, cmap={first_key})
    assert second_key != first_key
    assert second_key.startswith(device_key)
    recovered_again, _ = base.split_random_key(second_key)
    assert recovered_again == device_key
if __name__ == "__main__":
    # This file only imports `tvm.rpc.base`, so the bare name `tvm` is not
    # in scope at module level; import it here to avoid a NameError when the
    # file is executed as a script.
    import tvm.testing

    tvm.testing.main()
| 1,539 | 36.560976 | 70 | py |
tvm | tvm-main/tests/python/unittest/test_arith_deduce_bound.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm.tir.buffer import decl_buffer
def test_deduce():
    """Exercise tvm.arith.deduce_bound over >=, <=, <, >, and == predicates.

    deduce_bound(var, predicate, hint_map, relax_map) solves the predicate
    for `var`; the min/max of the resulting interval are checked against
    manually derived answers.  An unsatisfiable predicate yields the empty
    interval (max_value == "neg_inf", min_value == "pos_inf").
    """
    a = te.var("a")
    b = te.var("b")
    c = te.var("c")
    d = te.var("d")
    b_s = tvm.arith.IntervalSet(2, 3)
    c_s = tvm.arith.IntervalSet(10, 15)
    d_s = tvm.arith.IntervalSet(-3, -1)
    zero = tvm.tir.const(0, "int32")
    fdiv = tvm.te.floordiv
    e0 = (-b) * a + c - d
    res0 = tvm.arith.deduce_bound(a, e0 >= 0, {b: b_s, c: c_s, d: d_s}, {})
    ans0 = fdiv(d - c, b * -1)
    tvm.testing.assert_prim_expr_equal(res0.max_value, ans0)
    # expression containing variable a is on rhs
    res0 = tvm.arith.deduce_bound(a, zero <= e0, {b: b_s, c: c_s, d: d_s}, {})
    tvm.testing.assert_prim_expr_equal(res0.max_value, ans0)
    e0 = d * a + c - d
    res0 = tvm.arith.deduce_bound(a, e0 >= 0, {b: b_s, c: c_s, d: d_s}, {})
    ans0 = fdiv(d - c, d)
    tvm.testing.assert_prim_expr_equal(res0.max_value, ans0)
    # expression containing variable a is on rhs
    res0 = tvm.arith.deduce_bound(a, zero <= e0, {b: b_s, c: c_s, d: d_s}, {})
    tvm.testing.assert_prim_expr_equal(res0.max_value, ans0)
    e1 = a * 4 + b < c
    res1 = tvm.arith.deduce_bound(a, e1, {b: b_s, c: c_s, d: d_s}, {})
    ans1 = fdiv(c - 1 - b, 4)
    tvm.testing.assert_prim_expr_equal(res1.max_value, ans1)
    # expression containing variable a is on rhs
    e1 = c > a * 4 + b
    res1 = tvm.arith.deduce_bound(a, e1, {b: b_s, c: c_s, d: d_s}, {})
    tvm.testing.assert_prim_expr_equal(res1.max_value, ans1)
    # max(5, a*4) < 0 is unsatisfiable, so the deduced interval is empty.
    e2 = tvm.te.max(5, a * 4) < 0
    res2 = tvm.arith.deduce_bound(a, e2, {b: b_s, c: c_s, d: d_s}, {})
    assert str(res2.max_value) == "neg_inf"
    assert str(res2.min_value) == "pos_inf"
    # expression containing variable a is on rhs
    e2 = zero < tvm.te.max(5, a * 4)
    res2 = tvm.arith.deduce_bound(a, e2, {b: b_s, c: c_s, d: d_s}, {})
    assert str(res2.max_value) == "neg_inf"
    assert str(res2.min_value) == "pos_inf"
    e3 = (-b) + a * c - d
    res3 = tvm.arith.deduce_bound(a, e3 >= 0, {b: b_s, c: c_s, d: d_s}, {b: b_s, d: d_s})
    ans3 = fdiv(2, c) + 1
    tvm.testing.assert_prim_expr_equal(res3.min_value, ans3)
    res3 = tvm.arith.deduce_bound(a, zero <= e3, {b: b_s, c: c_s, d: d_s}, {b: b_s, d: d_s})
    tvm.testing.assert_prim_expr_equal(res3.min_value, ans3)
    # tests for `EQ` op
    res4 = tvm.arith.deduce_bound(a, a == b, {}, {})
    tvm.testing.assert_prim_expr_equal(res4.max_value, b)
    tvm.testing.assert_prim_expr_equal(res4.min_value, b)
    # Unsatisfiable `EQ`, variable as one of the Operand
    res5 = tvm.arith.deduce_bound(a, (a == b), {b: b_s}, {b: b_s})
    assert str(res5.max_value) == "neg_inf"
    assert str(res5.min_value) == "pos_inf"
    # variable `a` on the RHS side
    res6 = tvm.arith.deduce_bound(a, 10 == a, {}, {})
    tvm.testing.assert_prim_expr_equal(res6.max_value, 10)
    tvm.testing.assert_prim_expr_equal(res6.min_value, 10)
    # Add, Sub in `EQ`
    e4 = (a - c) == (b + d)
    ans4 = b + d + c
    res7 = tvm.arith.deduce_bound(a, e4, {b: b_s, c: c_s, d: d_s}, {})
    tvm.testing.assert_prim_expr_equal(res7.max_value, ans4)
    tvm.testing.assert_prim_expr_equal(res7.min_value, ans4)
    # Satisfiable Mul in `EQ` with negative sign
    res8 = tvm.arith.deduce_bound(a, (5 * a == -10), {}, {})
    tvm.testing.assert_prim_expr_equal(res8.max_value, -2)
    tvm.testing.assert_prim_expr_equal(res8.min_value, -2)
    # Unsatisfiable Mul in `EQ`
    e5 = 4 * a == b
    res9 = tvm.arith.deduce_bound(a, e5, {b: b_s}, {})
    assert str(res9.max_value) == "neg_inf"
    assert str(res9.min_value) == "pos_inf"
    res10 = tvm.arith.deduce_bound(a, (b * a == b), {b: b_s}, {})
    # simplifier is now able to prove symbolic relation (b * a % b == 0)
    tvm.testing.assert_prim_expr_equal(res10.max_value, 1)
    tvm.testing.assert_prim_expr_equal(res10.min_value, 1)
def test_check():
    """deduce_bound returns the empty ("nothing") interval for predicates it
    cannot handle: no comparison, nested comparisons, or the target variable
    appearing more than once.
    """
    # Unused locals `d`/`d_s` from the original were removed, and the second
    # result variable is no longer rebound over the first.
    a = te.var("a")
    b = te.var("b")
    c = te.var("c")
    b_s = tvm.arith.IntervalSet(2, 3)
    c_s = tvm.arith.IntervalSet(5, 7)
    # no compare operator
    res1 = tvm.arith.deduce_bound(a, a + b, {b: b_s}, {})
    assert res1.is_nothing()
    # multiple compare operators
    res2 = tvm.arith.deduce_bound(a, (a + b > 3).astype(c.dtype) > c, {b: b_s, c: c_s}, {})
    assert res2.is_nothing()
    # multiple target variable
    res3 = tvm.arith.deduce_bound(a, a * 2 - a > b, {b: b_s}, {})
    assert res3.is_nothing()
def test_deduce_basic():
    """Deduce bounds of the linear expression b + a*coff + 3 against the
    constant 17, for both signs of the coefficient and all four comparison
    directions."""
    def test_basic(a1, a2, coff):
        a = te.var("a")
        b = te.var("b")
        b_s = tvm.arith.IntervalSet(a1, a2)
        e0 = b + a * coff + 3
        res1 = tvm.arith.deduce_bound(a, e0 < 17, {b: b_s}, {b: b_s})
        # A positive coefficient constrains the max of `a`; a negative one
        # constrains the min.  Substitute the deduced bound back into the
        # predicate (at the worst-case value of b) and check it holds.
        [x, y] = [res1.max_value, b_s.max_value] if coff > 0 else [res1.min_value, b_s.min_value]
        tvm.testing.assert_prim_expr_equal((x * coff + 3 + y) < 17, True)
        # expression containing variable a is on rhs
        res1 = tvm.arith.deduce_bound(a, tvm.tir.const(17, "int32") < e0, {b: b_s}, {b: b_s})
        [x, y] = [res1.max_value, b_s.max_value] if coff < 0 else [res1.min_value, b_s.min_value]
        tvm.testing.assert_prim_expr_equal((x * coff + 3 + y) > 17, True)
        # expression containing variable a is on rhs
        res1 = tvm.arith.deduce_bound(a, tvm.tir.const(17, "int32") >= e0, {b: b_s}, {b: b_s})
        [x, y] = [res1.max_value, b_s.max_value] if coff > 0 else [res1.min_value, b_s.min_value]
        tvm.testing.assert_prim_expr_equal((x * coff + 3 + y) <= 17, True)
        res1 = tvm.arith.deduce_bound(a, e0 >= 17, {b: b_s}, {b: b_s})
        [x, y] = [res1.max_value, b_s.max_value] if coff < 0 else [res1.min_value, b_s.min_value]
        tvm.testing.assert_prim_expr_equal((x * coff + 3 + y) >= 17, True)
    test_basic(0, 4, 4)
    test_basic(1, 5, 4)
    test_basic(2, 6, 4)
    test_basic(0, 4, -4)
    test_basic(1, 5, -4)
    test_basic(2, 6, -4)
def test_deduce_complex():
    """Like test_deduce_basic, but with the variable nested inside a product:
    (b*3 + a*coff) * 4 compared against 63."""
    def test_complex(a1, a2, coff):
        a = te.var("a")
        b = te.var("b")
        b_s = tvm.arith.IntervalSet(a1, a2)
        e0 = (b * 3 + a * coff) * 4
        res1 = tvm.arith.deduce_bound(a, e0 < 63, {b: b_s}, {b: b_s})
        # Positive coefficient constrains the max of `a`; negative, the min.
        # Substitute the deduced bound back in and check the predicate holds.
        [t, x] = [res1.max_value, b_s.max_value] if coff > 0 else [res1.min_value, b_s.min_value]
        tvm.testing.assert_prim_expr_equal(((x * 3 + t * coff) * 4) < 63, True)
        # expression containing variable a is on rhs
        res1 = tvm.arith.deduce_bound(a, tvm.tir.const(63, "int32") >= e0, {b: b_s}, {b: b_s})
        [t, x] = [res1.max_value, b_s.max_value] if coff > 0 else [res1.min_value, b_s.min_value]
        tvm.testing.assert_prim_expr_equal(((x * 3 + t * coff) * 4) <= 63, True)
        res1 = tvm.arith.deduce_bound(a, e0 > 63, {b: b_s}, {b: b_s})
        [t, x] = [res1.max_value, b_s.max_value] if coff < 0 else [res1.min_value, b_s.min_value]
        tvm.testing.assert_prim_expr_equal(((x * 3 + t * coff) * 4) > 63, True)
        # expression containing variable a is on rhs
        res1 = tvm.arith.deduce_bound(a, tvm.tir.const(63, "int32") <= e0, {b: b_s}, {b: b_s})
        [t, x] = [res1.max_value, b_s.max_value] if coff < 0 else [res1.min_value, b_s.min_value]
        tvm.testing.assert_prim_expr_equal(((x * 3 + t * coff) * 4) >= 63, True)
    test_complex(0, 4, 4)
    test_complex(0, 4, -4)
    test_complex(2, 6, 4)
    test_complex(0, 4, -4)
    test_complex(1, 5, -4)
    test_complex(2, 6, -4)
def test_deduce_non_support():
    """deduce_bound gives up (returns the empty interval) when the LHS of the
    comparison is an expression form it does not support."""
    a = te.var("a")

    unsupported_exprs = [
        tvm.tir.floormod(a, 16),
        tvm.tir.Min(a, 16),
        tvm.tir.Max(a, 16),
        tvm.tir.LE(a, 16),
        tvm.tir.LT(a, 16),
        tvm.tir.GE(a, 16),
        tvm.tir.GT(a, 16),
        tvm.tir.EQ(a, 16),
        tvm.tir.NE(a, 16),
        tvm.tir.log(a),
        tvm.tir.BufferLoad(decl_buffer([16], "int32"), [a]),
    ]
    for lhs in unsupported_exprs:
        result = tvm.arith.deduce_bound(a, lhs < 10, {}, {})
        assert result.is_nothing()
def test_deduce_floordiv():
    """Deduce bounds of predicates whose LHS involves floor division."""
    def do_test(gen_expr, dom_map, expect_min, expect_max):
        # `expect_min`/`expect_max` may be the strings "pos_inf"/"neg_inf"
        # for unbounded (or unsatisfiable) results, otherwise they are
        # compared as expressions.
        a = te.var("a")
        expr = gen_expr(a)
        res = tvm.arith.deduce_bound(a, expr, dom_map, dom_map)
        if isinstance(expect_min, str):
            assert str(res.min_value) == expect_min
        else:
            tvm.testing.assert_prim_expr_equal(res.min_value, expect_min)
        if isinstance(expect_max, str):
            assert str(res.max_value) == expect_max
        else:
            tvm.testing.assert_prim_expr_equal(res.max_value, expect_max)
    # test basic cases
    do_test(lambda a: a // 8 > 3, {}, 32, "pos_inf")
    do_test(lambda a: a // 8 >= 3, {}, 24, "pos_inf")
    do_test(lambda a: a // 8 < 3, {}, "neg_inf", 23)
    do_test(lambda a: a // 8 <= 3, {}, "neg_inf", 31)
    do_test(lambda a: a // 8 == 3, {}, "pos_inf", "neg_inf")
    do_test(lambda a: a // 8 > -3, {}, -16, "pos_inf")
    do_test(lambda a: a // 8 >= -3, {}, -24, "pos_inf")
    do_test(lambda a: a // -8 > 3, {}, "neg_inf", -32)
    do_test(lambda a: a // -8 >= 3, {}, "neg_inf", -24)
    do_test(lambda a: a // -8 < 3, {}, -23, "pos_inf")
    do_test(lambda a: a // -8 <= 3, {}, -31, "pos_inf")
    do_test(lambda a: 8 // a >= 2, {}, "pos_inf", "neg_inf")
    # test nested cases
    b = te.var("b")
    bs = {b: tvm.arith.IntervalSet(2, 6)}
    do_test(lambda a: b * 3 + a // 8 < 63, bs, "neg_inf", 359)
    do_test(lambda a: b * 3 + a // 8 <= 63, bs, "neg_inf", 367)
    do_test(lambda a: b * 3 + a // 8 > 63, bs, 464, "pos_inf")
    do_test(lambda a: b * 3 + a // 8 >= 63, bs, 456, "pos_inf")
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 10,577 | 37.889706 | 97 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_schedule_rule_cross_thread_reduction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import tvm
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing import te_workload
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
)
from tvm.script import tir as T
from tvm.target import Target
from tvm.te import create_prim_func
# Softmax workload with the intermediate exp() buffer already inlined: the
# exponential is computed directly inside both the expsum reduction and the
# final normalization, rather than staged through a T_softmax_exp buffer.
@tvm.script.ir_module
class Softmax_mn_after_inline:
    @T.prim_func
    def main(
        A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")
    ) -> None:
        T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
        T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
        # Row-wise max, used for numerically stable softmax.
        for i0, i1 in T.grid(256, 256):
            with T.block("T_softmax_maxelem"):
                i0_1, k = T.axis.remap("SR", [i0, i1])
                with T.init():
                    T_softmax_maxelem[i0_1] = T.min_value("float32")
                T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
        # Row-wise sum of exp(x - max), with exp() inlined.
        for i0, i1 in T.grid(256, 256):
            with T.block("T_softmax_expsum"):
                i0_2, k = T.axis.remap("SR", [i0, i1])
                with T.init():
                    T_softmax_expsum[i0_2] = T.float32(0)
                T_softmax_expsum[i0_2] = T_softmax_expsum[i0_2] + T.exp(
                    A[i0_2, k] - T_softmax_maxelem[i0_2], dtype="float32"
                )
        # Final normalization, again with exp() inlined.
        for i0_3, i1 in T.grid(256, 256):
            with T.block("T_softmax_norm"):
                i0_4, i1_1 = T.axis.remap("SS", [i0_3, i1])
                T.block_attr({"axis": 1})
                T_softmax_norm[i0_4, i1_1] = (
                    T.exp(A[i0_4, i1_1] - T_softmax_maxelem[i0_4], dtype="float32")
                    / T_softmax_expsum[i0_4]
                )
def test_gpu_softmax_mn():
    """CrossThreadReduction sketches for the (m, n) softmax workload.

    Four sketches are expected: no cross-thread reduction, reduction applied
    to the row-max only, to the exp-sum only, and to both.  Each expected
    module below is paired with the sampled decisions that reproduce it.
    """
    # Sketch 0: no cross-thread reduction applied; all loops remain serial.
    @T.prim_func
    def softmax_mn_0(
        A: T.Buffer((256, 256), "float32"),
        T_softmax_norm: T.Buffer((256, 256), "float32"),
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
        T_softmax_exp = T.alloc_buffer([256, 256], dtype="float32")
        T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
        for i0, i1 in T.grid(256, 256):
            with T.block("T_softmax_maxelem"):
                i0_1, k = T.axis.remap("SR", [i0, i1])
                T.reads(A[i0_1, k])
                T.writes(T_softmax_maxelem[i0_1])
                with T.init():
                    T_softmax_maxelem[i0_1] = T.float32(-3.4028234663852886e38)
                T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
        for i0, i1 in T.grid(256, 256):
            with T.block("T_softmax_exp"):
                i0_2, i1_1 = T.axis.remap("SS", [i0, i1])
                T.reads(A[i0_2, i1_1], T_softmax_maxelem[i0_2])
                T.writes(T_softmax_exp[i0_2, i1_1])
                T_softmax_exp[i0_2, i1_1] = T.exp(
                    A[i0_2, i1_1] - T_softmax_maxelem[i0_2], dtype="float32"
                )
        for i0_3, i1 in T.grid(256, 256):
            with T.block("T_softmax_expsum"):
                i0_4, k = T.axis.remap("SR", [i0_3, i1])
                T.reads(T_softmax_exp[i0_4, k])
                T.writes(T_softmax_expsum[i0_4])
                with T.init():
                    T_softmax_expsum[i0_4] = T.float32(0)
                T_softmax_expsum[i0_4] = T_softmax_expsum[i0_4] + T_softmax_exp[i0_4, k]
        for i0_5, i1 in T.grid(256, 256):
            with T.block("T_softmax_norm"):
                i0_6, i1_2 = T.axis.remap("SS", [i0_5, i1])
                T.reads(T_softmax_exp[i0_6, i1_2], T_softmax_expsum[i0_6])
                T.writes(T_softmax_norm[i0_6, i1_2])
                T.block_attr({"axis": 1})
                T_softmax_norm[i0_6, i1_2] = T_softmax_exp[i0_6, i1_2] / T_softmax_expsum[i0_6]
    # Sketch 1: cross-thread reduction applied to the row-max reduction
    # (threadIdx.x-bound, result staged in shared memory).
    @T.prim_func
    def softmax_mn_1(
        A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
        T_softmax_exp = T.alloc_buffer([256, 256], dtype="float32")
        T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
        for i0 in T.serial(256):
            for ax0, ax1_0 in T.grid(1, 1):
                for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
                    with T.block("T_softmax_maxelem"):
                        T.where(ax1_0 * 512 + ax1_1 < 256)
                        i0_1 = T.axis.spatial(256, i0 + ax0)
                        k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
                        T.reads(A[i0_1, k])
                        T.writes(T_softmax_maxelem_shared[i0_1])
                        with T.init():
                            T_softmax_maxelem_shared[i0_1] = T.float32(-3.4028234663852886e38)
                        T_softmax_maxelem_shared[i0_1] = T.max(
                            T_softmax_maxelem_shared[i0_1], A[i0_1, k]
                        )
            for i1_0 in T.serial(1):
                for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
                    with T.block("T_softmax_exp"):
                        T.where(i1_0 * 512 + i1_1 < 256)
                        i0_2 = T.axis.spatial(256, i0)
                        i1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
                        T.reads(A[i0_2, i1], T_softmax_maxelem_shared[i0_2])
                        T.writes(T_softmax_exp[i0_2, i1])
                        T_softmax_exp[i0_2, i1] = T.exp(
                            A[i0_2, i1] - T_softmax_maxelem_shared[i0_2], dtype="float32"
                        )
        for i0_3, i1 in T.grid(256, 256):
            with T.block("T_softmax_expsum"):
                i0_4, k = T.axis.remap("SR", [i0_3, i1])
                T.reads(T_softmax_exp[i0_4, k])
                T.writes(T_softmax_expsum[i0_4])
                with T.init():
                    T_softmax_expsum[i0_4] = T.float32(0)
                T_softmax_expsum[i0_4] = T_softmax_expsum[i0_4] + T_softmax_exp[i0_4, k]
        for i0_5, i1 in T.grid(256, 256):
            with T.block("T_softmax_norm"):
                i0_6, i1_2 = T.axis.remap("SS", [i0_5, i1])
                T.reads(T_softmax_exp[i0_6, i1_2], T_softmax_expsum[i0_6])
                T.writes(T_softmax_norm[i0_6, i1_2])
                T.block_attr({"axis": 1})
                T_softmax_norm[i0_6, i1_2] = T_softmax_exp[i0_6, i1_2] / T_softmax_expsum[i0_6]
    # Sketch 2: cross-thread reduction applied to the exp-sum reduction only.
    @T.prim_func
    def softmax_mn_2(
        A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
        T_softmax_exp = T.alloc_buffer([256, 256], dtype="float32")
        T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
        for i0, i1 in T.grid(256, 256):
            with T.block("T_softmax_maxelem"):
                i0_1, k = T.axis.remap("SR", [i0, i1])
                T.reads(A[i0_1, k])
                T.writes(T_softmax_maxelem[i0_1])
                with T.init():
                    T_softmax_maxelem[i0_1] = T.float32(-3.4028234663852886e38)
                T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
        for i0, i1 in T.grid(256, 256):
            with T.block("T_softmax_exp"):
                i0_2, i1_1 = T.axis.remap("SS", [i0, i1])
                T.reads(A[i0_2, i1_1], T_softmax_maxelem[i0_2])
                T.writes(T_softmax_exp[i0_2, i1_1])
                T_softmax_exp[i0_2, i1_1] = T.exp(
                    A[i0_2, i1_1] - T_softmax_maxelem[i0_2], dtype="float32"
                )
        for i0_3 in T.serial(256):
            for ax0, ax1_0 in T.grid(1, 32):
                for ax1_1 in T.thread_binding(8, thread="threadIdx.x"):
                    with T.block("T_softmax_expsum"):
                        i0_4 = T.axis.spatial(256, i0_3 + ax0)
                        k = T.axis.reduce(256, ax1_0 * 8 + ax1_1)
                        T.reads(T_softmax_exp[i0_4, k])
                        T.writes(T_softmax_expsum_shared[i0_4])
                        with T.init():
                            T_softmax_expsum_shared[i0_4] = T.float32(0)
                        T_softmax_expsum_shared[i0_4] = (
                            T_softmax_expsum_shared[i0_4] + T_softmax_exp[i0_4, k]
                        )
            for i1_0 in T.serial(32):
                for i1_1_1 in T.thread_binding(8, thread="threadIdx.x"):
                    with T.block("T_softmax_norm"):
                        i0_5 = T.axis.spatial(256, i0_3)
                        i1 = T.axis.spatial(256, i1_0 * 8 + i1_1_1)
                        T.reads(T_softmax_exp[i0_5, i1], T_softmax_expsum_shared[i0_5])
                        T.writes(T_softmax_norm[i0_5, i1])
                        T.block_attr({"axis": 1})
                        T_softmax_norm[i0_5, i1] = (
                            T_softmax_exp[i0_5, i1] / T_softmax_expsum_shared[i0_5]
                        )
    # Sketch 3: cross-thread reduction applied to both reductions.
    @T.prim_func
    def softmax_mn_3(
        A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
        T_softmax_exp = T.alloc_buffer([256, 256], dtype="float32")
        T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
        for i0 in T.serial(256):
            for ax0, ax1_0 in T.grid(1, 1):
                for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
                    with T.block("T_softmax_maxelem"):
                        T.where(ax1_0 * 512 + ax1_1 < 256)
                        i0_1 = T.axis.spatial(256, i0 + ax0)
                        k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
                        T.reads(A[i0_1, k])
                        T.writes(T_softmax_maxelem_shared[i0_1])
                        with T.init():
                            T_softmax_maxelem_shared[i0_1] = T.float32(-3.4028234663852886e38)
                        T_softmax_maxelem_shared[i0_1] = T.max(
                            T_softmax_maxelem_shared[i0_1], A[i0_1, k]
                        )
            for i1_0 in T.serial(1):
                for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
                    with T.block("T_softmax_exp"):
                        T.where(i1_0 * 512 + i1_1 < 256)
                        i0_2 = T.axis.spatial(256, i0)
                        i1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
                        T.reads(A[i0_2, i1], T_softmax_maxelem_shared[i0_2])
                        T.writes(T_softmax_exp[i0_2, i1])
                        T_softmax_exp[i0_2, i1] = T.exp(
                            A[i0_2, i1] - T_softmax_maxelem_shared[i0_2], dtype="float32"
                        )
        for i0_3 in T.serial(256):
            for ax0, ax1_0 in T.grid(1, 32):
                for ax1_1 in T.thread_binding(8, thread="threadIdx.x"):
                    with T.block("T_softmax_expsum"):
                        i0_4 = T.axis.spatial(256, i0_3 + ax0)
                        k = T.axis.reduce(256, ax1_0 * 8 + ax1_1)
                        T.reads(T_softmax_exp[i0_4, k])
                        T.writes(T_softmax_expsum_shared[i0_4])
                        with T.init():
                            T_softmax_expsum_shared[i0_4] = T.float32(0)
                        T_softmax_expsum_shared[i0_4] = (
                            T_softmax_expsum_shared[i0_4] + T_softmax_exp[i0_4, k]
                        )
            for i1_0 in T.serial(32):
                for i1_1 in T.thread_binding(8, thread="threadIdx.x"):
                    with T.block("T_softmax_norm"):
                        i0_5 = T.axis.spatial(256, i0_3)
                        i1 = T.axis.spatial(256, i1_0 * 8 + i1_1)
                        T.reads(T_softmax_exp[i0_5, i1], T_softmax_expsum_shared[i0_5])
                        T.writes(T_softmax_norm[i0_5, i1])
                        T.block_attr({"axis": 1})
                        T_softmax_norm[i0_5, i1] = (
                            T_softmax_exp[i0_5, i1] / T_softmax_expsum_shared[i0_5]
                        )
    # Sampled decisions that reproduce each sketch above (empty when no
    # sampling occurred for that sketch).
    decision_0 = []  # type: ignore
    decision_1 = [
        ("SampleCategorical", 7),
    ]
    decision_2 = [
        ("SampleCategorical", 1),
    ]
    decision_3 = [
        ("SampleCategorical", 1),
        ("SampleCategorical", 7),
    ]
    mod = create_prim_func(te_workload.softmax_mn(n=256, m=256))
    actual = generate_design_space(
        kind="cuda",
        mod=mod,
        target=Target("nvidia/geforce-rtx-3090", host="llvm"),
        types=ms.schedule_rule.CrossThreadReduction,
    )
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[softmax_mn_0, softmax_mn_1, softmax_mn_2, softmax_mn_3],
        expected_decisions=[decision_0, decision_1, decision_2, decision_3],
    )
def test_gpu_softmax_mn_after_inline():
@T.prim_func
def softmax_mn_after_inline_0(
A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")
) -> None:
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_maxelem"):
i0_1, k = T.axis.remap("SR", [i0, i1])
T.reads(A[i0_1, k])
T.writes(T_softmax_maxelem[i0_1])
with T.init():
T_softmax_maxelem[i0_1] = T.float32(-3.4028234663852886e38)
T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_expsum"):
i0_2, k = T.axis.remap("SR", [i0, i1])
T.reads(A[i0_2, k], T_softmax_maxelem[i0_2])
T.writes(T_softmax_expsum[i0_2])
with T.init():
T_softmax_expsum[i0_2] = T.float32(0)
T_softmax_expsum[i0_2] = T_softmax_expsum[i0_2] + T.exp(
A[i0_2, k] - T_softmax_maxelem[i0_2], dtype="float32"
)
for i0_3, i1 in T.grid(256, 256):
with T.block("T_softmax_norm"):
i0_4, i1_1 = T.axis.remap("SS", [i0_3, i1])
T.reads(A[i0_4, i1_1], T_softmax_maxelem[i0_4], T_softmax_expsum[i0_4])
T.writes(T_softmax_norm[i0_4, i1_1])
T.block_attr({"axis": 1})
T_softmax_norm[i0_4, i1_1] = (
T.exp(A[i0_4, i1_1] - T_softmax_maxelem[i0_4], dtype="float32")
/ T_softmax_expsum[i0_4]
)
@T.prim_func
def softmax_mn_after_inline_1(
A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")
) -> None:
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum = T.alloc_buffer([256], dtype="float32")
for i0, i1_0 in T.grid(256, 4):
for i1_1 in T.thread_binding(64, thread="threadIdx.x"):
with T.block("T_softmax_maxelem"):
i0_1 = T.axis.spatial(256, i0)
k = T.axis.reduce(256, i1_0 * 64 + i1_1)
T.reads(A[i0_1, k])
T.writes(T_softmax_maxelem[i0_1])
with T.init():
T_softmax_maxelem[i0_1] = T.float32(-3.4028234663852886e38)
T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_expsum"):
i0_2, k = T.axis.remap("SR", [i0, i1])
T.reads(A[i0_2, k], T_softmax_maxelem[i0_2])
T.writes(T_softmax_expsum[i0_2])
with T.init():
T_softmax_expsum[i0_2] = T.float32(0)
T_softmax_expsum[i0_2] = T_softmax_expsum[i0_2] + T.exp(
A[i0_2, k] - T_softmax_maxelem[i0_2], dtype="float32"
)
for i0_3, i1 in T.grid(256, 256):
with T.block("T_softmax_norm"):
i0_4, i1_1 = T.axis.remap("SS", [i0_3, i1])
T.reads(A[i0_4, i1_1], T_softmax_maxelem[i0_4], T_softmax_expsum[i0_4])
T.writes(T_softmax_norm[i0_4, i1_1])
T.block_attr({"axis": 1})
T_softmax_norm[i0_4, i1_1] = (
T.exp(A[i0_4, i1_1] - T_softmax_maxelem[i0_4], dtype="float32")
/ T_softmax_expsum[i0_4]
)
@T.prim_func
def softmax_mn_after_inline_2(
A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")
) -> None:
T_softmax_maxelem = T.alloc_buffer([256], dtype="float32")
T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
for i0, i1 in T.grid(256, 256):
with T.block("T_softmax_maxelem"):
i0_1, k = T.axis.remap("SR", [i0, i1])
T.reads(A[i0_1, k])
T.writes(T_softmax_maxelem[i0_1])
with T.init():
T_softmax_maxelem[i0_1] = T.float32(-3.4028234663852886e38)
T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k])
for i0_3 in T.serial(256):
for ax0, ax1_0 in T.grid(1, 1):
for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_expsum"):
T.where(ax1_0 * 512 + ax1_1 < 256)
i0_2 = T.axis.spatial(256, i0_3 + ax0)
k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
T.reads(A[i0_2, k], T_softmax_maxelem[i0_2])
T.writes(T_softmax_expsum_shared[i0_2])
with T.init():
T_softmax_expsum_shared[i0_2] = T.float32(0)
T_softmax_expsum_shared[i0_2] = T_softmax_expsum_shared[i0_2] + T.exp(
A[i0_2, k] - T_softmax_maxelem[i0_2], dtype="float32"
)
for i1_0 in T.serial(1):
for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
with T.block("T_softmax_norm"):
T.where(i1_0 * 512 + i1_1 < 256)
i0_4 = T.axis.spatial(256, i0_3)
i1_1_1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
T.reads(
A[i0_4, i1_1_1], T_softmax_maxelem[i0_4], T_softmax_expsum_shared[i0_4]
)
T.writes(T_softmax_norm[i0_4, i1_1_1])
T.block_attr({"axis": 1})
T_softmax_norm[i0_4, i1_1_1] = (
T.exp(A[i0_4, i1_1_1] - T_softmax_maxelem[i0_4], dtype="float32")
/ T_softmax_expsum_shared[i0_4]
)
    # Expected sketch: cross-thread reduction applied to BOTH the row-max and
    # the exp-sum steps, each staged in shared memory.
    @T.prim_func
    def softmax_mn_after_inline_3(
        A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")
    ) -> None:
        T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
        T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
        for i0_3 in T.serial(256):
            # Row-wise maximum reduced across 512 threads (tail masked by T.where).
            for ax0, ax1_0 in T.grid(1, 1):
                for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
                    with T.block("T_softmax_maxelem"):
                        T.where(ax1_0 * 512 + ax1_1 < 256)
                        i0_1 = T.axis.spatial(256, i0_3 + ax0)
                        k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
                        T.reads(A[i0_1, k])
                        T.writes(T_softmax_maxelem_shared[i0_1])
                        with T.init():
                            T_softmax_maxelem_shared[i0_1] = T.float32(-3.4028234663852886e38)
                        T_softmax_maxelem_shared[i0_1] = T.max(
                            T_softmax_maxelem_shared[i0_1], A[i0_1, k]
                        )
            # exp-sum, also reduced across the thread dimension.
            for ax0, ax1_0 in T.grid(1, 1):
                for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
                    with T.block("T_softmax_expsum"):
                        T.where(ax1_0 * 512 + ax1_1 < 256)
                        i0_2 = T.axis.spatial(256, i0_3 + ax0)
                        k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
                        T.reads(A[i0_2, k], T_softmax_maxelem_shared[i0_2])
                        T.writes(T_softmax_expsum_shared[i0_2])
                        with T.init():
                            T_softmax_expsum_shared[i0_2] = T.float32(0)
                        T_softmax_expsum_shared[i0_2] = T_softmax_expsum_shared[i0_2] + T.exp(
                            A[i0_2, k] - T_softmax_maxelem_shared[i0_2], dtype="float32"
                        )
            for i1_0 in T.serial(1):
                for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
                    with T.block("T_softmax_norm"):
                        T.where(i1_0 * 512 + i1_1 < 256)
                        i0_4 = T.axis.spatial(256, i0_3)
                        i1_1_1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
                        T.reads(
                            A[i0_4, i1_1_1],
                            T_softmax_maxelem_shared[i0_4],
                            T_softmax_expsum_shared[i0_4],
                        )
                        T.writes(T_softmax_norm[i0_4, i1_1_1])
                        T.block_attr({"axis": 1})
                        T_softmax_norm[i0_4, i1_1_1] = (
                            T.exp(A[i0_4, i1_1_1] - T_softmax_maxelem_shared[i0_4], dtype="float32")
                            / T_softmax_expsum_shared[i0_4]
                        )
decision_0 = [] # type: ignore
decision_1 = [
("SampleCategorical", 4),
]
decision_2 = [
("SampleCategorical", 7),
]
decision_3 = [
("SampleCategorical", 7),
("SampleCategorical", 0),
]
mod = Softmax_mn_after_inline
actual = generate_design_space(
kind="cuda",
mod=mod,
target=Target("nvidia/geforce-rtx-3090", host="llvm"),
types=ms.schedule_rule.CrossThreadReduction,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[
softmax_mn_after_inline_0,
softmax_mn_after_inline_1,
softmax_mn_after_inline_2,
softmax_mn_after_inline_3,
],
expected_decisions=[decision_0, decision_1, decision_2, decision_3],
)
def test_gpu_batch_norm_bmn():
    """CrossThreadReduction on batch-norm: expect the untouched sketch and one
    reducing the fused 512x512 domain across 256 threads."""

    @T.prim_func
    def batch_norm_bmn_0(A: T.Buffer((1, 512, 512), "float32"), D: T.Buffer(1, "float32")) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        C = T.alloc_buffer([1], dtype="float32")
        for i0, i1, i2 in T.grid(1, 512, 512):
            with T.block("C"):
                b, i, j = T.axis.remap("SRR", [i0, i1, i2])
                T.reads(A[b, i, j])
                T.writes(C[b])
                with T.init():
                    C[b] = T.float32(0)
                C[b] = C[b] + A[b, i, j] * A[b, i, j]
        for i0 in T.serial(1):
            with T.block("D"):
                b = T.axis.spatial(1, i0)
                T.reads(C[b])
                T.writes(D[b])
                D[b] = T.sqrt(C[b], dtype="float32")

    @T.prim_func
    def batch_norm_bmn_1(A: T.Buffer((1, 512, 512), "float32"), D: T.Buffer(1, "float32")) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        C_shared = T.alloc_buffer([1], dtype="float32", scope="shared")
        for i0_0 in T.serial(1):
            # The 512*512 reduction domain is fused and split over 256 threads.
            for ax0, ax1_ax2_fused_0 in T.grid(1, 1024):
                for ax1_ax2_fused_1 in T.thread_binding(256, thread="threadIdx.x"):
                    with T.block("C"):
                        b = T.axis.spatial(1, ax0)
                        i = T.axis.reduce(512, (ax1_ax2_fused_0 * 256 + ax1_ax2_fused_1) // 512)
                        j = T.axis.reduce(512, (ax1_ax2_fused_0 * 256 + ax1_ax2_fused_1) % 512)
                        T.reads(A[b, i, j])
                        T.writes(C_shared[b])
                        with T.init():
                            C_shared[b] = T.float32(0)
                        C_shared[b] = C_shared[b] + A[b, i, j] * A[b, i, j]
            for i0_1 in T.thread_binding(256, thread="threadIdx.x"):
                with T.block("D"):
                    T.where(i0_0 * 256 + i0_1 < 1)
                    b = T.axis.spatial(1, i0_0 * 256 + i0_1)
                    T.reads(C_shared[b])
                    T.writes(D[b])
                    D[b] = T.sqrt(C_shared[b], dtype="float32")

    decision_0 = []  # type: ignore
    decision_1 = [
        ("SampleCategorical", 6),
    ]
    mod = create_prim_func(te_workload.norm_bmn(B=1, M=512, N=512))
    actual = generate_design_space(
        kind="cuda",
        mod=mod,
        target=Target("nvidia/geforce-rtx-3090", host="llvm"),
        types=ms.schedule_rule.CrossThreadReduction,
    )
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[batch_norm_bmn_0, batch_norm_bmn_1],
        expected_decisions=[decision_0, decision_1],
    )
# Row-wise argmax over 128 columns: argmax_v0 holds the winning index,
# argmax_v1 the running maximum value.
@T.prim_func
def argmax(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    for i0, i1 in T.grid(128, 128):
        with T.block("argmax"):
            i = T.axis.spatial(128, i0)
            k = T.axis.reduce(128, i1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
# Same argmax workload with a reduction extent (32) smaller than the warp
# size, exercising the predicated cross-thread path.
@T.prim_func
def argmax_32(
    idx: T.Buffer((1, 32), "int32"),
    val: T.Buffer((1, 32), "float32"),
    argmax_v0: T.Buffer((1,), "int32"),
    argmax_v1: T.Buffer((1,), "float32"),
) -> None:
    for i0, i1 in T.grid(1, 32):
        with T.block("argmax"):
            i = T.axis.spatial(1, i0)
            k = T.axis.reduce(32, i1)
            T.reads(idx[i, k], val[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = -1
                argmax_v1[i] = T.min_value("float32")
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
def test_gpu_argmax():
    """CrossThreadReduction on argmax (128x128): expect the unchanged sketch
    and one splitting the 128-step reduction over 64 threads."""

    @T.prim_func
    def argmax_0(
        idx: T.Buffer((128, 128), "int32"),
        val: T.Buffer((128, 128), "float32"),
        argmax_v0: T.Buffer(128, "int32"),
        argmax_v1: T.Buffer(128, "float32"),
    ) -> None:
        # body
        # with T.block("root")
        for i0, i1 in T.grid(128, 128):
            with T.block("argmax"):
                i, k = T.axis.remap("SR", [i0, i1])
                T.reads(idx[i, k], val[i, k])
                T.writes(argmax_v0[i], argmax_v1[i])
                with T.init():
                    argmax_v0[i] = -1
                    argmax_v1[i] = T.float32(-3.4028234663852886e38)
                v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
                v_argmax_v1: T.float32 = T.Select(
                    argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k]
                )
                argmax_v0[i] = v_argmax_v0
                argmax_v1[i] = v_argmax_v1

    @T.prim_func
    def argmax_1(
        idx: T.Buffer((128, 128), "int32"),
        val: T.Buffer((128, 128), "float32"),
        argmax_v0: T.Buffer(128, "int32"),
        argmax_v1: T.Buffer(128, "float32"),
    ) -> None:
        # body
        # with T.block("root")
        for i0, i1_0 in T.grid(128, 2):
            for i1_1 in T.thread_binding(64, thread="threadIdx.x"):
                with T.block("argmax"):
                    i = T.axis.spatial(128, i0)
                    k = T.axis.reduce(128, i1_0 * 64 + i1_1)
                    T.reads(idx[i, k], val[i, k])
                    T.writes(argmax_v0[i], argmax_v1[i])
                    with T.init():
                        argmax_v0[i] = -1
                        argmax_v1[i] = T.float32(-3.4028234663852886e38)
                    v_argmax_v0: T.int32 = T.Select(
                        argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k]
                    )
                    v_argmax_v1: T.float32 = T.Select(
                        argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k]
                    )
                    argmax_v0[i] = v_argmax_v0
                    argmax_v1[i] = v_argmax_v1

    decision_0 = []  # type: ignore
    decision_1 = [
        ("SampleCategorical", 4),
    ]
    mod = argmax
    actual = generate_design_space(
        kind="cuda",
        mod=mod,
        target=Target("nvidia/geforce-rtx-3090", host="llvm"),
        types=ms.schedule_rule.CrossThreadReduction,
    )
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[argmax_0, argmax_1],
        expected_decisions=[decision_0, decision_1],
    )
def test_gpu_argmax_32():
    """Same as test_gpu_argmax but with only 32 reduction steps, so the
    64-thread sketch needs a T.where predicate to mask invalid lanes."""

    @T.prim_func
    def argmax_0(
        idx: T.Buffer((1, 32), "int32"),
        val: T.Buffer((1, 32), "float32"),
        argmax_v0: T.Buffer((1,), "int32"),
        argmax_v1: T.Buffer((1,), "float32"),
    ) -> None:
        # body
        # with T.block("root")
        for i0, i1 in T.grid(1, 32):
            with T.block("argmax"):
                i, k = T.axis.remap("SR", [i0, i1])
                T.reads(idx[i, k], val[i, k])
                T.writes(argmax_v0[i], argmax_v1[i])
                with T.init():
                    argmax_v0[i] = -1
                    argmax_v1[i] = T.float32(-3.4028234663852886e38)
                v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
                v_argmax_v1: T.float32 = T.Select(
                    argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k]
                )
                argmax_v0[i] = v_argmax_v0
                argmax_v1[i] = v_argmax_v1

    @T.prim_func
    def argmax_1(
        idx: T.Buffer((1, 32), "int32"),
        val: T.Buffer((1, 32), "float32"),
        argmax_v0: T.Buffer((1,), "int32"),
        argmax_v1: T.Buffer((1,), "float32"),
    ) -> None:
        # body
        # with T.block("root")
        for i0, i1_0 in T.grid(1, 1):
            for i1_1 in T.thread_binding(64, thread="threadIdx.x"):
                with T.block("argmax"):
                    i = T.axis.spatial(1, i0)
                    k = T.axis.reduce(32, i1_0 * 64 + i1_1)
                    # Only the first 32 of 64 threads carry real reduction work.
                    T.where(i1_0 * 64 + i1_1 < 32)
                    T.reads(idx[i, k], val[i, k])
                    T.writes(argmax_v0[i], argmax_v1[i])
                    with T.init():
                        argmax_v0[i] = -1
                        argmax_v1[i] = T.float32(-3.4028234663852886e38)
                    v_argmax_v0: T.int32 = T.Select(
                        argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k]
                    )
                    v_argmax_v1: T.float32 = T.Select(
                        argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k]
                    )
                    argmax_v0[i] = v_argmax_v0
                    argmax_v1[i] = v_argmax_v1

    decision_0 = []  # type: ignore
    decision_1 = [
        ("SampleCategorical", 4),
    ]
    mod = argmax_32
    actual = generate_design_space(
        kind="cuda",
        mod=mod,
        target=Target("nvidia/geforce-rtx-3090", host="llvm"),
        types=ms.schedule_rule.CrossThreadReduction,
    )
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[argmax_0, argmax_1],
        expected_decisions=[decision_0, decision_1],
    )
if __name__ == "__main__":
    # Allow running this test file as a plain script.
    test_gpu_softmax_mn()
    test_gpu_softmax_mn_after_inline()
    test_gpu_batch_norm_bmn()
    test_gpu_argmax()
    test_gpu_argmax_32()
| 34,219 | 43.673629 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_mutator_mutate_compute_location.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
from tvm import meta_schedule as ms
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir import Schedule
# pylint: disable=invalid-name, no-member
@T.prim_func
def add(a: T.handle, b: T.handle) -> None:
    # function attr dict
    T.func_attr({"global_symbol": "main"})
    A = T.match_buffer(a, [2048, 2048, 2048], dtype="float32")
    B = T.match_buffer(b, [2048, 2048, 2048], dtype="float32")
    A_cached = T.alloc_buffer([2048, 2048, 2048], dtype="float32")
    # body
    # Stage 1: copy A into the cache buffer ("move" is the block the mutator relocates).
    for i, j, k in T.grid(2048, 2048, 2048):
        with T.block("move"):
            vi, vj, vk = T.axis.remap("SSS", [i, j, k])
            T.reads([A[vi, vj, vk]])
            T.writes([A_cached[vi, vj, vk]])
            A_cached[vi, vj, vk] = A[vi, vj, vk]
    # Stage 2: tiled element-wise +1 reading from the cached copy.
    for i0, j0, i1, j1, k0, i2, j2, k1 in T.grid(128, 64, 4, 4, 64, 4, 8, 32):
        with T.block("add"):
            vi = T.axis.spatial(2048, i0 * 16 + i1 * 4 + i2)
            vj = T.axis.spatial(2048, j0 * 32 + j1 * 8 + j2)
            vk = T.axis.spatial(2048, k0 * 32 + k1)
            T.reads([A_cached[vi, vj, vk]])
            T.writes([B[vi, vj, vk]])
            B[vi, vj, vk] = A_cached[vi, vj, vk] + T.float32(1)
# pylint: enable=invalid-name, no-member
def _sch(decision: int) -> Schedule:
    """Build a schedule for `add` whose "move" block sits at a sampled loop.

    The compute location is drawn with the given trace ``decision`` so the
    mutator tests start from a reproducible schedule.
    """
    sch = Schedule(add, debug_mask="all")
    move_block = sch.get_block(name="move", func_name="main")
    sampled_loop = sch.sample_compute_location(block=move_block, decision=decision)
    sch.compute_at(block=move_block, loop=sampled_loop, preserve_unit_loops=True)
    return sch
def _make_mutator(target: Target) -> ms.Mutator:
    """Return the MutateComputeLocation mutator registered in a fresh TuneContext."""
    space_generator = ms.space_generator.PostOrderApply(
        sch_rules=[],
        postprocs=[],
        mutator_probs={ms.mutator.MutateComputeLocation(): 1.0},
    )
    ctx = ms.TuneContext(mod=add, target=target, space_generator=space_generator)
    # Exactly one mutator was registered above; hand back that instance.
    return next(iter(ctx.space_generator.mutator_probs))
def test_mutate_compute_location_add():
    """The mutator must always pick a compute location different from the
    original decision and, over many trials, cover every alternative."""
    mutator = _make_mutator(
        target=Target("llvm"),
    )
    sch = _sch(decision=4)
    results = set()
    for _ in range(100):
        trace = mutator.apply(sch.trace)
        decision = trace.decisions[trace.insts[-2]]
        # Idiomatic comparison: the mutated decision must differ from the
        # original one (4).
        assert decision != 4
        results.add(decision)
    # All 9 alternative compute locations should eventually be sampled.
    assert len(results) == 9
if __name__ == "__main__":
    # Allow running this test file as a plain script.
    test_mutate_compute_location_add()
| 3,269 | 34.543478 | 93 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_storage_rewrite.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm.driver.build_module import schedule_to_module
from tvm.script import tir as T
def test_storage_share():
    """A chain of elementwise stages should fold in place into one buffer."""
    m = te.var("m")
    l = te.var("l")
    A = te.placeholder((m, l), name="A")
    num_stage = 5
    B = A
    for t in range(num_stage):
        B = te.compute((m, l), lambda i, j: B[i, j] + (t + 1), name="A%d" % t)

    s = te.create_schedule(B.op)
    mod = schedule_to_module(s, [A, B])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.StorageRewrite()(mod)
    stmt = mod["main"].body

    # Verify only one allocation remains, i.e. in-place folding worked.
    num_alloc = [0]

    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            num_alloc[0] += 1

    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
    assert num_alloc[0] == 1
def register_mem(scope_tb, max_bits):
    """Register memory info for storage scope ``scope_tb`` with capacity ``max_bits``."""

    # Register mem info so StorageRewrite knows the scope's size limits.
    @tvm.register_func("tvm.info.mem.%s" % scope_tb)
    def mem_info_inp_buffer():
        return tvm.ir.make_node(
            "MemoryInfo", unit_bits=16, max_simd_bits=32, max_num_bits=max_bits, head_address=None
        )
def test_alloc_seq():
    """Two buffers with disjoint lifetimes in one scope share a single allocation."""
    scope_tb = "local.L0A"
    max_bits = 1024 * 1024 * 1024

    register_mem(scope_tb, max_bits)

    ib = tvm.tir.ir_builder.create()
    n = te.var("n")
    with ib.for_range(0, n, name="i") as i:
        with ib.for_range(0, 10, name="j") as j:
            A = ib.allocate("float32", 200, name="A", scope=scope_tb)
            A[j] = 1.2
        with ib.for_range(0, 10, name="j") as j:
            A = ib.allocate("float32", 200, name="B", scope=scope_tb)
            A[j] = 1.3

    body = ib.get()

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    body = tvm.tir.transform.StorageRewrite()(mod)["main"].body

    num_alloc = [0]

    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            num_alloc[0] += 1
            assert n.extents[0].value == 200

    tvm.tir.stmt_functor.post_order_visit(body, verify)
    assert num_alloc[0] == 1
def test_alloc_different_dtypes():
    """Buffers of mixed dtypes in one scope merge into a single allocation
    whose extent is expressed in elements of the first (base) dtype."""

    def stmt_generater(dtype_list, length):
        # Build a statement allocating one buffer per dtype plus an int8 consumer.
        ib = tvm.tir.ir_builder.create()
        base_dtype = dtype_list[0]
        global_a = te.placeholder((length,), name="global_a", dtype=base_dtype)
        assert len(dtype_list) == 4
        with ib.for_range(0, length, name="j") as j:
            dtype = dtype_list[0]
            A = ib.allocate(dtype, length, name="A", scope="local.L0A")
            A[j] = tvm.tir.const(1, dtype=dtype)
        with ib.for_range(0, length, name="j") as j:
            dtype = dtype_list[1]
            B = ib.allocate(dtype, length, name="B", scope="local.L0A")
            B[j] = tvm.tir.const(1, dtype=dtype)
        with ib.for_range(0, length, name="j") as j:
            dtype = dtype_list[2]
            C = ib.allocate(dtype, length, name="C", scope="local.L0A")
            C[j] = tvm.tir.const(1, dtype=dtype)
        with ib.for_range(0, length, name="j") as j:
            dtype = dtype_list[3]
            D = ib.allocate(dtype, length, name="D", scope="local.L0A")
            D[j] = tvm.tir.const(1, dtype=dtype)
        with ib.for_range(0, length, name="j") as j:
            dtype = "int8"
            E = ib.allocate(dtype, length, name="E", scope="local.L0A")
            E[j] = A[j].astype(dtype) + B[j].astype(dtype) + C[j].astype(dtype) + D[j].astype(dtype)
        return ib.get()

    def dtype_bit_len(dtype):
        # Extract the bit width from a dtype string, e.g. "float32" -> 32.
        index = 0
        for i in dtype:
            if i.isdigit():
                break
            index += 1
        return int(dtype[index:])

    def offset_generater(dtype_list, length):
        # Total extent of the merged pool, measured in base-dtype elements.
        dtype_len_list = [dtype_bit_len(i) for i in dtype_list]
        base_len = dtype_len_list[0]
        return sum([i * length / base_len for i in dtype_len_list])

    def dtype_test(dtype_list, length):
        def verify(n):
            if isinstance(n, tvm.tir.Allocate):
                assert n.extents[0].value == offset

        body = stmt_generater(dtype_list, length)
        offset = offset_generater(dtype_list, length)

        mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], body))
        body = tvm.tir.transform.StorageRewrite()(mod)["main"].body
        tvm.tir.stmt_functor.post_order_visit(body, verify)

    length = 1024
    dtype_list = ["float16", "int32", "uint16", "int8"]
    dtype_test(dtype_list, length)

    dtype_list = ["float32", "int32", "uint16", "int8"]
    dtype_test(dtype_list, length)

    dtype_list = ["float64", "int32", "uint16", "int8"]
    dtype_test(dtype_list, length)

    dtype_list = ["int8", "int32", "uint16", "uint8"]
    dtype_test(dtype_list, length)
def test_inplace_rule():
    """A0/A1 feed AA, but A1[0] is also read, so only partial in-place reuse applies."""
    m = 10
    A = te.placeholder((m,), name="A")
    A0 = te.compute((m,), lambda i: A[i], name="A0")
    A1 = te.compute((m,), lambda i: A[i] + 1, name="A1")
    AA = te.compute((m,), lambda i: A0[i] + A1[i] + A1[0], name="AA")
    B = te.compute((m,), lambda i: AA[i] + 1, name="B")
    s = te.create_schedule(B.op)
    mod = schedule_to_module(s, [A, B])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.StorageRewrite()(mod)
    stmt = mod["main"].body

    # Verify in-place folding leaves exactly two allocations.
    num_alloc = [0]

    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            num_alloc[0] += 1

    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
    assert num_alloc[0] == 2
def test_storage_combine():
    """All stages in the tagged "global:tag" scope combine into one 16-element pool."""
    n = 8
    A = te.placeholder((4,), name="A")
    num_stage = 5
    B = A
    stages = []
    for t in range(num_stage):
        B = te.compute((n,), lambda i: B[i] + B[0] + (t + 1), name="A%d" % t)
        stages.append(B)

    s = te.create_schedule(B.op)
    for S in stages[:-1]:
        s[S].set_scope("global:tag")

    mod = schedule_to_module(s, [A, B])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.StorageRewrite()(mod)
    stmt = mod["main"].body

    num_alloc = [0]

    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            num_alloc[0] += 1
            assert n.extents[0].value == 16

    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
    assert num_alloc[0] == 1
def test_storage_combine_with_vectorization():
    """Combined buffers must remain correctly offset when accesses are vectorized."""
    n = 1024
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    C = te.compute((n,), lambda i: A[i] + B[i], name="C")
    s = te.create_schedule(C.op)
    AA = s.cache_read(A, "global:tag", readers=[C])
    BB = s.cache_read(B, "global:tag", readers=[C])
    CC = s.cache_write(C, "global:tag")
    s[CC].vectorize(s[CC].op.axis[0])
    mod = schedule_to_module(s, [A, B, C])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    mod = tvm.tir.transform.VectorizeLoop()(mod)
    mod = tvm.tir.transform.StorageRewrite()(mod)
    mod = tvm.tir.transform.Simplify()(mod)
    stmt = mod["main"].body
    num_alloc = [0]

    def verify(v):
        # find the vectorized add op
        if (
            isinstance(v, tvm.tir.Add)
            and isinstance(v.a, tvm.tir.BufferLoad)
            and isinstance(v.b, tvm.tir.BufferLoad)
        ):
            lhs_ramp = v.a.indices[0]
            rhs_ramp = v.b.indices[0]
            # these two ramp loads should not overlap in the combined pool
            assert lhs_ramp.lanes == n
            assert rhs_ramp.lanes == n
            assert lhs_ramp.base >= rhs_ramp.base + n or rhs_ramp.base >= lhs_ramp.base + n
        elif isinstance(v, tvm.tir.Allocate):
            num_alloc[0] += 1

    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
    assert num_alloc[0] == 1
def test_address_of():
    """Buffers whose addresses escape via T.address_of still allow reuse of
    non-overlapping lifetimes: B and D may be combined, but C may not.

    The leftover debug ``mod.show()`` calls were removed; they dumped the
    whole module to stdout on every test run.
    """

    # In this test, the storage rewrite pass is allowed to
    # combine buffers B and D, but not C
    @T.prim_func
    def before(A: T.Buffer(8, "float32"), E: T.Buffer(8, "float32")):
        B_data = T.allocate([8], "float32")
        B = T.Buffer(8, data=B_data, align=32)
        for i in range(8):
            B[i] = (
                T.call_extern("deref", T.address_of(A[i]), dtype="float32")
                + T.call_extern("deref", T.address_of(A[0]), dtype="float32")
                + T.float32(1)
            )
        C_data = T.allocate([8], "float32")
        C = T.Buffer(8, data=C_data, align=32)
        for i in range(8):
            C[i] = (
                T.call_extern("deref", T.address_of(B[i]), dtype="float32")
                + T.call_extern("deref", T.address_of(B[0]), dtype="float32")
                + T.float32(2)
            )
        D_data = T.allocate([8], "float32")
        D = T.Buffer(8, data=D_data, align=32)
        for i in range(8):
            D[i] = (
                T.call_extern("deref", T.address_of(C[i]), dtype="float32")
                + T.call_extern("deref", T.address_of(C[0]), dtype="float32")
                + T.float32(2)
            )
        for i in range(8):
            E[i] = (
                T.call_extern("deref", T.address_of(D[i]), dtype="float32")
                + T.call_extern("deref", T.address_of(D[0]), dtype="float32")
                + T.float32(3)
            )

    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            total_alloc[0] += n.extents[0].value

    # Before the pass: three 8-element allocations (B, C, D).
    total_alloc = [0]
    mod = tvm.IRModule.from_expr(before)
    tvm.tir.stmt_functor.post_order_visit(mod["main"].body, verify)
    assert total_alloc[0] == 24

    # After the pass: B and D are merged, so 16 elements remain.
    total_alloc[0] = 0
    mod = tvm.tir.transform.StorageRewrite()(mod)
    tvm.tir.stmt_functor.post_order_visit(mod["main"].body, verify)
    assert total_alloc[0] == 16
def test_storage_share_gpu():
    """Shared-scope stages each keep their own allocation; global ones fold to two."""
    m = te.var("m")
    A = [te.placeholder((m), name="A")]
    num_stage = 5
    for t in range(num_stage):
        A.append(te.compute((m,), lambda i: A[-1][i] + (t + 1), name="A%d_s" % t))
        A.append(te.compute((m,), lambda i: A[-1][i], name="A%d" % t))
    s = te.create_schedule(A[-1].op)
    for t in range(num_stage):
        x = A[2 * t + 2].op.axis[0]
        bx, tx = s[A[2 * t + 2]].split(x, factor=32)
        s[A[2 * t + 2]].bind(bx, te.thread_axis("blockIdx.x"))
        s[A[2 * t + 2]].bind(tx, te.thread_axis("threadIdx.x"))
        s[A[2 * t + 1]].compute_at(s[A[2 * t + 2]], tx)
        s[A[2 * t + 1]].set_scope("shared")

    mod = schedule_to_module(s, [A[0], A[-1]])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.StorageRewrite()(mod)
    stmt = mod["main"].body

    alloc_stats = {"global": 0, "shared": 0}

    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            scope = n.buffer_var.type_annotation.storage_scope
            alloc_stats[scope] += 1

    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
    assert alloc_stats["global"] == 2
    assert alloc_stats["shared"] == num_stage
def test_parallel_alloc():
    """Allocations inside a parallel loop must stay inside it; a declared
    parallel_launch_point pragma marks where hoisting may stop."""
    ib = tvm.tir.ir_builder.create()
    n = te.var("n")
    with ib.for_range(0, n, name="i", kind="parallel") as i:
        with ib.for_range(0, 10, name="j") as j:
            A = ib.allocate("float32", n, name="A", scope="global")
            A[j] = A[j] + 2

    body = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    body = tvm.tir.transform.StorageRewrite()(mod)["main"]

    # The allocation stays within the parallel loop.
    assert isinstance(body.body.body, tvm.tir.Allocate)

    ib = tvm.tir.ir_builder.create()
    n = te.var("n")
    with ib.for_range(0, n, name="t") as i:
        ib.scope_attr(
            tvm.tir.const(1, "int32"), "pragma_scope", tvm.tir.StringImm("parallel_launch_point")
        )
        with ib.for_range(0, n, name="i", kind="parallel") as i:
            with ib.for_range(0, 10, name="j") as j:
                A = ib.allocate("float32", n, name="A", scope="global")
                A[j] = A[j] + 2

    body = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    body = tvm.tir.transform.StorageRewrite()(mod)["main"]

    # The allocation is hoisted up to the launch point, not beyond.
    assert isinstance(body.body.body.body.body, tvm.tir.Allocate)
def test_while_alloc():
    """Allocations inside a while loop hoist past `while`, but hoisting stops
    at an enclosing parallel `for`."""

    def get_mod(kind="serial"):
        ib = tvm.tir.ir_builder.create()
        n = te.var("n")
        with ib.for_range(0, n, name="i", kind=kind) as i:
            j = ib.allocate("int32", 1, name="j", scope="global")
            j[0] = 0
            with ib.while_loop(j[0] < 10):
                A = ib.allocate("float32", n, name="A", scope="global")
                A[j[0]] = A[j[0]] + 2
                j[0] += j[0] + 1
        body = ib.get()
        return tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))

    mod = get_mod(kind="parallel")
    # parallel (i, 0, n) {
    #   allocate j[int32 * 1]
    #   j[0] = 0
    #   while((j[0] < 10)){
    #     // attr [A] storage_scope = "global"
    #     allocate A[float32 * n]
    #     A[j[0]] = (A[j[0]] + 2f)
    #     j[0] = (j[0] + (j[0] + 1))
    #   }
    # }
    body = tvm.tir.transform.StorageRewrite()(mod)["main"]
    # parallel (i, 0, n) {
    #   allocate j[int32 * 1]
    #   allocate A[float32 * n]
    #   j[0] = 0
    #   while((j[0] < 10)){
    #     A[j[0]] = (A[j[0]] + 2f)
    #     j[0] = (j[0] + (j[0] + 1))
    #   }
    # }
    assert isinstance(body.body.body, tvm.tir.Allocate)  # j
    assert isinstance(body.body.body.body, tvm.tir.Allocate)  # A

    mod = get_mod(kind="serial")
    # for (i, 0, n) {
    #   allocate j[int32 * 1]
    #   j[0] = 0
    #   while((j[0] < 10)){
    #     // attr [A] storage_scope = "global"
    #     allocate A[float32 * n]
    #     A[j[0]] = (A[j[0]] + 2f)
    #     j[0] = (j[0] + (j[0] + 1))
    #   }
    # }
    body = tvm.tir.transform.StorageRewrite()(mod)["main"]
    # allocate j[int32 * 1]
    # allocate A[float32 * n]
    # for (i, 0, n) {
    #   j[0] = 0
    #   while((j[0] < 10)){
    #     A[j[0]] = (A[j[0]] + 2f)
    #     j[0] = (j[0] + (j[0] + 1))
    #   }
    # }
    assert isinstance(body.body, tvm.tir.Allocate)  # j
    assert isinstance(body.body.body, tvm.tir.Allocate)  # A
def test_inplace_rule2(scope_tb="local_TB2", max_bits=1024 * 1024 * 1024):
    """In-place folding with cached reads in a registered special scope.

    Also reused by test_exceed_mem with a tiny `max_bits` to trigger the
    "Allocation exceed bound of memory" error.
    """
    # Test Buffer
    register_mem(scope_tb, max_bits)
    m = 10
    A = te.placeholder((m,), name="A")
    C = te.placeholder((m,), name="C")
    D = te.placeholder((m,), name="D")
    A0 = te.compute((m,), lambda i: A[i] + C[i], name="A0")
    A1 = te.compute((m,), lambda i: D[i] * D[i], name="A1")
    A2 = te.compute((m,), lambda i: A0[i] + A1[i], name="A2")
    B = te.compute((m,), lambda i: A2[i], name="B")
    s = te.create_schedule(B.op)
    A0L = s.cache_read(A0, scope_tb, [A2])
    A1L = s.cache_read(A1, scope_tb, [A2])
    A2L = s.cache_read(A2, scope_tb, [B])
    mod = schedule_to_module(s, [A, B, C, D])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.StorageRewrite()(mod)
    stmt = mod["main"].body

    # Verify in-place folding leaves exactly two allocations.
    num_alloc = [0]

    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            num_alloc[0] += 1

    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
    assert num_alloc[0] == 2
def test_exceed_mem():
    """StorageRewrite must reject allocations beyond a scope's max_num_bits."""
    max_bits = 639
    # The critical max_num_bits is between 639 and 640
    # pytest.raises replaces the hand-rolled try/except + substring search:
    # `match` performs a regex search over the exception message.
    with pytest.raises(Exception, match="Allocation exceed bound of memory"):
        test_inplace_rule2("local_TEM", max_bits)
def test_inplace_rule3():
    """Cross-product style computation: after inlining, all cached buffers
    fold into a single 70-element pool in the registered scope."""
    # Test Buffer
    scope_tb = "local_TB3"
    max_bits = 1024 * 1024 * 1024

    register_mem(scope_tb, max_bits)
    m = 10
    B0 = te.placeholder((m,), name="B0")
    B1 = te.placeholder((m,), name="B1")
    B2 = te.placeholder((m,), name="B2")
    B3 = te.placeholder((m,), name="B3")
    B4 = te.placeholder((m,), name="B4")
    B5 = te.placeholder((m,), name="B5")

    B6 = te.compute((m,), lambda i: B1[i] * B5[i], name="B6")
    B7 = te.compute((m,), lambda i: B2[i] * B4[i], name="B7")
    B8 = te.compute((m,), lambda i: B6[i] - B7[i], name="B8")
    B9 = te.compute((m,), lambda i: B2[i] * B3[i], name="B9")
    B10 = te.compute((m,), lambda i: B0[i] * B5[i], name="B10")
    B11 = te.compute((m,), lambda i: B9[i] - B10[i], name="B11")
    B12 = te.compute((m,), lambda i: B0[i] * B4[i], name="B12")
    B13 = te.compute((m,), lambda i: B1[i] * B3[i], name="B13")
    B14 = te.compute((m,), lambda i: B12[i] - B13[i], name="B14")
    B = te.compute((m,), lambda i: B8[i] * B11[i] + B14[i], name="B")
    s = te.create_schedule(B.op)

    B1L = s.cache_read(B1, scope_tb, [B6, B13])
    B5L = s.cache_read(B5, scope_tb, [B6, B10])
    B2L = s.cache_read(B2, scope_tb, [B7, B9])
    B4L = s.cache_read(B4, scope_tb, [B7, B12])
    B3L = s.cache_read(B3, scope_tb, [B9, B13])
    B0L = s.cache_read(B0, scope_tb, [B10, B12])

    B8L = s.cache_write(B8, scope_tb)
    B11L = s.cache_write(B11, scope_tb)
    B14L = s.cache_write(B14, scope_tb)
    B6L = s.cache_write(B6, scope_tb)
    B7L = s.cache_write(B7, scope_tb)
    B9L = s.cache_write(B9, scope_tb)
    B10L = s.cache_write(B10, scope_tb)
    B12L = s.cache_write(B12, scope_tb)
    B13L = s.cache_write(B13, scope_tb)

    s[B12].compute_inline()
    s[B13].compute_inline()
    s[B8].compute_inline()
    s[B11].compute_inline()
    s[B14].compute_inline()
    s[B6].compute_inline()
    s[B7].compute_inline()
    s[B9].compute_inline()
    s[B10].compute_inline()

    s = s.normalize()
    mod = schedule_to_module(s, [B0, B1, B2, B3, B4, B5, B])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.StorageRewrite()(mod)
    stmt = mod["main"].body

    # Every remaining allocation must be the expected 70-element pool.
    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            assert n.extents[0].value == 70

    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
def test_alloc_seq_type():
    """Overlapping float32/int16 buffers pack into one 500-element pool."""
    ib = tvm.tir.ir_builder.create()
    n = te.var("n")
    with ib.for_range(0, n, name="i") as i:
        with ib.for_range(0, 10, name="j") as j:
            A = ib.allocate("float32", 200, name="A", scope="local.L0A")
            A1 = ib.allocate("float32", 200, name="A1", scope="local.L0A")
            A[j] = 1.2
            A1[j] = 1.3
            B = ib.allocate("int16", 200, name="B", scope="local.L0A")
            B[j] = tvm.tir.const(1, "int16")
            C = ib.allocate("int16", 200, name="C", scope="local.L0A")
            C[j] = tvm.tir.const(1, "int16")
            D = ib.allocate("int16", 200, name="D", scope="local.L0A")
            D[j] = B[j] + C[j]
            A2 = ib.allocate("float32", 200, name="A2", scope="local.L0A")
            A2[j] = A[j]

    body = ib.get()

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    body = tvm.tir.transform.StorageRewrite()(mod)["main"].body

    num_alloc = [0]

    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            num_alloc[0] += 1
            assert n.extents[0].value == 500

    tvm.tir.stmt_functor.post_order_visit(body, verify)
    assert num_alloc[0] == 1
def test_alloc_seq_type2():
    """Sequential float32/int16/float32 buffers of equal byte size reuse one
    200-element allocation in a registered scope."""
    scope_tb = "local.L0A2"
    max_bits = 1024 * 1024 * 1024

    register_mem(scope_tb, max_bits)

    ib = tvm.tir.ir_builder.create()
    n = te.var("n")
    with ib.for_range(0, n, name="i") as i:
        with ib.for_range(0, 10, name="j") as j:
            A = ib.allocate("float32", 200, name="A", scope=scope_tb)
            A[j] = 1.2
        with ib.for_range(0, 20, name="j") as j:
            B = ib.allocate("int16", 400, name="B", scope=scope_tb)
            B[j] = tvm.tir.const(1, "int16")
        with ib.for_range(0, 10, name="j") as j:
            C = ib.allocate("float32", 200, name="C", scope=scope_tb)
            C[j] = 1.2

    body = ib.get()

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    body = tvm.tir.transform.StorageRewrite()(mod)["main"].body

    num_alloc = [0]

    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            num_alloc[0] += 1
            assert n.extents[0].value == 200

    tvm.tir.stmt_functor.post_order_visit(body, verify)
    assert num_alloc[0] == 1
def test_reuse_small_buffer():
    """Small dead buffers are merged into a single 800-element int16 pool."""
    ib = tvm.tir.ir_builder.create()
    n = te.var("n")
    with ib.for_range(0, n, name="i") as i:
        with ib.for_range(0, 10, name="j") as j:
            A = ib.allocate("int16", 200, name="A", scope="local.L0A")
            A[j] = tvm.tir.const(1, "int16")
            B = ib.allocate("int16", 200, name="B", scope="local.L0A")
            B[j] = tvm.tir.const(1, "int16")
            B1 = ib.allocate("int16", 200, name="B1", scope="local.L0A")
            B1[j] = A[j] + B[j]
            C = ib.allocate("int16", 400, name="C", scope="local.L0A")
            C[j] = tvm.tir.const(1, "int16")
            D = ib.allocate("int16", 400, name="D", scope="local.L0A")
            D[j] = tvm.tir.const(1, "int16")
            E = ib.allocate("int16", 400, name="E", scope="local.L0A")
            E[j] = C[j]

    body = ib.get()

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    body = tvm.tir.transform.StorageRewrite()(mod)["main"].body

    num_alloc = [0]

    def verify(n):
        if isinstance(n, tvm.tir.Allocate):
            num_alloc[0] += 1
            assert n.extents[0].value == 800

    tvm.tir.stmt_functor.post_order_visit(body, verify)
    assert num_alloc[0] == 1
def test_replace_dataflow():
    """cache_read on a tensor consumed by several stages keeps bounds inferable."""
    shape = (255,)
    src = te.placeholder(shape, name="A")

    def _next_stage(prev, name):
        # Each stage adds the previous stage's output to the original input.
        return te.compute(shape, lambda i: src[i] + prev[i], name=name)

    stages = []
    prev = src
    for stage_name in ("B", "C", "D", "E"):
        prev = _next_stage(prev, stage_name)
        stages.append(prev)

    s = te.create_schedule(prev.op)
    s.cache_read(src, "local", stages)
    bounds = tvm.te.schedule.InferBound(s)
    assert isinstance(bounds, tvm.container.Map)
def test_large_input():
    """Allocation extents for a 16384x16384 int32 intermediate must be
    computed without overflow: the verify step checks the extent equals
    16384 * 16384 = 268435456 elements."""
    @te.hybrid.script
    def compute(a, b):
        n = 16384
        c = output_tensor((n, n), "int32")
        for i in range(n):
            for j in range(n):
                c[i, j] = a[i, j] - b[i, j]
        return c
    n = 16384
    shape = (n, n)
    a = te.placeholder(shape, name="a", dtype="int32")
    b = te.placeholder(shape, name="b", dtype="int32")
    c = te.compute(shape, lambda i, j: compute(a, b)[i, j])
    c = te.compute(shape, lambda i, j: 1 + c[i, j])
    s = te.create_schedule(c.op)
    stmt = tvm.lower(s, [a, b, c])["main"].body
    def verify(n):
        # Only Allocate nodes are inspected; everything else is ignored.
        if isinstance(n, tvm.tir.Allocate):
            assert n.extents[0].value == 268435456
    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
def test_access_in_let_value():
    """Buffer accesses appearing inside a LetStmt value (here T.exp(B[0]))
    must be tracked: StorageRewrite hoists B's per-iteration allocation
    above the loop, as encoded in func_rewritten."""
    @T.prim_func
    def func(A: T.Buffer((8,), "float32")):
        for i in range(8):
            B_data = T.allocate((1,), "float32", "global")
            B = T.Buffer(shape=[1], dtype="float32", data=B_data)
            B[0] = 3.14
            x: T.float32 = T.exp(B[0], dtype="float32")
            A[i] = (x + 1.0) / (x - 1.0)
    # Expected output: a single allocation lifted out of the loop.
    @T.prim_func
    def func_rewritten(A: T.Buffer((8,), "float32")) -> None:
        B_data = T.allocate((1,), "float32", "global")
        B = T.Buffer(shape=[1], dtype="float32", data=B_data)
        for i in range(8):
            B[0] = 3.14
            x: T.float32 = T.exp(B[0], dtype="float32")
            A[i] = (x + 1.0) / (x - 1.0)
    mod = tvm.tir.transform.StorageRewrite()(tvm.IRModule.from_expr(func))
    tvm.ir.assert_structural_equal(mod["main"], func_rewritten)
class BaseCompare(tvm.testing.CompareBeforeAfter):
    """Shared base: apply StorageRewrite to `before` and compare the
    result structurally against `expected`."""
    transform = tvm.tir.transform.StorageRewrite()
class TestLetBufferRewrite(BaseCompare):
    """StorageRewrite replaces the bound var of backing allocations

    If StorageRewrite replaces the backing variable of an array, such
    as when vectorizing the storage type, the variable must be
    replaced in the LetStmt that defines it.  Currently, StmtMutator
    only visits usage of variables, and does not visit definitions of
    variables, so the definition in a LetStmt must be explicitly
    handled.
    """
    def before() -> None:
        A_data: T.handle("int32") = T.call_extern("dummy_func", dtype="handle")
        A = T.Buffer([8], "int32", data=A_data)
        A[0:8] = T.broadcast(42, 8)
    def expected() -> None:
        # The let-bound handle's pointed-to type is rewritten to int32x8
        # together with the vectorized buffer it backs.
        A_data: T.handle("int32x8") = T.call_extern("dummy_func", dtype="handle")
        A = T.Buffer([1], "int32x8", data=A_data)
        A[0] = T.broadcast(42, 8)
class TestRewriteInPlaceUseOfNonFlatBuffer(BaseCompare):
    """A non-flat buffer may be re-used for in-place operations

    B and C have identical shapes and axis separators, so C can alias
    B's backing allocation (see `expected`, where C uses B.data).
    """
    def before(A: T.Buffer((16, 16), "float32"), D: T.Buffer((16, 16), "float32")):
        B_data = T.allocate(
            [16, 16],
            dtype="float32",
            scope="global",
        )
        B = T.Buffer(
            [16, 16],
            dtype="float32",
            axis_separators=[1],
            data=B_data,
        )
        C_data = T.allocate(
            [16, 16],
            dtype="float32",
            scope="global",
        )
        C = T.Buffer(
            [16, 16],
            dtype="float32",
            axis_separators=[1],
            data=C_data,
        )
        for i, j in T.grid(16, 16):
            B[i, j] = A[i, j]
        for i, j in T.grid(16, 16):
            C[i, j] = 2.0 * B[i, j]
        for i, j in T.grid(16, 16):
            D[i, j] = C[i, j]
    def expected(A: T.Buffer((16, 16), "float32"), D: T.Buffer((16, 16), "float32")):
        B_data = T.allocate(
            [16, 16],
            dtype="float32",
            scope="global",
        )
        B = T.Buffer([16, 16], dtype="float32", axis_separators=[1], data=B_data)
        # C aliases B's allocation rather than allocating its own.
        C = T.Buffer(
            [16, 16],
            dtype="float32",
            axis_separators=[1],
            data=B.data,
        )
        for i, j in T.grid(16, 16):
            B[i, j] = A[i, j]
        for i, j in T.grid(16, 16):
            C[i, j] = 2.0 * B[i, j]
        for i, j in T.grid(16, 16):
            D[i, j] = C[i, j]
class TestNoRewriteOfSharedNonFlatBuffer(BaseCompare):
    """In general, sharing of non-flat buffer isn't supported

    The current packing algorithms in StorageRewrite assume a flat
    memory space, and do not support packing of N-d buffers.  For
    buffers with axis separators, normal buffer sharing should be
    disabled.

    Like TestRewriteInPlaceUseOfNonFlatBuffer, except that B and C do
    not have matching shapes, so the function must stay unchanged
    (expected = before).
    """
    def before(A: T.Buffer((16, 16), "float32"), D: T.Buffer((16, 16), "float32")):
        B_data = T.allocate(
            [16, 16],
            dtype="float32",
            scope="global",
        )
        B = T.Buffer(
            [16, 16],
            dtype="float32",
            axis_separators=[1],
            data=B_data,
        )
        # C is deliberately larger (20x20) than B, so it cannot alias B.
        C_data = T.allocate(
            [20, 20],
            dtype="float32",
            scope="global",
        )
        C = T.Buffer(
            [20, 20],
            dtype="float32",
            axis_separators=[1],
            data=C_data,
        )
        for i, j in T.grid(16, 16):
            B[i, j] = A[i, j]
        for i, j in T.grid(16, 16):
            C[i, j] = 2.0 * B[i, j]
        for i, j in T.grid(16, 16):
            D[i, j] = C[i, j]
    expected = before
class TestRewriteDeclBuffer(BaseCompare):
    """A DeclBuffer node may appear in StorageRewrite's input

    C's storage is merged into B's allocation (C reuses B.data in the
    expected output) even when the buffers come from T.decl_buffer.
    """
    def before(A: T.Buffer(16, "float32"), D: T.Buffer(16, "float32")):
        B = T.decl_buffer(16, dtype="float32")
        C = T.decl_buffer(16, dtype="float32")
        for i in range(16):
            B[i] = A[i]
        for i in range(16):
            C[i] = 2.0 * B[i]
        for i in range(16):
            D[i] = C[i]
    def expected(A: T.Buffer(16, "float32"), D: T.Buffer(16, "float32")):
        B = T.decl_buffer(16, dtype="float32")
        C = T.decl_buffer(16, dtype="float32", data=B.data)
        for i in range(16):
            B[i] = A[i]
        for i in range(16):
            C[i] = 2.0 * B[i]
        for i in range(16):
            D[i] = C[i]
class TestNoOrphanedDeclBuffer(BaseCompare):
    """A DeclBuffer of an unused Allocate should be removed

    StorageRewrite removes any allocations that are unused.  When it
    does so, any DeclBuffer that refers to that allocation should also
    be removed (the `Unused` buffer does not appear in `expected`).
    """
    def before(A: T.Buffer(16, "float32"), D: T.Buffer(16, "float32")):
        B = T.decl_buffer(16, dtype="float32")
        C = T.decl_buffer(16, dtype="float32")
        Unused = T.decl_buffer(16, dtype="float32")
        for i in range(16):
            B[i] = A[i]
        for i in range(16):
            C[i] = 2.0 * B[i]
        for i in range(16):
            D[i] = C[i]
    def expected(A: T.Buffer(16, "float32"), D: T.Buffer(16, "float32")):
        B = T.decl_buffer(16, dtype="float32")
        C = T.decl_buffer(16, dtype="float32", data=B.data)
        for i in range(16):
            B[i] = A[i]
        for i in range(16):
            C[i] = 2.0 * B[i]
        for i in range(16):
            D[i] = C[i]
# Allow running this test file directly (outside of pytest collection).
if __name__ == "__main__":
    tvm.testing.main()
| 30,072 | 31.301826 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_target_codegen_llvm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
import ctypes
import json
import math
import numpy as np
import pytest
import re
import sys
import tvm
import tvm.testing
from tvm import te
from tvm.contrib import clang, utils
from tvm.relay.backend import Runtime
from tvm.script import tir as T, ir as I
from tvm.target.codegen import llvm_get_intrinsic_name, llvm_lookup_intrinsic_id
@tvm.testing.requires_llvm
def test_llvm_intrin():
    """Build a module containing the tir.prefetch intrinsic with the LLVM
    backend.

    Only checks that codegen succeeds; prefetch has no observable result.
    Fix: removed the unused local ``n`` and the unused ``fcode`` binding.
    """
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    # Args after the address are 0, 3, 1 — presumably rw/locality/cache-type
    # flags of the prefetch intrinsic; confirm against tir.prefetch docs.
    args = [tvm.tir.call_intrin("handle", "tir.address_of", A[0]), 0, 3, 1]
    ib.emit(tvm.tir.Evaluate(tvm.tir.Call("int32", "tir.prefetch", args)))
    body = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A], body).with_attr("global_symbol", "prefetch"))
    tvm.build(mod, None, "llvm")
@tvm.testing.requires_llvm
def test_llvm_void_intrin():
    """Codegen of an LLVM intrinsic whose return type is void."""
    builder = tvm.tir.ir_builder.create()
    A = builder.pointer("uint8", name="A")
    # The empty dtype string requests a void-returning intrinsic call.
    void_call = tvm.tir.call_llvm_intrin(
        "", "llvm.va_start", tvm.tir.const(1, "uint32"), A.asobject().data
    )
    builder.emit(void_call)
    prim_func = tvm.tir.PrimFunc([A], builder.get()).with_attr("global_symbol", "main")
    tvm.build(tvm.IRModule.from_expr(prim_func), None, "llvm")
@tvm.testing.requires_llvm
def test_llvm_intrinsic_id():
    """Round-trip an intrinsic name through its numeric LLVM id."""
    name = "llvm.x86.sse2.pmadd.wd"
    assert llvm_get_intrinsic_name(llvm_lookup_intrinsic_id(name)) == name
@tvm.testing.requires_llvm
def test_llvm_overloaded_intrin():
    """Name lookup for an overloaded LLVM intrinsic (llvm.ctlz).

    Name lookup for overloaded intrinsics in LLVM 4 and earlier requires a
    name that includes the overloaded types, so the test only runs on 5+.
    Fix: renamed single-letter locals; ``I`` previously shadowed the
    module-level ``from tvm.script import ir as I`` alias.
    """
    if tvm.target.codegen.llvm_version_major() < 5:
        return

    def use_llvm_intrinsic(A, C):
        ib = tvm.tir.ir_builder.create()
        loaded = A.vload((0, 0))
        ctlz = tvm.tir.call_llvm_pure_intrin(
            "int32", "llvm.ctlz", tvm.tir.const(2, "uint32"), loaded, tvm.tir.const(0, "int1")
        )
        ib.emit(C.vstore((0, 0), ctlz))
        return ib.get()

    A = tvm.te.placeholder((1, 1), dtype="int32", name="A")
    C = tvm.te.extern(
        (1, 1), [A], lambda ins, outs: use_llvm_intrinsic(ins[0], outs[0]), name="C", dtype="int32"
    )
    s = tvm.te.create_schedule(C.op)
    f = tvm.build(s, [A, C], target="llvm")
@tvm.testing.requires_llvm
def test_llvm_lookup_intrin():
    """Codegen resolves a type-suffixed overloaded intrinsic by name."""
    builder = tvm.tir.ir_builder.create()
    A = builder.pointer("uint8x8", name="A")
    zero = tvm.tir.const(0, "int32")
    popcount = tvm.tir.call_llvm_pure_intrin(
        "uint8x8", "llvm.ctpop.v8i8", tvm.tir.const(1, "uint32"), A[zero]
    )
    builder.emit(popcount)
    prim_func = tvm.tir.PrimFunc([A], builder.get()).with_attr("global_symbol", "main")
    tvm.build(tvm.IRModule.from_expr(prim_func), None, "llvm")
@tvm.testing.requires_llvm
def test_llvm_large_uintimm():
    """A uint64 immediate above 2**63 must survive codegen intact."""
    big = (1 << 63) + 123
    three = tvm.tir.const(3, "uint64")
    A = te.compute((), lambda: tvm.tir.const(big, "uint64") + three, name="A")
    sched = te.create_schedule(A.op)
    func = tvm.build(sched, [A], "llvm")
    # Run the scalar kernel and check the full 64-bit result.
    out = tvm.nd.empty((), dtype=A.dtype, device=tvm.cpu(0))
    func(out)
    assert out.numpy() == big + 3
@tvm.testing.requires_llvm
def test_llvm_persist_parallel():
    """Persistent parallel launch: B runs under a single launch point with
    a finish barrier before C consumes it; result is sqrt(A + 1) * 2 + 2."""
    n = 128
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda *i: A(*i) + 1, name="B")
    C = te.compute(A.shape, lambda *i: te.sqrt(B(*i)) * 2 + 2, name="C")
    s = te.create_schedule(C.op)
    xo, xi = s[C].split(C.op.axis[0], factor=8)
    xo1, xo2 = s[C].split(xo, nparts=1)
    s[B].compute_at(s[C], xo1)
    s[B].parallel(s[B].op.axis[0])
    # Workers computing B must finish before C reads it.
    s[B].pragma(s[B].op.axis[0], "parallel_barrier_when_finish")
    s[C].parallel(xi)
    s[C].pragma(xo1, "parallel_launch_point")
    s[C].pragma(xi, "parallel_stride_pattern")
    def check_llvm():
        # BUILD and invoke the kernel.
        f = tvm.build(s, [A, C], "llvm")
        dev = tvm.cpu(0)
        # launch the kernel.
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
        f(a, c)
        tvm.testing.assert_allclose(c.numpy(), np.sqrt(a.numpy() + 1) * 2 + 2, rtol=1e-5)
    check_llvm()
@tvm.testing.requires_llvm
def test_llvm_flip_pipeline():
    """Reverse-copy kernel (C[i] = A[n + base - i - 1]) with a parallel,
    vectorized schedule, checked over several sizes and base offsets."""
    def check_llvm(nn, base):
        n = tvm.runtime.convert(nn)
        A = te.placeholder((n + base), name="A")
        C = te.compute((n,), lambda i: A(nn + base - i - 1), name="C")
        s = te.create_schedule(C.op)
        xo, xi = s[C].split(C.op.axis[0], factor=4)
        s[C].parallel(xo)
        s[C].vectorize(xi)
        # build and invoke the kernel.
        f = tvm.build(s, [A, C], "llvm")
        dev = tvm.cpu(0)
        # launch the kernel.
        n = nn
        a = tvm.nd.array(np.random.uniform(size=(n + base)).astype(A.dtype), dev)
        c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
        f(a, c)
        tvm.testing.assert_allclose(c.numpy(), a.numpy()[::-1][:n])
    # Sizes chosen to cover exact multiples of the vector factor and not.
    check_llvm(4, 0)
    check_llvm(128, 8)
    check_llvm(3, 0)
    check_llvm(128, 1)
@tvm.testing.requires_llvm
def test_llvm_vadd_pipeline():
    """Vector-typed (float32xN) add pipeline with split/parallel/vectorize
    applied to both producer and consumer stages."""
    def check_llvm(n, lanes):
        A = te.placeholder((n,), name="A", dtype="float32x%d" % lanes)
        B = te.compute((n,), lambda i: A[i], name="B")
        C = te.compute((n,), lambda i: B[i] + tvm.tir.const(1, A.dtype), name="C")
        s = te.create_schedule(C.op)
        xo, xi = s[C].split(C.op.axis[0], nparts=2)
        _, xi = s[C].split(xi, factor=2)
        s[C].parallel(xo)
        s[C].vectorize(xi)
        s[B].compute_at(s[C], xo)
        xo, xi = s[B].split(B.op.axis[0], factor=2)
        s[B].vectorize(xi)
        # build and invoke the kernel.
        f = tvm.build(s, [A, C], "llvm")
        dev = tvm.cpu(0)
        # launch the kernel.
        a = tvm.nd.empty((n,), A.dtype).copyfrom(np.random.uniform(size=(n, lanes)))
        c = tvm.nd.empty((n,), C.dtype, dev)
        f(a, c)
        tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)
    check_llvm(64, 2)
    check_llvm(512, 2)
@tvm.testing.requires_llvm
def test_llvm_madd_pipeline():
    """2-D offset add (C[i, j] = A[base + i, j] + 1) with parallel and
    vectorized axes; the last case also runs with tir.noalias disabled."""
    def check_llvm(nn, base, stride):
        n = tvm.runtime.convert(nn)
        A = te.placeholder((n + base, stride), name="A")
        C = te.compute((n, stride), lambda i, j: A(base + i, j) + 1, name="C")
        s = te.create_schedule(C.op)
        xo, xi = s[C].split(C.op.axis[0], factor=4)
        s[C].parallel(xo)
        s[C].vectorize(xi)
        # build and invoke the kernel.
        f = tvm.build(s, [A, C], "llvm")
        dev = tvm.cpu(0)
        # launch the kernel.
        n = nn
        a = tvm.nd.array(np.random.uniform(size=(n + base, stride)).astype(A.dtype), dev)
        c = tvm.nd.array(np.zeros((n, stride), dtype=C.dtype), dev)
        f(a, c)
        tvm.testing.assert_allclose(c.numpy(), a.numpy()[base:] + 1)
    check_llvm(64, 0, 2)
    check_llvm(4, 0, 1)
    # Exercise codegen without the noalias assumption.
    with tvm.transform.PassContext(config={"tir.noalias": False}):
        check_llvm(4, 0, 3)
@tvm.testing.requires_llvm
def test_llvm_temp_space():
    """B is a temporary between A and C; the kernel computes (A + 1) + 1."""
    size = 1024
    n = tvm.runtime.convert(size)
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda i: A(i) + 1, name="B")
    C = te.compute(A.shape, lambda i: B(i) + 1, name="C")
    sched = te.create_schedule(C.op)
    # Build and invoke; B's storage is an internal temporary.
    func = tvm.build(sched, [A, C], "llvm")
    dev = tvm.cpu(0)
    a_nd = tvm.nd.array(np.random.uniform(size=size).astype(A.dtype), dev)
    c_nd = tvm.nd.array(np.zeros(size, dtype=C.dtype), dev)
    func(a_nd, c_nd)
    tvm.testing.assert_allclose(c_nd.numpy(), a_nd.numpy() + 1 + 1)
@tvm.testing.requires_llvm
def test_multiple_func():
    """Two lowered functions packed into one module must both be callable
    and compute the same elementwise add."""
    nn = 1024
    n = tvm.runtime.convert(nn)
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C")
    s = te.create_schedule(C.op)
    xo, xi = s[C].split(C.op.axis[0], factor=4)
    s[C].parallel(xo)
    s[C].vectorize(xi)
    def check_llvm():
        # build two functions
        f2 = tvm.lower(s, [A, B, C], name="fadd1")
        f1 = tvm.lower(s, [A, B, C], name="fadd2")
        m = tvm.build([f1, f2], "llvm")
        fadd2 = m["fadd2"]
        fadd1 = m["fadd1"]
        dev = tvm.cpu(0)
        # launch the kernel.
        n = nn
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
        c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
        fadd1(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
        fadd2(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
    check_llvm()
@tvm.testing.requires_llvm
def test_llvm_condition():
    """if_then_else zeroes out all elements below *offset*."""
    def run_case(n, offset):
        A = te.placeholder((n,), name="A")
        C = te.compute((n,), lambda i: tvm.tir.if_then_else(i >= offset, A[i], 0.0), name="C")
        sched = te.create_schedule(C.op)
        func = tvm.build(sched, [A, C], "llvm")
        dev = tvm.cpu(0)
        a_nd = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), dev)
        c_nd = tvm.nd.empty((n,), A.dtype, dev)
        func(a_nd, c_nd)
        expected = a_nd.numpy().copy()
        expected[:offset] = 0
        tvm.testing.assert_allclose(c_nd.numpy(), expected)
    run_case(64, 8)
@tvm.testing.requires_llvm
def test_llvm_bool():
    """Boolean compare (A[i] == 1) cast to float matches numpy's result."""
    n = 64
    A = te.placeholder((n,), name="A", dtype="int32")
    C = te.compute((n,), lambda i: A[i].equal(1).astype("float"), name="C")
    sched = te.create_schedule(C.op)
    func = tvm.build(sched, [A, C], "llvm")
    dev = tvm.cpu(0)
    a_nd = tvm.nd.array(np.random.randint(0, 2, size=(n,)).astype(A.dtype), dev)
    c_nd = tvm.nd.empty((n,), C.dtype, dev)
    func(a_nd, c_nd)
    tvm.testing.assert_allclose(c_nd.numpy(), a_nd.numpy() == 1)
@tvm.testing.requires_llvm
def test_rank_zero():
    """Rank-0 (scalar) tensors: a reduction into a 0-d output plus a
    scalar follow-up compute, checked against numpy."""
    def check_llvm(n):
        A = te.placeholder((n,), name="A")
        scale = te.placeholder((), name="scale")
        k = te.reduce_axis((0, n), name="k")
        C = te.compute((), lambda: te.sum(A[k] * scale(), axis=k), name="C")
        D = te.compute((), lambda: C() + 1)
        s = te.create_schedule(D.op)
        # build and invoke the kernel.
        f = tvm.build(s, [A, scale, D], "llvm")
        dev = tvm.cpu(0)
        # launch the kernel.
        a = tvm.nd.array(np.random.randint(0, 2, size=(n,)).astype(A.dtype), dev)
        sc = tvm.nd.array(np.random.randint(0, 2, size=()).astype(scale.dtype), dev)
        d = tvm.nd.empty((), D.dtype, dev)
        f(a, sc, d)
        d_np = np.sum(a.numpy()) * sc.numpy() + 1
        tvm.testing.assert_allclose(d.numpy(), d_np)
    check_llvm(64)
@tvm.testing.requires_llvm
def test_rank_zero_bound_checkers():
    """Same scalar-reduction kernel as test_rank_zero, but compiled with
    tir.instrument_bound_checkers enabled."""
    def check_llvm(n):
        with tvm.transform.PassContext(config={"tir.instrument_bound_checkers": True}):
            A = te.placeholder((n,), name="A")
            scale = te.placeholder((), name="scale")
            k = te.reduce_axis((0, n), name="k")
            C = te.compute((), lambda: te.sum(A[k] * scale(), axis=k), name="C")
            D = te.compute((), lambda: C() + 1)
            s = te.create_schedule(D.op)
            # build and invoke the kernel.
            f = tvm.build(s, [A, scale, D], "llvm")
            dev = tvm.cpu(0)
            # launch the kernel.
            a = tvm.nd.array(np.random.randint(0, 2, size=(n,)).astype(A.dtype), dev)
            sc = tvm.nd.array(np.random.randint(0, 2, size=()).astype(scale.dtype), dev)
            d = tvm.nd.empty((), D.dtype, dev)
            f(a, sc, d)
            d_np = np.sum(a.numpy()) * sc.numpy() + 1
            tvm.testing.assert_allclose(d.numpy(), d_np)
    check_llvm(64)
@tvm.testing.requires_llvm
def test_alignment():
    """Inspect generated LLVM IR text for alignment: 32-byte alignment on
    4xfloat load/store, align attributes on parameters, and llvm.assume
    calls emitted for alignment hints."""
    n = tvm.runtime.convert(1024)
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda i: A[i] * 3, name="B")
    s = te.create_schedule(B.op)
    bx, tx = s[B].split(B.op.axis[0], factor=8)
    s[B].vectorize(tx)
    f = tvm.build(s, [A, B], "llvm", name="test_alignment")
    lines = f.get_source().split("\n")
    # Check alignment on load/store.
    for l in lines:
        if "align" in l and "4 x float" in l:
            assert "align 32" in l
    # Check parameter alignment. This looks for the definition of the
    # outlined "compute_" function to see if there is an "align" attribute
    # listed there.
    def has_param_alignment():
        for l in lines:
            if re.search(r"test_alignment_compute_\([^(]*align [0-9]", l):
                return True
        return False
    if tvm.target.codegen.llvm_version_major() >= 5:
        assert has_param_alignment()
    # Check for assume intrinsics. This isn't 100% accurate, since it just
    # checks if the llvm.assume is there, but detailed check would require
    # a much more detailed analysis of the LLVM IR.
    def has_call_to_assume():
        for l in lines:
            if re.search(r"call.*llvm.assume", l):
                return True
        return False
    assert has_call_to_assume()
@tvm.testing.requires_llvm
def test_llvm_div():
    """Check that the semantics of div and mod is correct

    Exhaustively compares truncated and floored div/mod against Python
    reference semantics over many signed/unsigned ranges and dtypes.
    """
    def check(start, end, dstart, dend, dtype, floor_div=False):
        div = tvm.te.floordiv if floor_div else tvm.tir.truncdiv
        mod = tvm.te.floormod if floor_div else tvm.tir.truncmod
        # A are dividends, B are divisors. Note that we add 1 to make include end in the range.
        A = te.placeholder((end - start + 1,), name="A", dtype=dtype)
        B = te.placeholder((dend - dstart + 1,), name="B", dtype=dtype)
        # We clip values with min and max so that simplifiers know the ranges of values
        def clipa(x):
            return tvm.te.min(tvm.tir.const(end, dtype), tvm.te.max(tvm.tir.const(start, dtype), x))
        def clipb(x):
            return tvm.te.min(
                tvm.tir.const(dend, dtype), tvm.te.max(tvm.tir.const(dstart, dtype), x)
            )
        # If the range is just a single point, use the constant itself
        if start == end:
            def clipa(x):
                return tvm.tir.const(start, dtype)
        if dstart == dend:
            def clipb(x):
                return tvm.tir.const(dstart, dtype)
        # D are division results and M are modulo results
        [D, M] = te.compute(
            (end - start + 1, dend - dstart + 1),
            lambda i, j: (div(clipa(A[i]), clipb(B[j])), mod(clipa(A[i]), clipb(B[j]))),
        )
        s = te.create_schedule([D.op, M.op])
        f = tvm.build(s, [A, B, D, M], "llvm")
        # Fill input arrays with values
        A_arr = tvm.nd.empty((end - start + 1,), dtype)
        B_arr = tvm.nd.empty((dend - dstart + 1,), dtype)
        A_arr.copyfrom(np.arange(start, end + 1, dtype=dtype))
        B_np = np.arange(dstart, dend + 1, dtype=dtype)
        # If the range of the divisor contains 0, replace it with 1 to avoid division by zero
        if dend >= 0 and dstart <= 0:
            B_np[-dstart] = 1
        B_arr.copyfrom(B_np)
        D_arr = tvm.nd.empty((end - start + 1, dend - dstart + 1), dtype)
        M_arr = tvm.nd.empty((end - start + 1, dend - dstart + 1), dtype)
        # Run the function and convert the results to numpy
        f(A_arr, B_arr, D_arr, M_arr)
        D_arr = D_arr.numpy()
        M_arr = M_arr.numpy()
        # This helper just prints additional info on failure
        def _show_info():
            print("dtype: {}".format(dtype))
            print("dividend range: [{}, {}]".format(start, end))
            print("divisor range: [{}, {}]".format(dstart, dend))
            lowered = tvm.lower(s, [A, B, D, M], simple_mode=True)
            print("Lowered code:")
            print(lowered)
        # Check that the computed values are correct
        for i in range(start, end + 1):
            for j in range(dstart, dend + 1):
                if j == 0:
                    continue
                if floor_div:
                    dref = i // j
                    mref = i % j
                else:
                    # Truncated semantics: round toward zero, C-style fmod.
                    dref = int(float(i) / j)
                    mref = int(math.fmod(i, j))
                if D_arr[i - start, j - dstart] != dref:
                    _show_info()
                    raise AssertionError(
                        "Incorrect division result: {}({}, {}) is {} "
                        "but should be {}".format(
                            div.__name__, i, j, D_arr[i - start, j - dstart], dref
                        )
                    )
                if M_arr[i - start, j - dstart] != mref:
                    _show_info()
                    raise AssertionError(
                        "Incorrect modulo result: {}({}, {}) is {} "
                        "but should be {}".format(
                            mod.__name__, i, j, M_arr[i - start, j - dstart], mref
                        )
                    )
    # Try different ranges to cover different cases
    for start, end in [
        (-12, -12),
        (-11, -1),
        (-11, 0),
        (0, 0),
        (12, 12),
        (1, 11),
        (0, 11),
        (-11, 11),
    ]:
        for dstart, dend in [
            (-11, -1),
            (-11, 1),
            (-4, -4),
            (-2, -2),
            (1, 11),
            (0, 11),
            (4, 4),
            (2, 2),
            (-11, 11),
        ]:
            # NOTE(review): the (dend == 0 and dstart == 0) clause is
            # subsumed by the following `dend == 0` check.
            if end < start or dend < dstart or (dend == 0 and dstart == 0) or dend == 0:
                continue
            check(start, end, dstart, dend, "int32", floor_div=False)
            check(start, end, dstart, dend, "int32", floor_div=True)
            check(start, end, dstart, dend, "int8", floor_div=False)
            check(start, end, dstart, dend, "int8", floor_div=True)
            if start >= 0 and dstart >= 0:
                check(start, end, dstart, dend, "uint32", floor_div=False)
                check(start, end, dstart, dend, "uint32", floor_div=True)
    # Additional tests for uint8
    for dstart, dend in [(0, 11), (1, 11), (2, 2), (4, 4)]:
        check(123, 133, dstart, dend, "uint8", floor_div=False)
        check(123, 133, dstart, dend, "uint8", floor_div=True)
        check(0, 255, dstart, dend, "uint8", floor_div=False)
        check(0, 255, dstart, dend, "uint8", floor_div=True)
@tvm.testing.requires_llvm
def test_llvm_fp_math():
    """Extreme float inputs (overflowing reciprocal product, deeply
    saturated sigmoid) must yield clean zeros rather than NaN/Inf."""
    def run_reciprocal(n):
        A = te.placeholder((n,), name="A")
        B = te.compute((n,), lambda i: te.div(1.0, (1e37 * A[i])), name="B")
        sched = te.create_schedule(B.op)
        func = tvm.build(sched, [A, B], "llvm")
        a_nd = tvm.nd.array(np.full((n,), 100, "float32"))
        b_nd = tvm.nd.empty((n,), "float32")
        func(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.numpy(), np.zeros((n,), "float32"))

    def run_sigmoid(n):
        A = te.placeholder((n,), name="A")
        B = te.compute((n,), lambda i: te.sigmoid(A[i]), name="B")
        sched = te.create_schedule(B.op)
        func = tvm.build(sched, [A, B], "llvm")
        a_nd = tvm.nd.array(np.full((n,), -1000, "float32"))
        b_nd = tvm.nd.empty((n,), "float32")
        func(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.numpy(), np.zeros((n,), "float32"))

    # Cover several vector widths for both kernels.
    for lanes in (4, 8, 16):
        run_reciprocal(lanes)
        run_sigmoid(lanes)
@tvm.testing.requires_llvm
def test_dwarf_debug_information():
    """DWARF debug info must name both packed functions; checked via
    external dump tools on the object file and via the emitted LLVM IR.
    Both sub-checks only run on LLVM versions 5 and 6."""
    nn = 1024
    n = tvm.runtime.convert(nn)
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C")
    s = te.create_schedule(C.op)
    xo, xi = s[C].split(C.op.axis[0], factor=4)
    s[C].parallel(xo)
    s[C].vectorize(xi)
    def check_llvm_object():
        if tvm.target.codegen.llvm_version_major() < 5:
            return
        if tvm.target.codegen.llvm_version_major() > 6:
            return
        # build two functions
        f2 = tvm.lower(s, [A, B, C], name="fadd1")
        f1 = tvm.lower(s, [A, B, C], name="fadd2")
        m = tvm.build([f1, f2], "llvm")
        temp = utils.tempdir()
        o_path = temp.relpath("temp.o")
        m.save(o_path)
        import shutil
        import subprocess
        import sys
        # Try the dwarfdump utility (OS X)
        if shutil.which("dwarfdump"):
            output = subprocess.check_output(["dwarfdump", o_path])
            assert re.search(r"""DW_AT_name\\t\("fadd1"\)""", str(output))
            assert re.search(r"""DW_AT_name\\t\("fadd2"\)""", str(output))
        # Try gobjdump (OS X)
        if shutil.which("gobjdump"):
            output = subprocess.check_output(["gobjdump", "--dwarf", o_path])
            assert re.search(r"""DW_AT_name.*fadd1""", str(output))
            assert re.search(r"""DW_AT_name.*fadd2""", str(output))
        # Try objdump (Linux) - Darwin objdump has different DWARF syntax.
        if shutil.which("objdump") and sys.platform != "darwin":
            output = subprocess.check_output(["objdump", "--dwarf", o_path])
            assert re.search(r"""DW_AT_name.*fadd1""", str(output))
            assert re.search(r"""DW_AT_name.*fadd2""", str(output))
    def check_llvm_ir():
        if tvm.target.codegen.llvm_version_major() < 5:
            return
        if tvm.target.codegen.llvm_version_major() > 6:
            return
        # build two functions
        f2 = tvm.lower(s, [A, B, C], name="fadd1")
        f1 = tvm.lower(s, [A, B, C], name="fadd2")
        m = tvm.build([f1, f2], target="llvm -mtriple=aarch64-linux-gnu")
        ll = m.get_source("ll")
        # On non-Darwin OS, don't explicitly specify DWARF version.
        import re
        assert not re.search(r""""Dwarf Version""" "", ll)
        assert re.search(r"""llvm.dbg.value""", ll)
        # Try Darwin, require DWARF-2
        m = tvm.build([f1, f2], target="llvm -mtriple=x86_64-apple-darwin-macho")
        ll = m.get_source("ll")
        assert re.search(r"""i32 4, !"Dwarf Version", i32 2""", ll)
        assert re.search(r"""llvm.dbg.value""", ll)
    check_llvm_object()
    check_llvm_ir()
@tvm.testing.requires_llvm
def test_llvm_shuffle():
    """Vectorize a reversed-index add using an explicit tir.Shuffle.

    c[x] = a[x] + b[7 - x]; with a = 1..8 and b = 8..1 the result is
    exactly 2 * a, which the final assertion relies on.
    Fix: removed the dead ``value = new_a + new_b`` store in the
    vectorizer (the return recomputed the sum) and the unused ``ir``
    binding (the tvm.lower call itself is kept as a smoke check).
    """
    a = te.placeholder((8,), "int32")
    b = te.placeholder((8,), "int32")
    c = te.compute((8,), lambda x: a[x] + b[7 - x])
    sch = te.create_schedule(c.op)

    def my_vectorize():
        def vectorizer(op):
            store = op.body
            idx = tvm.tir.Ramp(tvm.tir.const(0, "int32"), tvm.tir.const(1, "int32"), 8)
            value = store.value
            # Reverse the lanes of the ramp to implement b[7 - x].
            b_idx = tvm.tir.Shuffle([idx], [tvm.tir.const(i, "int32") for i in range(7, -1, -1)])
            new_a = tvm.tir.BufferLoad(value.a.buffer, [idx])
            new_b = tvm.tir.BufferLoad(value.b.buffer, [b_idx])
            return tvm.tir.BufferStore(store.buffer, new_a + new_b, [idx])

        def _transform(f, *_):
            return f.with_body(
                tvm.tir.stmt_functor.ir_transform(f.body, None, vectorizer, ["tir.For"])
            )

        return tvm.tir.transform.prim_func_pass(_transform, opt_level=0, name="my_vectorize")

    with tvm.transform.PassContext(config={"tir.add_lower_pass": [(1, my_vectorize())]}):
        tvm.lower(sch, [a, b, c], simple_mode=True)
        module = tvm.build(sch, [a, b, c])
        a_ = tvm.nd.array(np.arange(1, 9, dtype="int32"))
        b_ = tvm.nd.array(np.arange(8, 0, -1, dtype="int32"))
        c_ = tvm.nd.array(np.zeros((8,), dtype="int32"))
        module(a_, b_, c_)
        tvm.testing.assert_allclose(c_.numpy(), (a_.numpy() * 2).astype("int32"))
def np_float2np_bf16(arr):
    """Convert a float32 numpy array to its bf16 bit patterns (uint16),
    using round-to-nearest-even on the dropped mantissa bits."""
    bits = arr.view("<u4")
    # Bias is 0x7FFF plus the lowest kept mantissa bit (ties-to-even).
    rounding = ((bits >> 16) & 1) + 0x7FFF
    return ((bits + rounding) >> 16).astype("uint16")
def np_float2tvm_bf16(arr):
    """Convert a float numpy array into a TVM NDArray of bfloat16."""
    bits = np_float2np_bf16(arr)
    target = tvm.nd.empty(bits.shape, "bfloat16")
    return target.copyfrom(bits)
def np_bf162np_float(arr):
    """Expand bf16 bit patterns (uint16) into exact float32 values by
    placing them in the upper 16 bits of a float32 word."""
    widened = arr.astype("uint32") << 16
    return widened.view("<f4")
def np_bf16_cast_and_cast_back(arr):
    """Round-trip *arr* through bf16 to apply bf16 precision loss."""
    truncated = np_float2np_bf16(arr)
    return np_bf162np_float(truncated)
@tvm.testing.requires_llvm
def test_llvm_bf16():
    """bfloat16 elementwise add, with and without vectorization, compared
    against a numpy emulation that applies the same bf16 rounding."""
    def dotest(do_vectorize):
        np.random.seed(122)
        A = te.placeholder((32,), dtype="bfloat16")
        B = te.placeholder((32,), dtype="bfloat16")
        d = te.compute((32,), lambda x: A[x] + B[x])
        sch = te.create_schedule(d.op)
        if do_vectorize:
            sch[d].vectorize(d.op.axis[0])
        module = tvm.build(sch, [A, B, d])
        npa = np.random.rand(32).astype("float32")
        npb = np.random.rand(32).astype("float32")
        # Reference: round inputs to bf16 precision, add, round again.
        va = np_bf16_cast_and_cast_back(npa)
        vb = np_bf16_cast_and_cast_back(npb)
        res = np_bf16_cast_and_cast_back(va + vb)
        a_ = np_float2tvm_bf16(npa)
        b_ = np_float2tvm_bf16(npb)
        c_ = tvm.nd.empty((32,), "bfloat16")
        module(a_, b_, c_)
        tvm.testing.assert_allclose(np_bf162np_float(c_.numpy()), res)
    dotest(True)
    dotest(False)
@tvm.testing.requires_llvm
def test_llvm_crt_static_lib():
    """Build a bf16 add for the CRT runtime with system-lib enabled.

    NOTE(review): this writes ``test.o`` into the current working
    directory and prints the module source — side effects to confirm
    are intentional.
    """
    A = te.placeholder((32,), dtype="bfloat16")
    B = te.placeholder((32,), dtype="bfloat16")
    d = te.compute((32,), lambda x: A[x] + B[x])
    sch = te.create_schedule(d.op)
    module = tvm.build(
        sch,
        [A, B, d],
        target=tvm.target.Target("llvm"),
        runtime=Runtime("crt", {"system-lib": True}),
    )
    print(module.get_source())
    module.save("test.o")
def atomic_add(x, y):
    """Build a tir.atomic_add intrinsic call adding *y* at address *x*."""
    return tvm.tir.call_intrin(y.dtype, "tir.atomic_add", x, y)
@tvm.testing.requires_llvm
def test_llvm_lower_atomic():
    """Construct (but deliberately do not build) a parallel loop of
    atomic adds; see the trailing note for why the build is disabled."""
    def do_atomic_add(A):
        ib = tvm.tir.ir_builder.create()
        n = A.shape[0]
        atomic_add_return = ib.allocate(A.dtype, (1,), name="atomic_add_return", scope="local")
        one = tvm.tir.const(1, A.dtype)
        A_ptr = ib.buffer_ptr(A)
        with ib.for_range(0, n, name="i", kind="parallel") as i:
            atomic_add_return[0] = atomic_add(
                tvm.tir.call_intrin("handle", "tir.address_of", A_ptr[0]), one
            )
        return ib.get()
    A = tvm.te.placeholder((100,), dtype="int32", name="A")
    C = tvm.te.extern((100,), [A], lambda ins, _: do_atomic_add(ins[0]), name="C", dtype="int32")
    s = tvm.te.create_schedule(C.op)
    # This does not work because of pointer type mismatch
    # TVMError: LLVM module verification failed with the following errors:
    # Argument value type does not match pointer operand type!
    # %21 = atomicrmw add i8* %7, i32 1 monotonic
    # i8
    # f = tvm.build(s, [A], target="llvm")
@tvm.testing.requires_llvm
@tvm.testing.requires_gpu
def test_llvm_gpu_lower_atomic():
    """Atomic add on the GPU (nvptx): every thread increments A[0], so
    after the launch A[0] must equal the launch size."""
    def do_atomic_add(A):
        ib = tvm.tir.ir_builder.create()
        n = A.shape[0]
        atomic_add_return = ib.allocate(A.dtype, (1,), name="atomic_add_return", scope="local")
        one = tvm.tir.const(1, A.dtype)
        A_ptr = ib.buffer_ptr(A)
        nthread_tx = 64
        with ib.new_scope():
            nthread_bx = (n + nthread_tx - 1) // nthread_tx
            tx = te.thread_axis("threadIdx.x")
            bx = te.thread_axis("blockIdx.x")
            ib.scope_attr(tx, "thread_extent", nthread_tx)
            ib.scope_attr(bx, "thread_extent", nthread_bx)
            atomic_add_return[0] = atomic_add(
                tvm.tir.call_intrin("handle", "tir.address_of", A_ptr[0]), one
            )
        return ib.get()
    size = 1024
    # CI uses LLVM 8, which does not support float atomic
    for dtype in ["int32"]:
        A = tvm.te.placeholder((size,), dtype=dtype, name="A")
        C = tvm.te.extern((size,), [A], lambda ins, _: do_atomic_add(ins[0]), dtype=dtype)
        s = tvm.te.create_schedule(C.op)
        f = tvm.build(s, [A], target="nvptx")
        dev = tvm.cuda()
        a = tvm.nd.array(np.zeros((size,)).astype(A.dtype), dev)
        f(a)
        ref = np.zeros((size,)).astype(A.dtype)
        ref[0] = size
        tvm.testing.assert_allclose(a.numpy(), ref, rtol=1e-5)
@tvm.testing.requires_llvm
def test_llvm_order_functions():
    """Check that functions in the LLVM module are ordered alphabetically."""
    # Note: the order is alphabetical because that's a predictable ordering. Any predictable
    # ordering will work fine, but if the ordering changes, this test will need to be updated.
    def make_call_extern(caller, callee):
        # Create a function:
        # float32 caller(float32 v) { return callee(v); }
        ib = tvm.tir.ir_builder.create()
        v = tvm.te.var("v", dtype="float32")
        t = tvm.tir.call_extern("float32", callee, v)
        ib.emit(t)
        return tvm.tir.PrimFunc([v], ib.get()).with_attr("global_symbol", caller)
    # Create some functions in a random order.
    functions = {
        "Danny": make_call_extern("Danny", "Dave"),
        "Sammy": make_call_extern("Sammy", "Eve"),
        "Kirby": make_call_extern("Kirby", "Fred"),
    }
    mod = tvm.IRModule(functions=functions)
    ir_text = tvm.build(mod, None, target="llvm").get_source("ll")
    # Extract definition order from the IR text.
    # Skip functions whose names start with _.
    matches = re.findall(r"^define[^@]*@([a-zA-Z][a-zA-Z0-9_]*)", ir_text, re.MULTILINE)
    assert matches == sorted(matches)
@tvm.testing.requires_llvm
@tvm.testing.skip_if_32bit
def test_llvm_import():
    """all-platform-minimal-test: check shell dependent clang behavior.

    Imports clang-compiled LLVM IR (both as a file path and as an inline
    string) via the import_llvm pragma, then calls my_add from TIR.
    """
    # extern "C" is necessary to get the correct signature
    cc_code = """
    extern "C" float my_add(float x, float y) {
      return x + y;
    }
    """
    n = 10
    A = te.placeholder((n,), name="A")
    B = te.compute(
        (n,), lambda *i: tvm.tir.call_pure_extern("float32", "my_add", A(*i), 1.0), name="B"
    )
    def check_llvm(use_file):
        if not clang.find_clang(required=False):
            print("skip because clang is not available")
            return
        temp = utils.tempdir()
        ll_path = temp.relpath("temp.ll")
        ll_code = clang.create_llvm(cc_code, output=ll_path)
        s = te.create_schedule(B.op)
        if use_file:
            s[B].pragma(s[B].op.axis[0], "import_llvm", ll_path)
        else:
            s[B].pragma(s[B].op.axis[0], "import_llvm", ll_code)
        # BUILD and invoke the kernel.
        f = tvm.build(s, [A, B], "llvm")
        dev = tvm.cpu(0)
        # launch the kernel.
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), a.numpy() + 1.0)
    check_llvm(use_file=True)
    check_llvm(use_file=False)
@tvm.testing.requires_llvm
def test_llvm_scalar_concat():
    """Concatenating two scalars into an int32x2 vector must survive LLVM codegen."""
    lhs = tvm.tir.Var("x", "int32")
    rhs = tvm.tir.Var("y", "int32")
    out = tvm.tir.decl_buffer((1,), "int32x2")
    shuffle = tvm.tir.Shuffle([lhs, rhs], [0, 1])
    func = tvm.tir.PrimFunc([lhs, rhs, out], out.vstore(0, shuffle))
    func = func.with_attr("global_symbol", "codegen_scalar_concat")
    mod = tvm.ir.IRModule.from_expr(func)
    # This will crash in LLVM codegen if CodeGenLLVM::CreateVecConcat doesn't
    # convert scalars to single-lane LLVM vectors.
    with tvm.transform.PassContext(config={"tir.disable_assert": True}):
        tvm.build(mod, [lhs, rhs, out], target="llvm")
@tvm.testing.requires_llvm
def test_raise_exception_during_codegen():
    """A nested parallel loop must be rejected with a clear error message."""
    @T.prim_func
    def threadpool_nested_parallel_loop(
        A: T.Buffer((4, 4), "float32"), B: T.Buffer((4, 4), "float32")
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        for i in T.parallel(4):
            for j in T.parallel(4):
                B[i, j] = A[i, j] * 2.0
    with pytest.raises(tvm.TVMError) as e:
        tvm.build({"llvm": tvm.IRModule.from_expr(threadpool_nested_parallel_loop)})
    # str() on pytest's ExceptionInfo includes the exception text.
    msg = str(e)
    assert msg.find("Nested parallel loop is not supported") != -1
@tvm.testing.requires_llvm
def test_llvm_target_attributes():
    """Check that when LLVM codegen creates new functions, they get the same target
    attributes as the original function.

    Builds with -mcpu=skylake -mattr=+avx512f, then parses the emitted LLVM IR
    and checks that every generated function (including the parallel lambda
    split out by codegen) references an attribute group carrying those flags.
    """
    n = te.var()
    A = te.placeholder((n,), name="A", dtype="float32")
    B = te.compute((n,), lambda i: A[i], name="B")
    C = te.compute((n,), lambda i: B[i] + tvm.tir.const(1, A.dtype), name="C")
    s = te.create_schedule(C.op)
    xo, xi = s[C].split(C.op.axis[0], nparts=2)
    # The parallel loop forces codegen to create __tvm_parallel_lambda.
    s[C].parallel(xo)
    target_llvm = "llvm -mcpu=skylake -mattr=+avx512f"
    target = tvm.target.Target(target_llvm, host=target_llvm)
    module = tvm.build(s, [A, B, C, n], target=target, name="test_func")
    llvm_ir = module.get_source()
    llvm_ir_lines = llvm_ir.split("\n")
    # attribute-group id -> attribute list text
    attribute_definitions = dict()
    # attribute-group ids referenced by at least one function definition
    attributes_with_target = dict()
    functions_with_target = []
    for line in llvm_ir_lines:
        func_def = re.match(
            "define.* @(?P<func_name>[^(]*)[(].* #(?P<attr_num>[0-9]+) (!.* |){$", line
        )
        if func_def:
            functions_with_target.append(func_def.group("func_name"))
            attributes_with_target[func_def.group("attr_num")] = True
            continue
        attr_def = re.match("attributes #(?P<attr_num>[0-9]+) = {(?P<attr_list>.*)}", line)
        if attr_def:
            attribute_definitions[attr_def.group("attr_num")] = attr_def.group("attr_list")
    # Every attribute group used by a function must carry the cpu/feature flags.
    for k in list(attributes_with_target.keys()):
        assert re.match('.*"target-cpu"="skylake".*', attribute_definitions[k])
        assert re.match('.*"target-features"=".*[+]avx512f.*".*', attribute_definitions[k])
    expected_functions = ["test_func", "test_func_compute_", "__tvm_parallel_lambda"]
    # Fix: use a distinct loop variable; the original reused `n`, shadowing the
    # TIR size variable bound earlier and passed to tvm.build above.
    for func_name in expected_functions:
        assert func_name in functions_with_target
@tvm.testing.requires_llvm
def test_llvm_assume():
    """
    Check that LLVM does not error out when generating code with tir.assume.
    Verifying for llvm.assume being generated is not easy as the intrinsic and its
    related instructions get removed during optimizations
    """
    @T.prim_func
    def tir_assume_func(A: T.Buffer((4, 4), "int32"), B: T.Buffer((14,), "int32")):
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        A_1 = T.Buffer((16,), "int32", data=A.data)
        for axis0, axis1 in T.grid(4, 4):
            T.assume(axis0 < 3 or axis1 < 2 or A_1[axis0 * 4 + axis1] == 0)
        for i in range(14):
            B_1 = T.Buffer((14,), "int32", data=B.data)
            B_1[i] = A_1[i] * 2
    mod = tvm.IRModule.from_expr(tir_assume_func)
    inp = te.placeholder((4, 4), name="A", dtype="int32")
    out = te.placeholder((14,), name="B", dtype="int32")
    # Building is the test: it must not raise while lowering T.assume.
    m = tvm.build(mod, [inp, out], target="llvm")
@tvm.testing.requires_llvm
def test_debug_symbol_for_float64():
    """Check that LLVM can define DWARF debug type for float64
    In previous versions, only specific data types could exist in the
    function signature. In this test, the "calling_conv" attribute
    prevents lowering to the PackedFunc API.
    """
    @T.prim_func
    def func(a: T.handle("float64"), b: T.handle("float64"), n: T.int64):
        T.func_attr({"calling_conv": 2})
        A = T.Buffer(16, "float64", data=a)
        B = T.Buffer(16, "float64", data=b)
        for i in range(n):
            B[i] = A[i]
    # Building is the test: it must not raise while emitting debug info.
    tvm.build(func, target="llvm")
@tvm.testing.requires_llvm
def test_subroutine_call():
    """A PrimFunc may call another PrimFunc in the same IRModule."""
    @I.ir_module
    class mod:
        @T.prim_func
        def main(A: T.Buffer(1, dtype="float32")):
            T.func_attr({"global_symbol": "main"})
            mod.subroutine(A.data)
        @T.prim_func
        def subroutine(A_data: T.handle("float32")):
            # The calling_conv parameter is to prevent MakePackedAPI
            # from changing the call signature of the subroutine.
            T.func_attr({"global_symbol": "subroutine", "calling_conv": -1})
            A = T.decl_buffer(1, dtype="float32", data=A_data)
            A[0] = 42.0
    target = "llvm"
    dev = tvm.cpu()
    # NOTE(review): `target` is never passed to tvm.build below — presumably
    # the default target suffices here; confirm, or pass it explicitly.
    built = tvm.build(mod)
    arr = tvm.nd.array(np.zeros([1], "float32"), device=dev)
    built["main"](arr)
    # The subroutine must have been invoked and written through A.data.
    assert arr.numpy()[0] == 42.0
@tvm.testing.requires_llvm
def test_call_packed_returning_void():
    """Allow codegen of PackedFunc calls returning void
    The LLVM codegen uses the CallNode's dtype to cast the return type
    of the PackedFunc into the appropriate LLVM output type. However,
    there is no API type for `DataType::Void()`. When the return type
    of a PackedFunc is void, the generated code should not attempt to
    read the return value.
    While `T.call_packed()` will produce a CallNode with an output
    dtype of "int32", the use of other return types is valid in TIR.
    This test case uses `T.Call` directly to allow an explicit dtype
    for the packed function call.
    """
    @T.prim_func
    def func():
        T.Call(
            "void",
            tvm.ir.Op.get("tir.tvm_call_packed"),
            ["dummy_function_name"],
        )
    # Error occurred during build, as part of
    # CodeGenCPU::MakeCallPackedLowered.
    built = tvm.build(func, target="llvm")
@tvm.testing.requires_llvm
def test_call_packed_without_string_arg():
    """The first argument to tvm_call_packed must be a string
    Even if the invalid TIR is constructed, this should throw an
    exception to exit cleanly. Previously, use of
    `args[0].as<StringImmNode>()` without a null check resulted in
    a segfault during codegen.
    """
    @T.prim_func
    def func(A: T.Buffer(1, "float32")):
        T.func_attr({"global_symbol": "func"})
        # A.data is a handle, not a StringImm: codegen must raise, not segfault.
        T.Call("int32", tvm.ir.Op.get("tir.tvm_call_packed"), [A.data])
    with pytest.raises(tvm.TVMError):
        built = tvm.build(func, target="llvm")
@tvm.testing.requires_llvm
def test_call_extern_returning_void():
    """Like test_call_packed_returning_void, but for call_extern"""
    @T.prim_func
    def func():
        T.func_attr({"global_symbol": "func"})
        T.Call("void", tvm.ir.Op.get("tir.call_extern"), ["dummy_function_name"])
    # Building is the test: it must not attempt to read a void return value.
    built = tvm.build(func, target="llvm")
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 39,503 | 34.4614 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_flatten_buffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
class BaseCompare(tvm.testing.CompareBeforeAfter):
    """Shared fixture: apply FlattenBuffer then Simplify before comparing."""

    transform = tvm.transform.Sequential(
        [tvm.tir.transform.FlattenBuffer(), tvm.tir.transform.Simplify()]
    )
class TestElementwise(BaseCompare):
    """2-d buffers are flattened to 1-d"""
    # Input: a [1, 16] scratch buffer indexed with 2-d coordinates.
    def before(A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32")):
        for i in T.serial(0, 16):
            B_new = T.decl_buffer([1, 16], "float32")
            for j in T.serial(0, 16):
                B_new[0, j] = A[i, j] + 1.0
            for j in T.serial(0, 16):
                C[i, j] = B_new[0, j] * 2.0
    # Expected: 1-d allocations, with [i, j] folded into i*16 + j.
    def expected(input_A: T.Buffer((16, 16), "float32"), input_C: T.Buffer((16, 16), "float32")):
        A = T.Buffer(256, dtype="float32", data=input_A.data)
        C = T.Buffer(256, dtype="float32", data=input_C.data)
        for i in T.serial(0, 16):
            B_new_data = T.allocate([16], "float32", scope="global")
            B_new = T.Buffer([16], "float32", scope="global", data=B_new_data)
            for j in T.serial(0, 16):
                B_new[j] = A[((i * 16) + j)] + 1.0
            for j in T.serial(0, 16):
                C[((i * 16) + j)] = B_new[j] * 2.0
class TestElementwiseWithoutDeclBuffer(BaseCompare):
"""2-d buffers are flattened to 1-d
Like TestElementwise, but the TIR doesn't have the DeclBuffer
node. The T.Buffer declaration applies only during the
parsing the TVMScript, and doesn't occur in the TIR itself. In
this case, the allocation should be assumed to be targeting flat
memory, and should be flattened to a 1-d allocation.
"""
def before(A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32")):
for i in T.serial(0, 16):
B_new_data = T.allocate([1, 16], "float32", "global")
B_new = T.Buffer([1, 16], "float32", data=B_new_data)
for j in T.serial(0, 16):
B_new[0, j] = A[i, j] + 1.0
for j in T.serial(0, 16):
C[i, j] = B_new[0, j] * 2.0
def expected(input_A: T.Buffer((16, 16), "float32"), input_C: T.Buffer((16, 16), "float32")):
A = T.Buffer(256, dtype="float32", data=input_A.data)
C = T.Buffer(256, dtype="float32", data=input_C.data)
for i in T.serial(0, 16):
B_new_data = T.allocate([16], "float32", "global")
B_new = T.Buffer(16, "float32", data=B_new_data)
for j in T.serial(0, 16):
B_new[j] = A[((i * 16) + j)] + 1.0
for j in T.serial(0, 16):
C[((i * 16) + j)] = B_new[j] * 2.0
class TestGPU(BaseCompare):
"""Buffer flattening may have indices based on GPU thread vars"""
def before(A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32")):
i0 = T.env_thread("blockIdx.x")
i1 = T.env_thread("threadIdx.x")
i2 = T.env_thread("vthread")
T.launch_thread(i0, 4)
T.launch_thread(i1, 2)
T.launch_thread(i2, 2)
B = T.decl_buffer([1, 16], "float32", scope="local")
for j in range(0, 16):
B[0, j] = A[i0 * 4 + i1 * 2 + i2, j] + 1.0
for j in range(0, 16):
C[i0 * 4 + i1 * 2 + i2, j] = B[0, j] * 2.0
def expected(input_A: T.Buffer((16, 16), "float32"), input_C: T.Buffer((16, 16), "float32")):
A = T.Buffer(256, dtype="float32", data=input_A.data)
C = T.Buffer(256, dtype="float32", data=input_C.data)
i0 = T.env_thread("blockIdx.x")
i1 = T.env_thread("threadIdx.x")
i2 = T.env_thread("vthread")
T.launch_thread(i0, 4)
T.launch_thread(i1, 2)
T.launch_thread(i2, 2)
B_data = T.allocate([16], "float32", scope="local")
B = T.Buffer([16], "float32", scope="local", data=B_data)
for j in range(0, 16):
B[j] = A[i0 * 64 + i1 * 32 + i2 * 16 + j] + 1.0
for j in range(0, 16):
C[i0 * 64 + i1 * 32 + i2 * 16 + j] = B[j] * 2.0
class TestSymbolic(BaseCompare):
"""Dynamically-sized arrrays are flattened"""
def before(a: T.handle, c: T.handle, n: T.int32, m: T.int32) -> None:
A = T.match_buffer(a, (n, m), "float32")
C = T.match_buffer(c, (n, m), "float32")
for i in range(0, n):
B = T.decl_buffer([m], "float32")
for j in range(0, m):
B[j] = A[i, j] + 1.0
for j in range(0, m):
C[i, j] = B[j] * 2.0
def expected(a: T.handle, c: T.handle, n: T.int32, m: T.int32) -> None:
input_A = T.match_buffer(a, (n, m), "float32")
input_C = T.match_buffer(c, (n, m), "float32")
A = T.Buffer(n * m, "float32", data=input_A.data)
C = T.Buffer(n * m, "float32", data=input_C.data)
for i in range(0, n):
B_data = T.allocate([m], "float32", scope="global")
B = T.Buffer([m], "float32", scope="global", data=B_data)
for j in range(0, m):
B[j] = A[i * m + j] + 1.0
for j in range(0, m):
C[i * m + j] = B[j] * 2.0
class TestFusedSymbolic(BaseCompare):
"""Dynamically-sized arrrays with fused iterator which can be flattened"""
def before(a: T.handle, b: T.handle, n: T.int32) -> None:
A = T.match_buffer(a, (32, n, n), "float32")
B = T.match_buffer(b, (32, n, n), "float32")
for i in range(0, n * n * 32):
B[i // (n * n), (i % (n * n)) // n, i % n] = A[i // (n * n), (i % (n * n)) // n, i % n]
def expected(a: T.handle, b: T.handle, n: T.int32) -> None:
input_A = T.match_buffer(a, (32, n, n), "float32")
input_B = T.match_buffer(b, (32, n, n), "float32")
A = T.Buffer(n * n * 32, "float32", data=input_A.data)
B = T.Buffer(n * n * 32, "float32", data=input_B.data)
for i in range(0, n * n * 32):
B[i] = A[i]
class TestFusedSymbolicWithPredicate(BaseCompare):
"""Dynamically-sized arrrays with fused iterator which can be flattened with extra predicate"""
def before(a: T.handle, b: T.handle, n: T.int32) -> None:
A = T.match_buffer(a, (32, n, n), "float32")
B = T.match_buffer(b, (32, n, n), "float32")
for bx, tx in T.grid((n * n + 1) // 2, 64):
if bx * 64 + tx < n * n * 32:
B[
(bx * 64 + tx) // (n * n), ((bx * 64 + tx) % (n * n)) // n, (bx * 64 + tx) % n
] = A[
(bx * 64 + tx) // (n * n), ((bx * 64 + tx) % (n * n)) // n, (bx * 64 + tx) % n
]
def expected(a: T.handle, b: T.handle, n: T.int32) -> None:
input_A = T.match_buffer(a, (32, n, n), "float32")
input_B = T.match_buffer(b, (32, n, n), "float32")
A = T.Buffer(n * n * 32, "float32", data=input_A.data)
B = T.Buffer(n * n * 32, "float32", data=input_B.data)
for bx, tx in T.grid((n * n + 1) // 2, 64):
if bx * 64 + tx < n * n * 32:
B[bx * 64 + tx] = A[bx * 64 + tx]
class TestMultiAlloc(BaseCompare):
"""If multiple allocations occur, all are flattened."""
def before(A: T.Buffer((4, 32), "float32"), D: T.Buffer((4, 32), "float32")):
for i, j in T.grid(4, 32):
B = T.decl_buffer((4, 32), "float32", scope="global")
C = T.decl_buffer((4, 32), "float32", scope="global")
B[i, j] = A[i, j] + 1.0
C[i, j] = A[i, j] + B[i, j]
D[i, j] = C[i, j] * 2.0
def expected(input_A: T.Buffer((4, 32), "float32"), input_D: T.Buffer((4, 32), "float32")):
A = T.Buffer(128, "float32", data=input_A.data)
D = T.Buffer(128, "float32", data=input_D.data)
for i, j in T.grid(4, 32):
B_data = T.allocate([128], "float32", scope="global")
B = T.Buffer([128], "float32", scope="global", data=B_data)
C_data = T.allocate([128], "float32", scope="global")
C = T.Buffer([128], "float32", scope="global", data=C_data)
B[i * 32 + j] = A[i * 32 + j] + 1.0
C[i * 32 + j] = A[i * 32 + j] + B[i * 32 + j]
D[i * 32 + j] = C[i * 32 + j] * 2.0
class TestStrided(BaseCompare):
"""Indices for flattened buffers use the specified striding."""
def before(A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32")):
for i0 in T.serial(4):
B = T.decl_buffer([4, 17], "float32")
B_1 = T.Buffer([4, 16], dtype="float32", data=B.data, strides=[17, 1])
for i1, j in T.grid(4, 16):
B_1[i1, j] = A[i0 * 4 + i1, j] + 1.0
for i1, j in T.grid(4, 16):
C[i0 * 4 + i1, j] = B_1[i1, j] * 2.0
def expected(input_A: T.Buffer((16, 16), "float32"), input_C: T.Buffer((16, 16), "float32")):
A = T.Buffer(256, dtype="float32", data=input_A.data)
C = T.Buffer(256, dtype="float32", data=input_C.data)
for i0 in T.serial(0, 4):
B_new_data = T.allocate([68], "float32", scope="global")
B_new = T.Buffer([68], "float32", scope="global", data=B_new_data)
for i1 in T.serial(0, 4):
for j in T.serial(0, 16):
B_new[i1 * 17 + j] = A[i0 * 64 + i1 * 16 + j] + 1.0
for i1 in T.serial(0, 4):
for j in T.serial(0, 16):
C[i0 * 64 + i1 * 16 + j] = B_new[i1 * 17 + j] * 2.0
class TestBoolean(BaseCompare):
    """Boolean buffers should be replaced by a backing int8 array"""
    def before(A: T.Buffer(10, "bool"), B: T.Buffer(10, "bool")) -> None:
        for i0 in T.serial(10):
            B[i0] = A[i0]
    # Expected: int8 storage, with explicit bool<->int8 casts around each access.
    def expected(input_A: T.Buffer(10, "bool"), input_B: T.Buffer(10, "bool")) -> None:
        A = T.Buffer(10, dtype="int8", data=input_A.data)
        B = T.Buffer(10, dtype="int8", data=input_B.data)
        # body
        for i0 in T.serial(10):
            B[i0] = T.cast(T.cast(A[i0], "bool"), "int8")
class TestLowerTE(BaseCompare):
    """FlattenBuffer should do nothing on TE-based functions"""

    def before(self):
        # Build a trivial TE stage and lower it to an IRModule PrimFunc.
        placeholder = te.placeholder((1,))
        computed = te.compute((1,), lambda axis: placeholder[axis] + 2)
        sched = te.create_schedule(computed.op)
        lowered = tvm.driver.build_module.schedule_to_module(sched, [placeholder, computed])
        return lowered["main"]

    # The pass must behave as the identity on this input.
    expected = before
class TestFlattenInsideBlock(BaseCompare):
"""Flattening access inside a block flattens the accessed region."""
def before():
A = T.alloc_buffer([32, 32])
for i, j in T.grid(32, 32):
with T.block("block"):
T.reads(A[i, j])
T.evaluate(A[i, j])
def expected():
A = T.alloc_buffer([1024])
for i, j in T.grid(32, 32):
with T.block("block"):
T.reads(A[i * 32 + j])
T.evaluate(A[i * 32 + j])
class TestNoChangeTo2DPhysicalBuffer(BaseCompare):
    """Flattening preserves axis separators."""
    # The axis separator marks a 2-d physical buffer, so the pass leaves it as-is.
    def before():
        A = T.alloc_buffer([32, 32], axis_separators=[1])
        for i, j in T.grid(32, 32):
            T.evaluate(A[i, j])
    expected = before
class TestFlattenAllocBufferWithAxisSeparators(BaseCompare):
"""Flattening preserves axis separators"""
def before():
A = T.alloc_buffer([2, 3, 5, 7, 11, 13], axis_separators=[3])
for i0, i1, i2, i3, i4, i5 in T.grid(2, 3, 5, 7, 11, 13):
T.evaluate(A[i0, i1, i2, i3, i4, i5])
def expected():
A = T.alloc_buffer([30, 1001], axis_separators=[1])
for i0, i1, i2, i3, i4, i5 in T.grid(2, 3, 5, 7, 11, 13):
T.evaluate(A[i0 * 15 + i1 * 5 + i2, i3 * 143 + i4 * 13 + i5])
class TestFlattenDeclBufferWithAxisSeparators(BaseCompare):
"""Flattening preserves axis separators
Like TestFlattenAllocBufferWithAxisSeparators, but the allocations
is done using Allocate/DeclBuffer, rather than through
BlockNode::alloc_buffers.
"""
def before():
A = T.decl_buffer([2, 3, 5, 7, 11, 13], axis_separators=[3])
for i0, i1, i2, i3, i4, i5 in T.grid(2, 3, 5, 7, 11, 13):
T.evaluate(A[i0, i1, i2, i3, i4, i5])
def expected():
A_data = T.allocate([30, 1001], dtype="float32", scope="global")
A = T.Buffer([30, 1001], dtype="float32", scope="global", axis_separators=[1], data=A_data)
for i0, i1, i2, i3, i4, i5 in T.grid(2, 3, 5, 7, 11, 13):
T.evaluate(A[i0 * 15 + i1 * 5 + i2, i3 * 143 + i4 * 13 + i5])
def test_lower_2d_physical_memory():
    """Axis separators should preserve 2-d buffers through lowering.
    A catch-all test to ensure that defining axis_separators is
    sufficient to maintain non-flat buffer descriptions through all
    lowering steps.
    """
    # This test doesn't use CompareBeforeAfter, because the after step
    # is not currently expressible in TVMScript.  This test can be
    # re-written after https://github.com/apache/tvm/pull/12412.
    @T.prim_func
    def func():
        buf = T.alloc_buffer(
            [1, 1],
            dtype="int32",
            scope="global",
            axis_separators=[1],
        )
        buf[0, 0] = 0
    lowered = tvm.lower(func)["main"]
    assert isinstance(lowered.body, tvm.tir.Allocate)
    # A 2-element extent list means the allocation stayed 2-d.
    assert list(lowered.body.extents) == [1, 1], (
        "Non-flat buffer allocations, "
        "marked by axis_separators, "
        "flattened to flat memory allocation."
    )
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 14,303 | 37.555256 | 99 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_schedule_rule_random_compute_location.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import tvm
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
)
from tvm.script import tir as T
from tvm.target import Target
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,no-self-argument,line-too-long,chained-comparison,not-callable,too-many-nested-blocks
@tvm.script.ir_module
class Add:
@T.prim_func
def main(a: T.handle, b: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, [2048, 2048, 2048], dtype="float32")
B = T.match_buffer(b, [2048, 2048, 2048], dtype="float32")
A_cached = T.alloc_buffer([2048, 2048, 2048], dtype="float32")
# body
for i, j, k in T.grid(2048, 2048, 2048):
with T.block("move"):
vi, vj, vk = T.axis.remap("SSS", [i, j, k])
T.reads([A[vi, vj, vk]])
T.writes([A_cached[vi, vj, vk]])
A_cached[vi, vj, vk] = A[vi, vj, vk]
for i0, j0, i1, j1, k0, i2, j2, k1 in T.grid(128, 64, 4, 4, 64, 4, 8, 32):
with T.block("add"):
vi = T.axis.spatial(2048, i0 * 16 + i1 * 4 + i2)
vj = T.axis.spatial(2048, j0 * 32 + j1 * 8 + j2)
vk = T.axis.spatial(2048, k0 * 32 + k1)
T.reads([A_cached[vi, vj, vk]])
T.writes([B[vi, vj, vk]])
B[vi, vj, vk] = A_cached[vi, vj, vk] + T.float32(1)
# pylint: enable=no-member,invalid-name,unused-variable,no-self-argument,line-too-long,chained-comparison,not-callable,too-many-nested-blocks
# fmt: on
def test_random_compute_location():
    # Expected sketch: the "move" (cache) block has been compute_at-ed into the
    # loop nest of "add" at the sampled location (loop index 5, i.e. inside i2).
    @T.prim_func
    def add_0(
        A: T.Buffer((2048, 2048, 2048), "float32"),
        B: T.Buffer((2048, 2048, 2048), "float32"),
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main"})
        # body
        # with T.block("root")
        A_cached = T.alloc_buffer([2048, 2048, 2048], dtype="float32")
        for i0, j0, i1, j1, k0, i2 in T.grid(128, 64, 4, 4, 64, 4):
            for ax0, ax1, ax2 in T.grid(1, 8, 32):
                with T.block("move"):
                    vi = T.axis.spatial(2048, i0 * 16 + i1 * 4 + i2 + ax0)
                    vj = T.axis.spatial(2048, j0 * 32 + j1 * 8 + ax1)
                    vk = T.axis.spatial(2048, k0 * 32 + ax2)
                    T.reads(A[vi, vj, vk])
                    T.writes(A_cached[vi, vj, vk])
                    A_cached[vi, vj, vk] = A[vi, vj, vk]
            for j2, k1 in T.grid(8, 32):
                with T.block("add"):
                    vi = T.axis.spatial(2048, i0 * 16 + i1 * 4 + i2)
                    vj = T.axis.spatial(2048, j0 * 32 + j1 * 8 + j2)
                    vk = T.axis.spatial(2048, k0 * 32 + k1)
                    T.reads(A_cached[vi, vj, vk])
                    T.writes(B[vi, vj, vk])
                    B[vi, vj, vk] = A_cached[vi, vj, vk] + T.float32(1)
    # The decision that produces add_0 from Add.
    decision_0 = [
        ("SampleComputeLocation", 5),
    ]
    mod = Add
    actual = generate_design_space(
        kind="llvm",
        mod=mod,
        target=Target("llvm"),
        types=None,
        sch_rules=[ms.schedule_rule.RandomComputeLocation()],
    )
    # Verify the generated sketches and the sampling decisions behind them.
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[add_0],
        expected_decisions=[decision_0],
    )
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_random_compute_location()
| 4,353 | 38.581818 | 142 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_postproc_rewrite_unbound_block.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import tvm
from tvm import meta_schedule as ms
from tvm import tir
from tvm.script import tir as T
from tvm.target import Target
def _target() -> Target:
    """Return the CUDA target (max 1024 threads/block) used by these tests."""
    cuda_spec = "cuda --max_threads_per_block=1024"
    return Target(cuda_spec, host="llvm")
def _create_context(mod, target) -> ms.TuneContext:
    """Build a TuneContext whose only postprocessor is RewriteUnboundBlock."""
    space_gen = ms.space_generator.PostOrderApply(
        sch_rules=[],
        postprocs=[ms.postproc.RewriteUnboundBlock()],
        mutator_probs={},
    )
    return ms.TuneContext(
        mod=mod,
        target=target,
        space_generator=space_gen,
        task_name="test",
    )
# pylint: disable=no-member,invalid-name,unused-variable,no-self-argument,line-too-long,chained-comparison,not-callable,too-many-nested-blocks
@tvm.script.ir_module
class Before_cooperative_fetch:
    # A 512x512 elementwise add whose loops carry no thread bindings;
    # input fixture for RewriteUnboundBlock.
    @T.prim_func
    def main(var_A: T.handle, var_B: T.handle) -> None:
        A = T.match_buffer(var_A, [512, 512], dtype="float32")
        B = T.match_buffer(var_B, [512, 512], dtype="float32")
        for i, j in T.grid(512, 512):
            with T.block("C"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = A[vi, vj] + 1.0
@tvm.script.ir_module
class After_cooperative_fetch:
    # Expected fixture: the fused i/j loop is split and bound to
    # blockIdx.x/threadIdx.x (256 * 1024 == 512 * 512, so no predicate needed).
    @T.prim_func
    def main(var_A: T.handle, var_B: T.handle) -> None:
        A = T.match_buffer(var_A, [512, 512], dtype="float32")
        B = T.match_buffer(var_B, [512, 512], dtype="float32")
        for i_j_fused_0 in T.thread_binding(256, thread="blockIdx.x"):
            for i_j_fused_1 in T.thread_binding(1024, thread="threadIdx.x"):
                with T.block("C"):
                    vi = T.axis.spatial(512, (i_j_fused_0 * 1024 + i_j_fused_1) // 512)
                    vj = T.axis.spatial(512, (i_j_fused_0 * 1024 + i_j_fused_1) % 512)
                    B[vi, vj] = A[vi, vj] + 1.0
@tvm.script.ir_module
class Before_norm_bmn:
@T.prim_func
def main(A: T.Buffer((1, 256, 256), "float32"), D: T.Buffer((1,), "float32")) -> None:
C = T.alloc_buffer([1], dtype="float32")
for i0, i1, i2 in T.grid(1, 256, 256):
with T.block("C"):
b, i, j = T.axis.remap("SRR", [i0, i1, i2])
with T.init():
C[b] = T.float32(0)
C[b] = C[b] + A[b, i, j] * A[b, i, j]
for i0 in T.serial(1):
with T.block("D"):
b = T.axis.S(1, i0)
D[b] = T.sqrt(C[b], dtype="float32")
@tvm.script.ir_module
class After_norm_bmn:
@T.prim_func
def main(A: T.Buffer((1, 256, 256), "float32"), D: T.Buffer((1,), "float32")) -> None:
C = T.alloc_buffer([1], dtype="float32")
for i0_fused_0 in T.thread_binding(1, thread="blockIdx.x"):
for i0_fused_1 in T.thread_binding(1, thread="threadIdx.x"):
for i1, i2 in T.grid(256, 256):
with T.block("C"):
b = T.axis.S(1, 0)
i, j = T.axis.remap("RR", [i1, i2])
with T.init():
C[b] = T.float32(0)
C[b] = C[b] + A[b, i, j] * A[b, i, j]
for i0_fused_0 in T.thread_binding(1, thread="blockIdx.x"):
for i0_fused_1 in T.thread_binding(1, thread="threadIdx.x"):
with T.block("D"):
b = T.axis.S(1, 0)
D[b] = T.sqrt(C[b], dtype="float32")
@tvm.script.ir_module
class Bert_fused_reshape_transpose_reshape:
@T.prim_func
def main(
placeholder: T.Buffer((12, 64, 64), "float32"), T_reshape: T.Buffer((64, 768), "float32")
) -> None:
for i0_i1_fused_0, i0_i1_fused_1 in T.grid(1536, 32):
with T.block("T_reshape_1"):
ax0 = T.axis.spatial(64, (i0_i1_fused_0 * 32 + i0_i1_fused_1) // 768)
ax1 = T.axis.spatial(768, (i0_i1_fused_0 * 32 + i0_i1_fused_1) % 768)
T.reads(placeholder[ax1 % 768 // 64, (ax1 // 768 + ax0) % 64, ax1 % 64])
T.writes(T_reshape[ax0, ax1])
T_reshape[ax0, ax1] = placeholder[
((ax1 % 64 // 64 + (ax1 // 768 + ax0) % 64) // 64 + ax1 % 768 // 64) % 12,
(ax1 % 64 // 64 + (ax1 // 768 + ax0) % 64) % 64,
ax1 % 64 % 64,
]
@tvm.script.ir_module
class Bert_fused_reshape_transpose_reshape_large:
@T.prim_func
def main(
placeholder: T.Buffer((12, 64, 64), "float32"), T_reshape: T.Buffer((64, 768), "float32")
) -> None:
for i0_i1_fused_0, i0_i1_fused_1 in T.grid(1536000, 32):
with T.block("T_reshape_1"):
ax0 = T.axis.spatial(64, (i0_i1_fused_0 * 32 + i0_i1_fused_1) // 768)
ax1 = T.axis.spatial(768, (i0_i1_fused_0 * 32 + i0_i1_fused_1) % 768)
T.reads(placeholder[ax1 % 768 // 64, (ax1 // 768 + ax0) % 64, ax1 % 64])
T.writes(T_reshape[ax0, ax1])
T_reshape[ax0, ax1] = placeholder[
((ax1 % 64 // 64 + (ax1 // 768 + ax0) % 64) // 64 + ax1 % 768 // 64) % 12,
(ax1 % 64 // 64 + (ax1 // 768 + ax0) % 64) % 64,
ax1 % 64 % 64,
]
@tvm.script.ir_module
class Bert_fused_reshape_transpose_reshape_after_rub:
@T.prim_func
def main(
placeholder: T.Buffer((12, 64, 64), "float32"), T_reshape: T.Buffer((64, 768), "float32")
) -> None:
for i0_i1_fused_0_i0_i1_fused_1_fused_0 in T.thread_binding(48, thread="blockIdx.x"):
for i0_i1_fused_0_i0_i1_fused_1_fused_1 in T.thread_binding(1024, thread="threadIdx.x"):
with T.block("T_reshape_1"):
ax0 = T.axis.spatial(
64,
(
i0_i1_fused_0_i0_i1_fused_1_fused_0 * 1024
+ i0_i1_fused_0_i0_i1_fused_1_fused_1
)
// 768,
)
ax1 = T.axis.spatial(
768,
(
i0_i1_fused_0_i0_i1_fused_1_fused_0 * 1024
+ i0_i1_fused_0_i0_i1_fused_1_fused_1
)
% 768,
)
T.reads(placeholder[ax1 % 768 // 64, (ax1 // 768 + ax0) % 64, ax1 % 64])
T.writes(T_reshape[ax0, ax1])
T_reshape[ax0, ax1] = placeholder[
((ax1 % 64 // 64 + (ax1 // 768 + ax0) % 64) // 64 + ax1 % 768 // 64) % 12,
(ax1 % 64 // 64 + (ax1 // 768 + ax0) % 64) % 64,
ax1 % 64 % 64,
]
@tvm.script.ir_module
class Bert_fused_reshape_transpose_reshape_after_rub_large:
@T.prim_func
def main(
placeholder: T.Buffer((12, 64, 64), "float32"), T_reshape: T.Buffer((64, 768), "float32")
) -> None:
# body
# with T.block("root")
for i0_i1_fused_0_i0_i1_fused_1_fused_1 in T.thread_binding(256, thread="blockIdx.x"):
for i0_i1_fused_0_i0_i1_fused_1_fused_2 in T.thread_binding(1024, thread="threadIdx.x"):
for i0_i1_fused_0_i0_i1_fused_1_fused_0 in T.serial(188):
with T.block("T_reshape_1"):
ax0 = T.axis.spatial(
64,
(
i0_i1_fused_0_i0_i1_fused_1_fused_0 * 262144
+ i0_i1_fused_0_i0_i1_fused_1_fused_1 * 1024
+ i0_i1_fused_0_i0_i1_fused_1_fused_2
)
// 768,
)
ax1 = T.axis.spatial(
768,
(
i0_i1_fused_0_i0_i1_fused_1_fused_0 * 262144
+ i0_i1_fused_0_i0_i1_fused_1_fused_1 * 1024
+ i0_i1_fused_0_i0_i1_fused_1_fused_2
)
% 768,
)
T.where(
(
i0_i1_fused_0_i0_i1_fused_1_fused_0 * 256
+ i0_i1_fused_0_i0_i1_fused_1_fused_1
)
* 1024
+ i0_i1_fused_0_i0_i1_fused_1_fused_2
< 49152000
)
T.reads(placeholder[ax1 % 768 // 64, (ax1 // 768 + ax0) % 64, ax1 % 64])
T.writes(T_reshape[ax0, ax1])
T_reshape[ax0, ax1] = placeholder[
((ax1 % 64 // 64 + (ax1 // 768 + ax0) % 64) // 64 + ax1 % 768 // 64)
% 12,
(ax1 % 64 // 64 + (ax1 // 768 + ax0) % 64) % 64,
ax1 % 64 % 64,
]
@T.prim_func
def before_unrolled_loop(
placeholder: T.Buffer((1, 56, 56, 64), "float32"),
) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
bgemm = T.alloc_buffer([6, 6, 196, 64], dtype="float32")
inverse = T.alloc_buffer([4, 4, 196, 64], dtype="float32")
for i2_0, i3_0, i2_1, i3_1 in T.grid(98, 4, 2, 16):
for i0 in T.unroll(4):
for i1 in T.unroll(4):
for i4 in T.unroll(6):
for i5 in T.unroll(6):
with T.block("inverse"):
vh, vw = T.axis.remap("SS", [i0, i1])
p = T.axis.spatial(196, i2_0 * 2 + i2_1)
co = T.axis.spatial(64, i3_0 * 16 + i3_1)
r_a, r_b = T.axis.remap("RR", [i4, i5])
T.reads(bgemm[r_a, r_b, p, co])
T.writes(inverse[vh, vw, p, co])
with T.init():
inverse[vh, vw, p, co] = T.float32(0)
inverse[vh, vw, p, co] = inverse[vh, vw, p, co] + bgemm[r_a, r_b, p, co]
# Expected output fixture: the four outer loops of `before_unrolled_loop` fused
# into one 12544-iteration space, split 13x1024 and bound to blockIdx.x /
# threadIdx.x, with a T.where guard masking the 13*1024 - 12544 excess threads.
# The inner unrolled loops are preserved.
@T.prim_func
def after_unrolled_loop(
    placeholder: T.Buffer((1, 56, 56, 64), "float32"),
) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    # body
    # with T.block("root")
    bgemm = T.alloc_buffer([6, 6, 196, 64], dtype="float32")
    inverse = T.alloc_buffer([4, 4, 196, 64], dtype="float32")
    for i2_0_i3_0_i2_1_i3_1_fused_0 in T.thread_binding(13, thread="blockIdx.x"):
        for i2_0_i3_0_i2_1_i3_1_fused_1 in T.thread_binding(1024, thread="threadIdx.x"):
            for i0 in T.unroll(4):
                for i1 in T.unroll(4):
                    for i4 in T.unroll(6):
                        for i5 in T.unroll(6):
                            with T.block("inverse"):
                                vh, vw = T.axis.remap("SS", [i0, i1])
                                # p/co recover the original i2_0*2+i2_1 and
                                # i3_0*16+i3_1 indices from the fused loop var.
                                p = T.axis.spatial(
                                    196,
                                    (
                                        i2_0_i3_0_i2_1_i3_1_fused_0 * 1024
                                        + i2_0_i3_0_i2_1_i3_1_fused_1
                                    )
                                    // 128
                                    * 2
                                    + (
                                        i2_0_i3_0_i2_1_i3_1_fused_0 * 1024
                                        + i2_0_i3_0_i2_1_i3_1_fused_1
                                    )
                                    % 32
                                    // 16,
                                )
                                co = T.axis.spatial(
                                    64,
                                    (
                                        i2_0_i3_0_i2_1_i3_1_fused_0 * 1024
                                        + i2_0_i3_0_i2_1_i3_1_fused_1
                                    )
                                    % 128
                                    // 32
                                    * 16
                                    + (
                                        i2_0_i3_0_i2_1_i3_1_fused_0 * 1024
                                        + i2_0_i3_0_i2_1_i3_1_fused_1
                                    )
                                    % 16,
                                )
                                r_a, r_b = T.axis.remap("RR", [i4, i5])
                                # Mask the tail threads: 13 * 1024 > 12544.
                                T.where(
                                    i2_0_i3_0_i2_1_i3_1_fused_0 * 1024 + i2_0_i3_0_i2_1_i3_1_fused_1
                                    < 12544
                                )
                                T.reads(bgemm[r_a, r_b, p, co])
                                T.writes(inverse[vh, vw, p, co])
                                with T.init():
                                    inverse[vh, vw, p, co] = T.float32(0)
                                inverse[vh, vw, p, co] = (
                                    inverse[vh, vw, p, co] + bgemm[r_a, r_b, p, co]
                                )
# pylint: enable=no-member,invalid-name,unused-variable,no-self-argument,line-too-long,chained-comparison,not-callable,too-many-nested-blocks
# fmt: on
def test_rewrite_cooperative_fetch():
    """The first registered postproc rewrites the cooperative-fetch module as expected."""
    target = _target()
    mod = Before_cooperative_fetch
    ctx = _create_context(mod, target)
    schedule = tir.Schedule(mod, debug_mask="all")
    schedule.enter_postproc()
    postproc = ctx.space_generator.postprocs[0]
    assert postproc.apply(schedule)
    tvm.ir.assert_structural_equal(schedule.mod, After_cooperative_fetch)
def test_rewrite_norm_bmn():
    """The first registered postproc rewrites the norm-bmn module as expected."""
    target = _target()
    mod = Before_norm_bmn
    ctx = _create_context(mod, target)
    schedule = tir.Schedule(mod, debug_mask="all")
    schedule.enter_postproc()
    postproc = ctx.space_generator.postprocs[0]
    assert postproc.apply(schedule)
    tvm.ir.assert_structural_equal(schedule.mod, After_norm_bmn)
def test_rewrite_cuda_loop_split_no_reduction():
    """CUDA loop split on a reduction-free fused reshape/transpose workload."""
    target = Target("nvidia/nvidia-v100", host="llvm")
    mod = Bert_fused_reshape_transpose_reshape
    ctx = _create_context(mod, target)
    schedule = tir.Schedule(mod, debug_mask="all")
    schedule.enter_postproc()
    postproc = ctx.space_generator.postprocs[0]
    assert postproc.apply(schedule)
    tvm.ir.assert_structural_equal(schedule.mod, Bert_fused_reshape_transpose_reshape_after_rub)
def test_rewrite_cuda_loop_split_no_reduction_large():
    """Same as test_rewrite_cuda_loop_split_no_reduction but on the larger workload."""
    target = Target("nvidia/nvidia-v100", host="llvm")
    mod = Bert_fused_reshape_transpose_reshape_large
    ctx = _create_context(mod, target)
    schedule = tir.Schedule(mod, debug_mask="all")
    schedule.enter_postproc()
    postproc = ctx.space_generator.postprocs[0]
    assert postproc.apply(schedule)
    tvm.ir.assert_structural_equal(
        schedule.mod, Bert_fused_reshape_transpose_reshape_after_rub_large
    )
def test_rewrite_cuda_loop_split_for_kind():
    """Thread-binding split preserves the inner unrolled (non-serial) loops."""
    target = Target("nvidia/nvidia-v100", host="llvm")
    mod = before_unrolled_loop
    ctx = _create_context(mod, target)
    schedule = tir.Schedule(mod, debug_mask="all")
    schedule.enter_postproc()
    postproc = ctx.space_generator.postprocs[0]
    assert postproc.apply(schedule)
    tvm.ir.assert_structural_equal(schedule.mod["main"], after_unrolled_loop)
if __name__ == "__main__":
    # Run each test directly (without pytest collection) when executed as a script.
    test_rewrite_cooperative_fetch()
    test_rewrite_norm_bmn()
    test_rewrite_cuda_loop_split_no_reduction()
    test_rewrite_cuda_loop_split_no_reduction_large()
    test_rewrite_cuda_loop_split_for_kind()
| 16,299 | 42.121693 | 142 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_instruction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
# mypy: ignore-errors
import sys
import pytest
import tvm.testing
from tvm.tir.schedule import BlockRV, Instruction, InstructionKind, LoopRV
def test_inst_kind_get():
    """Looking up a registered InstructionKind returns its name and purity flag."""
    kind = InstructionKind.get("EnterPostproc")
    assert kind.name == "EnterPostproc"
    # EnterPostproc mutates schedule state, so it is registered as impure.
    assert not kind.is_pure
def test_inst_construct_1():
    """Constructing a GetLoops instruction wires up inputs and outputs verbatim."""
    block = BlockRV()
    loops = [LoopRV(), LoopRV()]
    inst = Instruction(
        kind=InstructionKind.get("GetLoops"),
        inputs=[block],
        attrs=[],
        outputs=loops,
    )
    assert str(inst) == "_, _ = sch.get_loops(block=_)"
    assert (len(inst.inputs), len(inst.attrs), len(inst.outputs)) == (1, 0, 2)
    assert inst.kind.same_as(InstructionKind.get("GetLoops"))
    assert inst.inputs[0].same_as(block)
    # Output RVs must be the very objects passed in, in order.
    for produced, expected in zip(inst.outputs, loops):
        assert produced.same_as(expected)
def test_inst_construct_2():
    """A ComputeInline instruction has one input and neither attrs nor outputs."""
    target_block = BlockRV()
    inst = Instruction(
        kind=InstructionKind.get("ComputeInline"),
        inputs=[target_block],
        attrs=[],
        outputs=[],
    )
    assert str(inst) == "sch.compute_inline(block=_)"
    assert (len(inst.inputs), len(inst.attrs), len(inst.outputs)) == (1, 0, 0)
    assert inst.kind.same_as(InstructionKind.get("ComputeInline"))
    assert inst.inputs[0].same_as(target_block)
if __name__ == "__main__":
    # Delegate to tvm.testing.main() so pytest discovery/flags work when run directly.
    tvm.testing.main()
| 2,237 | 30.971429 | 74 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_rewrite_unsafe_select.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_rewrite_Select():
    """RewriteUnsafeSelect lowers Selects with possibly-unsafe operands.

    Three cases:
      * ``y``  — Select whose taken branch reads ``A[i - 1]`` (unsafe when
        speculated), expected to lower to the ``tir.if_then_else`` intrinsic;
      * ``z``  — nested Select inside the condition, likewise lowered;
      * ``a``  — Select over already-checked operands, expected to remain a
        ``tir.Select`` node.
    """
    ib = tvm.tir.ir_builder.create()
    A = ib.allocate("float32", 100, name="A", scope="global")
    i = te.var("i")
    y = tvm.tir.Select(i > 1, A[i - 1], 1.0)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i], tvm.tir.Evaluate(y)))
    yy = tvm.tir.transform.RewriteUnsafeSelect()(mod)["main"].body.value
    z = tvm.tir.Select(tvm.tir.Select(i > 1, A[i - 1], 1.0) > 0.0, A[i], 0.1)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i], tvm.tir.Evaluate(z)))
    zz = tvm.tir.transform.RewriteUnsafeSelect()(mod)["main"].body.value
    a = tvm.tir.Select(tvm.tir.floordiv(i, 4) > 10, y, z)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i], tvm.tir.Evaluate(a)))
    aa = tvm.tir.transform.RewriteUnsafeSelect()(mod)["main"].body.value
    builtin_if_then_else = tvm.ir.Op.get("tir.if_then_else")
    assert yy.op.same_as(builtin_if_then_else)
    # BUGFIX: this previously re-asserted `yy`, leaving the nested-Select
    # result `zz` computed but unchecked.
    assert zz.op.same_as(builtin_if_then_else)
    assert isinstance(aa, tvm.tir.Select)
if __name__ == "__main__":
    # Run directly as a script (no pytest collection).
    test_rewrite_Select()
| 1,851 | 38.404255 | 77 | py |
tvm | tvm-main/tests/python/unittest/test_arith_solve_linear_inequality.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import random
import sys
import pytest
import tvm
from tvm import te, arith, ir, tir, testing
@pytest.mark.skip(reason="See https://github.com/apache/tvm/issues/11458")
def test_solution_consistency():
    """Fuzz solve_linear_inequalities with random systems of linear constraints.

    Intentionally non-deterministic: the seed is printed so failures can be
    reproduced and reported. Currently skipped (see the linked issue).
    """
    seed = random.randrange(sys.maxsize)
    print(
        "\nThis test is intentionally non-deterministic, "
        "if it fails please report it in github issue together with this seed {}\n".format(seed)
    )
    random.seed(seed)
    def _check(variables, formulas, coef=(-5, 5), bounds=(-20, 20)):
        # Build `formulas` random (in)equalities over `variables` vars with
        # coefficients/constants drawn from `coef`, vars ranged over `bounds`.
        vs = [te.var("x" + str(i)) for i in range(variables)]
        fs = []
        for i in range(formulas):
            s1 = sum([v * random.randint(coef[0], coef[1]) for v in vs])
            s1 += random.randint(coef[0], coef[1])
            s2 = sum([v * random.randint(coef[0], coef[1]) for v in vs])
            s2 += random.randint(coef[0], coef[1])
            op = random.choice([tir.expr.EQ, tir.expr.LE, tir.expr.LT, tir.expr.GE, tir.expr.GT])
            fs.append(op(s1, s2))
        vranges = {v: tvm.ir.expr.Range(bounds[0], bounds[1] + 1) for v in vs}
        # The condition form of the solution must be equivalent to the input.
        before = te.all(tir.const(1, "bool"), *fs)
        after = arith._ffi_api.SolveInequalitiesAsCondition(vs, vranges, fs)
        after = te.all(tir.const(1, "bool"), *after)
        testing.check_bool_expr_is_true(before == after, vranges)
        # The deskewed solution must round-trip consistently.
        solution = arith.solve_linear_inequalities(fs, vs, vranges, deskew_range=True)
        testing.check_int_constraints_trans_consistency(solution)
    for i in range(3):
        _check(1, 1)
    for i in range(3):
        _check(1, 2)
    for i in range(3):
        _check(2, 1)
    for i in range(3):
        _check(2, 2)
    for i in range(3):
        _check(2, 3)
    # Somewhere here coefficients in the results become too large, leading to overflow,
    # so we use smaller initial coefficients
    for i in range(5):
        _check(3, 3, coef=(-2, 2))
    for i in range(5):
        _check(3, 4, coef=(-2, 2))
    for i in range(5):
        _check(4, 3, coef=(-1, 1))
    for i in range(5):
        _check(10, 2, coef=(-1, 1), bounds=(0, 4))
    for i in range(5):
        _check(10, 3, coef=(0, 1), bounds=(0, 4))
def test_dual_variable():
    """Solve {x + y <= 20, x - y >= 10} over x in [-100, 100), y in [0, 10)."""
    x, y = te.var("x"), te.var("y")
    variables = [x, y]
    ranges = {
        x: tvm.ir.Range(-100, 100),
        y: tvm.ir.Range(0, 10),
    }
    problem = [
        tvm.tir.LE(x + y, 20),
        tvm.tir.GE(x - y, 10),
    ]
    # solution as conditions
    solution = arith._ffi_api.SolveInequalitiesAsCondition(variables, ranges, problem)
    assert ir.structural_equal(solution[0], x >= (y + 10))
    assert ir.structural_equal(solution[1], x <= (20 - y))
    assert ir.structural_equal(solution[2], y >= 0)
    assert ir.structural_equal(solution[3], y <= 5)
    # solve and get the ranges
    solution = arith.solve_linear_inequalities(problem, variables, ranges)
    # 0 <= y <=5
    assert solution.ranges[y].min == 0
    assert solution.ranges[y].extent == 6
    # y + 10 <= x <= 20 - y
    assert ir.structural_equal(solution.ranges[x].min, y + 10)
    assert solution.ranges[x].extent == 11  # max(10 - 2y)
    # deskew the solved ranges to be starting from zero
    solution = arith.solve_linear_inequalities(problem, variables, ranges, deskew_range=True)
    [x_new, y_new] = solution.dst.variables
    [rel] = solution.dst.relations
    assert ir.structural_equal(rel, (y_new * 2) + x_new <= 10)
    assert ir.structural_equal(solution.dst.ranges[x_new].min, 0)
    assert ir.structural_equal(solution.dst.ranges[x_new].extent, 11)
    assert ir.structural_equal(solution.dst.ranges[y_new].min, 0)
    assert ir.structural_equal(solution.dst.ranges[y_new].extent, 6)
    # Forward and backward substitutions between original and deskewed vars.
    assert ir.structural_equal(solution.src_to_dst[x], x_new + (y_new + 10))
    assert ir.structural_equal(solution.src_to_dst[y], y_new)
    assert ir.structural_equal(solution.dst_to_src[x_new], x - y - 10)
    assert ir.structural_equal(solution.dst_to_src[y_new], y)
def test_equal():
    """A system whose inequalities pin a single point collapses to x == 6, y == 4."""
    x = te.var("x")
    y = te.var("y")
    problem = [
        tvm.tir.GE(x + y, 10),
        tvm.tir.GE(x - y, 2),
        tvm.tir.LE(x, 6),
    ]
    solution = arith.solve_linear_inequalities(problem, [x, y])
    # Each variable's range degenerates to a single value.
    for var, value in ((x, 6), (y, 4)):
        assert solution.ranges[var].min == value
        assert solution.ranges[var].extent == 1
    deskewed = arith.solve_linear_inequalities(problem, [x, y], deskew_range=True)
    # Fully determined system: nothing left to solve after deskewing.
    assert len(deskewed.dst.variables) == 0
    assert len(deskewed.dst.ranges) == 0
    assert len(deskewed.dst.relations) == 0
    assert deskewed.src_to_dst[x] == 6
    assert deskewed.src_to_dst[y] == 4
def test_multi_equal():
    """x is fixed to 6 and tied to z*y; the non-linear z*y relation must survive."""
    x, y, z = te.var("x"), te.var("y"), te.var("z")
    problem = [
        tvm.tir.LE(x, 6),
        tvm.tir.GE(x, 6),
        tvm.tir.GE(x - z * y, 0),
        tvm.tir.LE(x - z * y, 0),
    ]
    solution = arith.solve_linear_inequalities(problem, [x, y, z])
    assert solution.ranges[x].min == 6
    assert solution.ranges[x].extent == 1
    assert len(solution.relations) == 3
    assert ir.structural_equal(solution.relations[0], x == z * y)
    assert isinstance(solution.relations[1], tvm.tir.LE)
    assert solution.relations[1].b == 0
    assert isinstance(solution.relations[2], tvm.tir.LE)
    assert solution.relations[2].b == 0
    # (z*y - 6) <= 0 && (6 - z*y) <= 0
    ana = tvm.arith.Analyzer()
    assert ana.simplify(solution.relations[1].a + solution.relations[2].a) == 0
    assert ir.structural_equal(solution.relations[1].a, (z * y - 6)) or ir.structural_equal(
        solution.relations[2].a, (z * y - 6)
    )
    solution = arith.solve_linear_inequalities(problem, [x, y, z], deskew_range=True)
    # y and z stay free; x is substituted by its solved constant.
    assert solution.src_to_dst[y] == y
    assert solution.src_to_dst[z] == z
    assert solution.src_to_dst[x] == 6
def test_no_solution():
    """An infeasible system must reduce to the single relation `False`."""
    x = te.var("x0")
    vranges = {x: tvm.ir.Range.from_min_extent(-20, 41)}
    problem = [-x - 4 <= -5 * x + 2, x * 4 + 5 <= x * 5]
    solution = arith.solve_linear_inequalities(problem, [x], vranges, deskew_range=True)
    assert list(solution.dst.variables) == []
    [rel] = solution.dst.relations
    assert ir.structural_equal(rel, False)
    # No substitutions exist for an empty solution space.
    assert len(solution.src_to_dst) == 0
    assert len(solution.dst_to_src) == 0
    solution = arith.solve_linear_inequalities(problem, [x], vranges)
    assert len(solution.variables) == 0
    assert len(solution.ranges) == 0
    [rel] = solution.relations
    assert not rel
def test_unbound_var_range():
    """A var whose range extent is symbolic stays unsolved but keeps its relations."""
    x = te.var("x0")
    free_var = te.var("fv")
    # Extent depends on another free variable, so it is not a constant bound.
    symbolic_extent = tvm.tir.Cast("int32", 1 + tvm.tir.log(free_var))
    vranges = {x: tvm.ir.Range.from_min_extent(0, symbolic_extent)}
    solution = arith.solve_linear_inequalities([x > 3], [x], vranges)
    assert len(solution.variables) == 1
    assert len(solution.ranges) == 0
    assert len(solution.relations) == 3
if __name__ == "__main__":
    # Delegate to tvm.testing.main() so pytest discovery/flags work when run directly.
    tvm.testing.main()
| 7,719 | 34.740741 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_convert_for_loops_serial.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm.script import tir as T
from tvm.tir import stmt_functor
# fmt: off
# Fixture: quantized 1x1 conv2d (pad -> GEMM-like reduction -> requantize/clip)
# whose outer loops are T.parallel. ConvertForLoopsToSerial must demote every
# parallel loop here to serial (checked by the test below).
@T.prim_func
def fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2(placeholder_30: T.handle, placeholder_31: T.handle, placeholder_32: T.handle, T_cast_8: T.handle) -> None:
    # function attr dict
    T.func_attr({"global_symbol": "fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2", "tir.noalias": True})
    placeholder_33 = T.match_buffer(placeholder_30, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
    placeholder_34 = T.match_buffer(placeholder_31, [3072], dtype="int16", elem_offset=0, align=64, offset_factor=1)
    placeholder_35 = T.match_buffer(placeholder_32, [16], dtype="int32", elem_offset=0, align=64, offset_factor=1)
    T_cast_9 = T.match_buffer(T_cast_8, [12544], dtype="int16", elem_offset=0, align=64, offset_factor=1)
    # body
    PaddedInput_3 = T.decl_buffer([150528], "int16")
    for i0_i1_fused_3 in T.parallel(0, 28):
        for i2_3, i3_3 in T.grid(28, 192):
            PaddedInput_3[(((i0_i1_fused_3*5376) + (i2_3*192)) + i3_3) ] = placeholder_33[(((i0_i1_fused_3*5376) + (i2_3*192)) + i3_3)]
    for ax0_ax1_fused_ax2_fused_3 in T.parallel(0, 784):
        for ax3_2 in T.serial(0, 16):
            Conv2dOutput_3 = T.decl_buffer([1], "int32")
            Conv2dOutput_3[0] = 0
            for rc_3 in T.serial(0, 192):
                Conv2dOutput_3[0] = (Conv2dOutput_3[0] + (T.cast(PaddedInput_3[((ax0_ax1_fused_ax2_fused_3*192) + rc_3)], "int32")*T.cast(placeholder_34[((rc_3*16) + ax3_2)], "int32")))
            T_cast_9[((ax0_ax1_fused_ax2_fused_3*16) + ax3_2)] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_3[0] + placeholder_35[ax3_2]), 1764006585, 31, -7, dtype="int32"), 255), 0), "uint8"), "int16")
def test_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2():
    """After ConvertForLoopsToSerial, every For node left in the module is serial."""
    mod = tvm.IRModule.from_expr(fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2)
    converted = tvm.tir.transform.ConvertForLoopsToSerial()(mod)

    def _assert_serial(node):
        # Visitor: any surviving parallel/vectorized/unrolled loop is a failure.
        if isinstance(node, tvm.tir.For):
            assert node.kind == tvm.tir.ForKind.SERIAL

    for func in converted.functions.values():
        stmt_functor.post_order_visit(func.body, _assert_serial)
if __name__ == "__main__":
    # Delegate to tvm.testing.main() so pytest discovery/flags work when run directly.
    tvm.testing.main()
| 3,148 | 48.984127 | 219 | py |
tvm | tvm-main/tests/python/unittest/test_gen_requirements.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for gen_requirements, found in python/."""
import collections
import contextlib
import os
import sys
import tvm
import tvm.testing
import pytest
# Insert the parent dir to python/tvm into the import path, so that gen_requirements may be
# imported.
sys.path.insert(0, os.path.dirname(tvm.__file__))
try:
import gen_requirements
finally:
sys.path.pop(0)
@contextlib.contextmanager
def patch(obj, **kw):
    """Temporarily override attributes of ``obj``, restoring them on exit.

    Parameters
    ----------
    obj : object
        Object (here, the ``gen_requirements`` module) whose attributes are
        replaced for the duration of the ``with`` block.
    **kw
        Mapping of attribute name to temporary value. Each attribute must
        already exist on ``obj``.

    The originals are restored in a ``finally`` clause, so a failing test
    body cannot leak patched module globals into subsequent tests (the
    previous version skipped restoration when the body raised).
    """
    old = {prop_name: getattr(obj, prop_name) for prop_name in kw}
    for prop_name, new in kw.items():
        setattr(obj, prop_name, new)
    try:
        yield
    finally:
        for prop_name, value in old.items():
            setattr(obj, prop_name, value)
# Deliberately malformed REQUIREMENTS_BY_PIECE used to exercise every error
# path in gen_requirements.validate_requirements_by_piece; the expected
# problem strings are asserted in test_validate_requirements below.
PROBLEM_REQUIREMENTS = [
    ("extras-pre-core", ("", ["foo", 123])),  # entry before core
    (456, ("", ["foo", "bar"])),  # invalid extras name, deps should not be processed
    ("core", ("", ["foo"])),  # ordinary core entry.
    ("wrong-description-type", (None, ["foo"])),  # wrong description type
    ("bad-value", None),  # value field is not a 2-tuple
    ("bad-value-2", ("", ["foo"], 34)),  # value field is not a 2-tuple
    ("invalid", ("", ["qux"])),  # duplicate invalid entry, all items valid.
    ("extras-foo", ("", ["bar", "baz"])),  # ordinary extras entry.
    ("invalid", ("", ["baz", None, 123])),  # valid extra name, invalid deps.
    ("unsorted", ("", ["qux", "bar", "foo"])),  # deps out of order
    ("versioned_dep", ("", ["baz==1.2", "foo==^2.0", "buz<3", "bar>4"])),
    ("duplicate_dep", ("", ["buz", "buz", "foo"])),  # duplicate listed dependency
    ("dev", ("", ["baz", "qux"])),  # ordinary dev entry.
    ("extras-post-dev", ("", ["bar", "buzz"])),  # entry after dev
]
def test_validate_requirements():
    """validate_requirements_by_piece reports one problem string per defect."""
    # Non-list input is rejected outright.
    with patch(gen_requirements, REQUIREMENTS_BY_PIECE=None):
        assert gen_requirements.validate_requirements_by_piece() == [
            "must be list or tuple, see None"
        ]
    # Each malformed entry in PROBLEM_REQUIREMENTS maps to a specific message.
    with patch(gen_requirements, REQUIREMENTS_BY_PIECE=PROBLEM_REQUIREMENTS):
        problems = gen_requirements.validate_requirements_by_piece()
        assert problems == [
            'piece extras-pre-core: must list after "core" (core must be first)',
            "piece extras-pre-core: deps should be a list of strings, got ['foo', 123]",
            "piece 456: must be str",
            "piece wrong-description-type: description should be a string, got None",
            (
                'piece bad-value: should be formatted like ("bad-value", ("<requirements.txt '
                'comment>", ["dep1", "dep2", ...])). got: None'
            ),
            (
                'piece bad-value-2: should be formatted like ("bad-value-2", '
                '("<requirements.txt comment>", ["dep1", "dep2", ...])). got: (\'\', '
                "['foo'], 34)"
            ),
            "piece invalid: listed twice",
            "piece invalid: deps should be a list of strings, got ['baz', None, 123]",
            "piece unsorted: deps must be sorted. Correct order:\n ['bar', 'foo', 'qux']",
            "piece versioned_dep: deps must be sorted. Correct order:\n ['bar>4', 'baz==1.2', 'buz<3', 'foo==^2.0']",
            "piece versioned_dep: dependency baz==1.2 should not specify a version. Add it to CONSTRAINTS instead.",
            "piece versioned_dep: dependency foo==^2.0 should not specify a version. Add it to CONSTRAINTS instead.",
            "piece versioned_dep: dependency buz<3 should not specify a version. Add it to CONSTRAINTS instead.",
            "piece versioned_dep: dependency bar>4 should not specify a version. Add it to CONSTRAINTS instead.",
            "piece duplicate_dep: dependency buz listed twice",
            'piece extras-post-dev: must list before "dev" (dev must be last)',
            'pieces other than "core" and "dev" must appear in alphabetical order: '
            "['bad-value', 'bad-value-2', 'duplicate_dep', 'extras-foo', 'extras-post-dev', "
            "'extras-pre-core', 'invalid', 'invalid', 'unsorted', 'versioned_dep', "
            "'wrong-description-type']",
        ]
# Well-formed piece map used by test_validate_constraints and
# test_join_requirements (core first, extras alphabetical, dev last).
TEST_REQUIREMENTS_BY_PIECE = (
    ("core", ("core tvm requirements", ("bar", "foo", "non-constrained"))),
    ("extra-one", ("requirements for one feature", ("baz", "qux"))),
    ("extra-two", ("requirements for two feature", ("buz", "qux", "semver-minor", "semver-patch"))),
    ("dev", ("requirements for dev", ("buz", "oof", "rab"))),
)
def test_validate_constraints():
    """validate_constraints flags unknown pieces, duplicates and bad specifiers."""
    with patch(
        gen_requirements,
        REQUIREMENTS_BY_PIECE=TEST_REQUIREMENTS_BY_PIECE,
        CONSTRAINTS=(
            ("unlisted", "~=3"),
            ("double-specified", "<2"),
            (
                "double-specified",
                "==3",
            ),
            ("bad-constraint", "1.2.0"),
            ("bad-semver-constraint", "i don't match the regex :P"),
            ("alpha-semver-constraint", "^foo.bar.23"),
        ),
    ):
        problems = gen_requirements.validate_constraints()
        assert problems == [
            "unlisted: not specified in REQUIREMENTS_BY_PIECE",
            "double-specified: not specified in REQUIREMENTS_BY_PIECE",
            "double-specified: specified twice",
            "double-specified: not specified in REQUIREMENTS_BY_PIECE",
            "bad-constraint: not specified in REQUIREMENTS_BY_PIECE",
            'bad-constraint: constraint "1.2.0" does not look like a valid constraint',
            "bad-semver-constraint: not specified in REQUIREMENTS_BY_PIECE",
            'bad-semver-constraint: constraint "i don\'t match the regex :P" does not look like a valid constraint',
            "alpha-semver-constraint: not specified in REQUIREMENTS_BY_PIECE",
            "alpha-semver-constraint: invalid semver constraint ^foo.bar.23",
            "CONSTRAINTS entries should be in this sorted order: ['alpha-semver-constraint', 'bad-constraint', 'bad-semver-constraint', 'double-specified', 'double-specified', 'unlisted']",
        ]
# Constraint table paired with TEST_REQUIREMENTS_BY_PIECE; covers pinned (==),
# lower-bound (>), caret (^) and compatible-release (~=) specifiers.
TEST_CONSTRAINTS = (
    ("bar", "==1.0"),
    ("baz", ">2.3"),
    ("buz", "^1.3.0"),
    ("non-constrained", None),  # Support a comment.
    ("oof", "==0.3.4"),
    ("qux", "~=1.2.4"),
    ("semver-minor", "^0.2.2-patch2.post3+buildmeta"),  # Ensure prerelease and buildmeta preserved.
    ("semver-patch", "^0.0.2+bm"),  # Ensure postrelease preserved.
)
def test_join_requirements():
    """join_requirements merges constraints into each piece and adds all-prod."""
    with patch(
        gen_requirements,
        REQUIREMENTS_BY_PIECE=TEST_REQUIREMENTS_BY_PIECE,
        CONSTRAINTS=TEST_CONSTRAINTS,
    ):
        requirements = gen_requirements.join_requirements()
        # Caret constraints expand to >=lower,<upper bounds; "all-prod" is the
        # union of every piece except "dev".
        assert requirements == collections.OrderedDict(
            [
                ("core", ("core tvm requirements", ["bar==1.0", "foo", "non-constrained"])),
                ("extra-one", ("requirements for one feature", ["baz>2.3", "qux~=1.2.4"])),
                (
                    "extra-two",
                    (
                        "requirements for two feature",
                        [
                            "buz>=1.3.0,<2.0.0",
                            "qux~=1.2.4",
                            "semver-minor>=0.2.2-patch2.post3+buildmeta,<0.3.0",
                            "semver-patch>=0.0.2+bm,<0.0.3",
                        ],
                    ),
                ),
                ("dev", ("requirements for dev", ["buz>=1.3.0,<2.0.0", "oof==0.3.4", "rab"])),
                (
                    "all-prod",
                    (
                        "Combined dependencies for all TVM pieces, excluding dev",
                        [
                            "bar==1.0",
                            "baz>2.3",
                            "buz>=1.3.0,<2.0.0",
                            "foo",
                            "non-constrained",
                            "qux~=1.2.4",
                            "semver-minor>=0.2.2-patch2.post3+buildmeta,<0.3.0",
                            "semver-patch>=0.0.2+bm,<0.0.3",
                        ],
                    ),
                ),
            ]
        )
def test_semver():
    """Spot-check parse_semver on caret constraints, including malformed input."""
    issues = []
    # Major >= 1: the minor slot is the first one allowed to change.
    assert gen_requirements.parse_semver("C", "^1.2.0", issues) == (["1", "2", "0"], 0, 1)
    assert issues == []
    # Major 0: compatibility pivots on the minor/patch slots instead.
    assert gen_requirements.parse_semver("C", "^0.2.0", issues) == (["0", "2", "0"], 1, 2)
    assert issues == []
    assert gen_requirements.parse_semver("C", "^0.0.0", issues) == (["0", "0", "0"], 0, 0)
    assert issues == []
    # Non-numeric component: empty parse result plus a recorded problem.
    assert gen_requirements.parse_semver("C", "^0.a.0", issues) == ([], 0, 0)
    assert issues == ["C: invalid semver constraint ^0.a.0"]
if __name__ == "__main__":
    # Delegate to tvm.testing.main() so pytest discovery/flags work when run directly.
    tvm.testing.main()
| 9,412 | 41.400901 | 189 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_read_write_at.py | # or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks,not-callable
# Base fixture: 2048x2048 GEMM tiled onto CUDA blocks/vthreads/threads, with
# all operands still read from global memory. read_at/write_at tests stage its
# operands through shared memory step by step.
@T.prim_func
def cuda_matmul(a: T.handle, b: T.handle, c: T.handle) -> None:  # pylint: disable=undefined-loop-variable
    A = T.match_buffer(a, [2048, 2048], "float32")
    B = T.match_buffer(b, [2048, 2048], "float32")
    C = T.match_buffer(c, [2048, 2048], "float32")
    for by in T.thread_binding(0, 32, thread = "blockIdx.y"):
        for bx in T.thread_binding(0, 32, thread = "blockIdx.x"):
            for vy in T.thread_binding(0, 2, thread = "vthread.y"):
                for vx in T.thread_binding(0, 2, thread = "vthread.x"):
                    for ty in T.thread_binding(0, 8, thread = "threadIdx.y"):
                        for tx in T.thread_binding(0, 8, thread = "threadIdx.x"):
                            for k0 in T.serial(0, 256):
                                for k1 in T.unroll(0, 8):
                                    for _, i, j in T.grid(1, 4, 4):
                                        with T.block("C"):
                                            vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                            vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                            vk = T.axis.R(2048, k0 * 8 + k1)
                                            T.reads([C[vi, vj], A[vi, vk], B[vk, vj]])
                                            T.writes([C[vi, vj]])
                                            with T.init():
                                                C[vi, vj] = 0.0
                                            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# Expected result of read_at(k0, C, 1, "shared") on cuda_matmul: a per-(by, k0)
# "A_shared" auto-copy block stages a 64x8 tile of A into shared memory before
# the inner reduction consumes it.
@T.prim_func
def cuda_matmul_read_at_a(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [2048, 2048], dtype="float32")
    B = T.match_buffer(b, [2048, 2048], dtype="float32")
    C = T.match_buffer(c, [2048, 2048], dtype="float32")
    A_shared = T.alloc_buffer([2048, 2048], dtype="float32", scope="shared")
    for by in T.thread_binding(0, 32, thread="blockIdx.y"):
        for bx in T.thread_binding(0, 32, thread="blockIdx.x"):
            for vy in T.thread_binding(0, 2, thread="vthread.y"):
                for vx in T.thread_binding(0, 2, thread="vthread.x"):
                    for ty in T.thread_binding(0, 8, thread="threadIdx.y"):
                        for tx in T.thread_binding(0, 8, thread="threadIdx.x"):
                            for k0 in T.serial(0, 256):
                                with T.block("A_shared"):
                                    v0 = T.axis.S(32, by)
                                    v1 = T.axis.S(256, k0)
                                    T.reads([A[v0 * 64 : v0 * 64 + 64, v1 * 8 : v1 * 8 + 8]])
                                    T.writes([A_shared[v0 * 64 : v0 * 64 + 64, v1 * 8 : v1 * 8 + 8]])
                                    T.block_attr({"auto_copy":1})
                                    for ax0, ax1 in T.grid(64, 8):
                                        A_shared[v0 * 64 + ax0, v1 * 8 + ax1] = A[v0 * 64 + ax0, v1 * 8 + ax1]
                                for k1 in T.unroll(0, 8):
                                    for v_, i, j in T.grid(1, 4, 4):
                                        with T.block("C"):
                                            vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                            vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                            vk = T.axis.R(2048, k0 * 8 + k1)
                                            T.reads([C[vi, vj], A_shared[vi, vk], B[vk, vj]])
                                            T.writes([C[vi, vj]])
                                            with T.init():
                                                C[vi, vj] = T.float32(0)
                                            C[vi, vj] = C[vi, vj] + A_shared[vi, vk] * B[vk, vj]
# Expected result of a further read_at(k0, C, 2, "shared"): both A and B are
# staged into shared memory ("A_shared" and "B_shared" auto-copy blocks) before
# the reduction.
@T.prim_func
def cuda_matmul_read_at_ab(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [2048, 2048], dtype="float32")
    B = T.match_buffer(b, [2048, 2048], dtype="float32")
    C = T.match_buffer(c, [2048, 2048], dtype="float32")
    A_shared = T.alloc_buffer([2048, 2048], dtype="float32", scope="shared")
    B_shared = T.alloc_buffer([2048, 2048], dtype="float32", scope="shared")
    for by in T.thread_binding(0, 32, thread="blockIdx.y"):
        for bx in T.thread_binding(0, 32, thread="blockIdx.x"):
            for vy in T.thread_binding(0, 2, thread="vthread.y"):
                for vx in T.thread_binding(0, 2, thread="vthread.x"):
                    for ty in T.thread_binding(0, 8, thread="threadIdx.y"):
                        for tx in T.thread_binding(0, 8, thread="threadIdx.x"):
                            for k0 in T.serial(0, 256):
                                with T.block("A_shared"):
                                    v0 = T.axis.S(32, by)
                                    v1 = T.axis.S(256, k0)
                                    T.reads([A[v0 * 64 : v0 * 64 + 64, v1 * 8 : v1 * 8 + 8]])
                                    T.writes([A_shared[v0 * 64 : v0 * 64 + 64, v1 * 8 : v1 * 8 + 8]])
                                    T.block_attr({"auto_copy":1})
                                    for ax0, ax1 in T.grid(64, 8):
                                        A_shared[v0 * 64 + ax0, v1 * 8 + ax1] = A[v0 * 64 + ax0, v1 * 8 + ax1]
                                with T.block("B_shared"):
                                    v0 = T.axis.S(256, k0)
                                    v1 = T.axis.S(32, bx)
                                    T.reads([B[v0 * 8 : v0 * 8 + 8, v1 * 64 : v1 * 64 + 64]])
                                    T.writes([B_shared[v0 * 8 : v0 * 8 + 8, v1 * 64 : v1 * 64 + 64]])
                                    T.block_attr({"auto_copy":1})
                                    for ax0, ax1 in T.grid(8, 64):
                                        B_shared[v0 * 8 + ax0, v1 * 64 + ax1] = B[v0 * 8 + ax0, v1 * 64 + ax1]
                                for k1 in T.unroll(0, 8):
                                    for v_, i, j in T.grid(1, 4, 4):
                                        with T.block("C"):
                                            vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                            vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                            vk = T.axis.R(2048, k0 * 8 + k1)
                                            T.reads([C[vi, vj], A_shared[vi, vk], B_shared[vk, vj]])
                                            T.writes([C[vi, vj]])
                                            with T.init():
                                                C[vi, vj] = T.float32(0)
                                            C[vi, vj] = C[vi, vj] + A_shared[vi, vk] * B_shared[vk, vj]
# Expected result of write_at(tx, C, 0, "shared"): the accumulator is kept in
# "C_shared" and copied back to global C in a trailing "C_shared" auto-copy
# block once per (by, bx) tile.
@T.prim_func
def cuda_matmul_write_at_c(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [2048, 2048], dtype="float32")
    B = T.match_buffer(b, [2048, 2048], dtype="float32")
    C = T.match_buffer(c, [2048, 2048], dtype="float32")
    A_shared = T.alloc_buffer([2048, 2048], dtype="float32", scope="shared")
    B_shared = T.alloc_buffer([2048, 2048], dtype="float32", scope="shared")
    C_shared = T.alloc_buffer([2048, 2048], dtype="float32", scope="shared")
    for by in T.thread_binding(0, 32, thread="blockIdx.y"):
        for bx in T.thread_binding(0, 32, thread="blockIdx.x"):
            for vy in T.thread_binding(0, 2, thread="vthread.y"):
                for vx in T.thread_binding(0, 2, thread="vthread.x"):
                    for ty in T.thread_binding(0, 8, thread="threadIdx.y"):
                        for tx in T.thread_binding(0, 8, thread="threadIdx.x"):
                            for k0 in T.serial(0, 256):
                                with T.block("A_shared"):
                                    v0 = T.axis.S(32, by)
                                    v1 = T.axis.S(256, k0)
                                    T.reads([A[v0 * 64 : v0 * 64 + 64, v1 * 8 : v1 * 8 + 8]])
                                    T.writes([A_shared[v0 * 64 : v0 * 64 + 64, v1 * 8 : v1 * 8 + 8]])
                                    T.block_attr({"auto_copy":1})
                                    for ax0, ax1 in T.grid(64, 8):
                                        A_shared[v0 * 64 + ax0, v1 * 8 + ax1] = A[v0 * 64 + ax0, v1 * 8 + ax1]
                                with T.block("B_shared"):
                                    v0 = T.axis.S(256, k0)
                                    v1 = T.axis.S(32, bx)
                                    T.reads([B[v0 * 8 : v0 * 8 + 8, v1 * 64 : v1 * 64 + 64]])
                                    T.writes([B_shared[v0 * 8 : v0 * 8 + 8, v1 * 64 : v1 * 64 + 64]])
                                    T.block_attr({"auto_copy":1})
                                    for ax0, ax1 in T.grid(8, 64):
                                        B_shared[v0 * 8 + ax0, v1 * 64 + ax1] = B[v0 * 8 + ax0, v1 * 64 + ax1]
                                for k1 in T.unroll(0, 8):
                                    for v_, i, j in T.grid(1, 4, 4):
                                        with T.block("C"):
                                            vi = T.axis.S(2048, by * 64 + vy * 32 + ty * 4 + i)
                                            vj = T.axis.S(2048, bx * 64 + vx * 32 + tx * 4 + j)
                                            vk = T.axis.R(2048, k0 * 8 + k1)
                                            T.reads([C_shared[vi, vj], A_shared[vi, vk], B_shared[vk, vj]])
                                            T.writes([C_shared[vi, vj]])
                                            with T.init():
                                                C_shared[vi, vj] = T.float32(0)
                                            C_shared[vi, vj] = C_shared[vi, vj] + A_shared[vi, vk] * B_shared[vk, vj]
                            with T.block("C_shared"):
                                v0 = T.axis.S(32, by)
                                v1 = T.axis.S(32, bx)
                                T.reads([C_shared[v0 * 64 : v0 * 64 + 64, v1 * 64 : v1 * 64 + 64]])
                                T.writes([C[v0 * 64 : v0 * 64 + 64, v1 * 64 : v1 * 64 + 64]])
                                T.block_attr({"auto_copy":1})
                                for ax0, ax1 in T.grid(64, 64):
                                    C[v0 * 64 + ax0, v1 * 64 + ax1] = C_shared[v0 * 64 + ax0, v1 * 64 + ax1]
# pylint: enable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks,not-callable
# fmt: on
def test_read_at_global_to_shared_a():
    """Stage operand A (read index 1 of block "C") into shared memory at loop k0."""
    sch = tir.Schedule(cuda_matmul, debug_mask="all")
    gemm = sch.get_block("C")
    # Loop order is by, bx, vy, vx, ty, tx, k0, k1, v_, i, j; k0 is index 6.
    k0 = sch.get_loops(gemm)[6]
    sch.read_at(k0, gemm, 1, "shared")
    tvm.ir.assert_structural_equal(sch.mod["main"], cuda_matmul_read_at_a)
    verify_trace_roundtrip(sch, cuda_matmul)
def test_read_at_global_to_shared_ab():
    """Additionally stage operand B (read index 2) into shared memory at loop k0."""
    sch = tir.Schedule(cuda_matmul_read_at_a, debug_mask="all")
    gemm = sch.get_block("C")
    # Loop order is by, bx, vy, vx, ty, tx, k0, k1, v_, i, j; k0 is index 6.
    k0 = sch.get_loops(gemm)[6]
    sch.read_at(k0, gemm, 2, "shared")
    tvm.ir.assert_structural_equal(sch.mod["main"], cuda_matmul_read_at_ab)
    verify_trace_roundtrip(sch, cuda_matmul_read_at_a)
def test_read_at_local_to_shared_c():
    """Stage the C output through shared memory at loop tx.

    NOTE(review): despite the "read_at" in the test name, this case
    exercises ``write_at`` on write index 0 and compares against
    ``cuda_matmul_write_at_c`` — presumably intentional; confirm naming.
    """
    sch = tir.Schedule(cuda_matmul_read_at_ab, debug_mask="all")
    block = sch.get_block("C")
    # pylint: disable=invalid-name
    _by, _bx, _vy, _vx, _ty, tx, _k0, _k1, _, _i, _j = sch.get_loops(block)
    # pylint: enable=invalid-name
    sch.write_at(tx, block, 0, "shared")
    tvm.ir.assert_structural_equal(sch.mod["main"], cuda_matmul_write_at_c)
    verify_trace_roundtrip(sch, cuda_matmul_read_at_ab)
if __name__ == "__main__":
    # Dispatch to TVM's pytest-based runner when executed directly.
    tvm.testing.main()
# ---- dataset metadata (not Python; commented out): | 13,158 | 58.274775 | 150 | py |
# ---- file boundary: tvm-main/tests/python/unittest/test_tir_schedule_cache_index.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable
########## Function before schedule ##########
# 2x nearest-style upsampling from (1, 3, 40, 40) to (1, 3, 80, 80).  The
# gather indices (vi // 4 + vj // 4 and vj // 2) depend only on loop
# variables, which makes this a minimal workload for cache_index.
@T.prim_func
def resize(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (1, 3, 40, 40))
    B = T.match_buffer(b, (1, 3, 80, 80))
    for i0, i1, i2, i3 in T.grid(1, 3, 80, 80):
        with T.block("A"):
            n, c, vi, vj = T.axis.remap("SSSS", [i0, i1, i2, i3])
            B[n, c, vi, vj] = A[n, c, vi // 4 + vj // 4, vj // 2]
# Expected result of cache_index on `resize`: each distinct index
# subexpression is precomputed once into its own index buffer, then the
# consumer block gathers through those buffers.
@T.prim_func
def resize_cache_index(
    A: T.Buffer((1, 3, 40, 40), "float32"), B: T.Buffer((1, 3, 80, 80), "float32")
) -> None:
    index_var_0 = T.alloc_buffer([80, 80], dtype="int32", strides=[1])
    index_var_1 = T.alloc_buffer([80], dtype="int32", strides=[1])
    for ax0, ax1 in T.grid(80, 80):
        with T.block("index_0"):
            v0, v1 = T.axis.remap("SS", [ax0, ax1])
            T.reads()
            T.writes(index_var_0[v0, v1])
            # Row index depends on both output coordinates -> 2-D cache.
            index_var_0[v0, v1] = v0 // 4 + v1 // 4
    for ax0 in T.serial(80):
        with T.block("index_1"):
            v0 = T.axis.spatial(80, ax0)
            T.reads()
            T.writes(index_var_1[v0])
            # Column index depends only on vj -> 1-D cache.
            index_var_1[v0] = v0 // 2
    for i0, i1, i2, i3 in T.grid(1, 3, 80, 80):
        with T.block("A"):
            n, c, vi, vj = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(A[n, c, vi // 4 + vj // 4, vj // 2])
            T.writes(B[n, c, vi, vj])
            B[n, c, vi, vj] = A[n, c, index_var_0[vi, vj], index_var_1[vj]]
# Half-pixel bilinear 2x resize written as one flat expression.  The source
# coordinate floors and the lerp weights are recomputed many times inside a
# single statement, which is what cache_index is expected to hoist.
# Formatting below is compacted relative to the original dump; the token
# stream of the expression is unchanged.
@T.prim_func
def bilinear_resize(
    x: T.Buffer((1, 3, 40, 40), "float16"), resize: T.Buffer((1, 3, 80, 80), "float16")
):
    for i0, i1, i2, i3 in T.grid(1, 3, 80, 80):
        with T.block("resize"):
            i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(x[i0_1, i1_1, 0:40, 0:40])
            T.writes(resize[i0_1, i1_1, i2_1, i3_1])
            resize[i0_1, i1_1, i2_1, i3_1] = T.Cast(
                "float16",
                (
                    # top-left tap * (1 - column fraction)
                    T.Cast(
                        "float32",
                        x[
                            i0_1,
                            i1_1,
                            T.max(T.min(T.Cast("int32", T.floor((T.Cast("float32", i2_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32")), 39), 0),
                            T.max(T.min(T.Cast("int32", T.floor((T.Cast("float32", i3_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32")), 39), 0),
                        ],
                    )
                    * (T.float32(1) - ((T.Cast("float32", i3_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5) - T.Cast("float32", T.Cast("int32", T.floor((T.Cast("float32", i3_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32")))))
                    # top-right tap * column fraction
                    + T.Cast(
                        "float32",
                        x[
                            i0_1,
                            i1_1,
                            T.max(T.min(T.Cast("int32", T.floor((T.Cast("float32", i2_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32")), 39), 0),
                            T.max(T.min(T.Cast("int32", T.floor((T.Cast("float32", i3_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32")) + 1, 39), 0),
                        ],
                    )
                    * ((T.Cast("float32", i3_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5) - T.Cast("float32", T.Cast("int32", T.floor((T.Cast("float32", i3_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32"))))
                )
                # weight by (1 - row fraction)
                * (T.float32(1) - ((T.Cast("float32", i2_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5) - T.Cast("float32", T.Cast("int32", T.floor((T.Cast("float32", i2_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32")))))
                + (
                    # bottom-left tap * (1 - column fraction)
                    T.Cast(
                        "float32",
                        x[
                            i0_1,
                            i1_1,
                            T.max(T.min(T.Cast("int32", T.floor((T.Cast("float32", i2_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32")) + 1, 39), 0),
                            T.max(T.min(T.Cast("int32", T.floor((T.Cast("float32", i3_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32")), 39), 0),
                        ],
                    )
                    * (T.float32(1) - ((T.Cast("float32", i3_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5) - T.Cast("float32", T.Cast("int32", T.floor((T.Cast("float32", i3_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32")))))
                    # bottom-right tap * column fraction
                    + T.Cast(
                        "float32",
                        x[
                            i0_1,
                            i1_1,
                            T.max(T.min(T.Cast("int32", T.floor((T.Cast("float32", i2_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32")) + 1, 39), 0),
                            T.max(T.min(T.Cast("int32", T.floor((T.Cast("float32", i3_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32")) + 1, 39), 0),
                        ],
                    )
                    * ((T.Cast("float32", i3_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5) - T.Cast("float32", T.Cast("int32", T.floor((T.Cast("float32", i3_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32"))))
                )
                # weight by row fraction
                * ((T.Cast("float32", i2_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5) - T.Cast("float32", T.Cast("int32", T.floor((T.Cast("float32", i2_1) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32")))),
            )
# Expected result of cache_index(..., cse_thresh=4) on `bilinear_resize`:
# the column lerp weight (index_var_0) and the two coordinate floors
# (index_var_1 / index_var_2) are precomputed into 1-D index buffers;
# less frequently repeated subexpressions stay inline.
@T.prim_func
def cached_bilinear_resize(
    x: T.Buffer((1, 3, 40, 40), "float16"), resize: T.Buffer((1, 3, 80, 80), "float16")
):
    index_var_0 = T.alloc_buffer([80], dtype="float32", strides=[1])
    index_var_1 = T.alloc_buffer([80], dtype="int32", strides=[1])
    index_var_2 = T.alloc_buffer([80], dtype="int32", strides=[1])
    for ax0 in T.serial(80):
        with T.block("index_0"):
            v0 = T.axis.spatial(80, ax0)
            T.reads()
            T.writes(index_var_0[v0])
            # Fractional lerp weight along the cached axis.
            index_var_0[v0] = (
                (T.Cast("float32", v0) + T.float32(0.5)) * T.float32(0.5)
                - T.float32(0.5)
                - T.Cast("float32", T.Cast("int32", T.floor((T.Cast("float32", v0) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32")))
            )
    for ax0 in T.serial(80):
        with T.block("index_1"):
            v0 = T.axis.spatial(80, ax0)
            T.reads()
            T.writes(index_var_1[v0])
            # Floor of the half-pixel source coordinate (used as row index).
            index_var_1[v0] = T.Cast("int32", T.floor((T.Cast("float32", v0) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32"))
    for ax0 in T.serial(80):
        with T.block("index_2"):
            v0 = T.axis.spatial(80, ax0)
            T.reads()
            T.writes(index_var_2[v0])
            # Same floor expression, cached separately for the column index.
            index_var_2[v0] = T.Cast("int32", T.floor((T.Cast("float32", v0) + T.float32(0.5)) * T.float32(0.5) - T.float32(0.5), dtype="float32"))
    for i0, i1, i2, i3 in T.grid(1, 3, 80, 80):
        with T.block("resize"):
            i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(x[i0_1, i1_1, 0:40, 0:40])
            T.writes(resize[i0_1, i1_1, i2_1, i3_1])
            resize[i0_1, i1_1, i2_1, i3_1] = T.Cast(
                "float16",
                (
                    T.Cast(
                        "float32",
                        x[
                            i0_1,
                            i1_1,
                            T.max(T.min(index_var_1[i2_1], 39), 0),
                            T.max(T.min(index_var_2[i3_1], 39), 0),
                        ],
                    )
                    * (T.float32(1) - index_var_0[i3_1])
                    + T.Cast(
                        "float32",
                        x[
                            i0_1,
                            i1_1,
                            T.max(T.min(index_var_1[i2_1], 39), 0),
                            T.max(T.min(index_var_2[i3_1] + 1, 39), 0),
                        ],
                    )
                    * index_var_0[i3_1]
                )
                * (
                    T.float32(1)
                    - (
                        (T.Cast("float32", i2_1) + T.float32(0.5)) * T.float32(0.5)
                        - T.float32(0.5)
                        - T.Cast("float32", index_var_1[i2_1])
                    )
                )
                + (
                    T.Cast(
                        "float32",
                        x[
                            i0_1,
                            i1_1,
                            T.max(T.min(index_var_1[i2_1] + 1, 39), 0),
                            T.max(T.min(index_var_2[i3_1], 39), 0),
                        ],
                    )
                    * (T.float32(1) - index_var_0[i3_1])
                    + T.Cast(
                        "float32",
                        x[
                            i0_1,
                            i1_1,
                            T.max(T.min(index_var_1[i2_1] + 1, 39), 0),
                            T.max(T.min(index_var_2[i3_1] + 1, 39), 0),
                        ],
                    )
                    * index_var_0[i3_1]
                )
                * (
                    (T.Cast("float32", i2_1) + T.float32(0.5)) * T.float32(0.5)
                    - T.float32(0.5)
                    - T.Cast("float32", index_var_1[i2_1])
                ),
            )
def test_basic_cache_index():
    """cache_index hoists the gather indices of block "A" into index buffers."""
    sch = tvm.tir.Schedule(resize, debug_mask="all")
    block = sch.get_block("A")
    sch.cache_index(block, "global")
    # Pass (actual, expected) in the same order as the other tests in this
    # file so mismatch diagnostics are reported consistently.
    tvm.ir.assert_structural_equal(sch.mod["main"], resize_cache_index)
    verify_trace_roundtrip(sch=sch, mod=resize)
def test_resize_bilinear_cache_index():
    """cache_index with a CSE threshold of 4 on the bilinear-resize workload."""
    sch = tvm.tir.Schedule(bilinear_resize, debug_mask="all")
    resize_block = sch.get_block("resize")
    sch.cache_index(resize_block, "global", 4)
    tvm.ir.assert_structural_equal(sch.mod["main"], cached_bilinear_resize)
    verify_trace_roundtrip(sch=sch, mod=bilinear_resize)
if __name__ == "__main__":
    # Dispatch to TVM's pytest-based runner when executed directly.
    tvm.testing.main()
# ---- dataset metadata (not Python; commented out): | 19,016 | 39.205074 | 99 | py |
# ---- file boundary: tvm-main/tests/python/unittest/test_tir_transform_make_packed_api.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
import tvm.testing
from tvm import te, tir
from tvm.script import tir as T, ir as I
from tvm.driver.build_module import schedule_to_module
def test_makeapi():
    """Not yet working, mock design"""
    n = te.size_var("n")
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C")
    s = te.create_schedule(C.op)
    mod = schedule_to_module(s, [n, A, B, C])
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    mod = tvm.tir.transform.Apply(
        lambda f: f.with_attr(
            {
                "target": tvm.target.Target("llvm", host="llvm"),
                "global_symbol": "main",
            }
        )
    )(mod)
    # (removed an unused `before = mod` binding)
    after = tvm.tir.transform.MakePackedAPI()(mod)
    f = after["main"]
    # Packed signature: args, type_codes, num_args, ret_value, ret_tcode, resource_handle.
    assert len(f.params) == 6
def _find_assignment(stmt, var_name):
    """Walk down the ``.body`` spine of *stmt* and return the first
    ``tvm.tir.LetStmt`` that binds a variable named *var_name*."""
    node = stmt
    while True:
        # Descend to the next LetStmt on the spine.
        while not isinstance(node, tvm.tir.LetStmt):
            node = node.body
        if node.var.name == var_name:
            return node
        # Not the one we want; keep searching below it.
        node = node.body
def _find_next(stmt, type):
    """Depth-first search from *stmt*; return the first node that is an
    instance of *type*, or None if no such node exists."""
    pending = [stmt]
    while pending:
        node = pending.pop()
        if isinstance(node, type):
            return node
        if isinstance(node, tvm.tir.SeqStmt):
            # Reverse so the stack pops children in original order.
            pending.extend(reversed(node))
        else:
            pending.append(node.body)
    return None
def _find_compute_scope(func):
    """Return the AttrStmt with ``attr_key == "compute_scope"`` inside the
    body of *func*, or None if the function has no compute scope."""
    found = [None]  # mutable cell so the visitor can record the match

    def _visit(node):
        if isinstance(node, tir.AttrStmt) and node.attr_key == "compute_scope":
            found[0] = node

    tir.stmt_functor.post_order_visit(func.body, _visit)
    return found[0]
def test_variable_passed_from_args():
    """A non-context handle parameter must be unpacked from the packed
    ``args`` array, and the unpacked vars must flow into the call site."""
    ib = tvm.tir.ir_builder.create()

    input_buffer = tvm.tir.decl_buffer(name="input_buffer", shape=[1])
    not_device_context = tvm.tir.Var("not_device_context", dtype="handle")

    ib.emit(
        tvm.tir.call_extern("float32", "some_external_call", input_buffer.data, not_device_context),
    )
    stmt = ib.get()

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([input_buffer, not_device_context], stmt))
    mod = tvm.tir.transform.Apply(
        lambda f: f.with_attr("target", tvm.target.Target("llvm", host="llvm"))
    )(mod)
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
    func = tvm.tir.transform.MakePackedAPI()(mod)["main"]
    num_args = func.params[2]

    # num_args assertion: both original parameters come through `args`.
    assert func.body.condition.a == num_args
    assert func.body.condition.b == 2

    # Arguments unpacking: first tvm_struct_get pulls the DLTensor handle
    # out of `args`, the second pulls the data pointer out of the DLTensor.
    assignment = _find_assignment(func.body, "input_buffer")
    assert str(assignment.value) == 'T.tvm_struct_get(args, 0, 12, "handle")'

    assignment = _find_assignment(assignment.body, "input_buffer")
    assert str(assignment.value) == 'T.tvm_struct_get(input_buffer, 0, 1, "handle")'
    unpacked_input_buffer = assignment.var

    assignment = _find_assignment(func.body, "not_device_context")
    assert str(assignment.value) == 'T.tvm_struct_get(args, 1, 12, "handle")'
    unpacked_not_device_context = assignment.var

    # The extern call must reference the unpacked variables, not the originals.
    seq_stmt = _find_next(assignment, tvm.tir.SeqStmt)
    call = _find_next(seq_stmt[1], tvm.tir.Evaluate)
    call_extern = call.value
    assert call_extern.args[1] == unpacked_input_buffer
    assert call_extern.args[2] == unpacked_not_device_context
def test_device_api_context_implicit_resource_handle():
    """A parameter named ``device_api_context`` is supplied through the
    packed call's resource_handle parameter rather than through ``args``."""
    ib = tvm.tir.ir_builder.create()

    input_buffer = tvm.tir.decl_buffer(name="input_buffer", shape=[1])
    device_context = tvm.tir.Var("device_api_context", dtype="handle")

    ib.emit(
        tvm.tir.call_extern("float32", "some_external_call", input_buffer.data, device_context),
    )
    stmt = ib.get()

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([input_buffer, device_context], stmt))
    mod = tvm.tir.transform.Apply(
        lambda f: f.with_attr("target", tvm.target.Target("llvm", host="llvm"))
    )(mod)
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
    func = tvm.tir.transform.MakePackedAPI()(mod)["main"]
    num_args = func.params[2]
    # params[5] is the resource_handle slot of the packed signature.
    device_context_in_resource_handle = func.params[5]

    # num_args assertion: only the buffer is counted, not the context.
    assert func.body.condition.a == num_args
    assert func.body.condition.b == 1

    # Arguments unpacking
    assignment = _find_assignment(func.body, "input_buffer")
    assert str(assignment.value) == 'T.tvm_struct_get(args, 0, 12, "handle")'

    assignment = _find_assignment(assignment.body, "input_buffer")
    assert str(assignment.value) == 'T.tvm_struct_get(input_buffer, 0, 1, "handle")'
    unpacked_input_buffer = assignment.var

    seq_stmt = _find_next(assignment, tvm.tir.SeqStmt)
    call = _find_next(seq_stmt[1], tvm.tir.Evaluate)
    call_extern = call.value
    assert call_extern.args[1] == unpacked_input_buffer
    # The context argument comes straight from the resource_handle parameter.
    assert call_extern.args[2] == device_context_in_resource_handle
@pytest.mark.parametrize("use_global_symbol", [True, False])
def test_no_op_when_global_symbol_is_absent(use_global_symbol):
    """MakePackedAPI only rewrites functions that carry a global_symbol;
    without one the function must pass through structurally unchanged."""
    func_attr = {"target": tvm.target.Target("llvm", host="llvm")}
    if use_global_symbol:
        func_attr["global_symbol"] = "main"

    @T.prim_func
    def before():
        T.func_attr(func_attr)
        T.evaluate(0)

    after = tvm.tir.transform.MakePackedAPI()(tvm.IRModule.from_expr(before))["main"]
    if use_global_symbol:
        # Rewritten to the 6-parameter packed signature.
        assert len(after.params) == 6
    else:
        tvm.ir.assert_structural_equal(before, after)
def test_target_host_removed():
    """After MakePackedAPI, host-side target should be the host

    MakePackedAPI is the last transform that requires both the device
    and the host. After MakePackedAPI, the target attribute should
    only contain the host-side target.
    """

    host = tvm.target.Target("llvm")

    @I.ir_module
    class before:
        @T.prim_func
        def main(A: T.Buffer(1, "float32")):
            T.func_attr({"global_symbol": "main", "target": T.target("cuda", host=host)})
            T.evaluate(0)

    after = tvm.tir.transform.MakePackedAPI()(before)
    target_attr = after["main"].attrs["target"]
    # Only the host half of the original cuda/llvm pair should survive.
    assert str(host) == str(target_attr)
def test_internal_subroutine_call():
    """Internal subroutines should not use the PackedFunc API

    A subroutine without the "global_symbol" attribute is an internal
    subroutine, and is not directly exposed to a user of the generated
    `runtime.Module`.  Therefore, it doesn't need to follow the
    PackedFunc API.
    """

    @I.ir_module
    class before:
        @T.prim_func
        def main(A: T.Buffer(1, "float32")):
            T.func_attr({"global_symbol": "main", "target": T.target("llvm", host="llvm")})
            before.subroutine(A.data)

        @T.prim_func
        def subroutine(A_data: T.handle("float32")):
            T.func_attr({"target": T.target("llvm")})
            T.evaluate(A_data)

    after = tvm.tir.transform.MakePackedAPI()(before)
    # The internal subroutine itself must be left untouched by the transform.
    tvm.ir.assert_structural_equal(before["subroutine"], after["subroutine"])

    compute_scope = _find_compute_scope(after["main"])
    subroutine_call_op = compute_scope.body.value.op
    # Fixed typo in the failure message ("GLobalVar" -> "GlobalVar").
    assert isinstance(subroutine_call_op, tvm.ir.GlobalVar), (
        f"The main function's CallNode should use the subroutine's GlobalVar as the operation, "
        f"but instead has an operation of type {subroutine_call_op}"
    )
def test_subroutine_call_to_externally_visible_subroutine():
    """Externally-visible subroutines should use the PackedFunc API

    Because the subroutine may be called directly by a user, it must
    use the PackedFunc API.  Its signature should be updated to the
    PackedFunc signature, and call sites should be updated to use
    `T.tvm_call_cpacked`.
    """

    @I.ir_module
    class before:
        @T.prim_func
        def main(A: T.Buffer(1, "float32")):
            T.func_attr({"global_symbol": "main", "target": T.target("llvm", host="llvm")})
            before.subroutine(A.data)

        @T.prim_func
        def subroutine(A_data: T.handle("float32")):
            T.func_attr({"global_symbol": "subroutine", "target": T.target("llvm", host="llvm")})
            T.evaluate(A_data)

    after = tvm.tir.transform.MakePackedAPI()(before)

    # Both functions carry a global_symbol, so both gain a compute scope.
    main_compute_scope = _find_compute_scope(after["main"])
    assert main_compute_scope is not None
    subroutine_compute_scope = _find_compute_scope(after["subroutine"])
    assert subroutine_compute_scope is not None

    # The cross-function call must be lowered to the cpacked builtin.
    subroutine_call_op = main_compute_scope.body.value.op
    assert (
        isinstance(subroutine_call_op, tvm.ir.Op)
        and subroutine_call_op.name == "tir.tvm_call_cpacked"
    ), (
        f"The main function's CallNode should be lowered to the builtin 'tir.tvm_call_cpacked', "
        f"but instead has an operation of type {subroutine_call_op}"
    )
if __name__ == "__main__":
    # Run every test in this file (the old entry point invoked only
    # test_makeapi); matches the convention used by sibling test files.
    tvm.testing.main()
# ---- dataset metadata (not Python; commented out): | 9,699 | 32.797909 | 100 | py |
# ---- file boundary: tvm-main/tests/python/unittest/test_meta_schedule_space_generator.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test Meta Schedule SpaceGenerator """
# pylint: disable=missing-function-docstring
import math
import pytest
import tvm
import tvm.testing
from tvm._ffi.base import TVMError
from tvm.meta_schedule.space_generator import (
PySpaceGenerator,
ScheduleFn,
SpaceGeneratorUnion,
)
from tvm.meta_schedule.tune_context import TuneContext
from tvm.meta_schedule.utils import derived_object
from tvm.script import tir as T
from tvm.tir.schedule import Schedule
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
# fmt: off
# A 1024x1024x1024 fp32 GEMM used as the workload for all space-generator
# tests below; block "matmul" is what schedule_matmul tiles.
@tvm.script.ir_module
class Matmul:
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.match_buffer(b, (1024, 1024), "float32")
        C = T.match_buffer(c, (1024, 1024), "float32")
        for i, j, k in T.grid(1024, 1024, 1024):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# fmt: on
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
def schedule_matmul(sch: Schedule):
    """Apply a fixed SSRSRS tiling to the "matmul" block of *sch*."""
    block = sch.get_block("matmul")
    loop_i, loop_j, loop_k = sch.get_loops(block=block)
    # TODO(@zxybazh): Change to `sample_perfect_tile` after upstreaming
    i0, i1, i2, i3 = sch.split(loop=loop_i, factors=[2, 4, 64, 2])
    j0, j1, j2, j3 = sch.split(loop=loop_j, factors=[4, 64, 2, 2])
    k0, k1 = sch.split(loop=loop_k, factors=[32, 32])
    sch.reorder(i0, j0, i1, j1, k0, i2, j2, k1, i3, j3)
def _check_correct(schedule: Schedule):
    """Every split decision recorded in the trace must tile a 1024 extent exactly."""
    decisions = schedule.trace.decisions
    for inst in decisions:
        assert math.prod(decisions[inst]) == 1024
def test_meta_schedule_space_generator_schedule_fn():
    """ScheduleFn wraps a python schedule function into a single design space."""
    space_gen = ScheduleFn(sch_fn=schedule_matmul)
    spaces = space_gen.generate_design_space(Matmul)
    assert len(spaces) == 1
    (schedule,) = spaces
    _check_correct(schedule)
def test_meta_schedule_design_space_generator_union():
    """A union of two generators yields the design spaces of both members."""
    space_gen = ScheduleFn(sch_fn=schedule_matmul)
    union = SpaceGeneratorUnion([space_gen, space_gen])
    spaces = union.generate_design_space(Matmul)
    assert len(spaces) == 2
    for schedule in spaces:
        _check_correct(schedule)
def test_meta_schedule_design_space_generator_NIE():
    """A PySpaceGenerator subclass that does not implement the required
    hooks must raise TVMError when initialized with a TuneContext."""

    @derived_object
    class TestPySpaceGenerator(PySpaceGenerator):
        def __init__(self):
            super().__init__()
            self.sch_rules = []
            self.postprocs = []
            self.mutator_probs = {}

    with pytest.raises(
        TVMError, match="PySpaceGenerator's InitializeWithTuneContext method not implemented!"
    ):
        generator = TestPySpaceGenerator()
        generator._initialize_with_tune_context(TuneContext())
if __name__ == "__main__":
    # Dispatch to TVM's pytest-based runner when executed directly.
    tvm.testing.main()
# ---- dataset metadata (not Python; commented out): | 3,914 | 34.27027 | 94 | py |
# ---- file boundary: tvm-main/tests/python/unittest/test_tir_analysis_calculate_workspace.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest

import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
# fmt: off
# Fused depthwise-conv2d kernel whose two scratch buffers (PaddedInput_22:
# 131072 x int16 = 262144 B, DepthwiseConv2d_9: 100352 x int32 = 401408 B)
# are declared at function scope and live for the whole body, so
# calculate_workspace_bytes must count both (662144 + 401408 = 663552 at
# alignment 1, matching test_global_allocates).
@T.prim_func
def primfunc_global_allocates(placeholder_144: T.handle, placeholder_145: T.handle, placeholder_146: T.handle, T_cast_48: T.handle) -> None:
    # function attr dict
    T.func_attr({"global_symbol": "fused_nn_conv2d_add_cast_fixed_point_multiply_clip_cast_cast_13", "tir.noalias": True})
    placeholder_147 = T.match_buffer(placeholder_144, [100352], dtype="int16", elem_offset=0, align=64, offset_factor=1)
    placeholder_148 = T.match_buffer(placeholder_145, [4608], dtype="int16", elem_offset=0, align=64, offset_factor=1)
    placeholder_149 = T.match_buffer(placeholder_146, [512], dtype="int32", elem_offset=0, align=64, offset_factor=1)
    T_cast_49 = T.match_buffer(T_cast_48, [100352], dtype="int16", elem_offset=0, align=64, offset_factor=1)
    # body
    PaddedInput_22 = T.decl_buffer([131072], "int16")
    DepthwiseConv2d_9 = T.decl_buffer([100352], "int32")
    # Zero-pad the 14x14x512 input to 16x16x512.
    for i1_29, i2_39, i3_40 in T.grid(16, 16, 512):
        PaddedInput_22[(((i1_29*8192) + (i2_39*512)) + i3_40)] = T.if_then_else(((((1 <= i1_29) and (i1_29 < 15)) and (1 <= i2_39)) and (i2_39 < 15)), placeholder_147[((((i1_29*7168) + (i2_39*512)) + i3_40) - 7680)], T.int16(0), dtype="int16")
    # 3x3 depthwise convolution into the int32 accumulator.
    for i_9, j_9, c_9 in T.grid(14, 14, 512):
        DepthwiseConv2d_9[(((i_9*7168) + (j_9*512)) + c_9)] = 0
        for di_9, dj_9 in T.grid(3, 3):
            DepthwiseConv2d_9[(((i_9*7168) + (j_9*512)) + c_9)] = (DepthwiseConv2d_9[(((i_9*7168) + (j_9*512)) + c_9)] + (PaddedInput_22[(((((i_9*8192) + (di_9*8192)) + (j_9*512)) + (dj_9*512)) + c_9)].astype("int32")*placeholder_148[(((di_9*1536) + (dj_9*512)) + c_9)].astype("int32")))
    # Bias add, fixed-point requantization, clip, and two casts; the pad
    # buffer is reused as scratch for the uint8 intermediate.
    for ax1_27, ax2_28, ax3_30 in T.grid(14, 14, 512):
        DepthwiseConv2d_9[(((ax1_27*7168) + (ax2_28*512)) + ax3_30)] = (DepthwiseConv2d_9[(((ax1_27*7168) + (ax2_28*512)) + ax3_30)] + placeholder_149[ax3_30])
    for i1_30, i2_40, i3_41 in T.grid(14, 14, 512):
        DepthwiseConv2d_9[(((i1_30*7168) + (i2_40*512)) + i3_41)] = T.q_multiply_shift(DepthwiseConv2d_9[(((i1_30*7168) + (i2_40*512)) + i3_41)], 1269068532, 31, -4, dtype="int32")
    for i1_31, i2_41, i3_42 in T.grid(14, 14, 512):
        DepthwiseConv2d_9[(((i1_31*7168) + (i2_41*512)) + i3_42)] = T.max(T.max(DepthwiseConv2d_9[(((i1_31*7168) + (i2_41*512)) + i3_42)], 255), 0)
    for ax1_28, ax2_29, ax3_31 in T.grid(14, 14, 512):
        PaddedInput_22[(((ax1_28*7168) + (ax2_29*512)) + ax3_31)] = DepthwiseConv2d_9[(((ax1_28*7168) + (ax2_29*512)) + ax3_31)].astype("uint8")
    for ax1_29, ax2_30, ax3_32 in T.grid(14, 14, 512):
        T_cast_49[(((ax1_29*7168) + (ax2_30*512)) + ax3_32)] = PaddedInput_22[(((ax1_29*7168) + (ax2_30*512)) + ax3_32)].astype("int16")
# fmt: on
# fmt: off
# Variant of the kernel above where intermediate buffers are opened in
# nested `with T.decl_buffer(...)` scopes, so their live ranges do not all
# overlap and the computed workspace (1566720 B at alignment 1) is smaller
# than the sum of all allocations.  The 8-byte allocate_const is what
# calculate_constant_bytes counts (consts == 8 in test_local_allocates).
@T.prim_func
def primfunc_local_allocates(placeholder_162: T.handle, placeholder_163: T.handle, placeholder_164: T.handle, T_cast_76: T.handle) -> None:
    # function attr dict
    T.func_attr({"global_symbol": "fused_nn_conv2d_add_cast_fixed_point_multiply_clip_cast_cast_9", "tir.noalias": True})
    placeholder_165 = T.match_buffer(placeholder_162, [100352], dtype="int16", elem_offset=0, align=64, offset_factor=1)
    placeholder_166 = T.match_buffer(placeholder_163, [4608], dtype="int16", elem_offset=0, align=64, offset_factor=1)
    placeholder_167 = T.match_buffer(placeholder_164, [512], dtype="int32", elem_offset=0, align=64, offset_factor=1)
    T_cast_77 = T.match_buffer(T_cast_76, [100352], dtype="int16", elem_offset=0, align=64, offset_factor=1)
    sid_21 = T.allocate_const([0,1,2,3,4,5,6,7], "int8", [8])
    # body
    PaddedInput_25 = T.decl_buffer([131072], "int16")
    for i1_35, i2_46, i3_47 in T.grid(16, 16, 512):
        PaddedInput_25[(((i1_35*8192) + (i2_46*512)) + i3_47)] = T.if_then_else(((((1 <= i1_35) and (i1_35 < 15)) and (1 <= i2_46)) and (i2_46 < 15)), placeholder_165[((((i1_35*7168) + (i2_46*512)) + i3_47) - 7680)], T.int16(0), dtype="int16")
    T_add_11 = T.decl_buffer([100352], "int32")
    # Accumulator buffer is scoped: it dies once the bias add has run.
    with T.decl_buffer([100352], "int32") as DepthwiseConv2d_11:
        for i_11, j_11, c_11 in T.grid(14, 14, 512):
            DepthwiseConv2d_11[(((i_11*7168) + (j_11*512)) + c_11)] = 0
            for di_11, dj_11 in T.grid(3, 3):
                DepthwiseConv2d_11[(((i_11*7168) + (j_11*512)) + c_11)] = (DepthwiseConv2d_11[(((i_11*7168) + (j_11*512)) + c_11)] + (PaddedInput_25[(((((i_11*8192) + (di_11*8192)) + (j_11*512)) + (dj_11*512)) + c_11)].astype("int32")*placeholder_166[(((di_11*1536) + (dj_11*512)) + c_11)].astype("int32")))
        for ax1_44, ax2_45, ax3_47 in T.grid(14, 14, 512):
            T_add_11[(((ax1_44*7168) + (ax2_45*512)) + ax3_47)] = (DepthwiseConv2d_11[(((ax1_44*7168) + (ax2_45*512)) + ax3_47)] + placeholder_167[ax3_47])
    compute_22 = T.decl_buffer([100352], "int32")
    # Scoped copy buffer feeding the fixed-point requantization.
    with T.decl_buffer([100352], "int32") as T_cast_78:
        for ax1_45, ax2_46, ax3_48 in T.grid(14, 14, 512):
            T_cast_78[(((ax1_45*7168) + (ax2_46*512)) + ax3_48)] = T_add_11[(((ax1_45*7168) + (ax2_46*512)) + ax3_48)]
        for i1_36, i2_47, i3_48 in T.grid(14, 14, 512):
            compute_22[(((i1_36*7168) + (i2_47*512)) + i3_48)] = T.q_multiply_shift(T_cast_78[(((i1_36*7168) + (i2_47*512)) + i3_48)], 1948805937, 31, -5, dtype="int32")
    T_cast_79 = T.decl_buffer([100352], "uint8")
    # Scoped clip buffer preceding the uint8 cast.
    with T.decl_buffer([100352], "int32") as compute_23:
        for i1_37, i2_48, i3_49 in T.grid(14, 14, 512):
            compute_23[(((i1_37*7168) + (i2_48*512)) + i3_49)] = T.max(T.max(compute_22[(((i1_37*7168) + (i2_48*512)) + i3_49)], 255), 0)
        for ax1_46, ax2_47, ax3_49 in T.grid(14, 14, 512):
            T_cast_79[(((ax1_46*7168) + (ax2_47*512)) + ax3_49)] = compute_23[(((ax1_46*7168) + (ax2_47*512)) + ax3_49)].astype("uint8")
    for ax1_47, ax2_48, ax3_50 in T.grid(14, 14, 512):
        T_cast_77[(((ax1_47*7168) + (ax2_48*512)) + ax3_50)] = T_cast_79[(((ax1_47*7168) + (ax2_48*512)) + ax3_50)].astype("int16")
# fmt: on
@pytest.mark.parametrize("alignment,size,consts", [(1, 663552, 0), (10, 663560, 0)])
def test_global_allocates(alignment, size, consts):
    """Byte accounting for the kernel whose buffers are all function-scope."""
    func = primfunc_global_allocates
    assert tvm.tir.analysis.calculate_constant_bytes(func, alignment) == consts
    assert tvm.tir.analysis.calculate_workspace_bytes(func, alignment) == size
@pytest.mark.parametrize("alignment,size,consts", [(1, 1566720, 8), (100, 1567100, 100)])
def test_local_allocates(alignment, size, consts):
    """Byte accounting for the kernel with scoped buffers and a constant."""
    func = primfunc_local_allocates
    assert tvm.tir.analysis.calculate_constant_bytes(func, alignment) == consts
    assert tvm.tir.analysis.calculate_workspace_bytes(func, alignment) == size
if __name__ == "__main__":
    # The tests above are pytest-parametrized and cannot be called directly
    # without arguments (the old entry point raised TypeError); dispatch
    # through the TVM test runner instead.
    tvm.testing.main()
# ---- dataset metadata (not Python; commented out): | 7,545 | 66.981982 | 307 | py |
# ---- file boundary: tvm-main/tests/python/unittest/test_tir_transform_lower_cross_thread_reduction.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
def _check(original, transformed):
    """Lower *original* with LowerCrossThreadReduction and require the result
    to be structurally equal to *transformed* (with remapped free vars)."""
    lowered = tvm.tir.transform.LowerCrossThreadReduction()(
        tvm.IRModule.from_expr(original)
    )
    tvm.ir.assert_structural_equal(lowered["main"], transformed, True)
def _check_fail(original):
    """LowerCrossThreadReduction must reject *original* with a ValueError."""
    with pytest.raises(ValueError):
        tvm.tir.transform.LowerCrossThreadReduction()(
            tvm.IRModule.from_expr(original)
        )
# Row-sum reduction whose reduction axis is split between a serial outer
# loop (ko) and a threadIdx.x-bound inner loop (ki): a cross-thread
# reduction preceded by an in-thread serial part.
@T.prim_func
def loop_split(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i, ko in T.grid(128, 4):
        for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("B"):
                vi = T.axis.S(128, i)
                vk = T.axis.R(128, ko * 32 + ki)
                T.reads([A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.float32(0)
                B[vi] = B[vi] + A[vi, vk]
# Expected lowering of `loop_split`: four stages per thread — zero the
# per-thread accumulator, serially reduce the ko chunk into it, combine
# across threads with tvm_thread_allreduce, then write back from a single
# lane (T.where(ki == 0)).
@T.prim_func
def lowered_loop_split(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    normal_reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i in T.serial(0, 128):
        for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("B_in_thread_init"):
                T.reads([])
                T.writes([normal_reduce_temp0[0]])
                normal_reduce_temp0[0] = T.float32(0)
            for ko in T.serial(0, 4):
                with T.block("B_normal_reduction"):
                    vi = T.axis.S(128, i)
                    vk = T.axis.R(128, ko * 32 + ki)
                    T.reads([A[vi, vk]])
                    T.writes([normal_reduce_temp0[0]])
                    normal_reduce_temp0[0] = normal_reduce_temp0[0] + A[vi, vk]
            with T.block("B_cross_thread_reduction"):
                T.reads([normal_reduce_temp0[0]])
                T.writes([reduce_temp0[0]])
                # reduce_scope attr carries the combiner (sum with identity 0).
                T.attr(
                    T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(1),
                        normal_reduce_temp0[0],
                        True,
                        reduce_temp0[0],
                        ki,
                        dtype="handle",
                    )
                )
            with T.block("B_write_back"):
                vi = T.axis.S(128, i)
                # Only one lane commits the reduced value.
                T.where(ki == 0)
                T.reads([reduce_temp0[0]])
                T.writes([B[vi]])
                B[vi] = reduce_temp0[0]
# Pass input: the entire 128-element reduction axis is bound to threadIdx.x,
# so there is no serial ("normal") part of the reduction left in a thread.
@T.prim_func
def no_normal_reduction(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i in T.serial(0, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.float32(0)
                B[vi] = B[vi] + A[vi, vk]
# Expected lowering of `no_normal_reduction`: with no serial reduction part,
# the allreduce consumes A[vi, vk] directly (no in-thread accumulator needed).
@T.prim_func
def lowered_no_normal_reduction(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i in T.serial(0, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B_cross_thread_reduction"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk]])
                T.writes([reduce_temp0[0]])
                T.attr(
                    T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(1), A[vi, vk], True, reduce_temp0[0], k, dtype="handle"
                    )
                )
            with T.block("B_write_back"):
                vi = T.axis.spatial(128, i)
                T.where(k == 0)
                T.reads([reduce_temp0[0]])
                T.writes([B[vi]])
                B[vi] = reduce_temp0[0]
# Pass input: the reduction axis is split across two thread-bound loops
# (threadIdx.x of extent 4 and threadIdx.y of extent 32).
@T.prim_func
def two_bound_loops(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i in T.serial(0, 128):
        for ko in T.thread_binding(0, 4, thread="threadIdx.x"):
            for ki in T.thread_binding(0, 32, thread="threadIdx.y"):
                with T.block("B"):
                    vi = T.axis.spatial(128, i)
                    vk = T.axis.reduce(128, ko * 32 + ki)
                    T.reads([A[vi, vk]])
                    T.writes([B[vi]])
                    with T.init():
                        B[vi] = T.float32(0)
                    B[vi] = B[vi] + A[vi, vk]
# Expected lowering of `two_bound_loops`: both thread vars (ko, ki) are passed
# to tvm_thread_allreduce, and write-back is guarded on both being zero.
@T.prim_func
def lowered_two_bound_loops(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i in T.serial(0, 128):
        for ko in T.thread_binding(0, 4, thread="threadIdx.x"):
            for ki in T.thread_binding(0, 32, thread="threadIdx.y"):
                with T.block("B_cross_thread_reduction"):
                    vi = T.axis.spatial(128, i)
                    vk = T.axis.reduce(128, ko * 32 + ki)
                    T.reads([A[vi, vk]])
                    T.writes([reduce_temp0[0]])
                    T.attr(
                        T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                        "reduce_scope",
                        T.reinterpret(T.uint64(0), dtype="handle"),
                    )
                    T.evaluate(
                        T.tvm_thread_allreduce(
                            T.uint32(1), A[vi, vk], True, reduce_temp0[0], ko, ki, dtype="handle"
                        )
                    )
                with T.block("B_write_back"):
                    vi = T.axis.spatial(128, i)
                    T.where(ko == 0 and ki == 0)
                    T.reads([reduce_temp0[0]])
                    T.writes([B[vi]])
                    B[vi] = reduce_temp0[0]
# Pass input: an rfactor-style block `B_rf` and the final reduction block `B`
# both live under the same thread-bound reduction loop `k0o`; only `B` should
# be turned into a cross-thread reduction.
@T.prim_func
def multiple_blocks_under_reduction_loop(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [16, 16, 16], dtype="float32")
    B = T.match_buffer(b, [16], dtype="float32")
    B_rf_local = T.alloc_buffer([16, 16], dtype="float32", scope="local")
    for i in T.thread_binding(0, 16, thread="blockIdx.x"):
        for k0o in T.thread_binding(0, 4, thread="threadIdx.x"):
            for k0i0, k1 in T.grid(4, 16):
                with T.block("B_rf"):
                    vk0 = T.axis.spatial(16, k0o * 4 + k0i0)
                    vi, vk1 = T.axis.remap("SR", [i, k1])
                    T.reads([A[vi, vk0, vk1]])
                    T.writes([B_rf_local[vk0, vi]])
                    with T.init():
                        B_rf_local[vk0, vi] = T.float32(0)
                    B_rf_local[vk0, vi] = B_rf_local[vk0, vi] + A[vi, vk0, vk1]
            for k0i1 in T.serial(0, 4):
                with T.block("B"):
                    vk0 = T.axis.reduce(16, k0o * 4 + k0i1)
                    vi = T.axis.spatial(16, i)
                    T.reads([B_rf_local[vk0, vi]])
                    T.writes([B[vi]])
                    with T.init():
                        B[vi] = T.float32(0)
                    B[vi] = B[vi] + B_rf_local[vk0, vi]
# Expected lowering of `multiple_blocks_under_reduction_loop`: the `B_rf` block
# is untouched, while block `B` becomes the in-thread-init / normal-reduction /
# allreduce / write-back sequence.
@T.prim_func
def lowered_multiple_blocks_under_reduction_loop(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [16, 16, 16], dtype="float32")
    B = T.match_buffer(b, [16], dtype="float32")
    B_rf_local = T.alloc_buffer([16, 16], dtype="float32", scope="local")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    normal_reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i in T.thread_binding(0, 16, thread="blockIdx.x"):
        for k0o in T.thread_binding(0, 4, thread="threadIdx.x"):
            with T.block("B_in_thread_init"):
                T.reads([])
                T.writes([normal_reduce_temp0[0]])
                normal_reduce_temp0[0] = T.float32(0)
            for k0i0, k1 in T.grid(4, 16):
                with T.block("B_rf"):
                    vk0 = T.axis.spatial(16, k0o * 4 + k0i0)
                    vi, vk1 = T.axis.remap("SR", [i, k1])
                    T.reads([A[vi, vk0, vk1]])
                    T.writes([B_rf_local[vk0, vi]])
                    with T.init():
                        B_rf_local[vk0, vi] = T.float32(0)
                    B_rf_local[vk0, vi] = B_rf_local[vk0, vi] + A[vi, vk0, vk1]
            for k0i1 in T.serial(0, 4):
                with T.block("B_normal_reduction"):
                    vk0 = T.axis.reduce(16, k0o * 4 + k0i1)
                    vi = T.axis.spatial(16, i)
                    T.reads([B_rf_local[vk0, vi]])
                    T.writes([normal_reduce_temp0[0]])
                    normal_reduce_temp0[0] = normal_reduce_temp0[0] + B_rf_local[vk0, vi]
            with T.block("B_cross_thread_reduction"):
                T.reads([normal_reduce_temp0[0]])
                T.writes([reduce_temp0[0]])
                T.attr(
                    T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(1),
                        normal_reduce_temp0[0],
                        True,
                        reduce_temp0[0],
                        k0o,
                        dtype="handle",
                    )
                )
            with T.block("B_write_back"):
                vi = T.axis.spatial(16, i)
                T.where(k0o == 0)
                T.reads([reduce_temp0[0]])
                T.writes([B[vi]])
                B[vi] = reduce_temp0[0]
# Pass input: the split reduction axis (4 x 32 = 128) over-covers the actual
# extent of 120, so the block carries a `T.where` predicate.
@T.prim_func
def with_block_predicate(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 120], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i, ko in T.grid(128, 4):
        for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("B"):
                vi = T.axis.spatial(128, i)
                vk = T.axis.reduce(120, ko * 32 + ki)
                T.where(ko * 32 + ki < 120)
                T.reads([A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.float32(0)
                B[vi] = B[vi] + A[vi, vk]
# Expected lowering of `with_block_predicate`: the bounds predicate stays on
# the in-thread normal-reduction block so out-of-range elements are skipped.
@T.prim_func
def lowered_with_block_predicate(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 120], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    normal_reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i in T.serial(0, 128):
        for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("B_in_thread_init"):
                T.reads([])
                T.writes([normal_reduce_temp0[0]])
                normal_reduce_temp0[0] = T.float32(0)
            for ko in T.serial(0, 4):
                with T.block("B_normal_reduction"):
                    vi = T.axis.spatial(128, i)
                    vk = T.axis.reduce(120, ko * 32 + ki)
                    T.where(ko * 32 + ki < 120)
                    T.reads([A[vi, vk]])
                    T.writes([normal_reduce_temp0[0]])
                    normal_reduce_temp0[0] = normal_reduce_temp0[0] + A[vi, vk]
            with T.block("B_cross_thread_reduction"):
                T.reads([normal_reduce_temp0[0]])
                T.writes([reduce_temp0[0]])
                T.attr(
                    T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(1),
                        normal_reduce_temp0[0],
                        True,
                        reduce_temp0[0],
                        ki,
                        dtype="handle",
                    )
                )
            with T.block("B_write_back"):
                vi = T.axis.spatial(128, i)
                T.where(ki == 0)
                T.reads([reduce_temp0[0]])
                T.writes([B[vi]])
                B[vi] = reduce_temp0[0]
# Pass input: softmax-style kernel where the thread extent (512) exceeds the
# reduction extent (256), so both reduction blocks carry a `T.where` predicate.
@T.prim_func
def single_reduction_loop_with_block_predicate(
    A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")
) -> None:
    T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    for i0 in T.serial(256):
        for ax0, ax1_0 in T.grid(1, 1):
            for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
                with T.block("T_softmax_maxelem"):
                    i0_1 = T.axis.spatial(256, i0 + ax0)
                    k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
                    T.where(ax1_0 * 512 + ax1_1 < 256)
                    T.reads(A[i0_1, k])
                    T.writes(T_softmax_maxelem_shared[i0_1])
                    with T.init():
                        T_softmax_maxelem_shared[i0_1] = T.float32(-3.4028234663852886e38)
                    T_softmax_maxelem_shared[i0_1] = T.max(
                        T_softmax_maxelem_shared[i0_1], A[i0_1, k]
                    )
        for ax0, ax1_0 in T.grid(1, 1):
            for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
                with T.block("T_softmax_expsum"):
                    i0_2 = T.axis.spatial(256, i0 + ax0)
                    k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
                    T.where(ax1_0 * 512 + ax1_1 < 256)
                    T.reads(A[i0_2, k], T_softmax_maxelem_shared[i0_2])
                    T.writes(T_softmax_expsum_shared[i0_2])
                    with T.init():
                        T_softmax_expsum_shared[i0_2] = T.float32(0)
                    T_softmax_expsum_shared[i0_2] = T_softmax_expsum_shared[i0_2] + T.exp(
                        A[i0_2, k] - T_softmax_maxelem_shared[i0_2], dtype="float32"
                    )
        for i1_0 in T.serial(1):
            for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
                with T.block("T_softmax_norm"):
                    i0_3 = T.axis.spatial(256, i0)
                    i1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
                    T.where(i1_0 * 512 + i1_1 < 256)
                    T.reads(
                        A[i0_3, i1], T_softmax_maxelem_shared[i0_3], T_softmax_expsum_shared[i0_3]
                    )
                    T.writes(T_softmax_norm[i0_3, i1])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[i0_3, i1] = (
                        T.exp(A[i0_3, i1] - T_softmax_maxelem_shared[i0_3], dtype="float32")
                        / T_softmax_expsum_shared[i0_3]
                    )
# Expected lowering of `single_reduction_loop_with_block_predicate`: each
# predicated reduction becomes its own in-thread-init / predicated in-thread
# reduction / allreduce / write-back sequence, with separate local buffers.
@T.prim_func
def lowered_single_reduction_loop_with_block_predicate(
    A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")
) -> None:
    T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    cross_thread_0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    in_thread_0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    cross_thread_1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    in_thread_1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i0 in T.serial(256):
        for ax0 in T.serial(1):
            for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
                with T.block("T_softmax_maxelem_in_thread_init"):
                    T.reads()
                    T.writes(in_thread_0[0])
                    in_thread_0[0] = T.float32(-3.4028234663852886e38)
                for ax1_0 in T.serial(1):
                    with T.block("T_softmax_maxelem_in_thread"):
                        T.where(ax1_0 * 512 + ax1_1 < 256)
                        i0_1 = T.axis.spatial(256, i0 + ax0)
                        k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
                        T.reads(A[i0_1, k])
                        T.writes(in_thread_0[0])
                        in_thread_0[0] = T.max(in_thread_0[0], A[i0_1, k])
                with T.block("T_softmax_maxelem_cross_thread"):
                    T.reads(in_thread_0[0])
                    T.writes(cross_thread_0[0])
                    T.attr(
                        T.comm_reducer(
                            lambda x, y: T.max(x, y), [T.float32(-3.4028234663852886e38)]
                        ),
                        "reduce_scope",
                        T.reinterpret(T.uint64(0), dtype="handle"),
                    )
                    T.evaluate(
                        T.tvm_thread_allreduce(
                            T.uint32(1),
                            in_thread_0[0],
                            True,
                            cross_thread_0[0],
                            ax1_1,
                            dtype="handle",
                        )
                    )
                with T.block("T_softmax_maxelem_write_back"):
                    i0_2 = T.axis.spatial(256, i0 + ax0)
                    T.where(ax1_1 == 0)
                    T.reads(cross_thread_0[0])
                    T.writes(T_softmax_maxelem_shared[i0_2])
                    T_softmax_maxelem_shared[i0_2] = cross_thread_0[0]
        for ax0 in T.serial(1):
            for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
                with T.block("T_softmax_expsum_in_thread_init"):
                    T.reads()
                    T.writes(in_thread_1[0])
                    in_thread_1[0] = T.float32(0)
                for ax1_0 in T.serial(1):
                    with T.block("T_softmax_expsum_in_thread"):
                        T.where(ax1_0 * 512 + ax1_1 < 256)
                        i0_3 = T.axis.spatial(256, i0 + ax0)
                        k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
                        T.reads(A[i0_3, k], T_softmax_maxelem_shared[i0_3])
                        T.writes(in_thread_1[0])
                        in_thread_1[0] = in_thread_1[0] + T.exp(
                            A[i0_3, k] - T_softmax_maxelem_shared[i0_3], dtype="float32"
                        )
                with T.block("T_softmax_expsum_cross_thread"):
                    T.reads(in_thread_1[0])
                    T.writes(cross_thread_1[0])
                    T.attr(
                        T.comm_reducer(lambda x_1, y_1: x_1 + y_1, [T.float32(0)]),
                        "reduce_scope",
                        T.reinterpret(T.uint64(0), dtype="handle"),
                    )
                    T.evaluate(
                        T.tvm_thread_allreduce(
                            T.uint32(1),
                            in_thread_1[0],
                            True,
                            cross_thread_1[0],
                            ax1_1,
                            dtype="handle",
                        )
                    )
                with T.block("T_softmax_expsum_write_back"):
                    i0_4 = T.axis.spatial(256, i0 + ax0)
                    T.where(ax1_1 == 0)
                    T.reads(cross_thread_1[0])
                    T.writes(T_softmax_expsum_shared[i0_4])
                    T_softmax_expsum_shared[i0_4] = cross_thread_1[0]
        for i1_0 in T.serial(1):
            for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
                with T.block("T_softmax_norm"):
                    i0_5 = T.axis.spatial(256, i0)
                    i1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
                    T.where(i1_0 * 512 + i1_1 < 256)
                    T.reads(
                        A[i0_5, i1], T_softmax_maxelem_shared[i0_5], T_softmax_expsum_shared[i0_5]
                    )
                    T.writes(T_softmax_norm[i0_5, i1])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[i0_5, i1] = (
                        T.exp(A[i0_5, i1] - T_softmax_maxelem_shared[i0_5], dtype="float32")
                        / T_softmax_expsum_shared[i0_5]
                    )
# Pass input: a tensorized reduction whose update is an opaque inner block
# using match_buffers and an LLVM intrinsic call.
# NOTE(review): no thread-bound reduction loop is visible here; presumably the
# pass should leave this function unchanged or reject it — verify against the
# test case that uses it.
@T.prim_func
def single_reduction_loop_with_tensorize(
    input_A: T.Buffer((1, 64, 7, 7, 32), "uint8"),
    input_B: T.Buffer((16, 64, 1, 1, 8, 32, 4), "int8"),
    output: T.Buffer((1, 16, 7, 7, 32), "int32"),
) -> None:
    # body
    # with T.block("root")
    for i1, i2, i3, i4, i5 in T.grid(16, 4, 98, 2, 32):
        with T.block("compute_o"):
            n = T.axis.spatial(1, 0)
            oc_chunk = T.axis.spatial(16, i1)
            oh = T.axis.spatial(7, (i2 * 6272 + i3 * 64 + i4 * 32 + i5) // 3584)
            ow = T.axis.spatial(7, (i2 * 6272 + i3 * 64 + i4 * 32 + i5) % 3584 // 512)
            kh = T.axis.reduce(1, 0)
            kw = T.axis.reduce(1, 0)
            ic_outer = T.axis.reduce(64, (i2 * 6272 + i3 * 64 + i4 * 32 + i5) % 512 // 8)
            ic_f_inner = T.axis.reduce(8, (i2 * 6272 + i3 * 64 + i4 * 32 + i5) % 8)
            T.reads(
                input_A[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4],
                input_B[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:32, 0:4],
            )
            T.writes(output[n, oc_chunk, oh, ow, 0:32])
            with T.init():
                for x in T.serial(32):
                    with T.block("compute_init"):
                        oc_block_i_init = T.axis.spatial(32, x)
                        T.reads()
                        T.writes(output[n, oc_chunk, oh, ow, oc_block_i_init])
                        output[n, oc_chunk, oh, ow, oc_block_i_init] = 0
            with T.block("compute_o"):
                T.reads(
                    output[n, oc_chunk, oh, ow, 0:32],
                    input_A[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4],
                    input_B[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:32, 0:4],
                )
                T.writes(output[n, oc_chunk, oh, ow, 0:32])
                A = T.match_buffer(
                    input_A[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4],
                    [4],
                    dtype="uint8",
                    offset_factor=1,
                )
                B = T.match_buffer(
                    input_B[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:32, 0:4],
                    [32, 4],
                    dtype="int8",
                    offset_factor=1,
                )
                C = T.match_buffer(
                    output[n, oc_chunk, oh, ow, 0:32], [32], dtype="int32", offset_factor=1
                )
                A_u8x4: T.uint8x4 = A[0:4]
                A_i32: T.int32 = T.reinterpret(A_u8x4, dtype="int32")
                B_i8x128 = B[0, 0:128]
                B_i32x32: T.int32x32 = T.reinterpret(B_i8x128, dtype="int32x32")
                C[0:32] = T.call_llvm_pure_intrin(
                    4217, T.uint32(3), C[0:32], T.broadcast(A_i32, 32), B_i32x32, dtype="int32x32"
                )
# Pass input: nested reduction blocks where the innermost update block uses
# match_buffers over sub-regions of the inputs/outputs.
# NOTE(review): no thread-bound loop is visible here; presumably this checks
# the pass leaves purely serial reductions alone — verify against its usage.
@T.prim_func
def nested_reduction_loop_with_inner_match_buffers(
    in0: T.Buffer((4, 16), "int8"),
    in1: T.Buffer((4, 16), "int8"),
    out: T.Buffer((4, 4), "int32"),
) -> None:
    # body
    # with T.block("root")
    for y in T.serial(4):
        with T.block("C"):
            yi = T.axis.spatial(4, y)
            T.reads(in0[yi, 0:16], in1[yi, 0:16])
            T.writes(out[yi, 0:4])
            for x in T.serial(4):
                with T.block("C"):
                    xr = T.axis.reduce(4, x)
                    with T.init():
                        for i in T.serial(4):
                            with T.block("C_init"):
                                ii = T.axis.spatial(4, i)
                                T.reads()
                                T.writes(out[yi, ii])
                                out[yi, ii] = 0
                    with T.block("C"):
                        T.reads(
                            out[yi, xr],
                            in0[yi, yi * 4 + xr : yi * 4 + xr + 4],
                            in1[yi, yi * 4 + xr : yi * 4 + xr + 4],
                        )
                        T.writes(out[yi, xr])
                        A = T.match_buffer(
                            in0[yi, yi * 4 + xr : yi * 4 + xr + 4],
                            [4],
                            dtype="int8",
                            offset_factor=1,
                        )
                        B = T.match_buffer(
                            in1[yi, yi * 4 + xr : yi * 4 + xr + 4],
                            [4],
                            dtype="int8",
                            offset_factor=1,
                        )
                        C = T.match_buffer(out[yi, xr], [1], dtype="int32", offset_factor=1)
                        A_i8x4: T.int8x4 = A[0:4]
                        A_i32: T.int32 = T.reinterpret(A_i8x4, dtype="int32")
                        B_i8x4: T.int8x4 = B[0:4]
                        B_i32: T.int32 = T.reinterpret(B_i8x4, dtype="int32")
                        C[0] = A_i32 + B_i32 + C[0]
# Pass input: row-wise max reduction, whole reduce axis bound to threadIdx.x.
# Exercises a non-sum reducer (max with min_value identity).
@T.prim_func
def reducer_max(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i in T.serial(0, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.min_value("float32")
                B[vi] = T.max(B[vi], A[vi, vk])
# Expected lowering of `reducer_max`: the allreduce carries the max
# comm_reducer with min_value("float32") as its identity element.
@T.prim_func
def lowered_reducer_max(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i in T.serial(0, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B_cross_thread_reduction"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk]])
                T.writes([reduce_temp0[0]])
                T.attr(
                    T.comm_reducer(lambda x, y: T.max(x, y), [T.min_value("float32")]),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(1), A[vi, vk], True, reduce_temp0[0], k, dtype="handle"
                    )
                )
            with T.block("B_write_back"):
                vi = T.axis.spatial(128, i)
                T.where(k == 0)
                T.reads([reduce_temp0[0]])
                T.writes([B[vi]])
                B[vi] = reduce_temp0[0]
# Pass input: reduction into a zero-rank (scalar) buffer, accessed as B[()].
@T.prim_func
def zero_rank_buffer(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128], dtype="float32")
    B = T.match_buffer(b, [], dtype="float32")
    for k in T.thread_binding(0, 128, thread="threadIdx.x"):
        with T.block("B"):
            vk = T.axis.reduce(128, k)
            T.reads([A[vk]])
            T.writes([B[()]])
            with T.init():
                B[()] = T.float32(0)
            B[()] = B[()] + A[vk]
# Expected lowering of `zero_rank_buffer`: same allreduce structure, with the
# write-back targeting the scalar buffer element B[()].
@T.prim_func
def lowered_zero_rank_buffer(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128], dtype="float32")
    B = T.match_buffer(b, [], dtype="float32")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for k in T.thread_binding(0, 128, thread="threadIdx.x"):
        with T.block("B_cross_thread_reduction"):
            vk = T.axis.reduce(128, k)
            T.reads([A[vk]])
            T.writes([reduce_temp0[0]])
            T.attr(
                T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                "reduce_scope",
                T.reinterpret(T.uint64(0), dtype="handle"),
            )
            T.evaluate(
                T.tvm_thread_allreduce(T.uint32(1), A[vk], True, reduce_temp0[0], k, dtype="handle")
            )
        with T.block("B_write_back"):
            T.reads([reduce_temp0[0]])
            T.writes([B[()]])
            T.where(k == 0)
            B[()] = reduce_temp0[0]
# Negative input: the reduction block's update stores to two buffers (C and B).
# NOTE(review): presumably fed to _check_fail — verify against the test body.
@T.prim_func
def multiple_bufferstore(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    C = T.alloc_buffer([], dtype="float32")
    for i in T.serial(0, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk], B[vi], C[()]])
                T.writes([B[vi], C[()]])
                with T.init():
                    B[vi] = T.float32(0)
                C[()] = A[vi, vk]
                B[vi] = B[vi] + C[()]
# Negative input: the thread-bound reduction loop `k` is *outside* the serial
# spatial loop `i`, i.e. not the deepest loop of the reduction block.
# NOTE(review): presumably fed to _check_fail — verify against the test body.
@T.prim_func
def reduction_loop_not_deepest(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for k in T.thread_binding(0, 128, thread="threadIdx.x"):
        for i in T.serial(0, 128):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.float32(0)
                B[vi] = B[vi] + A[vi, vk]
# Negative input: the reduction loop is bound to blockIdx.x rather than a
# threadIdx thread axis.
# NOTE(review): presumably fed to _check_fail — verify against the test body.
@T.prim_func
def reduction_loop_bound_to_blockidx(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i in T.serial(0, 128):
        for k in T.thread_binding(0, 128, thread="blockIdx.x"):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.float32(0)
                B[vi] = B[vi] + A[vi, vk]
# Negative input: the init statement writes B[vj, vi] while the update writes
# B[vi, vj] — the reduction target is accessed with different indices.
# NOTE(review): presumably fed to _check_fail — verify against the test body.
@T.prim_func
def different_access_indices(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128, 128], dtype="float32")
    B = T.match_buffer(b, [128, 128], dtype="float32")
    for i, j in T.grid(128, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                T.reads([A[vi, vj, vk]])
                T.writes(
                    [
                        B[
                            T.min(vj, vi) : T.min(vj, vi) + (T.max(vj, vi) + 1 - T.min(vj, vi)),
                            T.min(vi, vj) : T.min(vi, vj) + (T.max(vi, vj) + 1 - T.min(vi, vj)),
                        ]
                    ]
                )
                with T.init():
                    B[vj, vi] = T.float32(0)
                B[vi, vj] = B[vi, vj] + A[vi, vj, vk]
# Negative input: the update `B[vi] = B[vi] - A[vi, vk]` does not match any
# recognized commutative reducer pattern.
# NOTE(review): presumably fed to _check_fail — verify against the test body.
@T.prim_func
def invalid_reducer(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i in T.serial(0, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads([A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.float32(0)
                B[vi] = B[vi] - A[vi, vk]
# Pass input: full softmax kernel with two cross-thread reductions per row
# (max, then exp-sum) followed by a spatial normalization block.
@T.prim_func
def softmax(var_A: T.handle, var_T_softmax_norm: T.handle) -> None:
    A = T.match_buffer(var_A, [256, 256], dtype="float32")
    T_softmax_norm = T.match_buffer(var_T_softmax_norm, [256, 256], dtype="float32")
    T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    for i0 in T.thread_binding(0, 256, thread="blockIdx.x"):
        for ax0_0 in T.serial(0, 8):
            for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
                with T.block("T_softmax_maxelem"):
                    i0_1 = T.axis.spatial(256, i0)
                    k = T.axis.reduce(256, ax0_0 * 32 + ax0_1)
                    T.reads([A[i0_1, k]])
                    T.writes([T_softmax_maxelem_shared[i0_1]])
                    with T.init():
                        T_softmax_maxelem_shared[i0_1] = T.min_value("float32")
                    T_softmax_maxelem_shared[i0_1] = T.max(
                        T_softmax_maxelem_shared[i0_1], A[i0_1, k]
                    )
        for ax0_0 in T.serial(0, 8):
            for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
                with T.block("T_softmax_expsum"):
                    i0_2 = T.axis.spatial(256, i0)
                    k = T.axis.reduce(256, ax0_0 * 32 + ax0_1)
                    T.reads(
                        [
                            A[i0_2, k],
                            T_softmax_maxelem_shared[i0_2],
                        ]
                    )
                    T.writes([T_softmax_expsum_shared[i0_2]])
                    with T.init():
                        T_softmax_expsum_shared[i0_2] = T.float32(0)
                    T_softmax_expsum_shared[i0_2] = T_softmax_expsum_shared[i0_2] + T.exp(
                        A[i0_2, k] - T_softmax_maxelem_shared[i0_2], dtype="float32"
                    )
        for i1_0 in T.serial(0, 8):
            for i1_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
                with T.block("T_softmax_norm"):
                    i0_3 = T.axis.spatial(256, i0)
                    i1 = T.axis.spatial(256, i1_0 * 32 + i1_1)
                    T.reads(
                        [
                            A[i0_3, i1],
                            T_softmax_maxelem_shared[i0_3],
                            T_softmax_expsum_shared[i0_3],
                        ]
                    )
                    T.writes([T_softmax_norm[i0_3, i1]])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[i0_3, i1] = (
                        T.exp(
                            A[i0_3, i1] - T_softmax_maxelem_shared[i0_3],
                            dtype="float32",
                        )
                        / T_softmax_expsum_shared[i0_3]
                    )
# Expected lowering of `softmax`: each of the two reductions (max, exp-sum)
# gets its own pair of local buffers and its own init / normal-reduction /
# allreduce / write-back sequence; the normalization block is unchanged.
@T.prim_func
def lowered_softmax(var_A: T.handle, var_T_softmax_norm: T.handle) -> None:
    A = T.match_buffer(var_A, [256, 256], dtype="float32")
    T_softmax_norm = T.match_buffer(var_T_softmax_norm, [256, 256], dtype="float32")
    T_softmax_maxelem_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    T_softmax_expsum_shared = T.alloc_buffer([256], dtype="float32", scope="shared")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    normal_reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    reduce_temp1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    normal_reduce_temp1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i0 in T.thread_binding(0, 256, thread="blockIdx.x"):
        for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("T_softmax_maxelem_normal_reduction_init"):
                T.reads([])
                T.writes([normal_reduce_temp0[0]])
                normal_reduce_temp0[0] = T.min_value("float32")
            for ax0_0 in T.serial(0, 8):
                with T.block("T_softmax_maxelem_normal_reduction"):
                    i0_1 = T.axis.spatial(256, i0)
                    k = T.axis.reduce(256, ax0_0 * 32 + ax0_1)
                    T.reads([A[i0_1, k]])
                    T.writes([normal_reduce_temp0[0]])
                    normal_reduce_temp0[0] = T.max(normal_reduce_temp0[0], A[i0_1, k])
            with T.block("T_softmax_maxelem_cross_thread_reduction"):
                T.reads([normal_reduce_temp0[0]])
                T.writes([reduce_temp0[0]])
                T.attr(
                    T.comm_reducer(lambda x, y: T.max(x, y), [T.min_value("float32")]),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(1),
                        normal_reduce_temp0[0],
                        True,
                        reduce_temp0[0],
                        ax0_1,
                        dtype="handle",
                    )
                )
            with T.block("T_softmax_maxelem_write_back"):
                i0_2 = T.axis.spatial(256, i0)
                T.where(ax0_1 == 0)
                T.reads([reduce_temp0[0]])
                T.writes([T_softmax_maxelem_shared[i0_2]])
                T_softmax_maxelem_shared[i0_2] = reduce_temp0[0]
        for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("T_softmax_expsum_normal_reduction_init"):
                T.reads([])
                T.writes([normal_reduce_temp1[0]])
                normal_reduce_temp1[0] = T.float32(0)
            for ax0_0 in T.serial(0, 8):
                with T.block("T_softmax_expsum_normal_reduction"):
                    i0_3 = T.axis.spatial(256, i0)
                    k = T.axis.reduce(256, ax0_0 * 32 + ax0_1)
                    T.reads(
                        [
                            A[i0_3, k],
                            T_softmax_maxelem_shared[i0_3],
                        ]
                    )
                    T.writes([normal_reduce_temp1[0]])
                    normal_reduce_temp1[0] = normal_reduce_temp1[0] + T.exp(
                        A[i0_3, k] - T_softmax_maxelem_shared[i0_3], dtype="float32"
                    )
            with T.block("T_softmax_expsum_cross_thread_reduction"):
                T.reads([normal_reduce_temp1[0]])
                T.writes([reduce_temp1[0]])
                T.attr(
                    T.comm_reducer(lambda x_1, y_1: x_1 + y_1, [T.float32(0)]),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(1),
                        normal_reduce_temp1[0],
                        True,
                        reduce_temp1[0],
                        ax0_1,
                        dtype="handle",
                    )
                )
            with T.block("T_softmax_expsum_write_back"):
                i0_4 = T.axis.spatial(256, i0)
                T.where(ax0_1 == 0)
                T.reads([reduce_temp1[0]])
                T.writes([T_softmax_expsum_shared[i0_4]])
                T_softmax_expsum_shared[i0_4] = reduce_temp1[0]
        for i1_0 in T.serial(0, 8):
            for i1_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
                with T.block("T_softmax_norm"):
                    i0_5 = T.axis.spatial(256, i0)
                    i1 = T.axis.spatial(256, i1_0 * 32 + i1_1)
                    T.reads(
                        [
                            A[i0_5, i1],
                            T_softmax_maxelem_shared[i0_5],
                            T_softmax_expsum_shared[i0_5],
                        ]
                    )
                    T.writes([T_softmax_norm[i0_5, i1]])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[i0_5, i1] = (
                        T.exp(
                            A[i0_5, i1] - T_softmax_maxelem_shared[i0_5],
                            dtype="float32",
                        )
                        / T_softmax_expsum_shared[i0_5]
                    )
# Pass input: argmax with a two-value (index, value) reducer; the reduction
# axis is split with the inner part bound to threadIdx.x.
@T.prim_func
def argmax_split(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    for i0, i1_0 in T.grid(128, 4):
        for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
            with T.block("argmax"):
                i = T.axis.spatial(128, i0)
                k = T.axis.reduce(128, i1_0 * 32 + i1_1)
                T.reads(idx[i, k], val[i, k])
                T.writes(argmax_v0[i], argmax_v1[i])
                with T.init():
                    argmax_v0[i] = -1
                    argmax_v1[i] = T.float32(-3.4028234663852886e38)
                v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
                v_argmax_v1: T.float32 = T.Select(
                    argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k]
                )
                argmax_v0[i] = v_argmax_v0
                argmax_v1[i] = v_argmax_v1
# Expected lowering of `argmax_split`: both reducer values get their own
# in-thread/cross-thread local buffers, and the allreduce is invoked with
# num_args = 2 and the two-value comm_reducer.
@T.prim_func
def lowered_argmax_split(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmax_v0: T.Buffer((128,), "int32"),
    argmax_v1: T.Buffer((128,), "float32"),
) -> None:
    cross_thread_argmax_v0 = T.alloc_buffer([1], dtype="int32", strides=[1], scope="local")
    cross_thread_argmax_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    in_thread_argmax_v0 = T.alloc_buffer([1], dtype="int32", strides=[1], scope="local")
    in_thread_argmax_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i0 in T.serial(128):
        for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
            with T.block("argmax_in_thread_init"):
                T.reads()
                T.writes(in_thread_argmax_v0[0], in_thread_argmax_v1[0])
                in_thread_argmax_v0[0] = -1
                in_thread_argmax_v1[0] = T.float32(-3.4028234663852886e38)
            for i1_0 in T.serial(4):
                with T.block("argmax_in_thread"):
                    i = T.axis.spatial(128, i0)
                    k = T.axis.reduce(128, i1_0 * 32 + i1_1)
                    T.reads(idx[i, k], val[i, k])
                    T.writes(in_thread_argmax_v0[0], in_thread_argmax_v1[0])
                    v_argmax_v0: T.int32 = T.Select(
                        in_thread_argmax_v1[0] >= val[i, k], in_thread_argmax_v0[0], idx[i, k]
                    )
                    v_argmax_v1: T.float32 = T.Select(
                        in_thread_argmax_v1[0] >= val[i, k], in_thread_argmax_v1[0], val[i, k]
                    )
                    in_thread_argmax_v0[0] = v_argmax_v0
                    in_thread_argmax_v1[0] = v_argmax_v1
            with T.block("argmax_cross_thread"):
                T.reads(in_thread_argmax_v0[0], in_thread_argmax_v1[0])
                T.writes(cross_thread_argmax_v0[0], cross_thread_argmax_v1[0])
                T.attr(
                    T.comm_reducer(
                        lambda x0, x1, y0, y1: (
                            T.Select(x1 >= y1, x0, y0),
                            T.Select(x1 >= y1, x1, y1),
                        ),
                        [-1, T.float32(-3.4028234663852886e38)],
                    ),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(2),
                        in_thread_argmax_v0[0],
                        in_thread_argmax_v1[0],
                        True,
                        cross_thread_argmax_v0[0],
                        cross_thread_argmax_v1[0],
                        i1_1,
                        dtype="handle",
                    )
                )
            with T.block("argmax_write_back"):
                i = T.axis.spatial(128, i0)
                T.where(i1_1 == 0)
                T.reads(cross_thread_argmax_v0[0], cross_thread_argmax_v1[0])
                T.writes(argmax_v0[i], argmax_v1[i])
                argmax_v0[i] = cross_thread_argmax_v0[0]
                argmax_v1[i] = cross_thread_argmax_v1[0]
# Pass input: argmin variant where the init statements and the update stores
# appear in the opposite order (v1 before v0) — the reducer must still match.
@T.prim_func
def argmin_split_init_update_reordered(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmin_v0: T.Buffer((128,), "int32"),
    argmin_v1: T.Buffer((128,), "float32"),
) -> None:
    for i0, i1_0 in T.grid(128, 4):
        for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
            with T.block("argmin"):
                i = T.axis.spatial(128, i0)
                k = T.axis.reduce(128, i1_0 * 32 + i1_1)
                T.reads(idx[i, k], val[i, k])
                T.writes(argmin_v0[i], argmin_v1[i])
                with T.init():
                    argmin_v1[i] = T.float32(3.4028234663852886e38)
                    argmin_v0[i] = -1
                v_argmin_v0: T.int32 = T.Select(argmin_v1[i] <= val[i, k], argmin_v0[i], idx[i, k])
                v_argmin_v1: T.float32 = T.Select(
                    argmin_v1[i] <= val[i, k], argmin_v1[i], val[i, k]
                )
                argmin_v1[i] = v_argmin_v1
                argmin_v0[i] = v_argmin_v0
# Expected lowering of `argmin_split_init_update_reordered`: the two-value
# min-select reducer is recognized despite the reordered init/update stores.
@T.prim_func
def lowered_argmin_split_init_update_reordered(
    idx: T.Buffer((128, 128), "int32"),
    val: T.Buffer((128, 128), "float32"),
    argmin_v0: T.Buffer((128,), "int32"),
    argmin_v1: T.Buffer((128,), "float32"),
) -> None:
    cross_thread_argmin_v0 = T.alloc_buffer([1], dtype="int32", strides=[1], scope="local")
    cross_thread_argmin_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    in_thread_argmin_v0 = T.alloc_buffer([1], dtype="int32", strides=[1], scope="local")
    in_thread_argmin_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i0 in T.serial(128):
        for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
            with T.block("argmin_in_thread_init"):
                T.reads()
                T.writes(in_thread_argmin_v0[0], in_thread_argmin_v1[0])
                in_thread_argmin_v0[0] = -1
                in_thread_argmin_v1[0] = T.float32(3.4028234663852886e38)
            for i1_0 in T.serial(4):
                with T.block("argmin_in_thread"):
                    i = T.axis.spatial(128, i0)
                    k = T.axis.reduce(128, i1_0 * 32 + i1_1)
                    T.reads(idx[i, k], val[i, k])
                    T.writes(in_thread_argmin_v0[0], in_thread_argmin_v1[0])
                    v_argmin_v0: T.int32 = T.Select(
                        in_thread_argmin_v1[0] <= val[i, k], in_thread_argmin_v0[0], idx[i, k]
                    )
                    v_argmin_v1: T.float32 = T.Select(
                        in_thread_argmin_v1[0] <= val[i, k], in_thread_argmin_v1[0], val[i, k]
                    )
                    in_thread_argmin_v1[0] = v_argmin_v1
                    in_thread_argmin_v0[0] = v_argmin_v0
            with T.block("argmin_cross_thread"):
                T.reads(in_thread_argmin_v0[0], in_thread_argmin_v1[0])
                T.writes(cross_thread_argmin_v0[0], cross_thread_argmin_v1[0])
                T.attr(
                    T.comm_reducer(
                        lambda x0, x1, y0, y1: (
                            T.Select(x1 <= y1, x0, y0),
                            T.Select(x1 <= y1, x1, y1),
                        ),
                        [-1, T.float32(3.4028234663852886e38)],
                    ),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(2),
                        in_thread_argmin_v0[0],
                        in_thread_argmin_v1[0],
                        True,
                        cross_thread_argmin_v0[0],
                        cross_thread_argmin_v1[0],
                        i1_1,
                        dtype="handle",
                    )
                )
            with T.block("argmin_write_back"):
                i = T.axis.spatial(128, i0)
                T.where(i1_1 == 0)
                T.reads(cross_thread_argmin_v0[0], cross_thread_argmin_v1[0])
                T.writes(argmin_v0[i], argmin_v1[i])
                argmin_v0[i] = cross_thread_argmin_v0[0]
                argmin_v1[i] = cross_thread_argmin_v1[0]
@T.prim_func
def layer_norm_tuple_sum(
    data: T.Buffer((128, 768), "float32"),
    gamma: T.Buffer(768, "float32"),
    bias: T.Buffer(768, "float32"),
    T_layer_norm: T.Buffer((128, 768), "float32"),
) -> None:
    # Input workload for test_layer_norm_tuple_sum: the "data_red_temp" block is a
    # tuple reduction — v0 accumulates sum(data) and v1 accumulates sum(data*data)
    # in a single reduction block bound to threadIdx.x.
    data_red_temp_v0 = T.alloc_buffer([128], dtype="float32")
    data_red_temp_v1 = T.alloc_buffer([128], dtype="float32")
    for i0_fused in T.thread_binding(128, thread="blockIdx.x"):
        for i1_0 in T.serial(24):
            for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
                with T.block("data_red_temp"):
                    ax0 = T.axis.spatial(128, i0_fused)
                    k1 = T.axis.reduce(768, i1_0 * 32 + i1_1)
                    T.reads(data[ax0, k1])
                    T.writes(data_red_temp_v0[ax0], data_red_temp_v1[ax0])
                    with T.init():
                        data_red_temp_v0[ax0] = T.float32(0)
                        data_red_temp_v1[ax0] = T.float32(0)
                    v_data_red_temp_v0: T.float32 = data_red_temp_v0[ax0] + data[ax0, k1]
                    v_data_red_temp_v1: T.float32 = (
                        data_red_temp_v1[ax0] + data[ax0, k1] * data[ax0, k1]
                    )
                    data_red_temp_v0[ax0] = v_data_red_temp_v0
                    data_red_temp_v1[ax0] = v_data_red_temp_v1
    # Elementwise normalization pass consuming both reduction results.
    for i0_i1_fused_0 in T.thread_binding(384, thread="blockIdx.x"):
        for i0_i1_fused_1 in T.thread_binding(256, thread="threadIdx.x"):
            with T.block("T_layer_norm"):
                ax0 = T.axis.spatial(128, (i0_i1_fused_0 * 256 + i0_i1_fused_1) // 768)
                ax1 = T.axis.spatial(768, (i0_i1_fused_0 * 256 + i0_i1_fused_1) % 768)
                T.reads(
                    data[ax0, ax1],
                    data_red_temp_v0[ax0],
                    data_red_temp_v1[ax0],
                    gamma[ax1],
                    bias[ax1],
                )
                T.writes(T_layer_norm[ax0, ax1])
                T_layer_norm[ax0, ax1] = (
                    data[ax0, ax1] - data_red_temp_v0[ax0] * T.float32(0.0013020833333333333)
                ) * T.rsqrt(
                    data_red_temp_v1[ax0] * T.float32(0.0013020833333333333)
                    - data_red_temp_v0[ax0]
                    * T.float32(0.0013020833333333333)
                    * (data_red_temp_v0[ax0] * T.float32(0.0013020833333333333))
                    + T.float32(1.0000000000000001e-05),
                    dtype="float32",
                ) * gamma[
                    ax1
                ] + bias[
                    ax1
                ]
@T.prim_func
def lowered_layer_norm_tuple_sum(
    data: T.Buffer((128, 768), "float32"),
    gamma: T.Buffer(768, "float32"),
    bias: T.Buffer(768, "float32"),
    T_layer_norm: T.Buffer((128, 768), "float32"),
) -> None:
    # with T.block("root")
    # Expected lowering of `layer_norm_tuple_sum`: per-thread partial tuple sums
    # ("*_in_thread"), a two-value tvm_thread_allreduce across threadIdx.x
    # ("*_cross_thread"), then a write-back performed by thread 0 only.
    data_red_temp_v0 = T.alloc_buffer([128], dtype="float32")
    data_red_temp_v1 = T.alloc_buffer([128], dtype="float32")
    cross_thread_data_red_temp_v0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    cross_thread_data_red_temp_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    in_thread_data_red_temp_v0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    in_thread_data_red_temp_v1 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i0_fused in T.thread_binding(128, thread="blockIdx.x"):
        for i1_1 in T.thread_binding(32, thread="threadIdx.x"):
            with T.block("data_red_temp_in_thread_init"):
                T.reads()
                T.writes(in_thread_data_red_temp_v0[0], in_thread_data_red_temp_v1[0])
                in_thread_data_red_temp_v0[0] = T.float32(0)
                in_thread_data_red_temp_v1[0] = T.float32(0)
            for i1_0 in T.serial(24):
                with T.block("data_red_temp_in_thread"):
                    ax0 = T.axis.spatial(128, i0_fused)
                    k1 = T.axis.reduce(768, i1_0 * 32 + i1_1)
                    T.reads(data[ax0, k1])
                    T.writes(in_thread_data_red_temp_v0[0], in_thread_data_red_temp_v1[0])
                    v_data_red_temp_v0: T.float32 = in_thread_data_red_temp_v0[0] + data[ax0, k1]
                    v_data_red_temp_v1: T.float32 = (
                        in_thread_data_red_temp_v1[0] + data[ax0, k1] * data[ax0, k1]
                    )
                    in_thread_data_red_temp_v0[0] = v_data_red_temp_v0
                    in_thread_data_red_temp_v1[0] = v_data_red_temp_v1
            with T.block("data_red_temp_cross_thread"):
                T.reads(in_thread_data_red_temp_v0[0], in_thread_data_red_temp_v1[0])
                T.writes(cross_thread_data_red_temp_v0[0], cross_thread_data_red_temp_v1[0])
                T.attr(
                    T.comm_reducer(
                        lambda x0, x1, y0, y1: (x0 + y0, x1 + y1), [T.float32(0), T.float32(0)]
                    ),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(2),
                        in_thread_data_red_temp_v0[0],
                        in_thread_data_red_temp_v1[0],
                        True,
                        cross_thread_data_red_temp_v0[0],
                        cross_thread_data_red_temp_v1[0],
                        i1_1,
                        dtype="handle",
                    )
                )
            with T.block("data_red_temp_write_back"):
                ax0 = T.axis.spatial(128, i0_fused)
                T.where(i1_1 == 0)
                T.reads(cross_thread_data_red_temp_v0[0], cross_thread_data_red_temp_v1[0])
                T.writes(data_red_temp_v0[ax0], data_red_temp_v1[ax0])
                data_red_temp_v0[ax0] = cross_thread_data_red_temp_v0[0]
                data_red_temp_v1[ax0] = cross_thread_data_red_temp_v1[0]
    # The elementwise epilogue is unchanged by the lowering.
    for i0_i1_fused_0 in T.thread_binding(384, thread="blockIdx.x"):
        for i0_i1_fused_1 in T.thread_binding(256, thread="threadIdx.x"):
            with T.block("T_layer_norm"):
                ax0 = T.axis.spatial(128, (i0_i1_fused_0 * 256 + i0_i1_fused_1) // 768)
                ax1 = T.axis.spatial(768, (i0_i1_fused_0 * 256 + i0_i1_fused_1) % 768)
                T.reads(
                    data[ax0, ax1],
                    data_red_temp_v0[ax0],
                    data_red_temp_v1[ax0],
                    gamma[ax1],
                    bias[ax1],
                )
                T.writes(T_layer_norm[ax0, ax1])
                T_layer_norm[ax0, ax1] = (
                    data[ax0, ax1] - data_red_temp_v0[ax0] * T.float32(0.0013020833333333333)
                ) * T.rsqrt(
                    data_red_temp_v1[ax0] * T.float32(0.0013020833333333333)
                    - data_red_temp_v0[ax0]
                    * T.float32(0.0013020833333333333)
                    * (data_red_temp_v0[ax0] * T.float32(0.0013020833333333333))
                    + T.float32(1.0000000000000001e-05),
                    dtype="float32",
                ) * gamma[
                    ax1
                ] + bias[
                    ax1
                ]
@T.prim_func
def thread_broadcast_1(A: T.Buffer((256, 256), "float32"), B: T.Buffer((256,), "float32")):
    # Input workload for test_thread_broadcast_rewrite_1: the "add" block sits
    # outside the threadIdx.x loop and consumes the local reduction result, so
    # the lowering must rewrite it (see lowered_thread_broadcast_1).
    temp_local = T.alloc_buffer((256,), scope="local")
    for i in T.thread_binding(256, thread="blockIdx.x"):
        for k in T.thread_binding(256, thread="threadIdx.x"):
            with T.block("sum"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads(A[vi, vk])
                T.writes(temp_local[vi])
                with T.init():
                    temp_local[vi] = T.float32(0)
                temp_local[vi] = temp_local[vi] + A[vi, vk]
        with T.block("add"):
            vi = T.axis.spatial(256, i)
            T.reads(temp_local[vi])
            T.writes(B[vi])
            B[vi] = temp_local[vi] + T.float32(1)
@T.prim_func
def lowered_thread_broadcast_1(A: T.Buffer((256, 256), "float32"), B: T.Buffer((256,), "float32")):
    # Expected lowering of `thread_broadcast_1`: the reduction becomes a
    # tvm_thread_allreduce, and the trailing "add" block is wrapped in a fresh
    # threadIdx.x loop guarded by T.where(tx == 0).
    temp_local = T.alloc_buffer((256,), scope="local")
    cross_thread_temp_local = T.alloc_buffer((1,), strides=(1,), scope="local")
    for i in T.thread_binding(256, thread="blockIdx.x"):
        for k in T.thread_binding(256, thread="threadIdx.x"):
            with T.block("sum_cross_thread"):
                vi, vk = T.axis.remap("SR", [i, k])
                T.reads(A[vi, vk])
                T.writes(cross_thread_temp_local[0])
                T.attr(
                    T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]),
                    "reduce_scope",
                    T.reinterpret("handle", T.uint64(0)),
                )
                T.tvm_thread_allreduce(
                    T.uint32(1), A[vi, vk], T.bool(True), cross_thread_temp_local[0], k
                )
            with T.block("sum_write_back"):
                vi = T.axis.spatial(256, i)
                T.where(k == 0)
                T.reads(cross_thread_temp_local[0])
                T.writes(temp_local[vi])
                temp_local[vi] = cross_thread_temp_local[0]
        for tx in T.thread_binding(256, thread="threadIdx.x"):
            with T.block("add"):
                vi = T.axis.spatial(256, i)
                T.where(tx == 0)
                T.reads(temp_local[vi])
                T.writes(B[vi])
                B[vi] = temp_local[vi] + T.float32(1)
# fmt: off
@T.prim_func
def thread_broadcast_2(lv1605: T.Buffer((T.int64(1), T.int64(32), T.int64(1), T.int64(128)), "float16"), p_lv1606: T.handle, p_lv1582: T.handle, p_output0: T.handle):
    # Input workload for test_thread_broadcast_rewrite_2: an rfactor-style matmul
    # reduction with a dynamic extent `n`, followed by a "compute" block outside
    # the threadIdx.x loop that reads the cross-thread reduction result.
    n = T.int64()
    lv1606 = T.match_buffer(p_lv1606, (T.int64(1), T.int64(32), n, T.int64(128)), "float16")
    lv1582 = T.match_buffer(p_lv1582, (T.int64(1), T.int64(1), T.int64(1), n), "float16")
    var_compute_intermediate = T.match_buffer(p_output0, (T.int64(1), T.int64(32), T.int64(1), n))
    var_NT_matmul_intermediate_local = T.alloc_buffer((T.int64(1), T.int64(32), T.int64(1), n), "float16", scope="local")
    var_NT_matmul_intermediate_rf_local = T.alloc_buffer((T.int64(256), T.int64(1), T.int64(32), T.int64(1), n), "float16", scope="local")
    for ax0_ax1_fused in T.thread_binding(n * T.int64(32), thread="blockIdx.x"):
        for ax2_fused_1 in T.thread_binding(T.int64(256), thread="threadIdx.x"):
            with T.block("NT_matmul_rf_init"):
                vax2_fused_1 = T.axis.spatial(T.int64(256), ax2_fused_1)
                v0 = T.axis.spatial(T.int64(32), ax0_ax1_fused // n)
                v1 = T.axis.spatial(n, ax0_ax1_fused % n)
                T.reads()
                T.writes(var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1])
                var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1] = T.float16(0)
            for ax2_fused_0 in range(T.int64(1)):
                with T.block("NT_matmul_rf_update"):
                    vax2_fused_1 = T.axis.spatial(T.int64(256), ax2_fused_1)
                    v0 = T.axis.spatial(T.int64(32), ax0_ax1_fused // n)
                    v1 = T.axis.spatial(n, ax0_ax1_fused % n)
                    vax2_fused_0 = T.axis.reduce(T.int64(1), ax2_fused_0)
                    T.where(ax2_fused_0 * T.int64(256) + ax2_fused_1 < T.int64(128))
                    T.reads(var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1], lv1605[T.int64(0), v0, T.int64(0), vax2_fused_0 * T.int64(256) + vax2_fused_1], lv1606[T.int64(0), v0, v1, vax2_fused_0 * T.int64(256) + vax2_fused_1])
                    T.writes(var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1])
                    var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1] = var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1] + lv1605[T.int64(0), v0, T.int64(0), vax2_fused_0 * T.int64(256) + vax2_fused_1] * lv1606[T.int64(0), v0, v1, vax2_fused_0 * T.int64(256) + vax2_fused_1]
        for ax1_ax2_fused in range(T.int64(1)):
            for ax0_fused in T.thread_binding(T.int64(256), thread="threadIdx.x"):
                with T.block("NT_matmul"):
                    vax2_fused_1 = T.axis.reduce(T.int64(256), ax0_fused)
                    v0 = T.axis.spatial(T.int64(32), ax0_ax1_fused // n)
                    v1 = T.axis.spatial(n, ax0_ax1_fused % n)
                    T.where(T.int64(0) <= ax0_ax1_fused // n and ax0_ax1_fused // n < T.int64(32) and T.int64(0) <= ax0_ax1_fused % n and ax0_ax1_fused % n < n)
                    T.reads(var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1])
                    T.writes(var_NT_matmul_intermediate_local[T.int64(0), v0, T.int64(0), v1])
                    with T.init():
                        var_NT_matmul_intermediate_local[T.int64(0), v0, T.int64(0), v1] = T.float16(0)
                    var_NT_matmul_intermediate_local[T.int64(0), v0, T.int64(0), v1] = var_NT_matmul_intermediate_local[T.int64(0), v0, T.int64(0), v1] + var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1]
        with T.block("compute"):
            v0 = T.axis.spatial(T.int64(32), ax0_ax1_fused // n)
            v1 = T.axis.spatial(n, ax0_ax1_fused % n)
            T.where(T.int64(0) <= ax0_ax1_fused // n and ax0_ax1_fused // n < T.int64(32) and T.int64(0) <= ax0_ax1_fused % n and ax0_ax1_fused % n < n)
            T.reads(var_NT_matmul_intermediate_local[T.int64(0), v0, T.int64(0), v1], lv1582[T.int64(0), T.int64(0), T.int64(0), v1])
            T.writes(var_compute_intermediate[T.int64(0), v0, T.int64(0), v1])
            var_compute_intermediate[T.int64(0), v0, T.int64(0), v1] = T.Cast("float32", T.min(T.max(var_NT_matmul_intermediate_local[T.int64(0), v0, T.int64(0), v1] * T.float16(0.088397790055248615), T.float16(-65504)), lv1582[T.int64(0), T.int64(0), T.int64(0), v1]))
@T.prim_func
def lowered_thread_broadcast_2(lv1605: T.Buffer((T.int64(1), T.int64(32), T.int64(1), T.int64(128)), "float16"), p_lv1606: T.handle, p_lv1582: T.handle, p_output0: T.handle):
    # Expected lowering of `thread_broadcast_2`: the cross-thread reduction is
    # expressed via tvm_thread_allreduce with in-thread/cross-thread staging
    # buffers, and the trailing "compute" block is wrapped in a new threadIdx.x
    # loop whose T.where predicate keeps only thread 0.
    n = T.int64()
    lv1606 = T.match_buffer(p_lv1606, (T.int64(1), T.int64(32), n, T.int64(128)), "float16")
    lv1582 = T.match_buffer(p_lv1582, (T.int64(1), T.int64(1), T.int64(1), n), "float16")
    var_compute_intermediate = T.match_buffer(p_output0, (T.int64(1), T.int64(32), T.int64(1), n))
    var_NT_matmul_intermediate_local = T.alloc_buffer((T.int64(1), T.int64(32), T.int64(1), n), "float16", scope="local")
    var_NT_matmul_intermediate_rf_local = T.alloc_buffer((T.int64(256), T.int64(1), T.int64(32), T.int64(1), n), "float16", scope="local")
    cross_thread_var_NT_matmul_intermediate_local = T.alloc_buffer((1,), "float16", strides=(1,), scope="local")
    in_thread_var_NT_matmul_intermediate_local = T.alloc_buffer((1,), "float16", strides=(1,), scope="local")
    for ax0_ax1_fused in T.thread_binding(n * T.int64(32), thread="blockIdx.x"):
        for ax2_fused_1 in T.thread_binding(T.int64(256), thread="threadIdx.x"):
            with T.block("NT_matmul_rf_init"):
                vax2_fused_1 = T.axis.spatial(T.int64(256), ax2_fused_1)
                v0 = T.axis.spatial(T.int64(32), ax0_ax1_fused // n)
                v1 = T.axis.spatial(n, ax0_ax1_fused % n)
                T.reads()
                T.writes(var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1])
                var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1] = T.float16(0)
            for ax2_fused_0 in range(T.int64(1)):
                with T.block("NT_matmul_rf_update"):
                    vax2_fused_1 = T.axis.spatial(T.int64(256), ax2_fused_1)
                    v0 = T.axis.spatial(T.int64(32), ax0_ax1_fused // n)
                    v1 = T.axis.spatial(n, ax0_ax1_fused % n)
                    vax2_fused_0 = T.axis.reduce(T.int64(1), ax2_fused_0)
                    T.where(ax2_fused_0 * T.int64(256) + ax2_fused_1 < T.int64(128))
                    T.reads(var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1], lv1605[T.int64(0), v0, T.int64(0), vax2_fused_0 * T.int64(256) + vax2_fused_1], lv1606[T.int64(0), v0, v1, vax2_fused_0 * T.int64(256) + vax2_fused_1])
                    T.writes(var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1])
                    var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1] = var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1] + lv1605[T.int64(0), v0, T.int64(0), vax2_fused_0 * T.int64(256) + vax2_fused_1] * lv1606[T.int64(0), v0, v1, vax2_fused_0 * T.int64(256) + vax2_fused_1]
        for ax1_ax2_fused in range(T.int64(1)):
            for ax0_fused in T.thread_binding(T.int64(256), thread="threadIdx.x"):
                with T.block("NT_matmul_in_thread_init"):
                    T.reads()
                    T.writes(in_thread_var_NT_matmul_intermediate_local[0])
                    in_thread_var_NT_matmul_intermediate_local[0] = T.float16(0)
                with T.block("NT_matmul_in_thread"):
                    vax2_fused_1 = T.axis.reduce(T.int64(256), ax0_fused)
                    v0 = T.axis.spatial(T.int64(32), ax0_ax1_fused // n)
                    v1 = T.axis.spatial(n, ax0_ax1_fused % n)
                    T.where(T.int64(0) <= ax0_ax1_fused // n and ax0_ax1_fused // n < T.int64(32) and T.int64(0) <= ax0_ax1_fused % n and ax0_ax1_fused % n < n)
                    T.reads(var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1])
                    T.writes(in_thread_var_NT_matmul_intermediate_local[0])
                    in_thread_var_NT_matmul_intermediate_local[0] = in_thread_var_NT_matmul_intermediate_local[0] + var_NT_matmul_intermediate_rf_local[vax2_fused_1, T.int64(0), v0, T.int64(0), v1]
                with T.block("NT_matmul_cross_thread"):
                    T.reads(in_thread_var_NT_matmul_intermediate_local[0])
                    T.writes(cross_thread_var_NT_matmul_intermediate_local[0])
                    T.attr(T.comm_reducer(lambda x0, y0: x0 + y0, [T.float16(0)]), "reduce_scope", T.reinterpret("handle", T.uint64(0)))
                    T.tvm_thread_allreduce(T.uint32(1), in_thread_var_NT_matmul_intermediate_local[0], T.bool(True), cross_thread_var_NT_matmul_intermediate_local[0], ax0_fused)
                with T.block("NT_matmul_write_back"):
                    v0 = T.axis.spatial(T.int64(32), ax0_ax1_fused // n)
                    v1 = T.axis.spatial(n, ax0_ax1_fused % n)
                    T.where(ax0_fused == T.int64(0))
                    T.reads(cross_thread_var_NT_matmul_intermediate_local[0])
                    T.writes(var_NT_matmul_intermediate_local[T.int64(0), v0, T.int64(0), v1])
                    var_NT_matmul_intermediate_local[T.int64(0), v0, T.int64(0), v1] = cross_thread_var_NT_matmul_intermediate_local[0]
        for tx in T.thread_binding(T.int64(256), thread="threadIdx.x"):
            with T.block("compute"):
                v0 = T.axis.spatial(T.int64(32), ax0_ax1_fused // n)
                v1 = T.axis.spatial(n, ax0_ax1_fused % n)
                T.where(tx == T.int64(0) and (T.int64(0) <= ax0_ax1_fused // n and ax0_ax1_fused // n < T.int64(32) and T.int64(0) <= ax0_ax1_fused % n and ax0_ax1_fused % n < n))
                T.reads(var_NT_matmul_intermediate_local[T.int64(0), v0, T.int64(0), v1], lv1582[T.int64(0), T.int64(0), T.int64(0), v1])
                T.writes(var_compute_intermediate[T.int64(0), v0, T.int64(0), v1])
                var_compute_intermediate[T.int64(0), v0, T.int64(0), v1] = T.Cast("float32", T.min(T.max(var_NT_matmul_intermediate_local[T.int64(0), v0, T.int64(0), v1] * T.float16(0.088397790055248615), T.float16(-65504)), lv1582[T.int64(0), T.int64(0), T.int64(0), v1]))
# fmt: on
# pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
def test_loop_split():
    """Check lowering of `loop_split` against the expected `lowered_loop_split`."""
    _check(loop_split, lowered_loop_split)
def test_no_normal_reduction():
    """Check lowering of `no_normal_reduction` against its expected form."""
    _check(no_normal_reduction, lowered_no_normal_reduction)
def test_two_bound_loops():
    """Check lowering of `two_bound_loops` against its expected form."""
    _check(two_bound_loops, lowered_two_bound_loops)
def test_multiple_blocks_under_reduction_loop():
    """Check lowering of `multiple_blocks_under_reduction_loop` against its expected form."""
    _check(multiple_blocks_under_reduction_loop, lowered_multiple_blocks_under_reduction_loop)
def test_with_block_predicate():
    """Check lowering of `with_block_predicate` against its expected form."""
    _check(with_block_predicate, lowered_with_block_predicate)
def test_single_reduction_loop_with_block_predicate():
    """Check lowering of `single_reduction_loop_with_block_predicate` against its expected form."""
    _check(
        single_reduction_loop_with_block_predicate,
        lowered_single_reduction_loop_with_block_predicate,
    )
def test_single_reduction_loop_with_tensorize():
    """The tensorized workload is passed as its own expected result, i.e. the
    pass is expected to leave it unchanged."""
    _check(
        single_reduction_loop_with_tensorize,
        single_reduction_loop_with_tensorize,
    )
def test_nested_reduction_loop_with_inner_match_buffers():
    """The workload is passed as its own expected result, i.e. the pass is
    expected to leave it unchanged."""
    _check(
        nested_reduction_loop_with_inner_match_buffers,
        nested_reduction_loop_with_inner_match_buffers,
    )
def test_reducer_max():
    """Check lowering of `reducer_max` against its expected form."""
    _check(reducer_max, lowered_reducer_max)
def test_zero_rank_buffer():
    """Check lowering of `zero_rank_buffer` against its expected form."""
    _check(zero_rank_buffer, lowered_zero_rank_buffer)
def test_multiple_bufferstore():
    """`multiple_bufferstore` is expected to be rejected (via `_check_fail`)."""
    _check_fail(multiple_bufferstore)
def test_reduction_block_not_deepest():
    """`reduction_loop_not_deepest` is expected to be rejected (via `_check_fail`)."""
    # NOTE(review): the test name says "block" while the fixture is named
    # `reduction_loop_not_deepest` — intentional? Consider aligning the names.
    _check_fail(reduction_loop_not_deepest)
def test_reduction_loop_bound_to_blockidx():
    """`reduction_loop_bound_to_blockidx` is expected to be rejected (via `_check_fail`)."""
    _check_fail(reduction_loop_bound_to_blockidx)
def test_different_access_indices():
    """`different_access_indices` is expected to be rejected (via `_check_fail`)."""
    _check_fail(different_access_indices)
def test_invalid_reducer():
    """`invalid_reducer` is expected to be rejected (via `_check_fail`)."""
    _check_fail(invalid_reducer)
def test_softmax():
    """Check lowering of `softmax` against its expected form."""
    _check(softmax, lowered_softmax)
def test_argmax_split():
    """Check lowering of `argmax_split` against its expected form."""
    _check(argmax_split, lowered_argmax_split)
def test_argmin_split_init_update_reordered():
    """Check lowering of `argmin_split_init_update_reordered` against its expected form."""
    _check(argmin_split_init_update_reordered, lowered_argmin_split_init_update_reordered)
def test_thread_broadcast_rewrite_1():
    """Check lowering of `thread_broadcast_1` against `lowered_thread_broadcast_1`."""
    _check(thread_broadcast_1, lowered_thread_broadcast_1)
def test_thread_broadcast_rewrite_2():
    """Check lowering of `thread_broadcast_2` against `lowered_thread_broadcast_2`."""
    _check(thread_broadcast_2, lowered_thread_broadcast_2)
def test_lower_te():
    """A module built through the legacy TE schedule path must pass through
    LowerCrossThreadReduction unchanged."""
    placeholder = te.placeholder((32, 2, 2))
    red_k1 = te.reduce_axis((0, 2), "k1")
    red_k2 = te.reduce_axis((0, 2), "k2")
    result = te.compute(
        (32,), lambda i: te.sum(placeholder[i, red_k1, red_k2], axis=[red_k1, red_k2])
    )
    sch = te.create_schedule(result.op)
    # Bind both reduction axes to thread axes, as a cross-thread reduction would.
    sch[result].bind(red_k1, te.thread_axis("threadIdx.x"))
    sch[result].bind(red_k2, te.thread_axis("threadIdx.y"))
    before = tvm.driver.build_module.schedule_to_module(sch, [placeholder, result])
    after = tvm.tir.transform.LowerCrossThreadReduction()(before)
    # LowerCrossThreadReduction should do nothing on TE
    tvm.ir.assert_structural_equal(after, before)
def test_layer_norm_tuple_sum():
    """Check lowering of the tuple-sum layer norm against its expected form."""
    _check(layer_norm_tuple_sum, lowered_layer_norm_tuple_sum)
if __name__ == "__main__":
tvm.testing.main()
| 73,342 | 46.135604 | 339 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_remove_weight_layout_rewrite_block.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import tvm
from tvm.ir.module import IRModule
from tvm.script import tir as T
from tvm.tir.function import PrimFunc
def _check(before, expect):
    """Apply RemoveWeightLayoutRewriteBlock to `before` and compare with `expect`.

    Either argument may be a bare PrimFunc (wrapped into an IRModule under the
    name "main") or a full IRModule.
    """

    def _as_module(func_or_mod):
        # Normalize a bare PrimFunc into a single-function module.
        if isinstance(func_or_mod, PrimFunc):
            return IRModule({"main": func_or_mod})
        return func_or_mod

    transformed = tvm.tir.transform.RemoveWeightLayoutRewriteBlock()(_as_module(before))
    tvm.ir.assert_structural_equal(transformed, _as_module(expect))
def test_matmul():
    """RemoveWeightLayoutRewriteBlock on a matmul whose weight B is layout-free.

    `before` stages B into a rewritten-layout buffer B_ inside a block tagged
    "meta_schedule.layout_rewrite_preproc"; `after` expects B itself to carry
    the rewritten (16, 4, 4) layout and the preproc block to be emptied out.
    """
    # Input: B is (16, 16) and explicitly repacked into B_ before the matmul.
    @T.prim_func
    def before(
        A: T.Buffer((16, 16), "float32"),
        B: T.Buffer((16, 16), "float32"),
        C: T.Buffer((16, 16), "float32"),
    ) -> None:
        T.func_attr({"layout_free_buffers": [1]})
        B_ = T.alloc_buffer([16, 4, 4], dtype="float32")
        for i0_o, i1_o in T.grid(16, 16):
            with T.block("layout_rewrite"):
                i0, i1 = T.axis.remap("SS", [i0_o, i1_o])
                T.reads(B[i0, i1])
                T.writes(B_[i1, i0 // 4, i0 % 4])
                T.block_attr({"meta_schedule.layout_rewrite_preproc": True})
                B_[i1, i0 // 4, i0 % 4] = B[i0, i1]
        for i0, j, k0, i1, k1 in T.grid(4, 16, 4, 4, 4):
            with T.block("matmul"):
                vi = T.axis.spatial(16, i0 * 4 + i1)
                vj = T.axis.spatial(16, j)
                vk = T.axis.reduce(16, k0 * 4 + k1)
                T.reads(A[vi, vk], B_[vj, vk // 4, vk % 4])
                T.writes(C[vi, vj])
                with T.init():
                    C[vi, vj] = T.float32(0)
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B_[vj, vk // 4, vk % 4]
    # Expected: B's parameter shape is already (16, 4, 4); the preproc block
    # reduces to a no-op T.evaluate(0).
    @T.prim_func
    def after(
        A: T.Buffer((16, 16), "float32"),
        B: T.Buffer((16, 4, 4), "float32"),
        C: T.Buffer((16, 16), "float32"),
    ) -> None:
        T.func_attr({"layout_free_buffers": [1]})
        for i0_o, i1_o in T.grid(16, 16):
            with T.block("layout_rewrite"):
                i0, i1 = T.axis.remap("SS", [i0_o, i1_o])
                T.reads()
                T.writes()
                T.block_attr({"meta_schedule.layout_rewrite_preproc": True})
                T.evaluate(0)
        for i0, j, k0, i1, k1 in T.grid(4, 16, 4, 4, 4):
            with T.block("matmul"):
                vi = T.axis.spatial(16, i0 * 4 + i1)
                vj = T.axis.spatial(16, j)
                vk = T.axis.reduce(16, k0 * 4 + k1)
                T.reads(A[vi, vk], B[vj, vk // 4, vk % 4])
                T.writes(C[vi, vj])
                with T.init():
                    C[vi, vj] = T.float32(0)
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk // 4, vk % 4]
    _check(before, after)
if __name__ == "__main__":
test_matmul()
| 3,463 | 36.652174 | 76 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_schedule_cuda_layout_transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import itertools
import random
import tempfile
from typing import Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import meta_schedule, relay
from tvm.meta_schedule.schedule.cuda.layout_transform import (
cuda_layout_transform_schedule_rule,
)
from tvm.relay.op import OpPattern
from tvm.script import ir as I
from tvm.script import tir as T
from tvm.tir.schedule import BlockRV
# Small gpu parameters which should work for nearly every (modern-ish) gpu.
# Deliberately conservative so schedules generated under this target remain
# valid on low-end devices.
TARGET = tvm.target.Target(
    "cuda -max_threads_per_block=32 -max_num_threads=128 -thread_warp_size=32 -max_shared_memory_per_block=8192 -registers_per_block=1024"
)
class PatchCustomLayoutTransformScheduleRule:
    """Context manager pinning the cuda layout_transform schedule rule to fixed tiles.

    While active, the registered global rule is replaced by one that only
    proposes `tile_sizes`; an empty list yields just the default (non-tiled)
    schedule. The previously registered rule is restored on exit.
    """

    FUNC_NAME = "meta_schedule.cuda.layout_transform"

    def __init__(self, tile_sizes: List[int]) -> None:
        self.tile_sizes = tile_sizes
        self.old_func = None

    def __enter__(self, *args, **kwargs) -> None:
        # Remember the currently-registered rule so __exit__ can restore it.
        self.old_func = tvm.get_global_func(self.FUNC_NAME)
        fixed_tile_sizes = self.tile_sizes

        def patched_rule(
            sch: tvm.tir.Schedule,
            block: BlockRV,
            tile_sizes: Optional[List[int]] = fixed_tile_sizes,
        ) -> List[tvm.tir.Schedule]:
            return cuda_layout_transform_schedule_rule(sch, block, tile_sizes)

        tvm.register_func(self.FUNC_NAME, patched_rule, override=True)

    def __exit__(self, *args, **kwargs) -> None:
        tvm.register_func(self.FUNC_NAME, self.old_func, override=True)
# Create unary functions which apply ops with compatible fusion levels to layout transform
def get_random_axis(data: relay.Expr):
    """Return a uniformly random valid axis index (0..rank-1) for `data`."""
    ndim = len(relay.transform.InferTypeLocal(data).shape)
    return random.randint(0, ndim - 1)
def apply_elemwise_clip(data: relay.Expr, min=0, max=10):
    """Apply `clip`, an ELEMWISE-pattern op, to `data`."""
    pattern = relay.op.get("clip").get_attr("TOpPattern")
    assert pattern == OpPattern.ELEMWISE
    return relay.clip(data, min, max)
def apply_broadcast_add(data: relay.Expr, val_to_add=5):
    """Apply a BROADCAST-pattern scalar addition to `data`."""
    pattern = relay.op.get("add").get_attr("TOpPattern")
    assert pattern == OpPattern.BROADCAST
    dtype = relay.transform.InferTypeLocal(data).dtype
    return relay.add(data, relay.const(val_to_add, dtype=dtype))
def apply_injective_concatenate(data: relay.Expr, axis=None):
    """Apply an INJECTIVE-pattern self-concatenation along `axis` (random if None)."""
    pattern = relay.op.get("concatenate").get_attr("TOpPattern")
    assert pattern == OpPattern.INJECTIVE
    chosen_axis = get_random_axis(data) if axis is None else axis
    return relay.concatenate([data, data], chosen_axis)
def apply_comm_reduce_max(data: relay.Expr, axis=None):
    """Apply a COMM_REDUCE-pattern max along `axis` (random if None).

    The reduction is added back onto `data` so the result keeps its shape.
    """
    pattern = relay.op.get("max").get_attr("TOpPattern")
    assert pattern == OpPattern.COMM_REDUCE
    reduce_axis = get_random_axis(data) if axis is None else axis
    return relay.add(data, relay.max(data, reduce_axis, keepdims=True))
# Maps a relay TOpPattern fusion level to a unary builder that applies an op
# registered with that pattern level (see the apply_* helpers above).
pattern_level_to_op = {
    OpPattern.ELEMWISE: apply_elemwise_clip,
    OpPattern.BROADCAST: apply_broadcast_add,
    OpPattern.INJECTIVE: apply_injective_concatenate,
    OpPattern.COMM_REDUCE: apply_comm_reduce_max,
}
def apply_layout_transform(data: relay.Expr, src_layout: str, dst_layout: str):
    """Apply relay.layout_transform (an INJECTIVE-pattern op) from src to dst layout."""
    pattern = relay.op.get("layout_transform").get_attr("TOpPattern")
    assert pattern == OpPattern.INJECTIVE
    return relay.layout_transform(data, src_layout, dst_layout)
def create_relay_module(
    input_shape: List[int], dtype: str, ops: List[Union[OpPattern, Tuple[str, str]]]
) -> tvm.IRModule:
    """Build a relay module by chaining the given ops onto a single input var.

    Each entry of `ops` is either an OpPattern level, in which case the matching
    unary builder from `pattern_level_to_op` is applied, or a
    (src_layout, dst_layout) tuple, in which case a layout_transform is applied.
    """
    expr = relay.var("input", shape=input_shape, dtype=dtype)
    for op_info in ops:
        # Keep type info up to date so the builders can inspect shapes/dtypes.
        relay.transform.InferTypeLocal(expr)
        if not isinstance(op_info, tuple):
            expr = pattern_level_to_op[op_info](expr)
        else:
            # layout transform case: op_info == (src_layout, dst_layout)
            expr = apply_layout_transform(expr, *op_info)
    relay.transform.InferTypeLocal(expr)
    return tvm.IRModule.from_expr(expr)
def extract_layout_transform_task(
    mod: tvm.IRModule, target: tvm.target.Target
) -> meta_schedule.ExtractedTask:
    """Return the fused layout_transform task extracted from the relay module."""
    extracted_tasks = meta_schedule.relay_integration.extract_tasks(
        mod,
        target,
        {},
        pass_config={"relay.backend.use_meta_schedule": True},
    )
    # Pick the first task whose name mentions the layout transform.
    task_of_interest = next(
        (task for task in extracted_tasks if "layout_transform" in task.task_name), None
    )
    assert task_of_interest is not None
    return task_of_interest
def run_primfunc(
    primfunc_mod: tvm.IRModule, target: tvm.target.Target, input_tensors: List[tvm.nd.NDArray]
):
    """Build `primfunc_mod` for `target` and invoke it once on `input_tensors`."""
    pass_cfg = {"relay.backend.use_meta_schedule": True}
    with tvm.transform.PassContext(config=pass_cfg, opt_level=3):
        built = tvm.build(primfunc_mod, target=target)
    # Runs in place: outputs are written into the corresponding tensors.
    built(*input_tensors)
@pytest.mark.skip("Integration test")
class TestRandomRelayE2ECorrectness:
    """Tests E2E correctness of layout transform schedule.
    Randomly generates relay mod with layout transform and fusable ops. Checks the
    layout transform task for correctness by comparing against its unscheduled result.
    """
    @staticmethod
    def generate_test_case(
        input_shape: List[int],
        implicit_reshape_info: Optional[Tuple[int, int]],
        dtype: str,
        num_additional_ops: int,
    ) -> tvm.IRModule:
        """Creates a random layout transform module with up to num_additional_ops fused."""
        # Create layout transforms
        rank = len(input_shape)
        # src_layout is a string like ABCDEFG... with length as rank
        src_layout = "".join([chr(i + ord("A")) for i in range(rank)])
        # dst_layout is randomly shuffled src_layout, potentially after adding split axis
        dst_layout = list(src_layout)
        if implicit_reshape_info:
            # e.g. splitting axis 0 by 2 turns "AB" into a layout containing "A" and "2a".
            axis_to_reshape, size_new_dim = implicit_reshape_info
            cur_dim = dst_layout[axis_to_reshape]
            dst_layout[axis_to_reshape] = f"{cur_dim}"
            dst_layout.append(f"{size_new_dim}{cur_dim.lower()}")
        random.shuffle(dst_layout)
        # Reshuffle until dst differs from src, so a real transform is generated.
        while "".join(dst_layout) == src_layout:
            random.shuffle(dst_layout)
        dst_layout = "".join(dst_layout)
        # Randomly sample a list of potentially fusable ops to layout transform
        op_order = random.choices(
            list(pattern_level_to_op.keys()),
            k=num_additional_ops,
        )
        # Append tuple, representing layout transform from src --> dst layout
        op_order.append((src_layout, dst_layout))
        random.shuffle(op_order)
        return create_relay_module(input_shape, dtype, op_order)
    @staticmethod
    def get_primfunc(extracted_task: meta_schedule.ExtractedTask, tile_size: Optional[int]):
        """Generate the scheduled primfunc for the task under the given tile size.

        `tile_size` of None patches in an empty tile list, which per
        PatchCustomLayoutTransformScheduleRule yields the default (non-tiled)
        schedule.
        """
        with PatchCustomLayoutTransformScheduleRule(
            tile_sizes=[] if tile_size is None else [tile_size]
        ):
            with tempfile.TemporaryDirectory() as tmpdir:
                (
                    tune_contexts,
                    _,
                ) = meta_schedule.relay_integration.extracted_tasks_to_tune_contexts(
                    [extracted_task],
                    tmpdir,
                )
                tune_contexts[0].pre_tuning(1)
                candidates = tune_contexts[0].generate_measure_candidates()
                primfunc = candidates[0].sch.mod["main"]
                return primfunc
    @staticmethod
    def verify_layout_transform_task(
        extracted_task: meta_schedule.ExtractedTask,
        target: tvm.target.Target,
        tile_sizes: List[int],
    ):
        """Given a layout transform task, tests the given tile_sizes and verifies output matches."""
        device = tvm.cuda(0)
        relay_mod = extracted_task.mod
        # Create and cache inputs
        func_type = relay.transform.InferTypeLocal(relay_mod[relay_mod.get_global_vars()[0]])
        input_tensors = []
        for input_type in func_type.arg_types:
            # Deterministic ramp inputs (the uniform draw is overwritten below).
            orig_input_np = np.random.uniform(0, 10, size=list(map(int, input_type.shape))).astype(
                input_type.dtype
            )
            orig_input_np = np.arange(0, orig_input_np.size, dtype=input_type.dtype).reshape(
                orig_input_np.shape
            )
            input_tensors.append(tvm.nd.array(orig_input_np, device))
        ret_type = func_type.ret_type
        def get_output_tensor() -> tvm.nd.NDArray:
            # Fresh randomly-initialized output buffer so stale results can't pass.
            numpy_init = np.random.uniform(0, 1000, size=list(map(int, ret_type.shape))).astype(
                ret_type.dtype
            )
            return tvm.nd.array(numpy_init, device)
        def run_and_get_output(tile_size: Optional[int]) -> np.ndarray:
            returned_primfunc = TestRandomRelayE2ECorrectness.get_primfunc(
                extracted_task, tile_size
            )
            output_tensor = get_output_tensor()
            run_primfunc(returned_primfunc, target, [*input_tensors, output_tensor])
            return output_tensor.numpy()
        # Passing None, we basically do not apply the custom rule we have created
        # and instead use the old default schedule which is the ground truth.
        ground_truth_np = run_and_get_output(None)
        for tile_size in tile_sizes:
            experimental_np = run_and_get_output(tile_size)
            np.testing.assert_allclose(ground_truth_np, experimental_np)
    (
        input_shape,
        implicit_reshape_info,
        dtype,
        tile_sizes,
        num_additional_ops,
    ) = tvm.testing.parameters(
        *itertools.product(
            # input_shape: Each has ~10k elements, should take single microseconds on modern gpu
            [
                [12, 48, 18],
                [890, 14],
                [10, 12, 2, 5, 3, 3],
            ],
            # implicit_reshape_info: Implicit reshape conditions.
            # None is do no implicit reshape, (0, 2) means divide axis 0 in half, e.g. AB --> A2aB
            [None, (0, 2), (1, 2)],
            # dtype: dtypes to test, should not matter that much
            ["float16"],
            # tile_sizes: Tile sizes to try
            [[8, 7]],
            # num_additional_ops: number of non-layout transform ops to include and may be fused
            [5],
        )
    )
    @tvm.testing.requires_gpu
    def test_all_test_case(
        self,
        input_shape,
        implicit_reshape_info,
        dtype,
        tile_sizes,
        num_additional_ops,
    ):
        """Tests the product of all conditions `repeat_per_condition` times."""
        # Generate random module of fusable ops + layout transform and extract fused layout transform task
        full_mod = self.generate_test_case(
            input_shape, implicit_reshape_info, dtype, num_additional_ops
        )
        # Fused layout transform task
        extracted_task = extract_layout_transform_task(full_mod, TARGET)
        self.verify_layout_transform_task(extracted_task, TARGET, tile_sizes)
@tvm.testing.requires_gpu
class TestManualCases:
    """Hand-written expected outputs for the CUDA layout-transform schedule rule.

    Each test builds a small relay module, applies
    cuda_layout_transform_schedule_rule with a fixed tile size, and compares
    the scheduled module's script against a hand-verified TVMScript module
    character-for-character.
    """

    def assert_extracted_equals_expected(
        self, relay_mod: tvm.IRModule, expected_mod: tvm.IRModule, tile_size: int
    ):
        """Schedule the layout-transform task of `relay_mod` and compare to `expected_mod`."""
        extracted_task = extract_layout_transform_task(relay_mod, TARGET)
        dispatched_mod = extracted_task.dispatched[0]
        sch = tvm.tir.Schedule(dispatched_mod)
        block = sch.get_block("T_layout_trans")
        # The rule returns candidate schedules; only the first is checked here.
        output_sch = cuda_layout_transform_schedule_rule(sch, block, [tile_size])[0]
        # Exact textual comparison: whitespace and variable names must match.
        assert output_sch.mod.script() == expected_mod.script()
    def test_simple_tiling(self):
        """NCHW -> NHWC transform with a plain 16x16 shared-memory tile."""
        mod = create_relay_module([1, 32, 32, 32], "float16", [("NCHW", "NHWC")])
        # Main things to notice:
        # - two blocks each with 16, 16 extents which write/read shared mem
        # - coalesced accesses in inner loop of global memory buffer for both
        # fmt: off
        @I.ir_module
        class ExpectedModule:
            @T.prim_func
            def main(p0: T.Buffer((T.int64(1), T.int64(32), T.int64(32), T.int64(32)), "float16"), T_layout_trans: T.Buffer((T.int64(1), T.int64(32), T.int64(32), T.int64(32)), "float16")):
                T.func_attr({"global_symbol": "main", "tir.noalias": True})
                # with T.block("root"):
                p0_shared = T.alloc_buffer((T.int64(1), T.int64(32), T.int64(32), T.int64(32)), "float16", scope="shared")
                for ax0_ax2_ax1_0_ax3_0_fused in T.thread_binding(T.int64(128), thread="blockIdx.x"):
                    for ax3_1_fused_0_ax3_1_fused_1_fused in T.thread_binding(T.int64(16), thread="threadIdx.x"):
                        for ax1_1_fused_0_ax1_1_fused_1_fused in range(T.int64(16)):
                            with T.block("p0_shared"):
                                v0 = T.axis.spatial(T.int64(1), T.int64(0))
                                v1 = T.axis.spatial(T.int64(32), ax0_ax2_ax1_0_ax3_0_fused % T.int64(4) // T.int64(2) * T.int64(16) + ax1_1_fused_0_ax1_1_fused_1_fused)
                                v2 = T.axis.spatial(T.int64(32), ax0_ax2_ax1_0_ax3_0_fused // T.int64(4))
                                v3 = T.axis.spatial(T.int64(32), ax0_ax2_ax1_0_ax3_0_fused % T.int64(2) * T.int64(16) + ax3_1_fused_0_ax3_1_fused_1_fused)
                                T.reads(p0[v0, v1, v2, v3])
                                T.writes(p0_shared[v0, v1, v2, v3])
                                p0_shared[v0, v1, v2, v3] = p0[v0, v1, v2, v3]
                    for ax0_ax1_fused_0 in range(T.int64(16)):
                        for ax0_ax1_fused_1 in T.thread_binding(T.int64(16), thread="threadIdx.x"):
                            with T.block("T_layout_trans"):
                                v_ax0 = T.axis.spatial(T.int64(1), T.int64(0))
                                v_ax1 = T.axis.spatial(T.int64(32), ax0_ax2_ax1_0_ax3_0_fused // T.int64(4))
                                v_ax2 = T.axis.spatial(T.int64(32), ax0_ax2_ax1_0_ax3_0_fused % T.int64(2) * T.int64(16) + (ax0_ax1_fused_0 * T.int64(16) + ax0_ax1_fused_1) // T.int64(16))
                                v_ax3 = T.axis.spatial(T.int64(32), ax0_ax2_ax1_0_ax3_0_fused % T.int64(4) // T.int64(2) * T.int64(16) + (ax0_ax1_fused_0 * T.int64(16) + ax0_ax1_fused_1) % T.int64(16))
                                T.reads(p0_shared[v_ax0, v_ax3, v_ax1, v_ax2])
                                T.writes(T_layout_trans[v_ax0, v_ax1, v_ax2, v_ax3])
                                T.block_attr({"dst_layout": "NHWC", "input_shape": [1, 32, 32, 32], "schedule_rule": "layout_transform", "src_layout": "NCHW"})
                                T_layout_trans[v_ax0, v_ax1, v_ax2, v_ax3] = T.if_then_else(v_ax0 < T.int64(1) and v_ax3 < T.int64(32) and v_ax1 < T.int64(32) and v_ax2 < T.int64(32), p0_shared[v_ax0, v_ax3, v_ax1, v_ax2], T.float16(0))
        # fmt: on
        self.assert_extracted_equals_expected(mod, ExpectedModule, 16)
    def test_simple_implicit_reshape(self):
        """NCHW -> NCHW4c transform: the channel axis is implicitly split by 4."""
        mod = create_relay_module([1, 32, 32, 32], "float16", [("NCHW", "NCHW4c")])
        # Main things to notice:
        # - two blocks each with 16, 16 extents which write/read shared mem
        # - coalesced accesses in inner loop of global memory buffer for both
        # - an implicit reshape is done (see p0_shared)
        # fmt: off
        @I.ir_module
        class ExpectedModule:
            @T.prim_func
            def main(p0: T.Buffer((T.int64(1), T.int64(32), T.int64(32), T.int64(32)), "float16"), T_layout_trans: T.Buffer((T.int64(1), T.int64(8), T.int64(32), T.int64(32), T.int64(4)), "float16")):
                T.func_attr({"global_symbol": "main", "tir.noalias": True})
                # with T.block("root"):
                p0_shared = T.alloc_buffer((T.int64(1), T.int64(8), T.int64(4), T.int64(32), T.int64(32)), "float16", scope="shared")
                for ax0_ax1_ax2_0_ax4_0_ax3_0_0_fused in T.thread_binding(T.int64(128), thread="blockIdx.x"):
                    for ax3_1_fused_0_ax3_1_fused_1_fused in T.thread_binding(T.int64(16), thread="threadIdx.x"):
                        for ax2_1_ax3_0_1_ax4_1_fused_0_ax2_1_ax3_0_1_ax4_1_fused_1_fused in range(T.int64(16)):
                            with T.block("p0_shared"):
                                v_ax0 = T.axis.spatial(T.int64(1), T.int64(0))
                                v_ax1 = T.axis.spatial(T.int64(8), ax0_ax1_ax2_0_ax4_0_ax3_0_0_fused // T.int64(16))
                                v_ax2 = T.axis.spatial(T.int64(32), ax0_ax1_ax2_0_ax4_0_ax3_0_0_fused % T.int64(16) * T.int64(2) + ax2_1_ax3_0_1_ax4_1_fused_0_ax2_1_ax3_0_1_ax4_1_fused_1_fused // T.int64(8))
                                v_ax3 = T.axis.spatial(T.int64(32), ax2_1_ax3_0_1_ax4_1_fused_0_ax2_1_ax3_0_1_ax4_1_fused_1_fused % T.int64(8) // T.int64(4) * T.int64(16) + ax3_1_fused_0_ax3_1_fused_1_fused)
                                v_ax4 = T.axis.spatial(T.int64(4), ax2_1_ax3_0_1_ax4_1_fused_0_ax2_1_ax3_0_1_ax4_1_fused_1_fused % T.int64(4))
                                T.reads(p0[v_ax0, v_ax1 * T.int64(4) + v_ax4, v_ax2, v_ax3])
                                T.writes(p0_shared[v_ax0, v_ax1, v_ax4, v_ax2, v_ax3])
                                p0_shared[v_ax0, v_ax1, v_ax4, v_ax2, v_ax3] = p0[v_ax0, v_ax1 * T.int64(4) + v_ax4, v_ax2, v_ax3]
                    for ax0_ax1_ax2_fused_0 in range(T.int64(16)):
                        for ax0_ax1_ax2_fused_1 in T.thread_binding(T.int64(16), thread="threadIdx.x"):
                            with T.block("T_layout_trans"):
                                v_ax0 = T.axis.spatial(T.int64(1), T.int64(0))
                                v_ax1 = T.axis.spatial(T.int64(8), ax0_ax1_ax2_0_ax4_0_ax3_0_0_fused // T.int64(16))
                                v_ax2 = T.axis.spatial(T.int64(32), ax0_ax1_ax2_0_ax4_0_ax3_0_0_fused % T.int64(16) * T.int64(2) + (ax0_ax1_ax2_fused_0 * T.int64(16) + ax0_ax1_ax2_fused_1) // T.int64(128))
                                v_ax3 = T.axis.spatial(T.int64(32), (ax0_ax1_ax2_fused_0 * T.int64(16) + ax0_ax1_ax2_fused_1) % T.int64(128) // T.int64(4))
                                v_ax4 = T.axis.spatial(T.int64(4), (ax0_ax1_ax2_fused_0 * T.int64(16) + ax0_ax1_ax2_fused_1) % T.int64(4))
                                T.reads(p0_shared[v_ax0, v_ax1, v_ax4, v_ax2, v_ax3])
                                T.writes(T_layout_trans[v_ax0, v_ax1, v_ax2, v_ax3, v_ax4])
                                T.block_attr({"dst_layout": "NCHW4c", "input_shape": [1, 32, 32, 32], "schedule_rule": "layout_transform", "src_layout": "NCHW"})
                                T_layout_trans[v_ax0, v_ax1, v_ax2, v_ax3, v_ax4] = T.if_then_else(v_ax0 < T.int64(1) and v_ax1 * T.int64(4) + v_ax4 < T.int64(32) and v_ax2 < T.int64(32) and v_ax3 < T.int64(32), p0_shared[v_ax0, v_ax1, v_ax4, v_ax2, v_ax3], T.float16(0))
        # fmt: on
        self.assert_extracted_equals_expected(mod, ExpectedModule, 16)
    def test_expected_fusion_post(self):
        """Same as the implicit-reshape case, but with a broadcast add fused after the transform."""
        mod = create_relay_module(
            [1, 32, 32, 32], "float16", [("NCHW", "NCHW4c"), OpPattern.BROADCAST]
        )
        # Main things to notice:
        # - two blocks each with 16, 16 extents which write/read shared mem
        # - coalesced accesses in inner loop of global memory buffer for both
        # - an implicit reshape is done (see p0_shared)
        # - an addition is inlined in the final block (p1 input)
        # fmt: off
        @I.ir_module
        class ExpectedModule:
            @T.prim_func
            def main(p0: T.Buffer((T.int64(1), T.int64(32), T.int64(32), T.int64(32)), "float16"), p1: T.Buffer((), "float16"), T_add: T.Buffer((T.int64(1), T.int64(8), T.int64(32), T.int64(32), T.int64(4)), "float16")):
                T.func_attr({"global_symbol": "main", "tir.noalias": True})
                # with T.block("root"):
                p0_shared = T.alloc_buffer((T.int64(1), T.int64(8), T.int64(4), T.int64(32), T.int64(32)), "float16", scope="shared")
                for ax0_ax1_ax2_0_ax4_0_ax3_0_0_fused in T.thread_binding(T.int64(128), thread="blockIdx.x"):
                    for ax3_1_fused_0_ax3_1_fused_1_fused in T.thread_binding(T.int64(16), thread="threadIdx.x"):
                        for ax2_1_ax3_0_1_ax4_1_fused_0_ax2_1_ax3_0_1_ax4_1_fused_1_fused in range(T.int64(16)):
                            with T.block("p0_shared"):
                                v_ax0 = T.axis.spatial(T.int64(1), T.int64(0))
                                v_ax1 = T.axis.spatial(T.int64(8), ax0_ax1_ax2_0_ax4_0_ax3_0_0_fused // T.int64(16))
                                v_ax2 = T.axis.spatial(T.int64(32), ax0_ax1_ax2_0_ax4_0_ax3_0_0_fused % T.int64(16) * T.int64(2) + ax2_1_ax3_0_1_ax4_1_fused_0_ax2_1_ax3_0_1_ax4_1_fused_1_fused // T.int64(8))
                                v_ax3 = T.axis.spatial(T.int64(32), ax2_1_ax3_0_1_ax4_1_fused_0_ax2_1_ax3_0_1_ax4_1_fused_1_fused % T.int64(8) // T.int64(4) * T.int64(16) + ax3_1_fused_0_ax3_1_fused_1_fused)
                                v_ax4 = T.axis.spatial(T.int64(4), ax2_1_ax3_0_1_ax4_1_fused_0_ax2_1_ax3_0_1_ax4_1_fused_1_fused % T.int64(4))
                                T.reads(p0[v_ax0, v_ax1 * T.int64(4) + v_ax4, v_ax2, v_ax3])
                                T.writes(p0_shared[v_ax0, v_ax1, v_ax4, v_ax2, v_ax3])
                                p0_shared[v_ax0, v_ax1, v_ax4, v_ax2, v_ax3] = p0[v_ax0, v_ax1 * T.int64(4) + v_ax4, v_ax2, v_ax3]
                    for ax0_ax1_ax2_fused_0 in range(T.int64(16)):
                        for ax0_ax1_ax2_fused_1 in T.thread_binding(T.int64(16), thread="threadIdx.x"):
                            with T.block("T_layout_trans"):
                                v_ax0 = T.axis.spatial(T.int64(1), T.int64(0))
                                v_ax1 = T.axis.spatial(T.int64(8), ax0_ax1_ax2_0_ax4_0_ax3_0_0_fused // T.int64(16))
                                v_ax2 = T.axis.spatial(T.int64(32), ax0_ax1_ax2_0_ax4_0_ax3_0_0_fused % T.int64(16) * T.int64(2) + (ax0_ax1_ax2_fused_0 * T.int64(16) + ax0_ax1_ax2_fused_1) // T.int64(128))
                                v_ax3 = T.axis.spatial(T.int64(32), (ax0_ax1_ax2_fused_0 * T.int64(16) + ax0_ax1_ax2_fused_1) % T.int64(128) // T.int64(4))
                                v_ax4 = T.axis.spatial(T.int64(4), (ax0_ax1_ax2_fused_0 * T.int64(16) + ax0_ax1_ax2_fused_1) % T.int64(4))
                                T.reads(p0_shared[v_ax0, v_ax1, v_ax4, v_ax2, v_ax3], p1[()])
                                T.writes(T_add[v_ax0, v_ax1, v_ax2, v_ax3, v_ax4])
                                T.block_attr({"dst_layout": "NCHW4c", "input_shape": [1, 32, 32, 32], "schedule_rule": "layout_transform", "src_layout": "NCHW"})
                                T_add[v_ax0, v_ax1, v_ax2, v_ax3, v_ax4] = T.if_then_else(v_ax0 < T.int64(1) and v_ax1 * T.int64(4) + v_ax4 < T.int64(32) and v_ax2 < T.int64(32) and v_ax3 < T.int64(32), p0_shared[v_ax0, v_ax1, v_ax4, v_ax2, v_ax3], T.float16(0)) + p1[()]
        # fmt: on
        self.assert_extracted_equals_expected(mod, ExpectedModule, 16)
# Entry point: run all tests in this file through TVM's pytest wrapper.
if __name__ == "__main__":
    tvm.testing.main()
| 24,429 | 50.868365 | 271 | py |
tvm | tvm-main/tests/python/unittest/test_tvmscript_printer_underlining.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
import pytest
from tvm.runtime import ObjectPath
from tvm.script.printer.doc import (
ExprStmtDoc,
IdDoc,
OperationDoc,
OperationKind,
StmtBlockDoc,
)
from tvm.script.printer.doc_printer import to_python_script
from tvm.script import ir as I, tir as T
def make_path(name: str) -> ObjectPath:
    """Build an ObjectPath that is a single attribute access on the root."""
    root = ObjectPath.root()
    return root.attr(name)
def make_id_doc(name: str, path_name: Optional[str] = None) -> IdDoc:
    """Create an IdDoc for `name` whose source path is `path_name` (defaults to `name`)."""
    id_doc = IdDoc(name)
    id_doc.source_paths = [make_path(name if path_name is None else path_name)]
    return id_doc
def format_script(s: str) -> str:
    """
    Remove leading and trailing blank lines, and make the minimum indentation 0.

    Returns "\\n" when `s` contains no non-whitespace content; otherwise the
    dedented text with surrounding whitespace stripped.
    """
    s = s.strip("\n")
    non_empty_lines = [line for line in s.splitlines() if line and not line.isspace()]
    if not non_empty_lines:
        # no actual content
        return "\n"
    # Dedent by the smallest leading-space count among non-empty lines.
    line_indents = [len(line) - len(line.lstrip(" ")) for line in non_empty_lines]
    spaces_to_remove = min(line_indents)
    cleaned_lines = "\n".join(line[spaces_to_remove:] for line in s.splitlines())
    # NOTE: the original appended a trailing "\n" here and then immediately
    # stripped it off again via .strip(); that dead append has been removed.
    return cleaned_lines.strip()
def test_underline_basic():
    """A single matching path underlines exactly one span in the output."""
    doc = StmtBlockDoc(
        [
            ExprStmtDoc(make_id_doc("foo")),
            ExprStmtDoc(OperationDoc(OperationKind.Add, [make_id_doc("bar"), make_id_doc("baz")])),
            ExprStmtDoc(make_id_doc("qux")),
        ]
    )
    assert to_python_script(doc, path_to_underline=[make_path("baz")]) == format_script(
        """
        foo
        bar + baz
              ^^^
        qux
        """
    )
def test_underline_multiple_spans():
    """Every occurrence of the underlined path is marked, even across lines."""
    doc = StmtBlockDoc(
        [
            ExprStmtDoc(make_id_doc("foo")),
            ExprStmtDoc(make_id_doc("bar")),
            ExprStmtDoc(OperationDoc(OperationKind.Add, [make_id_doc("foo"), make_id_doc("foo")])),
        ]
    )
    assert to_python_script(doc, path_to_underline=[make_path("foo")]) == format_script(
        """
        foo
        ^^^
        bar
        foo + foo
        ^^^   ^^^
        """
    )
def test_underline_multiple_spans_with_line_numbers():
    """Underlines stay aligned when line-number prefixes are printed."""
    doc = StmtBlockDoc(
        [
            ExprStmtDoc(make_id_doc("foo")),
            ExprStmtDoc(make_id_doc("bar")),
            ExprStmtDoc(OperationDoc(OperationKind.Add, [make_id_doc("foo"), make_id_doc("foo")])),
        ]
    )
    assert to_python_script(
        doc, print_line_numbers=True, path_to_underline=[make_path("foo")]
    ) == format_script(
        """
        1 foo
          ^^^
        2 bar
        3 foo + foo
          ^^^   ^^^
        """
    )
def test_underline_multiline():
    """A path attached to a multi-line doc underlines every line it spans."""
    doc = StmtBlockDoc(
        [
            ExprStmtDoc(IdDoc("foo")),
            ExprStmtDoc(IdDoc("bar")),
        ]
    )
    doc.source_paths = [make_path("whole_doc")]
    assert to_python_script(doc, path_to_underline=[make_path("whole_doc")]) == format_script(
        """
        foo
        ^^^
        bar
        ^^^
        """
    )
@pytest.mark.parametrize(
    "to_underline, expected_text",
    [
        (
            [0],
            """
            x0
            ^^
            x1
            x2
            (... 7 lines skipped ...)
            """,
        ),
        (
            [1],
            """
            x0
            x1
            ^^
            x2
            x3
            (... 6 lines skipped ...)
            """,
        ),
        (
            [3],
            """
            x0
            x1
            x2
            x3
            ^^
            x4
            x5
            (... 4 lines skipped ...)
            """,
        ),
        (
            [4],
            """
            (... 2 lines skipped ...)
            x2
            x3
            x4
            ^^
            x5
            x6
            (... 3 lines skipped ...)
            """,
        ),
        (
            [6],
            """
            (... 4 lines skipped ...)
            x4
            x5
            x6
            ^^
            x7
            x8
            x9
            """,
        ),
        (
            [9],
            """
            (... 7 lines skipped ...)
            x7
            x8
            x9
            ^^
            """,
        ),
        (
            [0, 9],
            """
            x0
            ^^
            x1
            x2
            (... 4 lines skipped ...)
            x7
            x8
            x9
            ^^
            """,
        ),
        (
            [0, 3, 9],
            """
            x0
            ^^
            x1
            x2
            x3
            ^^
            x4
            x5
            x6
            x7
            x8
            x9
            ^^
            """,
        ),
        (
            [0, 6, 9],
            """
            x0
            ^^
            x1
            x2
            x3
            x4
            x5
            x6
            ^^
            x7
            x8
            x9
            ^^
            """,
        ),
        (
            [33],
            """
            x0
            x1
            x2
            x3
            x4
            x5
            x6
            x7
            x8
            x9
            """,
        ),
    ],
)
def test_print_two_context_lines(to_underline, expected_text):
    """With num_context_lines=2, lines far from an underline are elided as a skip marker."""
    doc = StmtBlockDoc(
        [ExprStmtDoc(make_id_doc(f"x{i}", "yes" if i in to_underline else "no")) for i in range(10)]
    )
    result = to_python_script(doc, num_context_lines=2, path_to_underline=[make_path("yes")])
    assert result == format_script(expected_text)
def test_underline_and_print_line_numbers():
    """Underlines align with right-justified line numbers (width grows to 2 digits)."""
    doc = StmtBlockDoc([ExprStmtDoc(make_id_doc(f"line{i + 1}")) for i in range(12)])
    result = to_python_script(doc, print_line_numbers=True, path_to_underline=[make_path("line6")])
    assert (
        result.strip()
        == format_script(
            """
             1 line1
             2 line2
             3 line3
             4 line4
             5 line5
             6 line6
               ^^^^^
             7 line7
             8 line8
             9 line9
            10 line10
            11 line11
            12 line12
            """
        ).strip()
    )
def test_underline_multi_object_paths():
    """Several independent paths can be underlined in a single printing pass."""
    doc = StmtBlockDoc([ExprStmtDoc(make_id_doc(f"line{i + 1}")) for i in range(10)])
    result = to_python_script(
        doc,
        path_to_underline=[
            make_path("line1"),
            make_path("line3"),
            make_path("line5"),
            make_path("line7"),
            make_path("line9"),
        ],
    )
    assert (
        result.strip()
        == format_script(
            """
            line1
            ^^^^^
            line2
            line3
            ^^^^^
            line4
            line5
            ^^^^^
            line6
            line7
            ^^^^^
            line8
            line9
            ^^^^^
            line10
            """
        ).strip()
    )
def test_underline_and_print_line_numbers_with_context():
    """Line numbers, underlining, and context elision compose correctly."""
    doc = StmtBlockDoc([ExprStmtDoc(make_id_doc(f"line{i + 1}")) for i in range(12)])
    result = to_python_script(
        doc, print_line_numbers=True, num_context_lines=2, path_to_underline=[make_path("line8")]
    )
    assert result == format_script(
        """
        (... 5 lines skipped ...)
         6 line6
         7 line7
         8 line8
           ^^^^^
         9 line9
        10 line10
        (... 2 lines skipped ...)
        """
    )
def test_underline_based_on_path_prefix():
    """A doc whose path is a prefix of the requested path is still underlined."""
    doc = StmtBlockDoc([ExprStmtDoc(make_id_doc("foo")), ExprStmtDoc(make_id_doc("bar"))])
    result = to_python_script(doc, path_to_underline=[make_path("foo").attr("x").attr("y")])
    # There is no document that matches the desired path exactly,
    # but path of "foo" is a prefix of the desired path, and thus should be underlined.
    assert result == format_script(
        """
        foo
        ^^^
        bar
        """
    )
def test_longer_prefix_must_win():
    """When multiple docs' paths prefix the requested path, only the longest prefix is underlined."""
    foo_x = IdDoc("foo_x")
    foo_x.source_paths = [make_path("foo").attr("x")]
    doc = StmtBlockDoc(
        [ExprStmtDoc(make_id_doc("foo")), ExprStmtDoc(make_id_doc("bar")), ExprStmtDoc(foo_x)]
    )
    result = to_python_script(doc, path_to_underline=[make_path("foo").attr("x").attr("y")])
    # "foo" should not be underlined because there is a document with a more specific path prefix
    assert result == format_script(
        """
        foo
        bar
        foo_x
        ^^^^^
        """
    )
def test_underline_from_obj():
    """obj_to_underline marks every printed occurrence of the given IR object."""
    @T.prim_func
    def func(a: T.int32, b: T.int32):
        T.evaluate(a)
        T.evaluate(b)
        T.evaluate(a)
        T.evaluate(b)
        T.evaluate(a)
        T.evaluate(b)
    # Underline parameter `a`; every use site should be marked.
    result = func.script(obj_to_underline=[func.params[0]])
    assert result == format_script(
        """
        # from tvm.script import tir as T

        @T.prim_func
        def main(a: T.int32, b: T.int32):
            T.evaluate(a)
                       ^
            T.evaluate(b)
            T.evaluate(a)
                       ^
            T.evaluate(b)
            T.evaluate(a)
                       ^
            T.evaluate(b)
        """
    )
def test_underline_from_multi_obj():
    """Multiple IR objects can be underlined at once; each gets a full-statement mark."""
    @T.prim_func
    def func():
        T.evaluate(-1)
        T.evaluate(1)
        T.evaluate(2)
        T.evaluate(3)
        T.evaluate(4)
        T.evaluate(5)
        T.evaluate(6)
        T.evaluate(7)
    # Underline every other statement in the body.
    result = func.script(
        obj_to_underline=[
            func.body.seq[1],
            func.body.seq[3],
            func.body.seq[5],
            func.body.seq[7],
        ]
    )
    assert result == format_script(
        """
        # from tvm.script import tir as T

        @T.prim_func
        def main():
            T.evaluate(-1)
            T.evaluate(1)
            ^^^^^^^^^^^^^
            T.evaluate(2)
            T.evaluate(3)
            ^^^^^^^^^^^^^
            T.evaluate(4)
            T.evaluate(5)
            ^^^^^^^^^^^^^
            T.evaluate(6)
            T.evaluate(7)
            ^^^^^^^^^^^^^
        """
    )
def test_underline_func():
    """Underlining the root path of a PrimFunc marks the whole printed function."""
    @T.prim_func
    def func():
        T.evaluate(0)
    result = func.script(
        path_to_underline=[
            ObjectPath.root(),
        ]
    )
    assert result == format_script(
        """
        # from tvm.script import tir as T

        @T.prim_func
        ^^^^^^^^^^^^
        def main():
        ^^^^^^^^^^^
            T.evaluate(0)
            ^^^^^^^^^^^^^
        """
    )
def test_underline_func_in_irmodule():
    """Underlining one function inside an IRModule marks only that function."""
    @I.ir_module
    class irmodule:
        @T.prim_func
        def func():
            T.evaluate(0)
    result = irmodule.script(
        path_to_underline=[
            ObjectPath.root().attr("functions").map_value(irmodule.get_global_var("func")),
        ]
    )
    assert result == format_script(
        """
        # from tvm.script import ir as I
        # from tvm.script import tir as T

        @I.ir_module
        class Module:
            @T.prim_func
            ^^^^^^^^^^^^
            def func():
            ^^^^^^^^^^^
                T.evaluate(0)
                ^^^^^^^^^^^^^
        """
    )
def test_underline_irmodule():
    """Underlining the root path of an IRModule marks the whole module body."""
    @I.ir_module
    class irmodule:
        @T.prim_func
        def func():
            T.evaluate(0)
    result = irmodule.script(
        path_to_underline=[
            ObjectPath.root(),
        ]
    )
    assert result == format_script(
        """
        # from tvm.script import ir as I
        # from tvm.script import tir as T

        @I.ir_module
        ^^^^^^^^^^^^
        class Module:
        ^^^^^^^^^^^^^
            @T.prim_func
            ^^^^^^^^^^^^
            def func():
            ^^^^^^^^^^^
                T.evaluate(0)
                ^^^^^^^^^^^^^
        """
    )
| 12,995 | 22.332136 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_tune_context.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test the tune context of meta schedule."""
import sys
import pytest
import tvm
import tvm.testing
from tvm.script import tir as T
from tvm.target import Target
from tvm.meta_schedule import TuneContext
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,missing-docstring
# 1024x1024x1024 fp32 matmul workload used as the module for TuneContext tests.
# NOTE: only `#` comments are added here -- string statements inside a
# TVMScript ir_module/prim_func could confuse the TVMScript parser.
@tvm.script.ir_module
class Matmul:
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:  # pylint: disable=no-self-argument
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.match_buffer(b, (1024, 1024), "float32")
        C = T.match_buffer(c, (1024, 1024), "float32")
        for i, j, k in T.grid(1024, 1024, 1024):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    # Accumulator is zero-initialized on the first reduction step.
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,missing-docstring
def test_tune_context_create():
    """TuneContext construction populates sane defaults and keeps the given module."""
    workload = Matmul
    tune_ctx = TuneContext(mod=workload, target=Target("llvm"), task_name="Test Task")
    assert tune_ctx.task_name == "Test Task"
    assert tune_ctx.num_threads > 0
    assert tune_ctx.rand_state != -1
    assert tune_ctx.mod == workload or tvm.ir.structural_equal(tune_ctx.mod, workload)
# Entry point: run all tests in this file through TVM's pytest wrapper.
if __name__ == "__main__":
    tvm.testing.main()
| 2,219 | 35.393443 | 96 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_transform_layout.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
def packed_index_map_func(m, n):
    """Pack a (m, n) index into a 16x16-tiled layout: (m//16, n//16, m%16, n%16).

    Uses only `//` and `%` so the map works for both plain ints and TIR
    expressions passed in by transform_layout.
    """
    tile = 16
    return m // tile, n // tile, m % tile, n % tile
@T.prim_func
def two_elementwise(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")) -> None:
B = T.alloc_buffer((128, 128), "float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
# Expected result of applying packed_index_map_func to intermediate buffer B:
# B becomes (8, 8, 16, 16) and all its accesses are rewritten to packed form.
@T.prim_func
def two_elementwise_transformed_intermediate_buffer(
    A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")
) -> None:
    B = T.alloc_buffer((8, 8, 16, 16), "float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi // 16, vj // 16, vi % 16, vj % 16] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi // 16, vj // 16, vi % 16, vj % 16] + 1.0
# Expected result of applying packed_index_map_func to input buffer A:
# the parameter itself takes the packed (8, 8, 16, 16) shape.
@T.prim_func
def two_elementwise_transformed_input_buffer(
    A: T.Buffer((8, 8, 16, 16), "float32"), C: T.Buffer((128, 128), "float32")
) -> None:
    B = T.alloc_buffer((128, 128), "float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi // 16, vj // 16, vi % 16, vj % 16] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
# Expected result of applying packed_index_map_func to output buffer C.
@T.prim_func
def two_elementwise_transformed_output_buffer(
    A: T.Buffer((128, 128), "float32"), C: T.Buffer((8, 8, 16, 16), "float32")
) -> None:
    B = T.alloc_buffer((128, 128), "float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi // 16, vj // 16, vi % 16, vj % 16] = B[vi, vj] + 1.0
# Single elementwise op over a 2-D iteration space: B = A * 2.
@T.prim_func
def elementwise(A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32")) -> None:
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
# Expected result of transform_block_layout flattening (i, j) -> (i * 128 + j,).
@T.prim_func
def elementwise_transformed(A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32")) -> None:
    for i in range(16384):
        with T.block("B"):
            vi = T.axis.remap("S", [i])
            B[vi // 128, vi % 128] = A[vi // 128, vi % 128] * 2.0
# 7x7 stride-2 NHWC conv with explicit padding block; the source workload for
# the transform_block_layout conv test.
@T.prim_func
def conv2d_nhwc(
    Input: T.Buffer((1, 224, 224, 3), "float32"),
    Weight: T.Buffer((7, 7, 3, 64), "float32"),
    Conv2d_nhwc: T.Buffer((1, 112, 112, 64), "float32"),
) -> None:
    PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
    for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
        with T.block("PadInput"):
            i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            # 3-pixel zero padding on each spatial border.
            PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
                ((((i1_1 >= 3) and (i1_1 < 227)) and (i2_1 >= 3)) and (i2_1 < 227)),
                Input[i0_1, (i1_1 - 3), (i2_1 - 3), i3_1],
                T.float32(0),
                dtype="float32",
            )
    for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 112, 112, 64, 7, 7, 3):
        with T.block("conv2d_nhwc"):
            n, h, w, co, rh, rw, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
            with T.init():
                Conv2d_nhwc[n, h, w, co] = T.float32(0)
            Conv2d_nhwc[n, h, w, co] = Conv2d_nhwc[n, h, w, co] + (
                PadInput[n, ((h * 2) + rh), ((w * 2) + rw), ((T.floordiv(co, 64) * 3) + rc)]
                * Weight[rh, rw, rc, co]
            )
# Expected result of collapsing conv2d_nhwc's 7-level loop nest into
# (spatial=12544, channel=64, reduction=147) via transform_block_layout.
@T.prim_func
def conv2d_nhwc_transformed(
    Input: T.Buffer((1, 224, 224, 3), "float32"),
    Weight: T.Buffer((7, 7, 3, 64), "float32"),
    Conv2d_nhwc: T.Buffer((1, 112, 112, 64), "float32"),
) -> None:
    PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
    for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
        with T.block("PadInput"):
            i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(Input[i0_1, i1_1 - 3, i2_1 - 3, i3_1])
            T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
            PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
                i1_1 >= 3 and i1_1 < 227 and i2_1 >= 3 and i2_1 < 227,
                Input[i0_1, i1_1 - 3, i2_1 - 3, i3_1],
                T.float32(0),
                dtype="float32",
            )
    for ax0, ax1, ax2 in T.grid(12544, 64, 147):
        with T.block("conv2d_nhwc"):
            v0, v1, v2 = T.axis.remap("SSR", [ax0, ax1, ax2])
            with T.init():
                Conv2d_nhwc[0, v0 // 112, v0 % 112, v1] = T.float32(0)
            Conv2d_nhwc[0, v0 // 112, v0 % 112, v1] = Conv2d_nhwc[0, v0 // 112, v0 % 112, v1] + PadInput[0, v0 // 112 * 2 + v2 // 21, v0 % 112 * 2 + v2 % 21 // 3, v2 % 3] * Weight[v2 // 21, v2 % 21 // 3, v2 % 3, v1]
# Same as two_elementwise but with a unit-sized leading dimension, used to
# exercise layout transforms on size-1 axes.
@T.prim_func
def two_elementwise_unit_dim(A: T.Buffer((1, 128), "float32"), C: T.Buffer((1, 128), "float32")) -> None:
    B = T.alloc_buffer((1, 128), "float32")
    for i, j in T.grid(1, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(1, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
class TestTransformLayoutWithCacheWriteAndAxisSeparators(tvm.testing.CompareBeforeAfter):
    """
    transform_layout with axis_separator on a buffer from cache_write should work as expected
    """

    @pytest.fixture
    def transform(self):
        def transform(mod):
            # Split x by 32 and place an axis separator before the inner half;
            # pad_value=0.0 fills the padding introduced for the 33rd row.
            def transform_fn(x, y):
                return [x // 32, y, tvm.te.AXIS_SEPARATOR, x % 32]
            sch = tvm.tir.Schedule(mod, debug_mask="all")
            block_rv = sch.get_block("T_add")
            sch.cache_write(block_rv, 0, "global")
            sch.transform_layout(block_rv, ("write", 0), transform_fn, pad_value=0.0)
            return sch.mod
        return transform
    def before(
        p0: T.Buffer((T.int64(33), T.int64(128)), "float32"),
        p1: T.Buffer((T.int64(33), T.int64(128)), "float32"),
        T_add: T.Buffer((T.int64(33), T.int64(128)), "float32"),
    ):
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # with T.block("root"):
        for ax0, ax1 in T.grid(T.int64(33), T.int64(128)):
            with T.block("T_add"):
                v_ax0, v_ax1 = T.axis.remap("SS", [ax0, ax1])
                T.reads(p0[v_ax0, v_ax1], p1[v_ax0, v_ax1])
                T.writes(T_add[v_ax0, v_ax1])
                T_add[v_ax0, v_ax1] = p0[v_ax0, v_ax1] + p1[v_ax0, v_ax1]
    def expected(p0: T.Buffer((T.int64(33), T.int64(128)), "float32"), p1: T.Buffer((T.int64(33), T.int64(128)), "float32"), T_add: T.Buffer((T.int64(33), T.int64(128)), "float32")):
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # with T.block("root"):
        T_add_global = T.alloc_buffer((T.int64(2), T.int64(128), T.int64(32)), axis_separators=[2])
        for axis0, axis1, axis2 in T.grid(T.int64(2), T.int64(128), T.int64(32)):
            with T.block("T_add"):
                v_axis0, v_axis1, v_axis2 = T.axis.remap("SSS", [axis0, axis1, axis2])
                T.reads(p0[v_axis0 * T.int64(32) + v_axis2, v_axis1], p1[v_axis0 * T.int64(32) + v_axis2, v_axis1])
                T.writes(T_add_global[v_axis0, v_axis1, v_axis2])
                T_add_global[v_axis0, v_axis1, v_axis2] = T.if_then_else(v_axis0 == T.int64(1) and T.int64(1) <= v_axis2, T.float32(0), p0[v_axis0 * T.int64(32) + v_axis2, v_axis1] + p1[v_axis0 * T.int64(32) + v_axis2, v_axis1])
        for ax0, ax1 in T.grid(T.int64(33), T.int64(128)):
            with T.block("T_add_global"):
                v0, v1 = T.axis.remap("SS", [ax0, ax1])
                T.reads(T_add_global[v0 // T.int64(32), v1, v0 % T.int64(32)])
                T.writes(T_add[v0, v1])
                T_add[v0, v1] = T_add_global[v0 // T.int64(32), v1, v0 % T.int64(32)]
# pylint: enable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
# fmt: on
# Pytest parameter: each schedule test runs twice, once addressing blocks by
# BlockRV object and once by block-name string.
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_two_elementwise_transform_intermediate_buffer(use_block_name):
    """Transforming intermediate buffer B produces the packed-layout module."""
    sch = tir.Schedule(two_elementwise, debug_mask="all")
    if use_block_name:
        sch.transform_layout(block="B", buffer="B", index_map=packed_index_map_func)
    else:
        sch.transform_layout(sch.get_block("B"), ("write", 0), packed_index_map_func)
    tvm.ir.assert_structural_equal(two_elementwise_transformed_intermediate_buffer, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=two_elementwise)
def test_transform_layout_with_sampling():
    """transform_layout whose index map depends on sampled tile sizes round-trips as JSON."""
    sch = tir.Schedule(two_elementwise, debug_mask="all")
    block_b = sch.get_block("B")
    innermost = sch.get_loops(block_b)[-1]
    # The sampled factors feed directly into the layout's index map below.
    _j0, j1, j2 = sch.sample_perfect_tile(innermost, 3, decision=[4, 8, 4])
    sch.transform_layout(block_b, ("write", 0), lambda i, j: (i, j // (j1 * j2), j % (j1 * j2)))
    verify_trace_roundtrip(sch=sch, mod=two_elementwise, text_format="json")
def test_two_elementwise_transform_input_buffer(use_block_name):
    """Transforming input buffer A rewrites every read of A to the packed layout."""
    sch = tir.Schedule(two_elementwise, debug_mask="all")
    if use_block_name:
        sch.transform_layout(index_map=packed_index_map_func, block="B", buffer="A")
    else:
        sch.transform_layout(sch.get_block("B"), ("read", 0), packed_index_map_func)
    tvm.ir.assert_structural_equal(two_elementwise_transformed_input_buffer, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=two_elementwise)
def test_two_elementwise_transform_output_buffer(use_block_name):
    """Transforming output buffer C rewrites every write of C to the packed layout."""
    sch = tir.Schedule(two_elementwise, debug_mask="all")
    if use_block_name:
        sch.transform_layout(index_map=packed_index_map_func, block="C", buffer="C")
    else:
        sch.transform_layout(sch.get_block("C"), ("write", 0), packed_index_map_func)
    tvm.ir.assert_structural_equal(two_elementwise_transformed_output_buffer, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=two_elementwise)
def test_two_elementwise_unit_dim(use_block_name):
    """An identity index map on a unit-leading-dim buffer leaves the module unchanged."""
    sch = tir.Schedule(two_elementwise_unit_dim, debug_mask="all")
    identity_map = lambda i, j: (i, j)
    if use_block_name:
        sch.transform_layout(index_map=identity_map, block="B", buffer="B")
    else:
        sch.transform_layout(sch.get_block("B"), ("write", 0), identity_map)
    tvm.ir.assert_structural_equal(two_elementwise_unit_dim, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=two_elementwise_unit_dim)
def test_simplify():
    """Indices produced by transform_layout are simplified against blockized iter bounds."""
    sch = tir.Schedule(two_elementwise, debug_mask="all")
    i, j = sch.get_loops(sch.get_block("C"))
    i, i_inner = sch.split(i, factors=[None, 16])
    j, j_inner = sch.split(j, factors=[None, 16])
    sch.reorder(
        i,
        j,
        i_inner,
        j_inner,
    )
    block_outer = sch.blockize(i_inner)
    B = sch.cache_read(block_outer, 0, "global")
    sch.transform_layout(B, ("write", 0), lambda i, j: (i // 16, j // 16, i % 16, j % 16))
    # Reference module for the scheduled inner region; see the trailing comment
    # for what the un-simplified indices would look like.
    @T.prim_func
    def ref(B: T.Buffer((8, 8, 16, 16), "float32"), C: T.Buffer((128, 128), "float32")):
        for i_0, j_0 in T.grid(8, 8):
            with T.block("C_o"):
                vi_o, vj_o = T.axis.remap("SS", [i_0, j_0])
                T.reads(B[vi_o, vj_o, 0:16, 0:16])
                T.writes(C[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16])
                for i_1, j_1 in T.grid(16, 16):
                    with T.block("C"):
                        vi, vj = T.axis.remap("SS", [i_1, j_1])
                        T.reads(B[vi_o, vj_o, vi, vj])
                        T.writes(C[vi_o * 16 + vi, vj_o * 16 + vj])
                        C[vi_o * 16 + vi, vj_o * 16 + vj] = B[vi_o, vj_o, vi, vj] + T.float32(1)
    # Without simplification
    # T.reads(B[vi // 16 + vi_o, vj // 16 + vj_o, vi % 16, vj % 16])
    # C[...] = B[vi // 16 + vi_o, vj // 16 + vj_o, vi % 16, vj % 16] + T.float32(1)
    tvm.ir.assert_structural_equal(ref.body.block.body, sch.get(sch.get_loops(block_outer)[0]))
def test_var_args_sugar():
    """An index map may use ``*indices`` to forward leading axes unchanged."""

    @T.prim_func
    def summation_3d(
        A: T.Buffer((1024, 1024, 32), "float32"), B: T.Buffer((1,), "float32")
    ) -> None:
        B[0] = 0
        for i, j, k in T.grid(1024, 1024, 32):
            with T.block("compute"):
                vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                B[0] = B[0] + A[vi, vj, vk]

    @T.prim_func
    def summation_3d_split(
        A: T.Buffer((1024, 1024, 8, 4), "float32"), B: T.Buffer((1,), "float32")
    ) -> None:
        B[0] = 0
        for i, j, k in T.grid(1024, 1024, 32):
            with T.block("compute"):
                vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                B[0] = B[0] + A[vi, vj, vk // 4, vk % 4]

    sch = tir.Schedule(summation_3d, debug_mask="all")
    # Only the trailing axis ``k`` is split; ``*indices`` passes the rest through.
    sch.transform_layout(
        index_map=lambda *indices, k: [*indices, k // 4, k % 4], block="compute", buffer="A"
    )
    tvm.ir.assert_structural_equal(summation_3d_split, sch.mod["main"])
def test_transform_block_layout_basic(use_block_name):
    """Fusing the two spatial axes of block "B" into one flat axis."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    if use_block_name:
        target = "B"
    else:
        target = sch.get_block("B")
    sch.transform_block_layout(target, lambda i, j: (i * 128 + j,))
    tvm.ir.assert_structural_equal(elementwise_transformed, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_transform_block_layout_conv2d_nhwc(use_block_name):
    """Conv2d block iteration space can be reshaped into a matmul-like form."""
    sch = tir.Schedule(conv2d_nhwc, debug_mask="all")
    block = "conv2d_nhwc" if use_block_name else sch.get_block("conv2d_nhwc")
    # Spatial axes (n, h, w) fuse into rows, reductions (rh, rw, rc) into one axis.
    sch.transform_block_layout(
        block,
        lambda n, h, w, co, rh, rw, rc: (n * 112 * 112 + h * 112 + w, co, rh * 7 * 3 + rw * 3 + rc),
    )
    tvm.ir.assert_structural_equal(conv2d_nhwc_transformed, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=conv2d_nhwc)
def test_transform_block_layout_unit_dim(use_block_name):
    """Block axes can be reordered even when one of them has a unit extent."""
    sch = tir.Schedule(two_elementwise_unit_dim, debug_mask="all")
    block = "B" if use_block_name else sch.get_block("B")
    sch.transform_block_layout(block, lambda i, j: (j, i))

    @T.prim_func
    def two_elementwise_unit_dim_transformed(
        A: T.Buffer((1, 128), "float32"), C: T.Buffer((1, 128), "float32")
    ) -> None:
        B = T.alloc_buffer((1, 128), "float32")
        # Block "B" now iterates j before the unit axis i.
        for j, i in T.grid(128, 1):
            with T.block("B"):
                vj, vi = T.axis.remap("SS", [j, i])
                B[vi, vj] = A[vi, vj] * 2.0
        for i, j in T.grid(1, 128):
            with T.block("C"):
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = B[vi, vj] + 1.0

    tvm.ir.assert_structural_equal(two_elementwise_unit_dim_transformed, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=two_elementwise_unit_dim)
def test_transform_block_layout_fail_non_affine(use_block_name):
    """A non-bijective (rank-reducing) block index map must be rejected."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    if use_block_name:
        target = "B"
    else:
        target = sch.get_block("B")
    with pytest.raises(tir.ScheduleError):
        sch.transform_block_layout(target, lambda i, j: (i + j,))
def test_transform_block_layout_fail_mixed_iter_type(use_block_name):
    """Mixing spatial and reduction axes in one output index must be rejected."""
    sch = tir.Schedule(conv2d_nhwc, debug_mask="all")
    if use_block_name:
        target = "conv2d_nhwc"
    else:
        target = sch.get_block("conv2d_nhwc")
    with pytest.raises(tir.ScheduleError):
        # The second output index fuses spatial co with reduction rh.
        sch.transform_block_layout(
            target,
            lambda n, h, w, co, rh, rw, rc: (n * 112 * 112 + h * 112 + w, co * 7 + rh, rw * 3 + rc),
        )
def test_transform_block_layout_int64_extent(use_block_name):
    """Axis fusion works when loop extents are int64."""

    @T.prim_func
    def elementwise_int64_extent(
        A: T.Buffer((T.int64(128), T.int64(128)), "float32"),
        B: T.Buffer((T.int64(128), T.int64(128)), "float32"),
    ) -> None:
        for i, j in T.grid(T.int64(128), T.int64(128)):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = A[vi, vj] * 2.0

    @T.prim_func
    def elementwise_int64_extent_transformed(
        A: T.Buffer((T.int64(128), T.int64(128)), "float32"),
        B: T.Buffer((T.int64(128), T.int64(128)), "float32"),
    ) -> None:
        # The fused extent and the floordiv/floormod constants stay int64.
        for i in range(T.int64(16384)):
            with T.block("B"):
                vi = T.axis.remap("S", [i])
                B[vi // T.int64(128), vi % T.int64(128)] = (
                    A[vi // T.int64(128), vi % T.int64(128)] * 2.0
                )

    sch = tir.Schedule(elementwise_int64_extent, debug_mask="all")
    block = "B" if use_block_name else sch.get_block("B")
    sch.transform_block_layout(block, lambda i, j: (i * 128 + j,))
    tvm.ir.assert_structural_equal(elementwise_int64_extent_transformed, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=elementwise_int64_extent)
class BasePaddingCompare(tvm.testing.CompareBeforeAfter):
    """Shared parameters and transform fixture for the padding tests below.

    Subclasses override the parameters and provide ``before``/``expected``
    functions; the fixture applies ``transform_layout`` to the block named
    "block" with the configured index map and pad value.
    """

    pad_value = tvm.testing.parameter(None)
    transformed_buffer = tvm.testing.parameter("A")
    index_map = tvm.testing.parameter(lambda i: [i // 4, i % 4])
    assume_injective_transform = tvm.testing.parameter(False)

    @pytest.fixture
    def transform(self, pad_value, transformed_buffer, index_map, assume_injective_transform):
        def transform(mod):
            sch = tir.Schedule(mod)
            sch.transform_layout(
                "block",
                transformed_buffer,
                index_map,
                pad_value=pad_value,
                assume_injective_transform=assume_injective_transform,
            )
            return sch.mod

        return transform
class TestNoPadding(BasePaddingCompare):
    """Transformations without padding do not depend on pad_value."""

    # 16 elements map exactly onto the 4x4 layout, so no padding appears.
    pad_value = tvm.testing.parameter(None, 42)

    def before():
        A = T.alloc_buffer(16, "int32")
        for i in T.serial(16):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                A[vi] = 0

    def expected():
        A = T.alloc_buffer([4, 4], "int32")
        for i in T.serial(16):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                A[vi // 4, vi % 4] = 0
class TestNoPaddingMultipleUsage(BasePaddingCompare):
    """Transformations without padding do not depend on pad_value.

    Like TestNoPadding, but the buffer A shows up in multiple
    locations.  To remain internally consistent, all instances of the
    buffer should be rewritten.
    """

    pad_value = tvm.testing.parameter(None, 42)

    def before():
        A = T.alloc_buffer(16, "int32")
        for i in T.serial(16):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                A[vi] = 0

        B = T.alloc_buffer(16, "int32")
        for i in T.serial(16):
            with T.block("other"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]

    def expected():
        A = T.alloc_buffer([4, 4], "int32")
        for i in T.serial(16):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                A[vi // 4, vi % 4] = 0

        # The read in block "other" is rewritten to the new layout as well.
        B = T.alloc_buffer(16, "int32")
        for i in T.serial(16):
            with T.block("other"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi // 4, vi % 4]
class TestNoPaddingOpaqueBlock(BasePaddingCompare):
    """Transformations without padding do not depend on pad_value.

    Like TestNoPadding, but buffer access is done in an opaque block
    (no block iter vars; the loop variable is used directly).
    """

    pad_value = tvm.testing.parameter(None, 42)

    def before():
        A = T.alloc_buffer(16, "int32")
        for i in T.serial(16):
            with T.block("block"):
                A[i] = 0

    def expected():
        A = T.alloc_buffer([4, 4], "int32")
        for i in T.serial(16):
            with T.block("block"):
                A[i // 4, i % 4] = 0
class TestErrorIfPaddingForbidden(BasePaddingCompare):
    """Unless padding is explicitly enabled, should raise error"""

    # 14 elements do not fill a 4x4 layout, and neither pad_value nor
    # assume_injective_transform is set, so the transform must fail.
    def before():
        A = T.alloc_buffer(14, "int32")
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                A[vi] = 0

    expected = tvm.tir.schedule.schedule.ScheduleError
class TestImplicitPaddingAssumeInjective(BasePaddingCompare):
    """When pad_value is None and assume_injective_transform is set, the buffer can be implicitly
    padded. The padded region is not accessed because the original loop extent is not changed.
    """

    assume_injective_transform = tvm.testing.parameter(True)

    def before():
        A = T.alloc_buffer(14, "int32")
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                A[vi] = 0

    def expected():
        A = T.alloc_buffer([4, 4], "int32")
        # Loop extent stays 14, so the two padding elements are never touched.
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                A[vi // 4, vi % 4] = 0
class TestErrorOnWrongPaddingType(BasePaddingCompare):
    """The padding must have the same dtype as the buffer"""

    # int8 pad value against an int32 buffer must be rejected.
    pad_value = tvm.testing.parameter(tir.IntImm("int8", 0))

    def before():
        A = T.alloc_buffer(14, "int32")
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                A[vi] = 0

    expected = tvm.tir.schedule.schedule.ScheduleError
class TestErrorOnNonMatchingTypes(BasePaddingCompare):
    """The padding must have the same dtype as the buffer"""

    # Integer pad value against a float32 buffer must be rejected.
    pad_value = tvm.testing.parameter(0)

    def before():
        A = T.alloc_buffer(14, "float32")
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                A[vi] = 0

    expected = tvm.tir.schedule.schedule.ScheduleError
class TestPaddedTransformIfThenElse(BasePaddingCompare):
    """Use if_then_else to represent padding, if possible.

    For a block that is a producer of the pre-transformation buffer,
    which visits all indices according to a row-major traversal, and
    which has no effect other than producing the transformed buffer,
    transform the loop iterators to be a row-major traversal of the
    post-transformation buffer, with padding represented by
    `T.if_then_else`.
    """

    pad_value = tvm.testing.parameter(0)
    transformed_buffer = tvm.testing.parameter("B")
    dtype = tvm.testing.parameter("int32", "int8")

    @tvm.testing.fixture
    def before(self, dtype):
        @T.prim_func
        def func(A: T.Buffer(14, dtype)):
            B = T.alloc_buffer(14, dtype)
            for i in T.serial(14):
                with T.block("block"):
                    vi = T.axis.remap("S", [i])
                    B[vi] = A[vi]

        return func

    @tvm.testing.fixture
    def expected(self, dtype, pad_value):
        # Coerce the parameterized pad value to the buffer dtype.
        pad_value = tir.IntImm(dtype, pad_value)

        @T.prim_func
        def func(A: T.Buffer(14, dtype)):
            B = T.alloc_buffer([4, 4], dtype)
            for i, j in T.grid(4, 4):
                with T.block("block"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    # (3, 2) and (3, 3) are the padded positions of the 4x4 layout.
                    B[vi, vj] = T.if_then_else(
                        vi == 3 and 2 <= vj, pad_value, A[vi * 4 + vj], dtype=dtype
                    )

        return func
class TestPaddedTransformWithoutLoop(BasePaddingCompare):
    """Handle padded writes without a loop

    The statement being replaced may be something other than a
    for-loop, such as if a loop has already been unrolled.
    """

    pad_value = tvm.testing.parameter(0)

    def before(A: T.Buffer(14, "int32")):
        with T.block("root"):
            T.reads()
            T.writes()
            with T.block("block"):
                A[0] = 0

    def expected(A: T.Buffer((4, 4), "int32")):
        with T.block("block"):
            A[0, 0] = 0

        # The pad value is written by a separate post-processing block.
        for i, j in T.grid(4, 4):
            with T.block("buffer_A_padding"):
                vi, vj = T.axis.remap("SS", [i, j])
                T.where(i == 3 and 2 <= j)
                A[vi, vj] = 0
class TestPaddedTransformIfThenElseReduction(BasePaddingCompare):
    """Like TestPaddedTransformIfThenElse, but with a reduction axis"""

    pad_value = tvm.testing.parameter(0)
    transformed_buffer = tvm.testing.parameter("B")

    def before(A: T.Buffer((14, 32), "int32")):
        B = T.alloc_buffer(14, "int32")
        for i, k in T.grid(14, 32):
            with T.block("block"):
                vi, vk = T.axis.remap("SR", [i, k])
                with T.init():
                    B[vi] = 0
                B[vi] = B[vi] + A[vi, vk]

    def expected(A: T.Buffer((14, 32), "int32")):
        B = T.alloc_buffer([4, 4], "int32")
        for i, j, k in T.grid(4, 4, 32):
            with T.block("block"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                # Both the init and the update are wrapped in if_then_else so
                # the padded elements (3, 2) and (3, 3) hold the pad value.
                with T.init():
                    B[vi, vj] = T.if_then_else(vi == 3 and 2 <= vj, 0, 0, dtype="int32")
                B[vi, vj] = T.if_then_else(
                    vi == 3 and 2 <= vj, 0, B[vi, vj] + A[vi * 4 + vj, vk], dtype="int32"
                )
class TestPaddedTransformIfThenElseReductionOpaque(BasePaddingCompare):
    """Like TestPaddedTransformIfThenElseReduction, but with opaque blocks"""

    pad_value = tvm.testing.parameter(0)
    transformed_buffer = tvm.testing.parameter("B")

    def before(A: T.Buffer((14, 32), "int32")):
        B = T.alloc_buffer(14, "int32")
        for i in T.serial(14):
            B[i] = 0
            for k in T.serial(32):
                with T.block("block"):
                    B[i] = B[i] + A[i, k]

    def expected(A: T.Buffer((14, 32), "int32")):
        B = T.alloc_buffer([4, 4], "int32")
        for i, j in T.grid(4, 4):
            # Init and accumulate guard the padded positions with if_then_else.
            B[i, j] = T.if_then_else(i == 3 and 2 <= j, 0, 0, dtype="int32")
            for k in T.serial(32):
                with T.block("block"):
                    B[i, j] = T.if_then_else(
                        i == 3 and 2 <= j, 0, B[i, j] + A[i * 4 + j, k], dtype="int32"
                    )
class TestPaddedTransformPostProcIfRequiredDueToSideEffects(BasePaddingCompare):
    """Set the transformation padding in a post-processing block.

    Like TestPaddedTransformIfThenElse, but the block that produces B
    also has the effect of setting `C`.  Because of that side effect the
    producer loop cannot be rewritten in place, so a separate padding
    block is appended instead.
    """

    pad_value = tvm.testing.parameter(0)
    transformed_buffer = tvm.testing.parameter("B")

    def before(A: T.Buffer(14, "int32")):
        B = T.alloc_buffer(14, "int32")
        C = T.alloc_buffer(14, "int32")
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]
                C[vi] = 0

    def expected(A: T.Buffer(14, "int32")):
        B = T.alloc_buffer([4, 4], "int32")
        C = T.alloc_buffer(14, "int32")
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi // 4, vi % 4] = A[vi]
                C[vi] = 0

        for i, j in T.grid(4, 4):
            with T.block("block_pad_B"):
                vi, vj = T.axis.remap("SS", [i, j])
                T.where(i == 3 and 2 <= j)
                B[vi, vj] = 0
class TestPaddedTransformOfInputCreatesAssumption(BasePaddingCompare):
    """Transformation of an input buffer places T.assume locally"""

    pad_value = tvm.testing.parameter(42)

    def before(A: T.Buffer(14, "int32"), B: T.Buffer(14, "int32")):
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]

    def expected(A: T.Buffer((4, 4), "int32"), B: T.Buffer(14, "int32")):
        # An input cannot be written, so the pad value becomes an assumption
        # about the contents of the padded region.
        for i, j in T.grid(4, 4):
            with T.block("buffer_A_assumption"):
                vi, vj = T.axis.remap("SS", [i, j])
                T.evaluate(T.assume(not (vi == 3 and 2 <= vj) or A[vi, vj] == 42))

        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi // 4, vi % 4]
class TestPaddedTransformNonConstantValue(tvm.testing.CompareBeforeAfter):
    """Allow an expression to specify the pad value.

    Like TestPaddedTransformIfThenElse, but the pad value depends on
    the indices.
    """

    @pytest.fixture
    def transform(self):
        def transform(mod):
            sch = tir.Schedule(mod)
            sch.transform_layout(
                "block",
                "B",
                lambda i: [i // 4, i % 4],
                # Pad value is a function of the post-transformation indices.
                pad_value=lambda i, j: i + j,
            )
            return sch.mod

        return transform

    def before(A: T.Buffer(14, "int32")):
        B = T.alloc_buffer(14, "int32")
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]

    def expected(A: T.Buffer(14, "int32")):
        B = T.alloc_buffer([4, 4], "int32")
        for i, j in T.grid(4, 4):
            with T.block("block"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = T.if_then_else(
                    vi == 3 and 2 <= vj, vi + vj, A[vi * 4 + vj], dtype="int32"
                )
@pytest.mark.xfail(reason="Not yet implemented")
class TestPaddedTransformRepeatedBufferElement(tvm.testing.CompareBeforeAfter):
    """Allow an expression to specify the pad value.

    Like TestPaddedTransformOfInputCreatesAssumption, but the pad
    value depends on another portion of the buffer.  In this case, the
    padding at the end of A contains repeated elements from the
    beginning of A.
    """

    @pytest.fixture
    def transform(self):
        def transform(mod):
            sch = tir.Schedule(mod)

            A = sch.get(sch.get_block("block")).reads[0].buffer
            sch.transform_layout(
                "block",
                "A",
                lambda i: [i // 4, i % 4],
                # Pad with wrapped-around elements of A itself.
                pad_value=lambda i, j: A[(4 * i + j) % 14],
            )
            return sch.mod

        return transform

    def before(A: T.Buffer(14, "int32")):
        B = T.alloc_buffer(14, "int32")
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]

    def expected(A: T.Buffer((4, 4), "int32")):
        for i, j in T.grid(4, 4):
            with T.block("buffer_A_assumption"):
                vi, vj = T.axis.remap("SS", [i, j])
                T.evaluate(
                    T.assume(
                        not (vi == 3 and 2 <= vj)
                        # Bugfix: the wrapped index must be written with the
                        # block var vj; the original used the raw loop var j,
                        # which is not a valid reference inside the block.
                        or A[vi, vj] == A[((4 * vi + vj) % 14) // 4, ((4 * vi + vj) % 14) % 4]
                    )
                )

        B = T.alloc_buffer(14, "int32")
        # Bugfix: use T.serial(14) to match the loop kind used in `before`
        # (the original used T.grid(14) here).
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi // 4, vi % 4]
class TestPadValueMayNotReferenceOtherBuffer(tvm.testing.CompareBeforeAfter):
    """Allow an expression to specify the pad value.

    Like TestPaddedTransformRepeatedBufferElement, but the pad value depends on
    a different buffer, which is not allowed.
    """

    @pytest.fixture
    def transform(self):
        def transform(mod):
            sch = tir.Schedule(mod)

            A = sch.get(sch.get_block("block")).reads[0].buffer
            # A buffer unrelated to the transformed one; referencing it in the
            # pad value must raise a ScheduleError.
            other = tir.decl_buffer(1, A.dtype, name="other")
            sch.transform_layout(
                "block",
                "A",
                lambda i: [i // 4, i % 4],
                pad_value=lambda i, j: other[0],
            )
            return sch.mod

        return transform

    def before(A: T.Buffer(14, "int32")):
        B = T.alloc_buffer(14, "int32")
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]

    expected = tvm.tir.schedule.schedule.ScheduleError
class TestTransformLayoutWithVar(tvm.testing.CompareBeforeAfter):
    """Layout transform with dynamic parameter in transform"""

    @pytest.fixture
    def transform(self):
        def transform(mod):
            sch = tir.Schedule(mod)

            # The symbolic extent n is the second parameter of the PrimFunc.
            n = sch.mod["main"].params[1]

            sch.transform_layout(
                "block",
                "B",
                lambda i: [i // n, i % n],
                pad_value=0,
            )
            return sch.mod

        return transform

    def before(A: T.Buffer(16, "int32"), n: T.int32):
        B = T.alloc_buffer(16, "int32")
        for i in T.serial(16):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]

    def expected(A: T.Buffer(16, "int32"), n: T.int32):
        B = T.alloc_buffer([(-16 % n + 16) // n, n], dtype="int32")
        for i, j in T.grid((-16 % n + 16) // n, n):
            with T.block("block"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = T.if_then_else(
                    # Checks if the transform introduced padding
                    -16 % n != 0
                    # If so, is vi in the last group (which may
                    # include padding).
                    and (vj + vi * n) // n == 16 // n
                    # And is vj within the padding
                    and 16 % n <= (vj + vi * n) % n,
                    0,
                    A[vj + vi * n],
                    dtype="int32",
                )
class TestTransformWithAxisSeparators(BasePaddingCompare):
    """Axis separators may be specified in a transform"""

    index_map = tvm.testing.parameter(lambda i: [i // 4, tvm.tir.IndexMap.AXIS_SEPARATOR, i % 4])
    pad_value = tvm.testing.parameter(0)

    def before(a: T.handle):
        A = T.match_buffer(a, [14], "int32")
        for i in T.serial(14):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                A[vi] = 42

    def expected(a: T.handle):
        # The AXIS_SEPARATOR in the index map becomes axis_separators=[1].
        A = T.match_buffer(a, [4, 4], "int32", axis_separators=[1])
        for i, j in T.grid(4, 4):
            with T.block("block"):
                vi, vj = T.axis.remap("SS", [i, j])
                A[vi, vj] = T.if_then_else(vi == 3 and 2 <= vj, 0, 42, dtype="int32")
class TestTransformWithAxisSeparatorsOpaqueBlock(BasePaddingCompare):
    """Axis separators may be specified in a transform of opaque block"""

    index_map = tvm.testing.parameter(lambda i: [i // 4, tvm.tir.IndexMap.AXIS_SEPARATOR, i % 4])
    pad_value = tvm.testing.parameter(0)

    def before(a: T.handle):
        A = T.match_buffer(a, [14], "int32")
        for i in T.serial(14):
            with T.block("block"):
                A[i] = 42

    def expected(a: T.handle):
        A = T.match_buffer(a, [4, 4], "int32", axis_separators=[1])
        for i, j in T.grid(4, 4):
            with T.block("block"):
                A[i, j] = T.if_then_else(i == 3 and 2 <= j, 0, 42, dtype="int32")
def test_index_map_dtype_legalize():
    """Test dtype legalization of the index map indices."""

    @T.prim_func
    def func(A: T.Buffer(T.int64(58), "int32")):
        for i in T.serial(T.int64(58)):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                T.writes(A[vi])
                A[vi] = 0

    sch = tir.Schedule(func)

    # The following error is raised from the IterVar constructor without the dtype legalization.
    # TVMError: Check failed: dom->extent.dtype() == var.dtype() (int64 vs. int32) :
    # The dtype of the extent of an IterVar (int64) must match its associated Var's dtype (int32)
    sch.transform_layout(
        sch.get_block("block"), buffer="A", index_map=lambda h: [h // 8, h % 8], pad_value=0
    )
def test_index_map_dtype_legalize_with_constant():
    """Legalization of inverse containing a constant output

    The index map `lambda i,j: [i, j//8, j % 8]` has an inverse `lambda i,j,k: [i, 8*j+k]`.
    """

    @T.prim_func
    def func(A: T.Buffer(T.int64(16), "int32")):
        for i in T.grid(T.int64(16)):
            with T.block("block"):
                vi = T.axis.remap("S", [i])
                A[vi] = 0

    sch = tir.Schedule(func)

    # Triggering the error requires an IndexMap that introduces padding.
    # (Renamed from `func` to avoid shadowing the prim_func defined above.)
    index_map = lambda i: [
        # And a constant to be one of the output indices.
        tir.const(0, i.dtype),
        (i + 1) // 8,
        (i + 1) % 8,
    ]

    # Previously, the legalization was only handled by propagating the
    # dtype of the indices to the transformed indices.  As a result,
    # output indices whose value did not depend on the input index
    # would be left with the incorrect dtype.
    # Prior to the bugfix, this resulted in the following error is
    # raised from the IterVar constructor.
    #
    # TVMError: Check failed: dom->extent.dtype() == var.dtype() (int64 vs. int32) :
    # The dtype of the extent of an IterVar (int64) must match its associated Var's dtype (int32)
    sch.transform_layout(block="block", buffer="A", index_map=index_map, pad_value=0)
def test_transform_layout_with_symbolic_bound():
    """transform_layout can flatten a write buffer whose extent is symbolic."""
    # fmt: off
    # pylint: disable=invalid-name,line-too-long,too-many-locals
    @T.prim_func
    def before(a: T.handle, b: T.handle, c: T.handle):
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        n = T.int64()
        A = T.match_buffer(a, (T.int64(1), T.int64(32), T.int64(1), T.int64(128)), "float16")
        B = T.match_buffer(b, (T.int64(1), T.int64(32), n, T.int64(128)), "float16")
        C = T.match_buffer(c, (T.int64(1), T.int64(32), T.int64(1), n), "float16")
        for i0, i1, i2, i3, k in T.grid(T.int64(1), T.int64(32), T.int64(1), n, T.int64(128)):
            with T.block("NT_matmul"):
                v_i0, v_i1, v_i2, v_i3, v_k = T.axis.remap("SSSSR", [i0, i1, i2, i3, k])
                T.reads(A[v_i0, v_i1, v_i2, v_k], B[v_i0, v_i1, v_i3, v_k])
                T.writes(C[v_i0, v_i1, v_i2, v_i3])
                with T.init():
                    C[v_i0, v_i1, v_i2, v_i3] = T.float16(0)
                C[v_i0, v_i1, v_i2, v_i3] = C[v_i0, v_i1, v_i2, v_i3] + A[v_i0, v_i1, v_i2, v_k] * B[v_i0, v_i1, v_i3, v_k]

    @T.prim_func
    def after(a: T.handle, b: T.handle, c: T.handle):
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        n = T.int64()
        A = T.match_buffer(a, (T.int64(1), T.int64(32), T.int64(1), T.int64(128)), "float16")
        B = T.match_buffer(b, (T.int64(1), T.int64(32), n, T.int64(128)), "float16")
        C = T.match_buffer(c, (n * T.int64(32),), "float16")
        for i0, i1, i2, i3, k in T.grid(T.int64(1), T.int64(32), T.int64(1), n, T.int64(128)):
            with T.block("NT_matmul"):
                v_i0, v_i1, v_i2, v_i3, v_k = T.axis.remap("SSSSR", [i0, i1, i2, i3, k])
                T.reads(A[v_i0, v_i1, v_i2, v_k], B[v_i0, v_i1, v_i3, v_k])
                T.writes(C[v_i1 * n + v_i3])
                with T.init():
                    C[v_i1 * n + v_i3] = T.float16(0)
                C[v_i1 * n + v_i3] = C[v_i1 * n + v_i3] + A[v_i0, v_i1, v_i2, v_k] * B[v_i0, v_i1, v_i3, v_k]
    # pylint: enable=invalid-name,line-too-long,too-many-locals
    # fmt: on

    # pylint: disable=invalid-name
    # Recover the symbolic extent n from B's shape.
    _, _, n, _ = before.buffer_map[before.params[1]].shape
    sch = tvm.tir.Schedule(before)
    block = sch.get_block("NT_matmul")
    sch.transform_layout(
        block,
        ("write", 0),
        lambda x, y, z, w: x * 32 * n + y * n + z * n + w,
        assume_injective_transform=True,
    )
    # pylint: enable=invalid-name
    tvm.ir.assert_structural_equal(after, sch.mod["main"])
def test_transform_block_layout_with_symbolic_bound():
    """transform_block_layout can fuse spatial axes with a symbolic extent."""
    # fmt: off
    # pylint: disable=invalid-name,line-too-long,too-many-locals
    @T.prim_func
    def before(a: T.handle, b: T.handle, c: T.handle):
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        n = T.int64()
        A = T.match_buffer(a, (T.int64(1), T.int64(32), T.int64(1), T.int64(128)), "float16")
        B = T.match_buffer(b, (T.int64(1), T.int64(32), n, T.int64(128)), "float16")
        C = T.match_buffer(c, (n * T.int64(32),), "float16")
        for i0, i1, i2, i3, k in T.grid(T.int64(1), T.int64(32), T.int64(1), n, T.int64(128)):
            with T.block("NT_matmul"):
                v_i0, v_i1, v_i2, v_i3, v_k = T.axis.remap("SSSSR", [i0, i1, i2, i3, k])
                T.reads(A[v_i0, v_i1, v_i2, v_k], B[v_i0, v_i1, v_i3, v_k])
                T.writes(C[v_i1 * n + v_i3])
                with T.init():
                    C[v_i1 * n + v_i3] = T.float16(0)
                C[v_i1 * n + v_i3] = C[v_i1 * n + v_i3] + A[v_i0, v_i1, v_i2, v_k] * B[v_i0, v_i1, v_i3, v_k]

    @T.prim_func
    def after(a: T.handle, b: T.handle, c: T.handle):
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        n = T.int64()
        A = T.match_buffer(a, (T.int64(1), T.int64(32), T.int64(1), T.int64(128)), "float16")
        B = T.match_buffer(b, (T.int64(1), T.int64(32), n, T.int64(128)), "float16")
        C = T.match_buffer(c, (n * T.int64(32),), "float16")
        for ax0, ax1 in T.grid(n * T.int64(32), T.int64(128)):
            with T.block("NT_matmul"):
                v0, v1 = T.axis.remap("SR", [ax0, ax1])
                T.reads(A[T.int64(0), v0 // n, T.int64(0), v1], B[T.int64(0), v0 // n, v0 % n, v1])
                T.writes(C[v0])
                with T.init():
                    C[v0] = T.float16(0)
                C[v0] = C[v0] + A[T.int64(0), v0 // n, T.int64(0), v1] * B[T.int64(0), v0 // n, v0 % n, v1]
    # pylint: enable=invalid-name,line-too-long,too-many-locals
    # fmt: on

    # pylint: disable=invalid-name
    # Recover the symbolic extent n from B's shape.
    _, _, n, _ = before.buffer_map[before.params[1]].shape
    sch = tvm.tir.Schedule(before)
    block = sch.get_block("NT_matmul")
    sch.transform_block_layout(
        block,
        lambda x, y, z, w, k: (
            x * 32 * n + y * n + z * n + w,
            k,
        ),
    )
    # pylint: enable=invalid-name
    tvm.ir.assert_structural_equal(after, sch.mod["main"])
# Allow running this test file directly.
if __name__ == "__main__":
    tvm.testing.main()
| 43,945 | 36.116554 | 228 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_split_host_device.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import tvm.testing
from tvm.script import tir as T, ir as I
@tvm.testing.requires_cuda
def test_split_host_device_func_attr():
    """A device kernel split from a host function carries the expected attributes."""
    m = te.size_var("m")
    l = te.size_var("l")
    A = te.placeholder((m, l), name="A")

    A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1")
    A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2")

    # Bind the outer axis to blockIdx.x so part of the function lives on device.
    s = te.create_schedule(A2.op)
    xo, xi = s[A2].split(A2.op.axis[0], factor=8)
    s[A2].bind(xo, te.thread_axis("blockIdx.x"))
    s[A1].compute_at(s[A2], xo)
    s[A1].set_scope("shared")

    mod = tvm.lower(s, [A, A2])

    cuda_target = tvm.target.Target("cuda", host="llvm")
    mod = tvm.tir.transform.Apply(
        lambda f: f.with_attr({"global_symbol": "test", "target": cuda_target})
    )(mod)
    mod = tvm.ir.transform.Sequential(
        [
            tvm.tir.transform.AnnotateDeviceRegions(),
            tvm.tir.transform.SplitHostDevice(),
            tvm.tir.transform.MakePackedAPI(),
            tvm.tir.transform.LowerDeviceKernelLaunch(),
        ]
    )(mod)

    fdevice = mod["test_kernel"]
    assert fdevice.attrs["global_symbol"] == "test_kernel"
    # NOTE(review): 2 is presumably CallingConv::kDeviceKernelLaunch —
    # confirm against include/tvm/ir/function.h.
    assert fdevice.attrs["calling_conv"].value == 2
    assert str(fdevice.attrs["target"]) == str(tvm.target.Target("cuda"))
    assert fdevice.attrs["tir.is_global_func"].value
def test_ssa_across_entire_module():
    """The host and device functions should not share TIR vars

    Any arguments that are passed from the host to the device should
    be in terms of independent TIR variables.
    """

    @I.ir_module
    class before:
        @T.prim_func
        def main():
            T.func_attr({"global_symbol": "main", "target": T.target("cuda", host="llvm")})
            for i in range(16):
                T.attr(0, "device_scope", 0)
                for j in range(16):
                    T.evaluate(i)

    after = tvm.ir.transform.Sequential(
        [
            tvm.tir.transform.AnnotateDeviceRegions(),
            tvm.tir.transform.SplitHostDevice(),
            tvm.tir.transform.LowerDeviceKernelLaunch(),
        ]
    )(before)

    # The loop var `i` on the host and the kernel's first parameter must be
    # distinct TIR variables, even though the value of one is passed to the other.
    loop_var = after["main"].body.loop_var
    param_var = after["main_kernel"].params[0]

    assert not loop_var.same_as(param_var)
class BaseCompare(tvm.testing.CompareBeforeAfter):
    """Applies SplitHostDevice to each test's `before` module."""

    transform = tvm.tir.transform.SplitHostDevice()
class TestSplitHostDevice(BaseCompare):
    """SplitHostDevice divides a function at the "target" attribute"""

    def before(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def main(n: T.int32):
                T.func_attr({"target": T.target("cuda", host="llvm -opt-level=0")})
                T.attr(T.target("cuda"), "target", 0)
                T.evaluate(n)

        return mod

    def expected(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def main(n: T.int32):
                T.func_attr({"target": T.target("cuda", host="llvm -opt-level=0")})
                mod.main_kernel(n)

            # The annotated region becomes a separate globally-visible kernel.
            @T.prim_func
            def main_kernel(n: T.int32):
                T.func_attr(
                    {
                        "target": T.target("cuda"),
                        "tir.noalias": T.bool(True),
                        "tir.is_global_func": True,
                    }
                )
                T.evaluate(n)

        return mod
class TestSplitHostDeviceWithoutFuncHostAttribute(BaseCompare):
    """Like TestSplitHostDevice, but no host specified in the host's target

    The `T.attr` specifying the device still requires splitting out
    the kernel.
    """

    def before(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def main(n: T.int32):
                T.func_attr({"target": T.target("llvm")})
                T.attr(T.target("cuda"), "target", 0)
                T.evaluate(n)

        return mod

    def expected(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def main(n: T.int32):
                T.func_attr({"target": T.target("llvm")})
                mod.main_kernel(n)

            @T.prim_func
            def main_kernel(n: T.int32):
                T.func_attr(
                    {
                        "target": T.target("cuda"),
                        "tir.noalias": T.bool(True),
                        "tir.is_global_func": True,
                    }
                )
                T.evaluate(n)

        return mod
class TestSplitHostDeviceWithoutDeviceRegion(BaseCompare):
    """Like TestSplitHostDevice, but no device regions to extract

    Because MakePackedAPI/MakeUnpackedAPI still require both the
    device and host, SplitHostDevice does not modify the "target"
    attribute.
    """

    def before():
        T.func_attr({"target": T.target("ext_dev", host="llvm")})
        T.evaluate(0)

    expected = before
class TestSplitHostDeviceNameCollision(BaseCompare):
    """Like TestSplitHostDevice, but with the default name already taken

    The default name is generated as `func.name + "_kernel"`.  If this
    name is already taken by another function in the IRModule, then
    SplitHostDevice should select a different name.
    """

    def before(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def main(n: T.int32):
                T.func_attr({"target": T.target("cuda", host="llvm -opt-level=0")})
                T.attr(T.target("cuda"), "target", 0)
                T.evaluate(n)

            # Occupies the default "main_kernel" name.
            @T.prim_func
            def main_kernel():
                T.func_attr({"target": T.target("llvm")})
                T.evaluate(0)

        return mod

    def expected(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def main(n: T.int32):
                T.func_attr({"target": T.target("cuda", host="llvm -opt-level=0")})
                mod.main_kernel_1(n)

            # A de-duplicated name is chosen for the extracted kernel.
            @T.prim_func
            def main_kernel_1(n: T.int32):
                T.func_attr(
                    {
                        "target": T.target("cuda"),
                        "tir.noalias": T.bool(True),
                        "tir.is_global_func": True,
                    }
                )
                T.evaluate(n)

            @T.prim_func
            def main_kernel():
                T.func_attr({"target": T.target("llvm")})
                T.evaluate(0)

        return mod
# Allow running this test file directly.
if __name__ == "__main__":
    tvm.testing.main()
| 7,282 | 29.345833 | 91 | py |
tvm | tvm-main/tests/python/unittest/test_target_codegen_static_init.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import ctypes
import numpy as np
def test_static_callback():
    """A "TVMBackendRunOnce" coproc_uop_scope region executes only once.

    The region annotated with the "coproc_uop_scope" attribute and the
    "TVMBackendRunOnce" marker increments every element of the buffer.
    Calling the built function twice must therefore leave each element
    at 1, proving the second invocation skipped the run-once region.
    """
    dtype = "int64"
    n = te.size_var("n")
    Ab = tvm.tir.decl_buffer((n,), dtype)
    ib = tvm.tir.ir_builder.create()
    A = ib.buffer_ptr(Ab)
    cp = te.thread_axis((0, 1), "cop")
    finit = tvm.tir.StringImm("TVMBackendRunOnce")
    ib.scope_attr(cp, "coproc_uop_scope", finit)
    # (Removed an unused `i = te.size_var("i")`; the loop below binds its own var.)
    with ib.for_range(0, n, "i", kind="parallel") as i:
        A[i] = A[i] + 1
    stmt = ib.get()

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "ramp"))
    f = tvm.driver.build(mod, target="llvm")
    a = tvm.nd.array(np.zeros(10, dtype=dtype))
    f(a)
    f(a)
    # Each element was incremented exactly once despite two calls.
    np.testing.assert_equal(a.numpy(), np.ones(a.shape[0]))
def test_static_init():
    """tir.tvm_static_handle lowers to a handle passable to a packed call.

    Builds a kernel that forwards a static handle to the registered
    "test_static_callback" function, which checks it arrives as a raw
    C void pointer.
    """
    dtype = "int64"
    n = te.size_var("n")
    Ab = tvm.tir.decl_buffer((n,), dtype)
    ib = tvm.tir.ir_builder.create()
    # (Removed an unused `i = te.size_var("i")` local.)
    handle = tvm.tir.call_intrin("handle", "tir.tvm_static_handle")
    ib.emit(tvm.tir.call_packed("test_static_callback", handle, Ab))

    @tvm.register_func("test_static_callback")
    def test_cb(sh, A):
        # The static handle must surface as a ctypes void pointer.
        assert isinstance(sh, ctypes.c_void_p)
        return sh

    stmt = ib.get()

    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "ramp"))
    f = tvm.driver.build(mod, target="llvm")
    a = tvm.nd.array(np.zeros(10, dtype=dtype))
    f(a)
# Allow running this test file directly.
if __name__ == "__main__":
    test_static_callback()
    test_static_init()
| 2,311 | 32.507246 | 97 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_annotate_device_regions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.script import tir as T, ir as I
class BaseCompare(tvm.testing.CompareBeforeAfter):
    """Harness: run AnnotateDeviceRegions on `before`, compare with `expected`."""

    transform = tvm.tir.transform.AnnotateDeviceRegions()
class TestAnnotateThreadExtent(BaseCompare):
    """Annotation inserted at the "thread_extent" attribute"""

    def before(A: T.Buffer(16, "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        i = T.launch_thread("threadIdx.x", 16)
        A[i] = 0.0

    def expected(A: T.Buffer(16, "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        # The pass wraps the thread launch in a "target" attribute naming the
        # device (non-host) target extracted from the function's target attr.
        T.attr(T.target("cuda"), "target", 0)
        i = T.launch_thread("threadIdx.x", 16)
        A[i] = 0.0
class TestAnnotateDeviceScope(BaseCompare):
    """Annotation inserted at the "device_scope" attribute"""

    def before(A: T.Buffer(1, "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        T.attr(0, "device_scope", 0)
        A[0] = 0.0

    def expected(A: T.Buffer(1, "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        # The "target" annotation is placed outside the existing
        # "device_scope" attribute.
        T.attr(T.target("cuda"), "target", 0)
        T.attr(0, "device_scope", 0)
        A[0] = 0.0
if __name__ == "__main__":
tvm.testing.main()
| 2,008 | 33.050847 | 62 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_postproc_rewrite_tensorize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import tvm
from tvm import meta_schedule as ms
from tvm.script import tir as T
from tvm.tir.tensor_intrin import arm_cpu, cuda, rocm, x86
@tvm.script.ir_module
class Conv2dNCHWcVNNIModuleTiled:
@T.prim_func
def main(
placeholder: T.Buffer((1, 4, 56, 56, 16), "uint8"),
placeholder_1: T.Buffer((16, 4, 1, 1, 4, 16, 4), "int8"),
conv2d_NCHWc_int8: T.Buffer((1, 16, 56, 56, 16), "int32"),
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for (
i0_0,
i1_0,
i2_0,
i3_0,
i4_0_0,
i0_1,
i1_1,
i2_1,
i3_1,
i4_0_1,
i5_0,
i6_0,
i7_0,
i8_0,
i9_0_0,
i0_2,
i1_2,
i2_2,
i3_2,
i4_0_2,
i5_1,
i6_1,
i7_1,
i8_1,
i9_0_1,
i0_3,
i1_3,
i2_3,
i3_3,
i4_0_3,
) in T.grid(
1,
1,
2,
1,
1,
1,
4,
1,
14,
1,
1,
1,
4,
1,
1,
1,
4,
7,
1,
1,
1,
1,
1,
4,
1,
1,
1,
4,
4,
1,
):
with T.block("conv2d_NCHWc_int8_o"):
n = T.axis.spatial(1, 0)
oc_chunk = T.axis.spatial(16, i1_1 * 4 + i1_2)
oh = T.axis.spatial(56, i2_0 * 28 + i2_2 * 4 + i2_3)
ow = T.axis.spatial(56, i3_1 * 4 + i3_3)
oc_block_o = T.axis.spatial(1, 0)
kh = T.axis.reduce(1, 0)
kw = T.axis.reduce(1, 0)
ic_outer, ic_f_inner = T.axis.remap("RR", [i7_0, i8_1])
ic_s_inner_o = T.axis.reduce(1, 0)
T.reads(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4],
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:16, 0:4],
)
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0:16])
T.block_attr({"meta_schedule.auto_tensorize": "dot_16x4_vnni"})
with T.init():
for i4_1 in T.serial(16):
with T.block("conv2d_NCHWc_int8_init"):
oc_block_init = T.axis.spatial(16, i4_1)
T.reads()
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_init])
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_init] = 0
for i4_1, i9_1 in T.grid(16, 4):
with T.block("conv2d_NCHWc_int8"):
oc_block, ic_s_inner = T.axis.remap("SR", [i4_1, i9_1])
T.reads(
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block],
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
placeholder_1[
oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner
],
)
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[
n, oc_chunk, oh, ow, oc_block
] + T.cast(
placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
"int32",
) * T.cast(
placeholder_1[
oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner
],
"int32",
)
@tvm.script.ir_module
class Conv2dNCHWcVNNIModuleTensorized:
@T.prim_func
def main(
placeholder: T.Buffer((1, 4, 56, 56, 16), "uint8"),
placeholder_1: T.Buffer((16, 4, 1, 1, 4, 16, 4), "int8"),
conv2d_NCHWc_int8: T.Buffer((1, 16, 56, 56, 16), "int32"),
) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
# body
# with T.block("root")
for i0_0, i1_0, i2_0, i3_0, i4_0_0, i0_1, i1_1, i2_1, i3_1, i4_0_1, i5_0, i6_0 in T.grid(
1, 1, 2, 1, 1, 1, 4, 1, 14, 1, 1, 1
):
for i1_2_init, i2_2_init, i2_3_init, i3_3_init in T.grid(4, 7, 4, 4):
with T.block("conv2d_NCHWc_int8_o_init"):
n = T.axis.spatial(1, 0)
oc_chunk = T.axis.spatial(16, i1_1 * 4 + i1_2_init)
oh = T.axis.spatial(56, i2_0 * 28 + i2_2_init * 4 + i2_3_init)
ow = T.axis.spatial(56, i3_1 * 4 + i3_3_init)
oc_block_o = T.axis.spatial(1, 0)
T.reads()
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0:16])
for i4_1 in T.vectorized(16):
with T.block("conv2d_NCHWc_int8_init"):
oc_block_init = T.axis.spatial(16, i4_1)
T.reads()
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_init])
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_init] = 0
for (
i7_0,
i8_0,
i9_0_0,
i0_2,
i1_2,
i2_2,
i3_2,
i4_0_2,
i5_1,
i6_1,
i7_1,
i8_1,
i9_0_1,
i0_3,
i1_3,
i2_3,
i3_3,
i4_0_3,
) in T.grid(4, 1, 1, 1, 4, 7, 1, 1, 1, 1, 1, 4, 1, 1, 1, 4, 4, 1):
with T.block("conv2d_NCHWc_int8_o_update"):
n = T.axis.spatial(1, 0)
oc_chunk = T.axis.spatial(16, i1_1 * 4 + i1_2)
oh = T.axis.spatial(56, i2_0 * 28 + i2_2 * 4 + i2_3)
ow = T.axis.spatial(56, i3_1 * 4 + i3_3)
oc_block_o = T.axis.spatial(1, 0)
kh = T.axis.reduce(1, 0)
kw = T.axis.reduce(1, 0)
ic_outer, ic_f_inner = T.axis.remap("RR", [i7_0, i8_1])
ic_s_inner_o = T.axis.reduce(1, 0)
T.reads(
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0:16],
placeholder[
n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4
],
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:16, 0:4],
)
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0:16])
A = T.match_buffer(
placeholder[
n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4
],
[4],
dtype="uint8",
offset_factor=1,
)
B = T.match_buffer(
placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:16, 0:4],
[16, 4],
dtype="int8",
offset_factor=1,
)
C = T.match_buffer(
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0:16],
[16],
dtype="int32",
offset_factor=1,
)
A_u8x4 = A.vload([0], "uint8x4")
A_i32 = T.reinterpret(A_u8x4, dtype="int32")
B_i8x64 = B.vload([0, 0], dtype="int8x64")
B_i32x16 = T.reinterpret(B_i8x64, dtype="int32x16")
C_i32x16 = C.vload([0], dtype="int32x16")
C[T.ramp(0, 1, 16)] = T.call_llvm_pure_intrin(
T.llvm_lookup_intrinsic_id("llvm.x86.avx512.vpdpbusd.512"),
T.uint32(3),
C_i32x16,
T.broadcast(A_i32, 16),
B_i32x16,
dtype="int32x16",
)
@tvm.script.ir_module
class DenseDP4ATiled:
@T.prim_func
def main(
X: T.Buffer((128, 128), "int8"),
W: T.Buffer((128, 128), "int8"),
compute: T.Buffer((128, 128), "int32"),
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
compute_local = T.alloc_buffer([128, 128], dtype="int32", scope="local")
X_shared = T.alloc_buffer([128, 128], dtype="int8", scope="shared")
W_shared = T.alloc_buffer([128, 128], dtype="int8", scope="shared")
for i0_0_i1_0_fused in T.thread_binding(16, thread="blockIdx.x"):
for i0_1_i1_1_fused in T.thread_binding(2, thread="vthread.x"):
for i0_2_i1_2_fused in T.thread_binding(2, thread="threadIdx.x"):
for i2_0_0 in T.serial(2):
for ax0_ax1_fused in T.serial(1024):
with T.block("X_shared"):
v0 = T.axis.spatial(
128, i0_0_i1_0_fused // 2 * 16 + ax0_ax1_fused // 64
)
v1 = T.axis.spatial(128, i2_0_0 * 64 + ax0_ax1_fused % 64)
T.reads(X[v0, v1])
T.writes(X_shared[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch": 4})
X_shared[v0, v1] = X[v0, v1]
for ax0_ax1_fused in T.serial(4096):
with T.block("W_shared"):
v0 = T.axis.spatial(
128, i0_0_i1_0_fused % 2 * 64 + ax0_ax1_fused // 64
)
v1 = T.axis.spatial(128, i2_0_0 * 64 + ax0_ax1_fused % 64)
T.reads(W[v0, v1])
T.writes(W_shared[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch": 1})
W_shared[v0, v1] = W[v0, v1]
for i2_0_1, i0_3, i1_3, i2_0_2, i0_4, i1_4 in T.grid(2, 4, 16, 8, 4, 1):
with T.block("compute_o"):
i = T.axis.spatial(128, i0_0_i1_0_fused // 2 * 16 + i0_3 * 4 + i0_4)
j = T.axis.spatial(
128,
i0_0_i1_0_fused % 2 * 64
+ i0_1_i1_1_fused * 32
+ i0_2_i1_2_fused * 16
+ i1_3,
)
k_o = T.axis.reduce(32, i2_0_0 * 16 + i2_0_1 * 8 + i2_0_2)
T.reads(
X_shared[i, k_o * 4 : k_o * 4 + 4],
W_shared[j, k_o * 4 : k_o * 4 + 4],
)
T.writes(compute_local[i, j])
T.block_attr({"meta_schedule.auto_tensorize": "dp4a"})
with T.init():
with T.block("compute_init"):
T.reads()
T.writes(compute_local[i, j])
compute_local[i, j] = 0
for i2_1 in T.serial(4):
with T.block("compute"):
k = T.axis.reduce(4, i2_1)
T.reads(
compute_local[i, j],
X_shared[i, k_o * 4 + k],
W_shared[j, k_o * 4 + k],
)
T.writes(compute_local[i, j])
T.block_attr({"meta_schedule.tiling_structure": "SSSRRSRS"})
compute_local[i, j] = compute_local[i, j] + T.cast(
X_shared[i, k_o * 4 + k], "int32"
) * T.cast(W_shared[j, k_o * 4 + k], "int32")
for ax0, ax1 in T.grid(16, 16):
with T.block("compute_local"):
v0 = T.axis.spatial(128, i0_0_i1_0_fused // 2 * 16 + ax0)
v1 = T.axis.spatial(
128,
i0_0_i1_0_fused % 2 * 64
+ i0_1_i1_1_fused * 32
+ i0_2_i1_2_fused * 16
+ ax1,
)
T.reads(compute_local[v0, v1])
T.writes(compute[v0, v1])
compute[v0, v1] = compute_local[v0, v1]
@tvm.script.ir_module
class DenseDP4ATensorized:
@T.prim_func
def main(
X: T.Buffer((128, 128), "int8"),
W: T.Buffer((128, 128), "int8"),
compute: T.Buffer((128, 128), "int32"),
) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
# body
# with T.block("root")
compute_local = T.alloc_buffer([128, 128], dtype="int32", scope="local")
X_shared = T.alloc_buffer([128, 128], dtype="int8", scope="shared")
W_shared = T.alloc_buffer([128, 128], dtype="int8", scope="shared")
for i0_0_i1_0_fused in T.thread_binding(16, thread="blockIdx.x"):
for i0_1_i1_1_fused in T.thread_binding(2, thread="vthread.x"):
for i0_2_i1_2_fused in T.thread_binding(2, thread="threadIdx.x"):
for i0_3_init, i1_3_init, i0_4_init in T.grid(4, 16, 4):
with T.block("compute_o_init"):
i = T.axis.spatial(
128, i0_0_i1_0_fused // 2 * 16 + i0_3_init * 4 + i0_4_init
)
j = T.axis.spatial(
128,
i0_0_i1_0_fused % 2 * 64
+ i0_1_i1_1_fused * 32
+ i0_2_i1_2_fused * 16
+ i1_3_init,
)
T.reads()
T.writes(compute_local[i, j])
T.block_attr({"meta_schedule.auto_tensorize": ""})
with T.block("compute_init"):
T.reads()
T.writes(compute_local[i, j])
compute_local[i, j] = 0
for i2_0_0 in T.serial(2):
for ax0_ax1_fused in T.serial(1024):
with T.block("X_shared"):
v0 = T.axis.spatial(
128, i0_0_i1_0_fused // 2 * 16 + ax0_ax1_fused // 64
)
v1 = T.axis.spatial(128, i2_0_0 * 64 + ax0_ax1_fused % 64)
T.reads(X[v0, v1])
T.writes(X_shared[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch": 4})
X_shared[v0, v1] = X[v0, v1]
for ax0_ax1_fused in T.serial(4096):
with T.block("W_shared"):
v0 = T.axis.spatial(
128, i0_0_i1_0_fused % 2 * 64 + ax0_ax1_fused // 64
)
v1 = T.axis.spatial(128, i2_0_0 * 64 + ax0_ax1_fused % 64)
T.reads(W[v0, v1])
T.writes(W_shared[v0, v1])
T.block_attr({"meta_schedule.cooperative_fetch": 1})
W_shared[v0, v1] = W[v0, v1]
for i2_0_1, i0_3, i1_3, i2_0_2, i0_4, i1_4 in T.grid(2, 4, 16, 8, 4, 1):
with T.block("compute_o_update"):
i = T.axis.spatial(128, i0_0_i1_0_fused // 2 * 16 + i0_3 * 4 + i0_4)
j = T.axis.spatial(
128,
i0_0_i1_0_fused % 2 * 64
+ i0_1_i1_1_fused * 32
+ i0_2_i1_2_fused * 16
+ i1_3,
)
k_o = T.axis.reduce(32, i2_0_0 * 16 + i2_0_1 * 8 + i2_0_2)
T.reads(
compute_local[i, j],
X_shared[i, k_o * 4 : k_o * 4 + 4],
W_shared[j, k_o * 4 : k_o * 4 + 4],
)
T.writes(compute_local[i, j])
A = T.match_buffer(
X_shared[i, k_o * 4 : k_o * 4 + 4],
[4],
dtype="int8",
scope="shared",
align=4,
offset_factor=1,
)
B = T.match_buffer(
W_shared[j, k_o * 4 : k_o * 4 + 4],
[4],
dtype="int8",
scope="shared",
align=4,
offset_factor=1,
)
C = T.match_buffer(
compute_local[i, j],
[1],
dtype="int32",
scope="local",
align=4,
offset_factor=1,
)
C[0] = C[0] + T.call_pure_extern(
"__dp4a",
A[T.ramp(0, 1, 4)],
B[T.ramp(0, 1, 4)],
0,
dtype="int32",
)
for ax0, ax1 in T.grid(16, 16):
with T.block("compute_local"):
v0 = T.axis.spatial(128, i0_0_i1_0_fused // 2 * 16 + ax0)
v1 = T.axis.spatial(
128,
i0_0_i1_0_fused % 2 * 64
+ i0_1_i1_1_fused * 32
+ i0_2_i1_2_fused * 16
+ ax1,
)
T.reads(compute_local[v0, v1])
T.writes(compute[v0, v1])
compute[v0, v1] = compute_local[v0, v1]
def _create_context(mod, target, postprocs) -> ms.TuneContext:
    """Build a TuneContext whose design space applies only `postprocs`."""
    space = ms.space_generator.PostOrderApply(
        sch_rules=[],
        postprocs=postprocs,
        mutator_probs={},
    )
    return ms.TuneContext(
        mod=mod,
        target=target,
        space_generator=space,
        task_name="test",
    )
def test_rewrite_tensorize_conv2d_nchwc_vnni():
    """RewriteTensorize rewrites the marked conv2d block into the VNNI form."""
    target = tvm.target.Target("llvm -mcpu=cascadelake -num-cores 4")
    postprocs = [
        ms.postproc.RewriteReductionBlock(),
        ms.postproc.RewriteTensorize(True),
    ]
    ctx = _create_context(Conv2dNCHWcVNNIModuleTiled, target, postprocs)
    sch = tvm.tir.Schedule(Conv2dNCHWcVNNIModuleTiled, debug_mask="all")
    sch.enter_postproc()
    for postproc in ctx.space_generator.postprocs:
        postproc.apply(sch)
    tvm.ir.assert_structural_equal(sch.mod, Conv2dNCHWcVNNIModuleTensorized)
def test_rewrite_tensorize_dense_dp4a():
    """RewriteTensorize rewrites the marked dense reduction into __dp4a form."""
    target = tvm.target.Target("nvidia/geforce-rtx-3070")
    postprocs = [
        ms.postproc.RewriteCooperativeFetch(),
        ms.postproc.RewriteReductionBlock(),
        ms.postproc.RewriteTensorize(),
    ]
    ctx = _create_context(DenseDP4ATiled, target, postprocs)
    sch = tvm.tir.Schedule(DenseDP4ATiled, debug_mask="all")
    sch.enter_postproc()
    for postproc in ctx.space_generator.postprocs:
        postproc.apply(sch)
    tvm.ir.assert_structural_equal(sch.mod, DenseDP4ATensorized)
if __name__ == "__main__":
test_rewrite_tensorize_conv2d_nchwc_vnni()
test_rewrite_tensorize_dense_dp4a()
| 22,796 | 43.525391 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_analysis_verify_well_formed.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.script import tir as T
def test_pass_simple():
    """A well-formed PrimFunc (block vars used inside their block) must verify."""

    @T.prim_func
    def element_wise(
        A: T.Buffer((128, 128), "float32"),
        C: T.Buffer((128, 128), "float32"),
    ):
        B = T.alloc_buffer((128, 128), "float32")
        for i, j in T.grid(128, 128):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = A[vi, vj] * 2.0
        for i, j in T.grid(128, 128):
            with T.block("C"):
                # It's an opaque block (no block vars), so it can use outside variables
                C[i, j] = B[i, j] * 2.0

    assert tvm.tir.analysis.verify_well_formed(element_wise)
    # The verifier also accepts a whole IRModule.
    assert tvm.tir.analysis.verify_well_formed(tvm.IRModule.from_expr(element_wise))
def test_fail_use_out_loop_var():
    """Reading a loop var from inside a non-opaque block is ill-formed."""

    @T.prim_func
    def element_wise(
        A: T.Buffer((128, 128), "float32"),
        B: T.Buffer((128, 128), "float32"),
    ):
        for i, j in T.grid(128, 128):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j])
                # we cannot use `i` since it's defined outside the block
                B[vi, vj] = A[i, vj] * 2.0

    # assert_mode=False: return False instead of raising.
    assert not tvm.tir.analysis.verify_well_formed(element_wise, assert_mode=False)
if __name__ == "__main__":
tvm.testing.main()
| 2,094 | 34.508475 | 84 | py |
tvm | tvm-main/tests/python/unittest/test_tvmscript_printer_doc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
In this test file, we want to make sure the Python code can construct
Doc objects, then access and modify their attributes correctly.
"""
import pytest
import tvm
from tvm.runtime import ObjectPath
from tvm.script.printer.doc import (
AssertDoc,
AssignDoc,
AttrAccessDoc,
CallDoc,
ClassDoc,
CommentDoc,
DictDoc,
DocStringDoc,
ExprStmtDoc,
ForDoc,
FunctionDoc,
IdDoc,
IfDoc,
IndexDoc,
LambdaDoc,
ListDoc,
LiteralDoc,
OperationDoc,
OperationKind,
ReturnDoc,
ScopeDoc,
SliceDoc,
StmtBlockDoc,
TupleDoc,
WhileDoc,
)
@pytest.mark.parametrize(
    "value",
    [None, "test", 0, 1, -2, 0.0, 1.5, -1.3, True, False],
)
def test_literal_doc_construction(value):
    """LiteralDoc should round-trip the Python value it was built from."""
    doc = LiteralDoc(value)
    if not isinstance(value, float):
        assert doc.value == value
        return
    # FloatImm cannot be compared with Python's float directly
    assert float(doc.value) == pytest.approx(value)
def test_id_doc():
    """IdDoc stores the identifier name."""
    name = "name"
    assert IdDoc(name).name == name
def test_attr_access_doc():
    """AttrAccessDoc records the accessed object and the attribute name."""
    base = IdDoc("x")
    attr_name = "attribute"
    doc = AttrAccessDoc(base, attr_name)
    assert doc.value == base
    assert doc.name == attr_name
@pytest.mark.parametrize(
    "indices",
    [
        [],
        [LiteralDoc(1)],
        [LiteralDoc(2), IdDoc("x")],
        [SliceDoc(LiteralDoc(1), LiteralDoc(2))],
        [SliceDoc(LiteralDoc(1)), IdDoc("y")],
    ],
)
def test_index_doc(indices):
    """IndexDoc keeps the indexed value and the index expressions in order."""
    value = IdDoc("x")
    doc = IndexDoc(value, indices)
    assert doc.value == value
    assert indices == list(doc.indices)
@pytest.mark.parametrize(
    "args, kwargs",
    [
        ([], {}),
        ([LiteralDoc("arg")], {}),
        ([LiteralDoc("arg"), IdDoc("x")], {}),
        ([], {"x": LiteralDoc("x")}),
        ([], {"x": LiteralDoc("x"), "y": LiteralDoc("y")}),
        ([LiteralDoc("arg")], {"x": LiteralDoc("x"), "y": LiteralDoc("y")}),
        ([LiteralDoc("arg"), IdDoc("x")], {"x": LiteralDoc("x"), "y": LiteralDoc("y")}),
    ],
)
def test_call_doc(args, kwargs):
    """CallDoc keeps the callee plus positional and keyword arguments."""
    callee = IdDoc("x")
    doc = CallDoc(callee, *args, **kwargs)
    assert doc.callee == callee
    assert list(doc.args) == args
    observed_kwargs = dict(zip(doc.kwargs_keys, doc.kwargs_values))
    assert observed_kwargs == kwargs
@pytest.mark.parametrize(
    "operands",
    [
        [],
        [LiteralDoc(1)],
        [LiteralDoc(2), IdDoc("x")],
        [LiteralDoc(2), IdDoc("x"), LiteralDoc("y")],
    ],
)
def test_operation_doc(operands):
    """Exercises only the constructor/attr visitor of OperationDoc,
    so the particular OperationKind is irrelevant."""
    doc = OperationDoc(OperationKind.Add, operands)
    assert doc.kind == OperationKind.Add
    assert list(doc.operands) == operands
@pytest.mark.parametrize(
"args",
[
[],
[IdDoc("x")],
[IdDoc("x"), IdDoc("y")],
],
)
def test_lambda_doc(args):
body = LiteralDoc(1)
doc = LambdaDoc(args, body)
assert doc.body == body
assert list(doc.args) == args
@pytest.mark.parametrize(
"elements",
[
[],
[IdDoc("x")],
[IdDoc("x"), IdDoc("y")],
],
)
def test_tuple_doc(elements):
doc = TupleDoc(elements)
assert list(doc.elements) == elements
@pytest.mark.parametrize(
"elements",
[
[],
[IdDoc("x")],
[IdDoc("x"), IdDoc("y")],
],
)
def test_list_doc(elements):
doc = ListDoc(elements)
assert list(doc.elements) == elements
@pytest.mark.parametrize(
    "content",
    [
        {},
        {LiteralDoc("k"): IdDoc("v")},
        {LiteralDoc("k"): IdDoc("v"), LiteralDoc("k2"): IdDoc("v2")},
    ],
)
def test_dict_doc(content):
    """DictDoc stores keys and values pairwise, preserving their association."""
    doc = DictDoc(content)
    observed = dict(zip(doc.keys, doc.values))
    assert observed == content
@pytest.mark.parametrize("start", [LiteralDoc(1), None])
@pytest.mark.parametrize("stop", [LiteralDoc(2), None])
@pytest.mark.parametrize("step", [LiteralDoc(3), None])
def test_slice_doc(start, stop, step):
    """SliceDoc stores start/stop/step, each of which may be None.

    Bug fix: `step` was parametrized but neither passed to the constructor
    nor asserted, so that axis of the parametrization tested nothing.
    """
    doc = SliceDoc(start, stop, step)
    assert doc.start == start
    assert doc.stop == stop
    assert doc.step == step
def test_expr_doc_attr_access():
target = IdDoc("x")
attr = "test"
doc = target.attr(attr)
assert doc.value == target
assert doc.name == attr
@pytest.mark.parametrize(
    "indices",
    [
        (),
        LiteralDoc(1),
        SliceDoc(LiteralDoc(1), LiteralDoc(2)),
        (LiteralDoc(1),),
        (LiteralDoc(2), IdDoc("x")),
        (SliceDoc(LiteralDoc(1), LiteralDoc(2)),),
        (SliceDoc(LiteralDoc(1)), IdDoc("y")),
    ],
)
def test_expr_doc_get_item(indices):
    """__getitem__ builds an IndexDoc; a scalar index is wrapped in a 1-tuple."""
    target = IdDoc("x")
    doc = target[indices]
    expected = indices if isinstance(indices, tuple) else (indices,)
    assert doc.value == target
    assert tuple(doc.indices) == expected
@pytest.mark.parametrize(
"args, kwargs",
[
([], {}),
([LiteralDoc("arg")], {}),
([LiteralDoc("arg"), IdDoc("x")], {}),
([], {"x": LiteralDoc("x")}),
([], {"x": LiteralDoc("x"), "y": LiteralDoc("y")}),
([LiteralDoc("arg")], {"x": LiteralDoc("x"), "y": LiteralDoc("y")}),
([LiteralDoc("arg"), IdDoc("x")], {"x": LiteralDoc("x"), "y": LiteralDoc("y")}),
],
)
def test_expr_doc_call_with(args, kwargs):
target = IdDoc("x")
doc = target.call(*args, **kwargs)
assert doc.callee == target
assert list(doc.args) == args
assert dict(zip(doc.kwargs_keys, doc.kwargs_values)) == kwargs
@pytest.mark.parametrize(
"stmts",
[
[],
[ExprStmtDoc(IdDoc("x"))],
[ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
],
)
def test_stmt_block_doc(stmts):
doc = StmtBlockDoc(stmts)
assert list(doc.stmts) == stmts
@pytest.mark.parametrize(
"lhs, rhs, annotation",
[
(IdDoc("x"), IdDoc("y"), None),
(IdDoc("x"), None, IdDoc("int")),
(IdDoc("x"), IdDoc("y"), IdDoc("int")),
],
)
def test_assign_doc(lhs, rhs, annotation):
doc = AssignDoc(lhs, rhs, annotation)
assert doc.lhs == lhs
assert doc.rhs == rhs
assert doc.annotation == annotation
@pytest.mark.parametrize(
"lhs, rhs, annotation",
[
(IdDoc("x"), None, None),
(TupleDoc([IdDoc("x"), IdDoc("y")]), None, IdDoc("int")),
(TupleDoc([IdDoc("x"), IdDoc("y")]), IdDoc("u"), IdDoc("int")),
],
)
def test_invalid_assign_doc(lhs, rhs, annotation):
with pytest.raises(ValueError) as e:
AssignDoc(lhs, rhs, annotation)
assert "AssignDoc" in str(e.value)
@pytest.mark.parametrize(
    "else_branch",
    [
        [],
        [ExprStmtDoc(IdDoc("x"))],
        [ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
    ],
)
@pytest.mark.parametrize(
    "then_branch",
    [
        [],
        [ExprStmtDoc(IdDoc("x"))],
        [ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
    ],
)
def test_if_doc(then_branch, else_branch):
    """IfDoc requires at least one non-empty branch; otherwise stores both."""
    predicate = IdDoc("x")
    if not (then_branch or else_branch):
        # Both branches empty: construction must be rejected.
        with pytest.raises(ValueError) as e:
            IfDoc(predicate, then_branch, else_branch)
        assert "IfDoc" in str(e.value)
        return
    doc = IfDoc(predicate, then_branch, else_branch)
    assert doc.predicate == predicate
    assert list(doc.then_branch) == then_branch
    assert list(doc.else_branch) == else_branch
@pytest.mark.parametrize(
"body",
[
[],
[ExprStmtDoc(IdDoc("x"))],
[ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
],
)
def test_while_doc(body):
predicate = IdDoc("x")
doc = WhileDoc(predicate, body)
assert doc.predicate == predicate
assert list(doc.body) == body
@pytest.mark.parametrize(
"body",
[
[],
[ExprStmtDoc(IdDoc("x"))],
[ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
],
)
def test_for_doc(body):
lhs = IdDoc("x")
rhs = IdDoc("y")
doc = ForDoc(lhs, rhs, body)
assert doc.lhs == lhs
assert doc.rhs == rhs
assert list(doc.body) == body
@pytest.mark.parametrize(
"lhs",
[
None,
IdDoc("x"),
],
)
@pytest.mark.parametrize(
"body",
[
[],
[ExprStmtDoc(IdDoc("x"))],
[ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
],
)
def test_scope_doc(lhs, body):
rhs = IdDoc("y")
doc = ScopeDoc(lhs, rhs, body)
assert doc.lhs == lhs
assert doc.rhs == rhs
assert list(doc.body) == body
def test_expr_stmt_doc():
expr = IdDoc("x")
doc = ExprStmtDoc(expr)
assert doc.expr == expr
@pytest.mark.parametrize(
"msg",
[
None,
LiteralDoc("msg"),
],
)
def test_assert_doc(msg):
test = IdDoc("x")
doc = AssertDoc(test, msg)
assert doc.test == test
assert doc.msg == msg
def test_return_doc():
value = IdDoc("x")
doc = ReturnDoc(value)
assert doc.value == value
@pytest.mark.parametrize(
"args",
[
[],
[AssignDoc(IdDoc("x"), None, IdDoc("int"))],
[
AssignDoc(IdDoc("x"), None, IdDoc("int")),
AssignDoc(IdDoc("y"), LiteralDoc(1), IdDoc("int")),
],
],
)
@pytest.mark.parametrize(
"decorators",
[
[],
[IdDoc("test")],
[IdDoc("test"), IdDoc("test2")],
],
)
@pytest.mark.parametrize(
"return_type",
[
None,
LiteralDoc(None),
],
)
@pytest.mark.parametrize(
"body",
[
[],
[ExprStmtDoc(IdDoc("x"))],
[ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
],
)
def test_function_doc(args, decorators, return_type, body):
name = IdDoc("name")
doc = FunctionDoc(name, args, decorators, return_type, body)
assert doc.name == name
assert list(doc.args) == args
assert list(doc.decorators) == decorators
assert doc.return_type == return_type
assert list(doc.body) == body
@pytest.mark.parametrize(
"decorators",
[
[],
[IdDoc("test")],
[IdDoc("test"), IdDoc("test2")],
],
)
@pytest.mark.parametrize(
"body",
[
[],
[ExprStmtDoc(IdDoc("x"))],
[ExprStmtDoc(IdDoc("x")), ExprStmtDoc(IdDoc("y"))],
],
)
def test_class_doc(decorators, body):
name = IdDoc("name")
doc = ClassDoc(name, decorators, body)
assert doc.name == name
assert list(doc.decorators) == decorators
assert list(doc.body) == body
@pytest.mark.parametrize(
"comment",
[
"",
"test comment 1",
"test comment 1\ntest comment 1",
],
)
def test_comment_doc(comment):
doc = CommentDoc(comment)
assert doc.comment == comment
@pytest.mark.parametrize(
"comment",
[
"",
"test comment 1",
"test comment 1\ntest comment 1",
],
)
def test_doc_string_doc(comment):
doc = DocStringDoc(comment)
assert doc.comment == comment
def test_stmt_doc_comment():
    """StmtDoc.comment is backed by the FFI object, not the instance dict."""
    doc = ExprStmtDoc(IdDoc("x"))
    assert doc.comment is None
    text = "test comment"
    doc.comment = text
    # The setter must not behave like an ordinary Python attribute
    # assignment that would populate __dict__.
    assert "comment" not in doc.__dict__
    assert doc.comment == text
def test_doc_source_paths():
    """source_paths starts empty, accepts a Python list, stores a TVM Array."""
    doc = IdDoc("x")
    assert len(doc.source_paths) == 0
    paths = [ObjectPath.root(), ObjectPath.root().attr("x")]
    doc.source_paths = paths
    # Reading back goes through __getattr__ and yields a tvm.ir.container.Array,
    # not the Python list that was assigned.
    assert not isinstance(doc.source_paths, list)
    assert list(doc.source_paths) == paths
    doc.source_paths = []
    assert len(doc.source_paths) == 0
if __name__ == "__main__":
tvm.testing.main()
| 12,465 | 21.063717 | 88 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_bf16_legalize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.script
from tvm.script import tir as T
def get_before():
    """Input IRModule: element-wise add + exp computed directly in bfloat16."""

    @tvm.script.ir_module
    class Before:
        @T.prim_func
        def main(
            Aptr: T.handle("bfloat16"), Bptr: T.handle("bfloat16"), Dptr: T.handle("bfloat16")
        ):
            T.func_attr({"global_symbol": "main"})
            A = T.decl_buffer((100,), "bfloat16", data=Aptr)
            B = T.decl_buffer((100,), "bfloat16", data=Bptr)
            D = T.decl_buffer((100,), "bfloat16", data=Dptr)
            # C is an intermediate buffer with no backing pointer parameter.
            C = T.decl_buffer((100,), "bfloat16")
            for i in T.grid(100):
                C[i] = A[i] + B[i]
                D[i] = T.exp(C[i])

    return Before
def u16tof32(v):
    """Reference widening: put the uint16 payload in the high half of a float32."""
    shifted = v.astype("uint32") << tvm.tir.const(16, "uint32")
    return T.reinterpret("float32", shifted)
def bf16tof32(v):
    """Reference bf16 -> f32: reinterpret the bits as uint16, then widen."""
    bits = T.reinterpret("uint16", v)
    return u16tof32(bits)
def f32tou16(v):
    """Reference f32 -> uint16 narrowing with a rounding bias on the dropped half."""
    bits = T.reinterpret("uint32", v)
    # Bias is 0x7FFF plus the LSB of the retained upper half
    # (round-to-nearest-even style tie-breaking).
    kept_lsb = (bits >> tvm.tir.const(16, "uint32")) & tvm.tir.const(1, "uint32")
    bias = kept_lsb + tvm.tir.const(0x7FFF, "uint32")
    rounded = bits + bias
    return (rounded >> tvm.tir.const(16, "uint32")).astype("uint16")
def f32tobf16(v):
    """Reference f32 -> bf16 conversion via the rounded uint16 bit pattern."""
    bits = f32tou16(v)
    return T.reinterpret("bfloat16", bits)
def get_after_compute_legalize():
    """Expected IR after BF16ComputeLegalize: arithmetic in f32, storage still bf16."""

    @tvm.script.ir_module
    class After:
        @T.prim_func
        def main(
            Aptr: T.handle("bfloat16"), Bptr: T.handle("bfloat16"), Dptr: T.handle("bfloat16")
        ):
            T.func_attr({"global_symbol": "main"})
            A = T.decl_buffer((100,), "bfloat16", data=Aptr)
            B = T.decl_buffer((100,), "bfloat16", data=Bptr)
            D = T.decl_buffer((100,), "bfloat16", data=Dptr)
            # The intermediate buffer is widened to float32 by the pass.
            C = T.decl_buffer((100,), "float32")
            for i in T.grid(100):
                C[i] = bf16tof32(A[i]) + bf16tof32(B[i])
                D[i] = f32tobf16(T.exp(C[i]))

    return After
def get_after_storage_legalize():
    """Expected IR after BF16StorageLegalize: all bf16 storage becomes uint16."""

    @tvm.script.ir_module
    class After:
        @T.prim_func
        def main(Aptr: T.handle("uint16"), Bptr: T.handle("uint16"), Dptr: T.handle("uint16")):
            T.func_attr({"global_symbol": "main"})
            A = T.decl_buffer((100,), "uint16", data=Aptr)
            B = T.decl_buffer((100,), "uint16", data=Bptr)
            D = T.decl_buffer((100,), "uint16", data=Dptr)
            C = T.decl_buffer((100,), "float32")
            for i in T.grid(100):
                C[i] = u16tof32(A[i]) + u16tof32(B[i])
                D[i] = f32tou16(T.exp(C[i]))

    return After
def test_bf16_compute_legalize():
    """BF16ComputeLegalize rewrites bf16 arithmetic to f32 and tolerates re-runs."""
    legalize = tvm.tir.transform.BF16ComputeLegalize()
    # Apply the pass twice to ensure re-running it on already-legalized IR
    # still produces the expected module.
    after = legalize(legalize(get_before()))
    tvm.ir.assert_structural_equal(after, get_after_compute_legalize())
def test_bf16_storage_legalize():
    """BF16StorageLegalize lowers bf16 buffers/handles to uint16."""
    after = tvm.tir.transform.BF16StorageLegalize()(get_after_compute_legalize())
    tvm.ir.assert_structural_equal(after, get_after_storage_legalize())
if __name__ == "__main__":
test_bf16_storage_legalize()
| 4,032 | 32.890756 | 95 | py |
tvm | tvm-main/tests/python/unittest/test_evaluator_with_preproc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.script import tir as T
import tvm.testing
import numpy as np
import pytest
# Naive 128x128x128 matmul used as the workload for the evaluator test below.
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j, k in T.grid(128, 128, 128):
        with T.block("matmul"):
            # i/j are spatial axes, k is the reduction axis.
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
@tvm.testing.requires_cuda
@pytest.mark.parametrize("f_preproc", ["", "l2_cache_flush_cuda"])
def test_time_evalutor_with_preproc(f_preproc: str):
    """Time a trivially GPU-bound matmul with an optional evaluator pre-proc hook."""
    sch = tvm.tir.Schedule(tvm.IRModule.from_expr(matmul))
    loop_i, loop_j, _ = sch.get_loops(sch.get_block("matmul"))
    sch.bind(loop_i, "blockIdx.x")
    sch.bind(loop_j, "threadIdx.x")
    func = tvm.build(sch.mod["main"], target="cuda")
    dev = tvm.cuda(0)
    evaluator = func.time_evaluator(
        func.entry_name, dev, repeat=1000, number=1, f_preproc=f_preproc
    )
    a = tvm.nd.array(np.random.rand(128, 128).astype("float32"), device=dev)
    b = tvm.nd.array(np.random.rand(128, 128).astype("float32"), device=dev)
    c = tvm.nd.array(np.zeros((128, 128)).astype("float32"), device=dev)
    print("Evaluator (f_preproc={}):\t{:.5f}ms".format(f_preproc, evaluator(a, b, c).mean * 1000))
if __name__ == "__main__":
test_time_evalutor_with_preproc("l2_cache_flush_cuda")
| 2,287 | 36.508197 | 96 | py |
tvm | tvm-main/tests/python/unittest/test_arith_detect_clip_bound.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
def test_basic():
    """detect_clip_bound on conjunctions of affine constraints.

    The result is a flat list with two entries (min, max) per requested
    variable; an empty list means detection failed.
    """
    a = te.var("a")
    b = te.var("b")
    c = te.var("c")
    # a*1 < b*6 and a-1 > 0  =>  2 <= a <= b*6 - 1
    m = tvm.arith.detect_clip_bound(tvm.tir.all(a * 1 < b * 6, a - 1 > 0), [a])
    tvm.testing.assert_prim_expr_equal(m[1], b * 6 - 1)
    assert m[0].value == 2
    # Bounds coupling both requested vars cannot be separated -> failure.
    m = tvm.arith.detect_clip_bound(tvm.tir.all(a * 1 < b * 6, a - 1 > 0), [a, b])
    assert len(m) == 0
    m = tvm.arith.detect_clip_bound(tvm.tir.all(a + 10 * c <= 20, b - 1 > 0), [a, b])
    tvm.testing.assert_prim_expr_equal(m[1], 20 - 10 * c)
    tvm.testing.assert_prim_expr_equal(m[2], 2)
    # Negated comparison: Not(a > b*6) is handled like a <= b*6.
    m = tvm.arith.detect_clip_bound(tvm.tir.all(tvm.tir.Not(a * 1 > b * 6), a - 1 > 0), [a])
    tvm.testing.assert_prim_expr_equal(m[1], b * 6)
    # Min(a, b) > 3 bounds both a and b from below.
    m = tvm.arith.detect_clip_bound(tvm.tir.all(tvm.tir.Min(a, b) > 3, a - 10 < 0), [a, b])
    tvm.testing.assert_prim_expr_equal(m[0], 4)
    tvm.testing.assert_prim_expr_equal(m[1], 9)
    tvm.testing.assert_prim_expr_equal(m[2], 4)
def test_trivial_eq():
    """Equality constraints clip a variable to a single value (min == max)."""
    a = te.var("a")
    b = te.var("b")
    # Only b is constrained: entries 2/3 (b's min/max) both become 3.
    m = tvm.arith.detect_clip_bound(b == 3, [a, b])
    tvm.testing.assert_prim_expr_equal(m[2], 3)
    tvm.testing.assert_prim_expr_equal(m[3], 3)
    m = tvm.arith.detect_clip_bound(tvm.tir.all(a == 4, b == 3), [a, b])
    tvm.testing.assert_prim_expr_equal(m[0], 4)
    tvm.testing.assert_prim_expr_equal(m[1], 4)
    tvm.testing.assert_prim_expr_equal(m[2], 3)
    tvm.testing.assert_prim_expr_equal(m[3], 3)
if __name__ == "__main__":
    # Run all tests in this file; previously only test_basic was invoked,
    # so test_trivial_eq was silently skipped under direct execution.
    test_basic()
    test_trivial_eq()
| 2,280 | 39.017544 | 92 | py |
tvm | tvm-main/tests/python/unittest/test_set_input_zero_copy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import tvm
from tvm import relay
import numpy as np
from tvm.contrib import graph_executor
from tvm import testing
import numpy as np
import pytest
dev = tvm.cpu(0)
target = tvm.target.Target("llvm")
def build_relay_module(func):
    """Compile *func* as the "main" of a fresh IRModule and return a
    GraphModule executor for it on the module-level `dev`/`target`."""
    ir_mod = tvm.IRModule()
    ir_mod["main"] = func
    compiled = relay.build(ir_mod, target=target)
    factory = compiled["default"](dev)
    return graph_executor.GraphModule(factory)
@testing.requires_llvm
def test_simple_graph():
    """set_input vs set_input_zero_copy must give identical results on a
    simple y = x + 1 graph."""
    # Simple relay func:
    # 1. y = x + 1
    # 2. return y
    shape = (2, 2)
    x = relay.var("x", shape=shape, dtype="float32")
    y = relay.add(x, relay.ones(shape, dtype="float32"))
    func = relay.Function([x], y)
    # Build two identical relay modules, one per input-setting path.
    mod = build_relay_module(func)
    mod_zero_copy = build_relay_module(func)
    x_np = np.random.uniform(size=shape).astype(np.float32)
    # Path 1: regular set_input() (copies the data in).
    x_nd = tvm.nd.array(x_np, device=dev)
    mod.set_input("x", x_nd)
    mod.run()
    # Path 2: set_input_zero_copy() (shares the NDArray storage).
    x_nd_zero_copy = tvm.nd.array(x_np, device=dev)
    index = mod_zero_copy.get_input_index("x")
    mod_zero_copy.module["set_input_zero_copy"](index, x_nd_zero_copy)
    mod_zero_copy.run()
    # Both paths must produce the same output.
    testing.assert_allclose(mod.get_output(0).numpy(), mod_zero_copy.get_output(0).numpy())
@testing.requires_llvm
def test_input_in_output():
    """Zero-copy input must work when the input tensor itself is also
    returned as one of the graph outputs."""
    # Relay func that input is also in output:
    # 1. y = x + 1
    # 2. return [x, y]
    shape = (3, 4)
    x = relay.var("x", shape=shape, dtype="float32")
    y = relay.add(x, relay.ones(shape, dtype="float32"))
    func = relay.Function([x], relay.expr.Tuple([x, y]))
    # Build two identical relay modules, one per input-setting path.
    mod = build_relay_module(func)
    mod_zero_copy = build_relay_module(func)
    x_np = np.random.uniform(size=shape).astype(np.float32)
    # Path 1: regular set_input().
    x_nd = tvm.nd.array(x_np, device=dev)
    mod.set_input("x", x_nd)
    mod.run()
    # Path 2: set_input_zero_copy().
    x_nd_zero_copy = tvm.nd.array(x_np, device=dev)
    index = mod_zero_copy.get_input_index("x")
    mod_zero_copy.module["set_input_zero_copy"](index, x_nd_zero_copy)
    mod_zero_copy.run()
    # Output 0 is the passed-through input "x"; both paths must agree.
    testing.assert_allclose(mod.get_output(0).numpy(), mod_zero_copy.get_output(0).numpy())
@testing.requires_llvm
def test_reshape_after_input():
    """Zero-copy input must work when a reshape op consumes the input
    directly (reshape may alias the input buffer)."""
    # Relay func where a reshape op follows immediately after the input:
    # 1. y = reshape(x, (1, 12))
    # 2. z = y + 1
    # 3. return [x, y, z]
    shape = (3, 4)
    x = relay.var("x", shape=shape, dtype="float32")
    y = relay.reshape(x, (1, 12))
    z = relay.add(y, relay.ones((1, 12), dtype="float32"))
    func = relay.Function([x], relay.expr.Tuple([x, y, z]))
    # Build two identical relay modules, one per input-setting path.
    mod = build_relay_module(func)
    mod_zero_copy = build_relay_module(func)
    x_np = np.random.uniform(size=shape).astype(np.float32)
    # Path 1: regular set_input().
    x_nd = tvm.nd.array(x_np, device=dev)
    mod.set_input("x", x_nd)
    mod.run()
    # Path 2: set_input_zero_copy().
    x_nd_zero_copy = tvm.nd.array(x_np, device=dev)
    index = mod_zero_copy.get_input_index("x")
    mod_zero_copy.module["set_input_zero_copy"](index, x_nd_zero_copy)
    mod_zero_copy.run()
    # Output 0 (the pass-through "x") must match across both paths.
    testing.assert_allclose(mod.get_output(0).numpy(), mod_zero_copy.get_output(0).numpy())
    # Output 1 (the reshaped "y") must match across both paths.
    testing.assert_allclose(mod.get_output(1).numpy(), mod_zero_copy.get_output(1).numpy())
if __name__ == "__main__":
test_simple_graph()
test_input_in_output()
test_reshape_after_input()
| 4,408 | 30.949275 | 91 | py |
tvm | tvm-main/tests/python/unittest/test_tvmscript_printer_tir.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import tvm.testing
from tvm import ir, tir
from tvm.ir import Range
from tvm.script.ir_builder import IRBuilder
from tvm.script.ir_builder import tir as T
def _assert_print(obj, expected):
assert obj.script(verbose_expr=True).strip() == expected.strip()
def test_prim_func():
    """A PrimFunc whose handle params are fully described by the buffer_map
    prints with the T.Buffer sugar directly in the signature."""
    a = tir.Var("a", "handle")
    b = tir.Var("b", "handle")
    func = tir.PrimFunc(
        params=[a, b],
        ret_type=None,
        buffer_map={
            a: tir.decl_buffer(shape=[128, 128], dtype="float32", name="A"),
            b: tir.decl_buffer(shape=[256, 256], dtype="float32", name="B"),
        },
        body=tir.Evaluate(0),
    )
    _assert_print(
        func,
        expected="""
# from tvm.script import tir as T
@T.prim_func
def main(A: T.Buffer((128, 128), "float32"), B: T.Buffer((256, 256), "float32")):
    T.evaluate(0)""",
    )
def test_prim_func_no_sugar_inlined_buffer():
a = tir.Var("a", "handle")
b = tir.Var("b", "handle")
func = tir.PrimFunc(
params=[a, b],
ret_type=None,
buffer_map={
a: tir.decl_buffer(shape=[128, 128], dtype="float32", name="A"),
b: tir.decl_buffer(shape=[256, 256], dtype="float32", name="B"),
},
body=tir.Evaluate(a),
)
_assert_print(
func,
expected="""
# from tvm.script import tir as T
@T.prim_func
def main(a: T.handle, B: T.Buffer((256, 256), "float32")):
A = T.match_buffer(a, (128, 128))
T.evaluate(a)
""",
)
def test_prim_func_no_sugar_shared_buffer_data():
a = tir.Var("a", "handle")
b = tir.Var("b", "handle")
buffer_data = tir.decl_buffer(shape=[128, 128], dtype="float32", name="A").data
func = tir.PrimFunc(
params=[a, b],
ret_type=None,
buffer_map={
a: tir.decl_buffer(shape=[128, 128], dtype="float32", name="A", data=buffer_data),
b: tir.decl_buffer(shape=[256, 256], dtype="float32", name="B", data=buffer_data),
},
body=tir.Evaluate(0),
)
_assert_print(
func,
expected="""
# from tvm.script import tir as T
@T.prim_func
def main(a: T.handle, b: T.handle):
A = T.match_buffer(a, (128, 128))
B = T.match_buffer(b, (256, 256), data=A.data)
T.evaluate(0)
""",
)
def test_block_realize():
i = tir.Var("i", "int32")
j = tir.Var("j", "int32")
k = tir.Var("k", "int32")
with IRBuilder() as ib:
with T.block(name="block", no_realize=False):
vi = ib.name("vi", T.axis.spatial(128, i))
vj = ib.name("vj", T.axis.spatial(64, j))
vk = ib.name("vk", T.axis.reduce(32, k))
T.reads()
T.writes()
T.evaluate(0)
obj = ib.get()
_assert_print(
obj,
"""
i = T.int32()
j = T.int32()
k = T.int32()
with T.block("block"):
vi = T.axis.spatial(128, i)
vj = T.axis.spatial(64, j)
vk = T.axis.reduce(32, k)
T.reads()
T.writes()
T.evaluate(0)""",
)
def test_block():
i = tir.Var("i", "int32")
j = tir.Var("j", "int32")
k = tir.Var("k", "int32")
with IRBuilder() as ib:
with T.block(name="block", no_realize=False):
vi = ib.name("vi", T.axis.spatial(128, i))
vj = ib.name("vj", T.axis.spatial(64, j))
vk = ib.name("vk", T.axis.reduce(32, k))
T.reads()
T.writes()
T.evaluate(0)
obj = ib.get().block
_assert_print(
obj,
"""
with T.block("block", no_realize=True):
vi = T.axis.spatial(128)
vj = T.axis.spatial(64)
vk = T.axis.reduce(32)
T.reads()
T.writes()
T.evaluate(0)""",
)
def test_match_buffer_region():
src = tir.decl_buffer((128, 128), "float32", name="src")
tgt = tir.decl_buffer((64, 64), "float32", name="tgt")
obj = tir.MatchBufferRegion(
tgt,
tir.BufferRegion(
src,
[
Range(64, 128),
Range(64, 128),
],
),
)
_assert_print(
obj,
"""
src = T.Buffer((128, 128))
tgt = T.match_buffer(src[64:128, 64:128], (64, 64))
""",
)
def test_buffer():
a = tir.decl_buffer((128, 128), "float16", name="A")
_assert_print(
a,
"""A = T.Buffer((128, 128), "float16")
A""",
)
def test_buffer_region():
src = tir.decl_buffer((128, 128), "float32", name="src")
obj = tir.BufferRegion(
src,
[
Range(64, 128),
Range(64, 128),
],
)
_assert_print(
obj,
"""
src = T.Buffer((128, 128))
src[64:128, 64:128]
""",
)
def test_buffer_load():
a = tir.decl_buffer((128, 128), "float16", name="A")
obj = tir.BufferLoad(a, [128, 128])
_assert_print(
obj,
"""
A = T.Buffer((128, 128), "float16")
A[128, 128]
""",
)
def test_buffer_store():
a = tir.decl_buffer((128, 128), "float16", name="A")
with IRBuilder() as ib:
T.buffer_store(a, a[128, 128] + 1, [128, 128])
obj = ib.get()
_assert_print(
obj,
"""
A = T.Buffer((128, 128), "float16")
A[128, 128] = A[128, 128] + T.float16(1)
""",
)
def test_for():
with IRBuilder() as ib:
with T.grid(128, 128, 128) as (i, j, k):
ib.name_many(["i", "j", "k"], [i, j, k])
T.evaluate(0)
obj = ib.get()
_assert_print(
obj,
"""
for i, j, k in T.grid(128, 128, 128):
T.evaluate(0)
""",
)
def test_let_stmt():
with IRBuilder() as ib:
with T.LetStmt(T.float32(10)) as v:
ib.name("v", v)
T.evaluate(0)
obj = ib.get()
_assert_print(
obj,
"""
with T.LetStmt(T.float32(10)) as v:
T.evaluate(0)
""",
)
def test_attr_stmt():
with IRBuilder() as ib:
with T.attr("pragma", "unroll", 1):
T.evaluate(0)
obj = ib.get()
_assert_print(
obj,
"""
with T.attr("pragma", "unroll", 1):
T.evaluate(0)
""",
)
def test_assert_stmt():
with IRBuilder() as ib:
with T.Assert(True, "assertion"):
T.evaluate(0)
obj = ib.get()
_assert_print(
obj,
"""
with T.Assert(T.bool(True), "assertion"):
T.evaluate(0)
""",
)
def test_while():
with IRBuilder() as ib:
x = T.int32()
with T.While(x < 10):
T.evaluate(0)
obj = ib.get()
_assert_print(
obj,
"""
v = T.int32()
while v < 10:
T.evaluate(0)
""",
)
def test_allocate():
with IRBuilder() as ib:
with T.allocate([128, 128], "float32"):
T.evaluate(0)
obj = ib.get()
_assert_print(
obj,
"""
with T.allocate([128, 128], "float32", "global") as v:
T.evaluate(0)
""",
)
def test_allocate_with_decl_buffer_sugar():
with IRBuilder() as ib:
with T.allocate([128, 128], "float32") as buffer_data:
with T.decl_buffer([128, 128], "float32", data=buffer_data) as buffer:
T.evaluate(0)
obj = ib.get()
_assert_print(
obj,
"""
with T.decl_buffer((128, 128)) as buffer:
T.evaluate(0)
""",
)
def test_allocate_with_decl_buffer_sugar_multi_usage():
with IRBuilder() as ib:
with T.allocate([128, 128], "float32") as buffer_data:
with T.decl_buffer([128, 128], "float32", data=buffer_data) as buffer:
T.evaluate(buffer_data)
obj = ib.get()
_assert_print(
obj,
"""
with T.decl_buffer((128, 128)) as buffer:
T.evaluate(buffer.data)
""",
)
def test_allocate_with_decl_buffer_no_sugar_mismatch():
with IRBuilder() as ib:
with T.allocate([128, 128], "float32") as buffer_data:
with T.decl_buffer([256, 256], "float32", data=buffer_data) as buffer:
T.evaluate(buffer_data)
obj = ib.get()
_assert_print(
obj,
"""
with T.allocate([128, 128], "float32", "global") as v:
buffer = T.decl_buffer((256, 256), data=v)
T.evaluate(v)
""",
)
def test_decl_buffer():
with IRBuilder() as ib:
with T.decl_buffer((10, 10), data=T.ptr("float32")):
T.evaluate(0)
obj = ib.get()
_assert_print(
obj,
"""
v = T.handle("float32", "global")
with T.decl_buffer((10, 10), data=v) as buffer:
T.evaluate(0)
""",
)
def test_prefetch():
a = tir.decl_buffer((128, 128), "float16", name="A")
with IRBuilder() as ib:
T.prefetch(a, [Range(0, 64), Range(0, 64)])
obj = ib.get()
_assert_print(
obj,
"""
A = T.Buffer((128, 128), "float16")
T.prefetch(A, [T.Range(0, 64), T.Range(0, 64)])
""",
)
def test_seq_stmt():
with IRBuilder() as ib:
with T.serial(10):
T.evaluate(1)
T.evaluate(2)
obj = ib.get().body
_assert_print(
obj,
"""
T.evaluate(1)
T.evaluate(2)
""",
)
def test_if_then_else():
with IRBuilder() as ib:
with T.If(T.int32() == 1):
with T.Then():
T.evaluate(0)
obj = ib.get()
_assert_print(
obj,
"""
v = T.int32()
if v == 1:
T.evaluate(0)
""",
)
def test_evaluate():
with IRBuilder() as ib:
T.evaluate(0)
obj = ib.get()
_assert_print(
obj,
"""
T.evaluate(0)
""",
)
def test_buffer_realize():
with IRBuilder() as ib:
a = tir.decl_buffer((128, 128), "float32", name="A")
with T.realize(a[0:128, 0:128], "test_storage_scope", True):
T.evaluate(0)
obj = ib.get()
_assert_print(
obj,
"""
A = T.Buffer((128, 128))
with T.realize(A[0:128, 0:128], "test_storage_scope"):
T.evaluate(0)
""",
)
def test_var():
a = tir.Var("a", "float32")
_assert_print(
a,
"""
a = T.float32()
a""",
)
def test_size_var():
a = tir.SizeVar("a", "float32")
_assert_print(
a,
"""
a = T.float32(is_size_var=True)
a""",
)
def test_iter_var():
a = tir.IterVar((0, 8), "a", iter_type=tir.IterVar.DataPar)
_assert_print(
a,
"""
a = T.int32()
T.iter_var(a, T.Range(0, 8), "DataPar", "")
""",
)
def test_string_imm():
    """A StringImm prints as a quoted string literal."""
    _assert_print(tir.StringImm("str"), '"str"')
def test_cast():
obj = tir.Cast("float64", tir.Var("a", "float32"))
_assert_print(
obj,
"""
a = T.float32()
T.Cast("float64", a)
""",
)
def test_binary_arith():
    """Binary ops between two vars print as infix operators when a symbol
    exists, and as a T.<name>(...) call otherwise (e.g. truncmod)."""
    a = tir.Var("a", "int32")
    b = tir.Var("b", "int32")
    for op, sign in [
        (tir.Add, "+"),
        (tir.Sub, "-"),
        (tir.Mul, "*"),
        (tir.Mod, "truncmod"),
        (tir.FloorDiv, "//"),
        (tir.FloorMod, "%"),
        (tir.LT, "<"),
        (tir.LE, "<="),
        (tir.EQ, "=="),
        (tir.NE, "!="),
        (tir.GT, ">"),
        (tir.GE, ">="),
    ]:
        obj = op(a, b)
        # Alphabetic "signs" denote function-style printing; symbols are infix.
        if sign.isalpha():
            expected = """
a = T.int32()
b = T.int32()
T.{}(a, b)""".format(
                sign
            )
        else:
            expected = """
a = T.int32()
b = T.int32()
a {} b""".format(
                sign
            )
        _assert_print(obj, expected)
def test_binary_arith_const():
a = tir.IntImm("int64", 3)
b = tir.IntImm("int64", 4)
for op, name in [
(tir.Add, "Add"),
(tir.Sub, "Sub"),
(tir.Mul, "Mul"),
(tir.Div, "Div"),
(tir.Mod, "truncmod"),
(tir.FloorDiv, "FloorDiv"),
(tir.FloorMod, "FloorMod"),
(tir.LT, "LT"),
(tir.LE, "LE"),
(tir.EQ, "EQ"),
(tir.NE, "NE"),
(tir.GT, "GT"),
(tir.GE, "GE"),
]:
obj = op(a, b)
expected = """
T.{}({}, {})""".format(
name, str(a), str(b)
)
_assert_print(obj, expected)
def test_int_div():
a = tir.Var("a", "int32")
b = tir.Var("b", "int32")
_assert_print(
tir.Div(a, b),
"""
a = T.int32()
b = T.int32()
T.Div(a, b)
""",
)
def test_logical():
a = tir.Var("a", "bool")
b = tir.Var("b", "bool")
_assert_print(
tir.And(a, b),
"""
a = T.bool()
b = T.bool()
a and b
""",
)
_assert_print(
tir.Or(a, b),
"""
a = T.bool()
b = T.bool()
a or b
""",
)
_assert_print(
tir.Not(a),
"""
a = T.bool()
not a
""",
)
def test_select():
obj = tir.Select(True, 0, 2)
_assert_print(
obj,
"""T.Select(T.bool(True), 0, 2)
""",
)
def test_ramp():
a = tir.Var("a", "int32")
obj = tir.Ramp(a, 1, 32)
_assert_print(
obj,
"""
a = T.int32()
T.Ramp(a, 1, 32)
""",
)
def test_broadcast():
obj = tir.Broadcast(0, 4)
_assert_print(
obj,
"""
T.Broadcast(0, 4)
""",
)
def test_let_expr():
x = tir.Var("x", "int32")
obj = tir.Let(x, 1, x + 1)
_assert_print(
obj,
"""
x = T.int32()
T.Let(x + 1, where={x: 1})
""",
)
def test_call():
obj = tir.atan(T.float32(1.0))
_assert_print(
obj,
"""
T.atan(T.float32(1))
""",
)
def test_comm_reducer():
obj = T.comm_reducer(lambda x, y: x + y, identity=[T.float32(0)])
_assert_print(
obj,
"""
T.comm_reducer(lambda x, y: x + y, [T.float32(0)])
""",
)
def test_any():
obj = tir.Any()
_assert_print(
obj,
"""
T.Any()
""",
)
def test_int_imm():
obj = T.int16(1)
_assert_print(
obj,
"""
T.int16(1)
""",
)
def test_float_imm():
obj = T.float16(1)
_assert_print(
obj,
"""
T.float16(1)
""",
)
def test_range():
obj = Range(0, 10)
_assert_print(
obj,
"""
T.Range(0, 10)
""",
)
def test_prim_type():
obj = ir.PrimType("float32")
_assert_print(obj, "T.float32")
def test_pointer_type():
obj = ir.PointerType(ir.PrimType("int32"), "global")
_assert_print(obj, 'T.handle("int32", "global")')
def test_tuple_type():
obj = ir.TupleType([ir.PrimType("float32"), ir.PrimType("int32")])
_assert_print(obj, "T.Tuple(T.float32, T.int32)")
def test_remap():
    """Runs of consecutive S/R axis bindings print compactly via
    T.axis.remap, whether the source used remap explicitly or not."""
    from tvm.script import tir as T
    @T.prim_func
    def block_with_remap_implicitly():
        # Axes written out one-by-one; the printer should fold the
        # remappable runs (i1/i2 and i4/i5) into T.axis.remap calls.
        for i0, i1, i2, i3, i4, i5 in T.grid(128, 128, 128, 128, 128, 128):
            with T.block("update"):
                v0 = T.axis.spatial(128, i0 + 1)
                v1 = T.axis.spatial(128, i1)
                v2 = T.axis.reduce(128, i2)
                v3 = T.axis.spatial(128, i3 - 1)
                v4 = T.axis.reduce(128, i4)
                v5 = T.axis.spatial(128, i5)
    @T.prim_func
    def block_with_remap_explicitly():
        for i0, i1, i2, i3, i4, i5 in T.grid(128, 128, 128, 128, 128, 128):
            with T.block("update"):
                v0 = T.axis.spatial(128, i0 + 1)
                v1, v2 = T.axis.remap("SR", [i1, i2])
                v3 = T.axis.spatial(128, i3 - 1)
                v4, v5 = T.axis.remap("RS", [i4, i5])
    expected_output = """
# from tvm.script import tir as T
@T.prim_func
def main():
    # with T.block("root"):
    for i0, i1, i2, i3, i4, i5 in T.grid(128, 128, 128, 128, 128, 128):
        with T.block("update"):
            v0 = T.axis.spatial(128, i0 + 1)
            v1, v2 = T.axis.remap("SR", [i1, i2])
            v3 = T.axis.spatial(128, i3 - 1)
            v4, v5 = T.axis.remap("RS", [i4, i5])
            T.reads()
            T.writes()
            T.evaluate(0)"""
    # Both spellings must normalize to the same printed form.
    _assert_print(block_with_remap_explicitly, expected_output)
    _assert_print(block_with_remap_implicitly, expected_output)
def test_root_block():
    """The implicit root block is printed only as a comment line, whether it
    was written explicitly in the source or not."""
    from tvm.script import tir as T
    @T.prim_func
    def root_block_implicitly():
        a = T.alloc_buffer([128, 128])
        for i, j in T.grid(128, 128):
            with T.block():
                T.evaluate(0)
    @T.prim_func
    def root_block_explicitly():
        with T.block("root"):
            a = T.alloc_buffer([128, 128])
            for i, j in T.grid(128, 128):
                with T.block():
                    T.evaluate(0)
    expected_output = """
# from tvm.script import tir as T
@T.prim_func
def main():
    # with T.block("root"):
    a = T.alloc_buffer((128, 128))
    for i, j in T.grid(128, 128):
        with T.block(""):
            T.reads()
            T.writes()
            T.evaluate(0)
"""
    # Both spellings must normalize to the same printed form.
    _assert_print(root_block_implicitly, expected_output)
    _assert_print(root_block_explicitly, expected_output)
if __name__ == "__main__":
tvm.testing.main()
| 17,343 | 20.599004 | 94 | py |
tvm | tvm-main/tests/python/unittest/test_target_codegen_blob.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ctypes
import numpy as np
from tvm import relay
import tvm.relay.testing
from tvm.contrib import graph_executor, cc, utils, popen_pool, tar
import tvm
import tvm.testing
from tvm.script import ir as I, tir as T
@tvm.testing.uses_gpu
def test_synthetic():
    """Build the synthetic Relay workload for CUDA, round-trip it through
    export_library/load_module, and check its output against an LLVM-built
    reference run on CPU."""
    # Requires both llvm and cuda; bail out if either is unavailable.
    for device in ["llvm", "cuda"]:
        if not tvm.testing.device_enabled(device):
            print("skip because %s is not enabled..." % device)
            return
    input_shape = (1, 5, 23, 61)
    def verify(data):
        # CPU (llvm) reference run of the same workload.
        mod, params = relay.testing.synthetic.get_workload(input_shape=input_shape)
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build_module.build(mod, "llvm", params=params)
        dev = tvm.cpu()
        module = graph_executor.GraphModule(lib["default"](dev))
        module.set_input("data", data)
        module.run()
        out = module.get_output(0).numpy()
        return out
    synthetic_mod, synthetic_params = relay.testing.synthetic.get_workload(input_shape=input_shape)
    with tvm.transform.PassContext(opt_level=3):
        synthetic_gpu_lib = relay.build_module.build(synthetic_mod, "cuda", params=synthetic_params)
    # Export the CUDA build to a .so and reload it from disk.
    temp = utils.tempdir()
    path_lib = temp.relpath("deploy_lib.so")
    synthetic_gpu_lib.export_library(path_lib)
    loaded_lib = tvm.runtime.load_module(path_lib)
    data = np.random.uniform(-1, 1, size=input_shape).astype("float32")
    dev = tvm.cuda()
    module = graph_executor.GraphModule(loaded_lib["default"](dev))
    module.set_input("data", data)
    module.run()
    out = module.get_output(0).numpy()
    # Reloaded CUDA result must match the CPU reference.
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
@tvm.testing.uses_gpu
def test_cuda_multi_lib():
    """Combine two system libraries (each with its own CUDA fatbin and
    distinct system_lib_prefix) into one shared object and run both from a
    fresh process."""
    # test combining two system lib together
    # each contains a fatbin component in cuda
    dev = tvm.cuda(0)
    # Requires both llvm (host) and cuda; bail out if either is unavailable.
    for device in ["llvm", "cuda"]:
        if not tvm.testing.device_enabled(device):
            print("skip because %s is not enabled..." % device)
            return
    # ModA: adds 1 in place, registered under the "modA_" prefix.
    @tvm.script.ir_module
    class ModA:
        I.module_attrs({"system_lib_prefix": "modA_"})
        @T.prim_func
        def my_inplace_update(x: T.Buffer((12), "float32")) -> None:
            T.func_attr({"global_symbol": "modA_my_inplace_update"})
            for bx in T.thread_binding(T.int64(1), thread="blockIdx.x"):
                for tx in T.thread_binding(T.int64(12), thread="threadIdx.x"):
                    x[tx] = x[tx] + 1
    # ModB: adds 2 in place, registered under the "modB_" prefix.
    @tvm.script.ir_module
    class ModB:
        I.module_attrs({"system_lib_prefix": "modB_"})
        @T.prim_func
        def my_inplace_update(x: T.Buffer((12), "float32")) -> None:
            T.func_attr({"global_symbol": "modB_my_inplace_update"})
            for bx in T.thread_binding(T.int64(1), thread="blockIdx.x"):
                for tx in T.thread_binding(T.int64(12), thread="threadIdx.x"):
                    x[tx] = x[tx] + 2
    temp = utils.tempdir()
    target = tvm.target.Target("cuda", host="llvm")
    libA = tvm.build(ModA, target=target)
    libB = tvm.build(ModB, target=target)
    pathA = temp.relpath("libA.tar")
    pathB = temp.relpath("libB.tar")
    pathAll = temp.relpath("libAll.a")
    path_dso = temp.relpath("mylib.so")
    libA.export_library(pathA, tar.tar)
    libB.export_library(pathB, tar.tar)
    cc.create_staticlib(pathAll, [pathA, pathB])
    # package two static libs together; --whole-archive keeps the
    # registration symbols from being dropped by the linker.
    cc.create_shared(path_dso, ["-Wl,--whole-archive", pathAll, "-Wl,--no-whole-archive"])
    def popen_check():
        # Load dll, will trigger system library registration
        ctypes.CDLL(path_dso)
        # Load the system wide library
        dev = tvm.cuda()
        a_np = np.random.uniform(size=12).astype("float32")
        a_nd = tvm.nd.array(a_np, dev)
        b_nd = tvm.nd.array(a_np, dev)
        syslibA = tvm.runtime.system_lib("modA_")
        syslibB = tvm.runtime.system_lib("modB_")
        # reload same lib twice (must be idempotent)
        syslibA = tvm.runtime.system_lib("modA_")
        syslibA["my_inplace_update"](a_nd)
        syslibB["my_inplace_update"](b_nd)
        # ModA adds 1, ModB adds 2 -- proves both prefixes resolved.
        np.testing.assert_equal(a_nd.numpy(), a_np + 1)
        np.testing.assert_equal(b_nd.numpy(), a_np + 2)
    # system lib should be loaded in different process
    worker = popen_pool.PopenWorker()
    worker.send(popen_check)
    worker.recv()
if __name__ == "__main__":
    test_synthetic()
    # Fixed: previously called test_cuda_multilib(), which is undefined
    # (the function is named test_cuda_multi_lib) and raised NameError.
    test_cuda_multi_lib()
| 5,156 | 35.574468 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_arith_iter_affine_map.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.tir import floordiv, floormod
def ifuse(inputs, pred_extent=None):
    """Fuse a sequence of (iterator, extent) pairs into one flat iterator.

    Returns the fused index expression and its extent; when *pred_extent*
    is given, it replaces the computed extent in the returned pair.
    """
    fused = 0
    total = 1
    for it, ext in inputs:
        fused = fused * ext + it
        total = total * ext
    if pred_extent is None:
        return fused, total
    return fused, pred_extent
def isplit(axis, factor):
    """Split an (iterator, extent) pair by *factor* into outer/inner pairs.

    Outer extent is the ceiling division (extent + factor - 1) // factor;
    the inner iterator is the floormod residue with extent *factor*.
    """
    it, extent = axis[0], axis[1]
    outer = (tvm.tir.floordiv(it, factor), tvm.tir.floordiv(extent + (factor - 1), factor))
    inner = (tvm.tir.floormod(it, factor), factor)
    return [outer, inner]
def var_dom(iters):
    """Build a {var: Range(0, extent)} domain map from (var, extent) pairs."""
    dom = {}
    for v, extent in iters:
        dom[v] = tvm.ir.Range(0, extent)
    return dom
def convert_iter_expr(expr):
    """Normalize an IterMapExpr back into a plain PrimExpr for comparison."""
    return tvm.arith.normalize_iter_map_to_expr(expr)
def assert_iter_sum_pattern(
    expect_dict, dom_map, predicate=True, check_level="surjective", simplify_trivial_iterators=True
):
    """Check detect_iter_map results against expectations.

    *expect_dict* maps each input expression to a spec tuple
    (extent, base[, scale[, expected_iter]]). scale defaults to 1; the
    normalized expression is compared only when expected_iter is given.
    """
    keys = list(expect_dict.keys())
    res = tvm.arith.detect_iter_map(
        keys,
        dom_map,
        predicate=predicate,
        check_level=check_level,
        simplify_trivial_iterators=simplify_trivial_iterators,
    )
    indices = res.indices
    # Detection must succeed for every input (errors shown on failure).
    assert len(indices) == len(keys), res.errors
    for i, input_iter in enumerate(keys):
        spec = expect_dict[input_iter]
        (
            extent,
            base,
        ) = spec[0:2]
        scale = spec[2] if len(spec) > 2 else 1
        expect_iter = spec[3] if len(spec) > 3 else None
        sum_expr = indices[i]
        assert isinstance(sum_expr, tvm.arith.IterSumExpr)
        # Unit-extent iterators are folded away, leaving only the base.
        if extent == 1:
            assert len(sum_expr.args) == 0
        else:
            assert len(sum_expr.args) == 1
            tvm.testing.assert_prim_expr_equal(sum_expr.args[0].extent, extent)
            tvm.testing.assert_prim_expr_equal(sum_expr.args[0].scale, scale)
        tvm.testing.assert_prim_expr_equal(sum_expr.base, base)
        if expect_iter is not None:
            # Compare in PrimExpr form unless an IterMapExpr was expected.
            if not isinstance(expect_iter, tvm.arith.IterMapExpr):
                sum_expr = convert_iter_expr(sum_expr)
            tvm.ir.assert_structural_equal(sum_expr, expect_iter)
def assert_iter_map_simplify(
    expect_dict, dom_map, predicate=True, check_level="surjective", simplify_trivial_iterators=True
):
    """Check that iter_map_simplify rewrites each key of *expect_dict* into
    the corresponding expected expression (structural equality)."""
    keys = list(expect_dict.keys())
    # NOTE(review): `imap` is computed but never used below -- presumably a
    # sanity detect_iter_map call (or leftover); confirm before removing.
    imap = tvm.arith.detect_iter_map(
        keys,
        dom_map,
        predicate=predicate,
        check_level=check_level,
        simplify_trivial_iterators=simplify_trivial_iterators,
    )
    res = tvm.arith.iter_map_simplify(
        keys,
        dom_map,
        predicate=predicate,
        check_level=check_level,
        simplify_trivial_iterators=simplify_trivial_iterators,
    )
    for i, input_expr in enumerate(keys):
        expected_expr = expect_dict[input_expr]
        tvm.ir.assert_structural_equal(res[i], expected_expr)
def assert_iter_sum_failure(iters, dom_map, predicate=True, check_level="surjective"):
    """Assert that detect_iter_map rejects *iters* (returns no indices)."""
    result = tvm.arith.detect_iter_map(
        list(iters), dom_map, predicate=predicate, check_level=check_level
    )
    assert len(result.indices) == 0
def test_trivial():
    """Plain vars and constants; duplicated inputs must fail, and a
    unit-extent iterator is rejected only under the bijective check."""
    x = tvm.tir.Var("x", "int32")
    y = tvm.tir.Var("y", "int32")
    z = tvm.tir.Var("z", "int32")
    dom_map = var_dom([(x, 3), (y, 4), (z, 1)])
    assert_iter_sum_pattern({x: (3, 0), y: (4, 0), 3: (1, 3)}, dom_map)
    assert_iter_sum_pattern({x: (3, 0), 3: (1, 3)}, dom_map)
    # not independent
    assert_iter_sum_failure([x, x, 3], dom_map)
    assert_iter_sum_pattern(
        {x: (3, 0), y: (4, 0)}, dom_map, check_level="bijective", simplify_trivial_iterators=True
    )
    assert_iter_sum_pattern(
        {x: (3, 0), y: (4, 0)}, dom_map, check_level="bijective", simplify_trivial_iterators=False
    )
    # z (extent 1) is not covered, so [x, z] is not bijective over dom_map.
    assert_iter_sum_failure([x, z], dom_map, check_level="bijective")
def test_fuse():
    """Fusion patterns: constant offsets, symbolic factors, duplicated or
    mismatched-factor inputs, and strided fusions."""
    x = tvm.tir.Var("x", "int32")
    y = tvm.tir.Var("y", "int32")
    c = tvm.tir.SizeVar("c", "int32")
    c0 = tvm.tir.SizeVar("c0", "int32")
    assert_iter_sum_pattern({y * 3 + 1 + c + x: (12, 1 + c)}, var_dom([(x, 3), (y, 4)]))
    assert_iter_sum_pattern({ifuse([(x, 3), (y, 4)])[0]: (12, 0)}, var_dom([(x, 3), (y, 4)]))
    # fuse with symbolic factor
    assert_iter_sum_pattern({(y + 1) * c + x: (4 * c, c)}, var_dom([(x, c), (y, 4)]))
    # duplication
    assert_iter_sum_failure([y * 3 + x, y], var_dom([(x, 3), (y, 4)]))
    assert_iter_sum_failure([y, x + 1, y], var_dom([(x, 3), (y, 4)]))
    # factor mismatch
    assert_iter_sum_failure([y * 4 + x], var_dom([(x, 3), (y, 4)]))
    # simple stride pattern
    assert_iter_sum_pattern({x * 4 + y * 2: (6, 0, 2, (x * 2 + y) * 2)}, var_dom([(x, 3), (y, 2)]))
    # simple stride pattern with symbolic
    assert_iter_sum_pattern(
        {x * 2 * c0 + y * 2: (3 * c0, 0, 2, (x * c0 + y) * 2)}, var_dom([(x, 3), (y, c0)])
    )
def test_split():
    """Split patterns via floordiv/floormod, including symbolic divisors and
    pre-scaled iterators."""
    x = tvm.tir.Var("x", "int32")
    y = tvm.tir.Var("y", "int32")
    c0 = tvm.tir.SizeVar("c0", "int32")
    c1 = tvm.tir.SizeVar("c1", "int32")
    fld = tvm.tir.floordiv
    flm = tvm.tir.floormod
    assert_iter_sum_pattern({fld(x, 3): (8, 0), flm(x, 3) * 2 + c1: (3, c1, 2)}, var_dom([(x, 24)]))
    assert_iter_sum_pattern(
        {fld(x, 6): (4, 0), fld(flm(x, 6), 2): (3, 0), flm(x, 2): (2, 0)}, var_dom([(x, 24)])
    )
    # simple symbolic bound
    # TODO(tvm-team) improve symbolic divisible check to enable
    # more complicated symbolic bound
    assert_iter_sum_pattern({fld(x, c0): (c1, 0), flm(x, c0): (c0, 0)}, var_dom([(x, c1 * c0)]))
    assert_iter_sum_pattern({fld(x * 2, 4): (4, 0, 1), flm(x * 2, 4): (2, 0, 2)}, var_dom([(x, 8)]))
    assert_iter_sum_pattern(
        {
            fld(x * 2, 4) * 4 + flm(x * 2, 4): (8, 0, 2),
        },
        var_dom([(x, 8)]),
    )
    # dividing by a non-constant expression is rejected
    assert_iter_sum_failure([fld(x, flm(flm(y, 8), 6))], var_dom([(x, 24), (y, 8)]))
    # domain of x is undefined
    assert_iter_sum_pattern(
        {fld(flm(x, 49) + y, 49): (1, fld(flm(x, 49) + y, 49))}, var_dom([(y, 1)])
    )
def test_compound():
    """Compound split-then-fuse pattern, checked against a hand-built IterSumExpr.

    The expected structure ``sz`` is reconstructed manually with IterMark /
    IterSplitExpr / IterSumExpr so the detected result can be compared
    structurally, not just by value.
    """
    x = tvm.tir.Var("x", "int32")
    y = tvm.tir.Var("y", "int32")
    xo, xi = isplit((x, 10), 5)
    yo, yi = isplit((y, 9), 3)
    z = ifuse([yo, xo, yi])
    # reconstruct the pattern manually
    mx = tvm.arith.IterMark(x, 10)
    my = tvm.arith.IterMark(y, 9)
    xoscale = 3
    yoscale = 6
    yiscale = 1
    mxo = tvm.arith.IterSplitExpr(mx, 5, 2, xoscale)
    myo = tvm.arith.IterSplitExpr(my, 3, 3, yoscale)
    myi = tvm.arith.IterSplitExpr(my, 1, 3, yiscale)
    mz = tvm.arith.IterMark(tvm.arith.IterSumExpr([myo, mxo, myi], 0), 18)
    sz = tvm.arith.IterSumExpr([tvm.arith.IterSplitExpr(mz, 1, 18, 1)], 0)
    assert_iter_sum_pattern({z[0]: (18, 0, 1, sz), xi[0]: (5, 0)}, var_dom([(x, 10), (y, 9)]))
def test_compound_floormod_two_regression():
    """Regression: a floormod-by-2 term with negative scale must not be detected."""
    x = tvm.tir.Var("x", "int32")
    fld = tvm.tir.floordiv
    flm = tvm.tir.floormod
    # regression
    # extent of 2 of negative scale cannot be normalized
    assert_iter_sum_failure(
        [fld(x, 2) * 2 - flm(x, 2) + 1],
        dom_map=var_dom([(x, 8)]),
    )
def test_predicate():
    """Iterator detection when a predicate tightens the iteration domain.

    Covers bound-only predicates, constraints on fused/split sub-iterators,
    duplicate and contradictory constraints, and predicates that must cause
    detection failure.
    """
    x = tvm.tir.Var("x", "int32")
    y = tvm.tir.Var("y", "int32")
    # available constraints
    # upper bound only
    assert_iter_sum_pattern(
        {x * 10 + y: (128, 0)}, var_dom([(x, 13), (y, 10)]), predicate=x * 10 + y < 128
    )
    assert_iter_sum_pattern(
        {x * 10 + y: (128, 0)}, var_dom([(x, 13), (y, 10)]), predicate=x * 10 + y <= 127
    )
    # lower bound only
    assert_iter_sum_pattern(
        {x * 10 + y: (124, 6)}, var_dom([(x, 13), (y, 10)]), predicate=x * 10 + y > 5
    )
    assert_iter_sum_pattern(
        {x * 10 + y: (124, 6)}, var_dom([(x, 13), (y, 10)]), predicate=x * 10 + y >= 6
    )
    # lower bound + upper bound
    assert_iter_sum_pattern(
        {x * 10 + y: (122, 6)},
        var_dom([(x, 13), (y, 10)]),
        predicate=tvm.tir.And(x * 10 + y > 5, x * 10 + y < 128),
    )
    assert_iter_sum_pattern(
        {x * 10 + y: (122, 6)},
        var_dom([(x, 13), (y, 10)]),
        predicate=tvm.tir.And(x * 10 + y >= 6, x * 10 + y <= 127),
    )
    # constraint on one fused iter
    i = tvm.tir.Var("i", "int32")
    j = tvm.tir.Var("j", "int32")
    k = tvm.tir.Var("k", "int32")
    assert_iter_sum_pattern(
        {i * 8 + j * 2 + k: (88, 1)},
        var_dom([(i, 11), (j, 5), (k, 2)]),
        predicate=tvm.tir.all(1 <= j * 2 + k, j * 2 + k < 9),
    )
    # constraint on single var
    assert_iter_sum_pattern({i: (10, 0)}, var_dom([(i, 48)]), predicate=i < 10)
    # iterations are subparts of constraint, invalid case 1
    assert_iter_sum_failure(
        [i, j, k],
        var_dom([(i, 128), (j, 128), (k, 128)]),
        predicate=tvm.tir.all(i * 16384 + j * 128 + k < 100),
    )
    # iterations are subparts of constraint, invalid case 2
    assert_iter_sum_failure(
        [i * 128 + j, k],
        var_dom([(i, 128), (j, 128), (k, 128)]),
        predicate=i * 16384 + j * 128 + k < 100,
    )
    # irrelevant predicate
    assert_iter_sum_pattern({i + j: (1, j)}, var_dom([(i, 1)]), predicate=j <= 24)
    # constraint on nested fused iters
    assert_iter_sum_pattern(
        {i * 8 + j * 2 + k: (22, 3)},
        var_dom([(i, 11), (j, 5), (k, 2)]),
        predicate=tvm.tir.all(
            1 <= j * 2 + k, j * 2 + k < 9, 3 <= i * 8 + j * 2 + k, i * 8 + j * 2 + k < 25
        ),
    )
    # duplicate constraint on one fused iter
    assert_iter_sum_pattern(
        {i * 6 + j * 2 + k: (66, 2)},
        var_dom([(i, 11), (j, 5), (k, 2)]),
        predicate=tvm.tir.all(1 <= j * 2 + k, 2 <= j * 2 + k, j * 2 + k < 8, j * 2 + k < 9),
    )
    # duplicate constraint on nested fused iters
    assert_iter_sum_pattern(
        {i * 6 + j * 2 + k: (15, 3)},
        var_dom([(i, 11), (j, 5), (k, 2)]),
        predicate=tvm.tir.all(
            1 <= j * 2 + k,
            2 <= j * 2 + k,
            j * 2 + k < 8,
            j * 2 + k < 9,
            3 <= i * 6 + j * 2 + k,
            i * 6 + j * 2 + k < 25,
            1 <= i * 6 + j * 2 + k,
            i * 6 + j * 2 + k < 18,
        ),
    )
    # constraint on non-disjoint fused iters should fail
    assert_iter_sum_failure(
        [i * 8 + j * 2 + k],
        var_dom([(i, 11), (j, 5), (k, 2)]),
        predicate=tvm.tir.all(2 <= j * 2 + k, 0 <= i * 4 + j),
    )
    # constraint on many disjoint fused iters, case 1
    # i4 * 6 + i5 in [3, 9), extent=6 (= scale of i2)
    # i2 * 30 + i3 * 15 in [30, 90), extent=60 (= scale of i1)
    # i1 * 60 in [60, 240), extent=180 (= scale of i0)
    i0 = tvm.tir.Var("i0", "int32")
    i1 = tvm.tir.Var("i1", "int32")
    i2 = tvm.tir.Var("i2", "int32")
    i3 = tvm.tir.Var("i3", "int32")
    i4 = tvm.tir.Var("i4", "int32")
    i5 = tvm.tir.Var("i5", "int32")
    assert_iter_sum_pattern(
        {i0 * 180 + i1 * 60 + i2 * 30 + i3 * 15 + i4 * 6 + i5: (540, 93)},
        var_dom([(i0, 3), (i1, 4), (i2, 3), (i3, 2), (i4, 3), (i5, 6)]),
        predicate=tvm.tir.all(1 <= i1, 2 <= i2 * 2 + i3, 3 <= i4 * 6 + i5),
    )
    # constraint on many disjoint fused iters, case 2
    assert_iter_sum_pattern(
        {i0 * 45 + i1 * 45 + i2 * 9 + i3 * 4 + i4: (135, 28)},
        var_dom([(i0, 3), (i1, 2), (i2, 5), (i3, 3), (i4, 4)]),
        predicate=tvm.tir.all(
            3 <= i1 * 5 + i2, i1 * 5 + i2 < 8, 1 <= i3 * 4 + i4, i3 * 4 + i4 < 10
        ),
    )
    # constraint on split iters
    assert_iter_sum_pattern(
        {i % 16: (7, 3), i // 16: (8, 4)},
        var_dom([(i, 1024)]),
        predicate=tvm.tir.all(3 <= i % 16, i % 16 < 10, 4 <= i // 16, i // 16 < 12),
        check_level="bijective",
    )
    # constraint on split iters, nested case 1
    assert_iter_sum_pattern(
        {(i * 32 + j) % 16: (7, 3)},
        var_dom([(i, 5), (j, 32)]),
        predicate=tvm.tir.all(3 <= (i * 32 + j) % 16, (i * 32 + j) % 16 < 10),
    )
    # constraint on split iters, nested case 2
    assert_iter_sum_failure(
        [
            (i * 32 + j) % 16,
        ],
        var_dom([(i, 5), (j, 32)]),
        predicate=tvm.tir.all(1 <= i * 32 + j, i * 32 + j <= 32),
        check_level="bijective",
    )
    assert_iter_sum_pattern(
        {(i * 32 + j) % 16: (16, 0)},
        var_dom([(i, 5), (j, 32)]),
        predicate=tvm.tir.all(1 <= i * 32 + j, i * 32 + j <= 32),
    )
    assert_iter_sum_pattern(
        {(i * 32 + j - 1) % 16: (16, 0), (i * 32 + j - 1) // 16: (4, 0)},
        var_dom([(i, 5), (j, 32)]),
        predicate=tvm.tir.all(1 <= i * 32 + j, i * 32 + j <= 64),
    )
    # non-standard form of predicate
    assert_iter_sum_pattern(
        {x * 10 + y: (128, 0)}, var_dom([(x, 13), (y, 10)]), predicate=x * 10 < 128 - y
    )
    # duplicate constraint
    assert_iter_sum_pattern(
        {x * 10 + y: (64, 0)},
        var_dom([(x, 13), (y, 10)]),
        predicate=tvm.tir.all(x * 10 + y < 128, x * 10 + y < 64),
    )
    # useless constraint
    assert_iter_sum_pattern(
        {x * 10 + y: (130, 0)}, var_dom([(x, 13), (y, 10)]), predicate=x * 10 + y < 140
    )
    i1 = tvm.tir.Var("i1", "int32")
    i2 = tvm.tir.Var("i2", "int32")
    i3 = tvm.tir.Var("i3", "int32")
    i4 = tvm.tir.Var("i4", "int32")
    assert_iter_sum_pattern(
        {i1 * 20 + i2 * 10 + i3 * 3 + i4: (128, 0)},
        var_dom([(i1, 7), (i2, 2), (i3, 4), (i4, 3)]),
        predicate=(
            tvm.tir.all(
                i1 * 2 + i2 < 13,
                i1 * 20 + i2 * 10 + i3 * 3 + i4 < 128,
                i3 * 3 + i4 < 10,
            )
        ),
    )
    # wrong constraint
    assert_iter_sum_failure(
        [i1 * 20 + i2 * 10 + i3 * 3 + i4],
        var_dom([(i1, 7), (i2, 2), (i3, 4), (i4, 3)]),
        predicate=(
            tvm.tir.all(
                i1 * 2 + i2 < 13,
                i1 * 20 + i2 * 10 + i3 * 3 + i4 < 128,
                i3 * 3 + i4 < 7,
            )
        ),
    )
    # incompatible constraint
    assert_iter_sum_failure(
        [i1 * 20 + i2 * 10 + i3 * 3 + i4],
        var_dom([(i1, 7), (i2, 2), (i3, 4), (i4, 3)]),
        predicate=(
            tvm.tir.all(
                i1 * 2 + i2 < 13,
                i1 * 20 + i2 * 10 + i3 * 3 + i4 < 128,
                i3 * 3 + i4 < 10,
                i1 * 4 + i3 < 20,
            )
        ),
    )
    assert_iter_sum_failure(
        [i1 * 20 + i2 * 10 + i3 * 3 + i4],
        var_dom([(i1, 7), (i2, 2), (i3, 4), (i4, 3)]),
        predicate=(
            tvm.tir.all(
                i1 * 2 + i2 < 13,
                i1 * 20 + i2 * 10 + i3 * 3 + i4 < 128,
                i1 * 4 + i3 < 20,
            )
        ),
    )
    # zero iter
    xo = tvm.tir.Var("xo", "int32")
    xi = tvm.tir.Var("xi", "int32")
    y = tvm.tir.Var("y", "int32")
    assert_iter_sum_pattern(
        {xo * 129 + xi: (128, 0), y: (128, 0)},
        var_dom([(xo, 1), (xi, 129), (y, 128)]),
        predicate=xo * 129 + xi < 128,
    )
    # strided iteration predicate
    assert_iter_sum_pattern(
        {xo * 16 + xi * 4: (10, 0, 4)},
        var_dom([(xo, 3), (xi, 4)]),
        predicate=xo * 4 + xi < 10,
    )
def convert_division(divisions):
    """Flatten a subspace-division result into plain nested lists.

    Every pair except the last has its outer/inner sources normalized back
    to ``PrimExpr``; the final entry carries the extents of the last pair.
    ``None`` or an empty input yields ``[]``.
    """
    if not divisions:
        return []
    normalize = tvm.arith.normalize_iter_map_to_expr
    converted = [
        [normalize(pair[0].source), normalize(pair[1].source)] for pair in divisions[:-1]
    ]
    last = divisions[-1]
    converted.append([last[0].extent, last[1].extent])
    return converted
def create_iter(name, extent):
    """Return a fresh int32 iteration variable paired with its extent."""
    iter_var = tvm.tir.Var(name, "int32")
    return iter_var, extent
def test_subspace_division():
    """Subspace division: splitting fused index maps into outer/inner parts.

    Each case divides a set of bindings over a chosen sub-iterator space,
    converts the result with ``convert_division``, and checks the outer and
    inner components (and any residual predicate) structurally.

    Fix: removed a stray no-op statement that was a bare reference to
    ``assert_iter_sum_pattern`` (it evaluated the function object and
    discarded it, asserting nothing).
    """
    x = tvm.tir.Var("x", "int32")
    y = tvm.tir.Var("y", "int32")
    z = tvm.tir.Var("z", "int32")
    c = tvm.tir.SizeVar("c", "int32")
    # simple 1.1
    res = tvm.arith.subspace_divide(
        [z * 12 + y * 3 + x + c], var_dom([(x, 3), (y, 4), (z, 5)]), [x]
    )
    res = convert_division(res)
    assert len(res) == 2
    tvm.ir.assert_structural_equal(res[0][0], z * 4 + y)
    tvm.ir.assert_structural_equal(res[0][1], x + c)
    # simple 1.2
    res = tvm.arith.subspace_divide(
        [z * 12 + y * 3 + x + c], var_dom([(x, 3), (y, 4), (z, 5)]), [x], z * 4 + y < 18
    )
    res = convert_division(res)
    assert len(res) == 2
    tvm.ir.assert_structural_equal(res[0][0], z * 4 + y)
    tvm.ir.assert_structural_equal(res[0][1], x + c)
    tvm.ir.assert_structural_equal(res[1][0], z * 4 + y < 18)
    tvm.ir.assert_structural_equal(res[1][1], True)
    # compound 1
    i0 = create_iter("i0", 4)
    j0 = create_iter("j0", 8)
    i3 = create_iter("i3", 2)
    i1, i2 = isplit(j0, 4)
    k0 = ifuse([i0, i1])
    k1 = ifuse([i2, i3])
    # compound 1.1
    res = tvm.arith.subspace_divide([k0[0], k1[0]], var_dom([i0, j0, i3]), [i3[0]])
    res = convert_division(res)
    assert len(res) == 3
    tvm.ir.assert_structural_equal(res[0][0], (i0[0] * 2) + floordiv(j0[0], 4))
    tvm.ir.assert_structural_equal(res[0][1], 0)
    tvm.ir.assert_structural_equal(res[1][0], floormod(j0[0], 4))
    tvm.ir.assert_structural_equal(res[1][1], i3[0])
    res1 = tvm.arith.detect_iter_map([res[0][1], res[1][1]], var_dom([i3])).indices
    assert len(res1) == 2
    res2 = tvm.arith.detect_iter_map([res[0][0], res[1][0]], var_dom([i0, j0])).indices
    assert len(res2) == 2
    # compound 1.2
    res = tvm.arith.subspace_divide([k0[0], k1[0]], var_dom([i0, j0, i3]), [j0[0], i3[0]])
    res = convert_division(res)
    assert len(res) == 3
    tvm.ir.assert_structural_equal(res[0][0], i0[0])
    tvm.ir.assert_structural_equal(res[0][1], floordiv(j0[0], 4))
    tvm.ir.assert_structural_equal(res[1][0], 0)
    tvm.ir.assert_structural_equal(res[1][1], (floormod(j0[0], 4) * 2) + i3[0])
    res1 = tvm.arith.detect_iter_map([res[0][1], res[1][1]], var_dom([j0, i3])).indices
    assert len(res1) == 2
    res2 = tvm.arith.detect_iter_map([res[0][0], res[1][0]], var_dom([i0])).indices
    assert len(res2) == 2
    # compound 1.3
    res = tvm.arith.subspace_divide([k0[0], k1[0]], var_dom([i0, j0, i3]), [i0[0], i3[0]])
    res = convert_division(res)
    assert len(res) == 0
    # compound 1.4
    res = tvm.arith.subspace_divide([k0[0], k1[0]], var_dom([i0, j0, i3]), [i3[0]], k0[0] < 7)
    res = convert_division(res)
    assert len(res) == 3
    tvm.ir.assert_structural_equal(res[0][0], (i0[0] * 2) + floordiv(j0[0], 4))
    tvm.ir.assert_structural_equal(res[0][1], 0)
    tvm.ir.assert_structural_equal(res[1][0], floormod(j0[0], 4))
    tvm.ir.assert_structural_equal(res[1][1], i3[0])
    tvm.ir.assert_structural_equal(res[2][0], (i0[0] * 2) + floordiv(j0[0], 4) < 7)
    tvm.ir.assert_structural_equal(res[2][1], True)
    res1 = tvm.arith.detect_iter_map([res[0][1], res[1][1]], var_dom([i3])).indices
    assert len(res1) == 2
    res2 = tvm.arith.detect_iter_map([res[0][0], res[1][0]], var_dom([i0, j0])).indices
    assert len(res2) == 2
    # compound 1.5
    res = tvm.arith.subspace_divide(
        [k0[0], k1[0]], var_dom([i0, j0, i3]), [j0[0], i3[0]], k1[0] < 7
    )
    res = convert_division(res)
    assert len(res) == 3
    tvm.ir.assert_structural_equal(res[0][0], i0[0])
    tvm.ir.assert_structural_equal(res[0][1], floordiv(j0[0], 4))
    tvm.ir.assert_structural_equal(res[1][0], 0)
    tvm.ir.assert_structural_equal(res[1][1], (floormod(j0[0], 4) * 2) + i3[0])
    tvm.ir.assert_structural_equal(res[2][0], True)
    tvm.ir.assert_structural_equal(res[2][1], (floormod(j0[0], 4) * 2) + i3[0] < 7)
    res1 = tvm.arith.detect_iter_map([res[0][1], res[1][1]], var_dom([j0, i3])).indices
    assert len(res1) == 2
    res2 = tvm.arith.detect_iter_map([res[0][0], res[1][0]], var_dom([i0])).indices
    assert len(res2) == 2
    # compound 1.6
    res = tvm.arith.subspace_divide(
        [k0[0], k1[0]], var_dom([i0, j0, i3]), [i3[0]], tvm.tir.all(k0[0] < 7, k1[0] < 7)
    )
    res = convert_division(res)
    assert len(res) == 0
    # compound 2
    j0 = create_iter("j0", 4)
    l0 = create_iter("l0", 2)
    l1 = create_iter("l1", 6)
    j3 = create_iter("j3", 3)
    k0 = ifuse([l0, l1])
    i1, j2 = isplit(k0, 3)
    j1, i1 = isplit(i1, 2)
    i0 = ifuse([j0, j1])
    i2 = ifuse([j2, j3])
    # compound 2.1
    res = tvm.arith.subspace_divide(
        [i0[0], i1[0], i2[0]], var_dom([j0, l0, l1, j3]), [l1[0], j3[0]]
    )
    res = convert_division(res)
    assert len(res) == 4
    tvm.ir.assert_structural_equal(res[0][0], (j0[0] * 2) + l0[0])
    tvm.ir.assert_structural_equal(res[0][1], 0)
    tvm.ir.assert_structural_equal(res[1][0], 0)
    tvm.ir.assert_structural_equal(res[1][1], floordiv(l1[0], 3))
    tvm.ir.assert_structural_equal(res[2][0], 0)
    tvm.ir.assert_structural_equal(res[2][1], (floormod(l1[0], 3) * 3) + j3[0])
    res1 = tvm.arith.detect_iter_map([res[0][1], res[1][1], res[2][1]], var_dom([l1, j3])).indices
    assert len(res1) == 3
    res2 = tvm.arith.detect_iter_map([res[0][0], res[1][0], res[2][0]], var_dom([j0, l0])).indices
    assert len(res2) == 3
    # compound 2.2
    res = tvm.arith.subspace_divide(
        [i0[0], i1[0], i2[0]], var_dom([j0, l0, l1, j3]), [l0[0], l1[0], j3[0]]
    )
    res = convert_division(res)
    assert len(res) == 4
    tvm.ir.assert_structural_equal(res[0][0], j0[0])
    tvm.ir.assert_structural_equal(res[0][1], floordiv(l0[0] * 6 + l1[0], 6))
    tvm.ir.assert_structural_equal(res[1][0], 0)
    tvm.ir.assert_structural_equal(res[1][1], floordiv(floormod(l0[0] * 6 + l1[0], 6), 3))
    tvm.ir.assert_structural_equal(res[2][0], 0)
    tvm.ir.assert_structural_equal(res[2][1], (floormod(l0[0] * 6 + l1[0], 3) * 3) + j3[0])
    res1 = tvm.arith.detect_iter_map(
        [res[0][1], res[1][1], res[2][1]], var_dom([l0, l1, j3])
    ).indices
    assert len(res1) == 3
    res2 = tvm.arith.detect_iter_map([res[0][0], res[1][0], res[2][0]], var_dom([j0])).indices
    assert len(res2) == 3
    # compound 2.3
    res = tvm.arith.subspace_divide(
        [i0[0], i1[0], i2[0]], var_dom([j0, l0, l1, j3]), [l0[0], j3[0]]
    )
    res = convert_division(res)
    assert len(res) == 0
    # compound 2.4
    res = tvm.arith.subspace_divide(
        [i0[0], i1[0], i2[0]],
        var_dom([j0, l0, l1, j3]),
        [l1[0], j3[0]],
        tvm.tir.all(i0[0] < 7, i2[0] < 8),
    )
    res = convert_division(res)
    assert len(res) == 4
    tvm.ir.assert_structural_equal(res[0][0], (j0[0] * 2) + l0[0])
    tvm.ir.assert_structural_equal(res[0][1], 0)
    tvm.ir.assert_structural_equal(res[1][0], 0)
    tvm.ir.assert_structural_equal(res[1][1], floordiv(l1[0], 3))
    tvm.ir.assert_structural_equal(res[2][0], 0)
    tvm.ir.assert_structural_equal(res[2][1], (floormod(l1[0], 3) * 3) + j3[0])
    tvm.ir.assert_structural_equal(res[3][0], (j0[0] * 2) + l0[0] < 7)
    tvm.ir.assert_structural_equal(res[3][1], (floormod(l1[0], 3) * 3) + j3[0] < 8)
    res1 = tvm.arith.detect_iter_map([res[0][1], res[1][1], res[2][1]], var_dom([l1, j3])).indices
    assert len(res1) == 3
    res2 = tvm.arith.detect_iter_map([res[0][0], res[1][0], res[2][0]], var_dom([j0, l0])).indices
    assert len(res2) == 3
    # compound 2.5
    res = tvm.arith.subspace_divide(
        [i0[0], i1[0], i2[0]], var_dom([j0, l0, l1, j3]), [j3[0]], i2[0] < 8
    )
    res = convert_division(res)
    assert len(res) == 0
def test_subspace_divide_trivial_iters():
    """Subspace division with ``simplify_trivial_iterators=False``.

    Extent-1 iterators must be kept as explicit components instead of
    being folded away.
    """
    x = tvm.tir.Var("x", "int32")
    y = tvm.tir.Var("y", "int32")
    z = tvm.tir.Var("z", "int32")
    # trivial 1.1
    res = tvm.arith.subspace_divide(
        [x * 16 + y], var_dom([(x, 1), (y, 16)]), [y], simplify_trivial_iterators=False
    )
    res = convert_division(res)
    assert len(res) == 2
    tvm.ir.assert_structural_equal(res[0][0], x)
    tvm.ir.assert_structural_equal(res[0][1], y)
    # trivial 1.2
    res = tvm.arith.subspace_divide(
        [x, y],
        var_dom([(x, 1), (y, 1)]),
        [y],
        simplify_trivial_iterators=False,
    )
    res = convert_division(res)
    assert len(res) == 3
    tvm.ir.assert_structural_equal(res[0][0], x)
    tvm.ir.assert_structural_equal(res[0][1], 0)
    tvm.ir.assert_structural_equal(res[1][0], 0)
    tvm.ir.assert_structural_equal(res[1][1], y)
def test_complex():
    """Deep split/fuse hierarchy checked against a manually built IterSumExpr.

    Builds a multi-level fuse/split tree (n*, m*, l*, k*, j*, i* levels),
    reconstructs the expected IterMark/IterSplitExpr structure by hand, then
    verifies detection, a deliberately wrong predicate, and subspace division
    of the same bindings.
    """
    n0 = create_iter("n0", 2)
    n1 = create_iter("n1", 4)
    m0 = ifuse([n0, n1], 6)
    m1 = create_iter("m1", 3)
    l0 = create_iter("l0", 4)
    l1 = create_iter("l1", 8)
    l2 = ifuse([m0, m1], 16)
    l3 = create_iter("l3", 32)
    k0, k4 = isplit(l0, 2)
    k1, k5 = isplit(l1, 2)
    k2, k6 = isplit(l2, 4)
    k3, k7 = isplit(l3, 4)
    j0 = ifuse([k0, k1], 7)
    j1 = ifuse([k2, k3])
    j2 = ifuse([k4, k5])
    j3 = ifuse([k6, k7], 15)
    i0 = ifuse([j0, j1], 200)
    i1 = ifuse([j2, j3], 50)
    # expected structure, reconstructed bottom-up
    n0_mark = tvm.arith.IterMark(n0[0], n0[1])
    n1_mark = tvm.arith.IterMark(n1[0], n1[1])
    l0_mark = tvm.arith.IterMark(l0[0], l0[1])
    l1_mark = tvm.arith.IterMark(l1[0], l1[1])
    m1_mark = tvm.arith.IterMark(m1[0], m1[1])
    l3_mark = tvm.arith.IterMark(l3[0], l3[1])
    m0_expr = tvm.arith.IterSumExpr(
        [
            tvm.arith.IterSplitExpr(n0_mark, 1, n0[1], 4),
            tvm.arith.IterSplitExpr(n1_mark, 1, n1[1], 1),
        ],
        0,
    )
    m0_mark = tvm.arith.IterMark(m0_expr, 6)
    l2_expr = tvm.arith.IterSumExpr(
        [tvm.arith.IterSplitExpr(m0_mark, 1, 6, 3), tvm.arith.IterSplitExpr(m1_mark, 1, m1[1], 1)],
        0,
    )
    l2_mark = tvm.arith.IterMark(l2_expr, 16)
    k0_expr = tvm.arith.IterSplitExpr(l0_mark, 2, 2, 4)
    k1_expr = tvm.arith.IterSplitExpr(l1_mark, 2, 4, 1)
    k2_expr = tvm.arith.IterSplitExpr(l2_mark, 4, 4, 8)
    k3_expr = tvm.arith.IterSplitExpr(l3_mark, 4, 8, 1)
    k4_expr = tvm.arith.IterSplitExpr(l0_mark, 1, 2, 30)
    k5_expr = tvm.arith.IterSplitExpr(l1_mark, 1, 2, 15)
    k6_expr = tvm.arith.IterSplitExpr(l2_mark, 1, 4, 4)
    k7_expr = tvm.arith.IterSplitExpr(l3_mark, 1, 4, 1)
    j0_expr = tvm.arith.IterSumExpr([k0_expr, k1_expr], 0)
    j0_mark = tvm.arith.IterMark(j0_expr, 7)
    i0_expr = tvm.arith.IterSumExpr(
        [tvm.arith.IterSplitExpr(j0_mark, 1, 7, 32), k2_expr, k3_expr], 0
    )
    j3_expr = tvm.arith.IterSumExpr([k6_expr, k7_expr], 0)
    j3_mark = tvm.arith.IterMark(j3_expr, 15)
    i1_expr = tvm.arith.IterSumExpr(
        [k4_expr, k5_expr, tvm.arith.IterSplitExpr(j3_mark, 1, 15, 1)], 0
    )
    i0_mark = tvm.arith.IterMark(i0_expr, i0[1])
    i1_mark = tvm.arith.IterMark(i1_expr, i1[1])
    i0_final = tvm.arith.IterSumExpr([tvm.arith.IterSplitExpr(i0_mark, 1, i0[1], 1)], 0)
    i1_final = tvm.arith.IterSumExpr([tvm.arith.IterSplitExpr(i1_mark, 1, i1[1], 1)], 0)
    assert_iter_sum_pattern(
        {i0[0]: (200, 0, 1, i0_final), i1[0]: (50, 0, 1, i1_final)},
        var_dom([l0, l1, n0, n1, m1, l3]),
        predicate=tvm.tir.all(
            i0[0] < 200, i1[0] < 50, m0[0] < 6, l2[0] < 16, j0[0] < 7, j3[0] < 15
        ),
    )
    # wrong constraint
    assert_iter_sum_failure(
        [i0[0], i1[0]],
        var_dom([l0, l1, n0, n1, m1, l3]),
        tvm.tir.all(i0[0] < 200, i1[0] < 50, m0[0] < 9, l2[0] < 16, j0[0] < 7, j3[0] < 14),
    )
    # subspace_division
    res = tvm.arith.subspace_divide(
        [i0[0], i1[0]],
        var_dom([l0, l1, n0, n1, m1, l3]),
        [n0[0], n1[0], m1[0], l3[0]],
        tvm.tir.all(m0[0] < 6, l2[0] < 16, j0[0] < 7, j3[0] < 15),
    )
    res = convert_division(res)
    assert len(res) == 3
    tvm.ir.assert_structural_equal(res[0][0], floordiv(l0[0], 2) * 4 + floordiv(l1[0], 2))
    tvm.ir.assert_structural_equal(
        res[0][1], (floordiv((n0[0] * 4 + n1[0]) * 3 + m1[0], 4) * 8) + floordiv(l3[0], 4)
    )
    tvm.ir.assert_structural_equal(res[1][0], ((floormod(l0[0], 2) * 2) + floormod(l1[0], 2)))
    tvm.ir.assert_structural_equal(
        res[1][1], ((floormod(((n0[0] * 4 + n1[0]) * 3 + m1[0]), 4) * 4) + floormod(l3[0], 4))
    )
    tvm.ir.assert_structural_equal(res[2][0], (floordiv(l0[0], 2) * 4) + floordiv(l1[0], 2) < 7)
    tvm.ir.assert_structural_equal(
        res[2][1],
        tvm.tir.all(
            n0[0] * 4 + n1[0] < 6,
            (n0[0] * 4 + n1[0]) * 3 + m1[0] < 16,
            floormod(((n0[0] * 4 + n1[0]) * 3 + m1[0]), 4) * 4 + floormod(l3[0], 4) < 15,
        ),
    )
    assert_iter_sum_pattern(
        {res[0][1]: (32, 0), res[1][1]: (15, 0)}, var_dom([n0, n1, m1, l3]), res[2][1]
    )
    assert_iter_sum_pattern({res[0][0]: (8, 0), res[1][0]: (4, 0)}, var_dom([l0, l1]))
def test_normalize_iter_map_to_expr():
    """Converting detected iterator expressions back into plain PrimExprs."""
    fld = tvm.tir.floordiv
    flm = tvm.tir.floormod
    x = tvm.tir.Var("x", "int32")
    y = tvm.tir.Var("y", "int32")
    xo, xi = isplit((x, 10), 5)
    yo, yi = isplit((y, 9), 3)
    z = ifuse([yo, xo, yi])
    res = tvm.arith.detect_iter_map([z[0], xi[0]], var_dom([(x, 10), (y, 9)]))
    tvm.ir.assert_structural_equal(
        tvm.arith.normalize_iter_map_to_expr(res.indices[0]),
        fld(y, 3) * 6 + fld(x, 5) * 3 + flm(y, 3),
    )
    tvm.ir.assert_structural_equal(tvm.arith.normalize_iter_map_to_expr(res.indices[1]), flm(x, 5))
    # iter mark wrap a complex expr
    split = tvm.arith.IterSplitExpr(tvm.arith.IterMark(x * y + 1, 1024), 1, 1024, 1)
    tvm.ir.assert_structural_equal(tvm.arith.normalize_iter_map_to_expr(split), x * y + 1)
def test_inverse_affine_iter_map():
    """Inverting affine iterator maps: recover source iters from outputs.

    Checks a simple split+fuse, a compound multi-level case, and a
    diamond-shaped DAG where one source feeds two derived iterators.
    """
    analyzer = tvm.arith.Analyzer()
    l0 = create_iter("l0", 64)
    l1 = create_iter("l1", 64)
    l2 = create_iter("l2", 64)
    # simple case
    l0_0, l0_1 = isplit(l0, 16)
    l1_0, l1_1 = isplit(l1, 4)
    l0_1_l1_1_fused = ifuse([l0_1, l1_1])
    iter_map = tvm.arith.detect_iter_map(
        [l0_1_l1_1_fused[0], l0_0[0], l1_0[0]], var_dom([l0, l1])
    ).indices
    outputs = [tvm.tir.Var("output_{}".format(i), "int32") for i in range(len(iter_map))]
    res = tvm.arith.inverse_affine_iter_map(iter_map, outputs)
    assert len(res) == 2
    l0_inverse = floordiv(outputs[0], 4) + outputs[1] * 16
    l1_inverse = floormod(outputs[0], 4) + outputs[2] * 4
    assert analyzer.can_prove_equal(res[l0[0]], l0_inverse)
    assert analyzer.can_prove_equal(res[l1[0]], l1_inverse)
    # compound case
    l0_0, l0_1 = isplit(l0, 16)
    l1_0, l1_1 = isplit(l1, 4)
    l2_1, l2_2 = isplit(l2, 4)
    l2_0, l2_1 = isplit(l2_1, 4)
    l0_1_l2_1_l1_1_l2_0_fused = ifuse([l0_1, l2_1, l1_1, l2_0])
    iter_map = tvm.arith.detect_iter_map(
        [l0_1_l2_1_l1_1_l2_0_fused[0], l0_0[0], l2_2[0], l1_0[0]], var_dom([l0, l1, l2])
    ).indices
    outputs = [tvm.tir.Var("output_{}".format(i), "int32") for i in range(len(iter_map))]
    res = tvm.arith.inverse_affine_iter_map(iter_map, outputs)
    assert len(res) == 3
    l0_inverse = floordiv(outputs[0], 64) + outputs[1] * 16
    l1_inverse = floormod(floordiv(outputs[0], 4), 4) + outputs[3] * 4
    l2_inverse = (
        floormod(outputs[0], 4) * 16 + floormod(floordiv(outputs[0], 16), 4) * 4 + outputs[2]
    )
    assert analyzer.can_prove_equal(res[l0[0]], l0_inverse)
    assert analyzer.can_prove_equal(res[l1[0]], l1_inverse)
    assert analyzer.can_prove_equal(res[l2[0]], l2_inverse)
    # diamond-shape DAG
    l0_0, l0_1 = isplit(l0, 16)
    l1 = ifuse([l0_1, l0_0])
    l1_0, l1_1 = isplit(l1, 8)
    l2 = ifuse([l1_1, l1_0])
    iter_map = tvm.arith.detect_iter_map([l2[0]], var_dom([l0])).indices
    outputs = [tvm.tir.Var("output_{}".format(i), "int32") for i in range(len(iter_map))]
    res = tvm.arith.inverse_affine_iter_map(iter_map, outputs)
    assert len(res) == 1
    l1_inverse = floormod(outputs[0], 8) * 8 + floordiv(outputs[0], 8)
    l0_inverse = floormod(l1_inverse, 4) * 16 + floordiv(l1_inverse, 4)
    assert analyzer.can_prove_equal(res[l0[0]], l0_inverse)
def test_inverse_affine_map_trivial_iter():
    """Constant output components are dropped from the inverse map."""
    analyzer = tvm.arith.Analyzer()
    l0 = create_iter("l0", 64)
    l1 = create_iter("l1", 64)
    iter_map = tvm.arith.detect_iter_map([0, l0[0], l1[0]], var_dom([l0, l1])).indices
    outputs = [tvm.tir.Var("output_{}".format(i), "int32") for i in range(len(iter_map))]
    res = tvm.arith.inverse_affine_iter_map(iter_map, outputs)
    # output_0 is expected to be constant and it is not included in the inverse map
    assert len(res) == 2
    assert analyzer.can_prove_equal(res[l0[0]], outputs[1])
    assert analyzer.can_prove_equal(res[l1[0]], outputs[2])
def test_free_variables():
    """Variables outside the iteration domain act as symbolic constants."""
    x = tvm.tir.Var("x", "int32")
    y = tvm.tir.Var("y", "int32")
    z = tvm.tir.Var("z", "int32")
    # illegal iter if z is within dom
    assert_iter_sum_failure([z * 19 + y * 3 + x], var_dom([(x, 3), (y, 3), (z, 3)]))
    # iter is valid if z is free, even there are linear forms of z
    assert_iter_sum_pattern(
        {z * 19 + y * 3 + x: (9, z * 19)},
        var_dom(
            [
                (x, 3),
                (y, 3),
            ]
        ),
    )
    assert_iter_sum_pattern(
        {z * z + y * 3 + x: (9, z * z)},
        var_dom(
            [
                (x, 3),
                (y, 3),
            ]
        ),
    )
class TestPadding:
    """Parameterized tests for padded iterator detection.

    Each test case is ``(iter_extent_map, mapped_iterators, [check_level])``;
    ``check_level`` defaults to "surjective" when omitted.
    """

    # class-level fixtures shared by the parameterized cases
    x = tvm.tir.Var("x", "int32")
    y = tvm.tir.Var("y", "int32")
    fld = tvm.tir.floordiv
    flm = tvm.tir.floormod
    positive_test_case = tvm.testing.parameter(
        # left padding only, offset divisible
        ({y: 192}, {fld(64 + y, 32): (6, 2, 1), flm(64 + y, 32): (32, 0, 1)}, "bijective"),
        # left padding only, offset non-divisible
        ({y: 176}, {fld(80 + y, 32): (6, 2, 1)}),
        ({y: 176}, {flm(fld(80 + y, 2), 16): (16, 0, 1), flm(80 + y, 2): (2, 0, 1)}),
        # right padding only, offset divisible
        ({x: 5, y: 4}, {fld(x * 32 + y * 8, 16): (10, 0, 1), flm(x * 32 + y * 8, 16): (2, 0, 8)}),
        # right padding only, offset non-divisible
        ({x: 26}, {fld(x, 15): (2, 0, 1)}),
        ({x: 26}, {flm(fld(x, 3), 5): (5, 0, 1), flm(x, 3): (3, 0, 1)}),
        # padding constants on both side
        ({x: 45}, {fld(x + 71, 32): (2, 2, 1)}),
        ({x: 45}, {flm(fld(x, 4), 8): (8, 0, 1), flm(x, 4): (4, 0, 1)}),
        # padding for free iteration part
        ({y: 360}, {fld(x * 360 + y, 16): (23, fld(x * 360 - flm(x, 2) * 8, 16), 1)}),
        ({y: 360}, {flm(x * 360 + y, 16): (16, 0, 1)}),
        # multiple split with same mark offset, could
        # be surjective on missing (padded // LCM)
        (
            {x: 240},
            {
                flm(x + 10, 3): (3, 0),
                flm(fld(x + 10, 3), 4): (4, 0),
                flm(fld(fld(x + 10, 3), 4), 5): (5, 0),
            },
        ),
        # different offsets on splits
        (
            {x: 240},
            {
                flm(x + 1, 3): (3, 0),
                flm(fld(x + 10, 3) + 2, 4): (4, 0),
                flm(fld(fld(x + 10, 3), 4) + 3, 5): (5, 0),
            },
        ),
    )
    negative_test_case = tvm.testing.parameter(
        # left padding only, offset non-divisible
        ({y: 176}, {fld(80 + y, 32), flm(80 + y, 32)}),
        ({y: 176}, {fld(80 + y, 32), fld(80 + y, 4)}),
        # right padding only, offset divisible
        ({x: 5, y: 4}, {fld(x * 32 + y * 8, 5)}),
        # multiple split with same mark offset, could
        # be surjective on missing (padded // LCM)
        (
            {x: 240},
            {
                flm(x + 10, 3),
                flm(fld(x + 10, 3), 4),
                flm(fld(fld(x + 10, 3), 4), 5),
                fld(fld(fld(x + 10, 3), 4), 5),
            },
        ),
        # original extent is smaller than the divident
        # it is not surjective wrt to the region [0, 16)
        ({x: 3}, {flm(x, 16)}),
    )
    def test_padding(self, positive_test_case):
        """Each positive case must be detected at its requested check level."""
        iter_extent, mapped_iterators, *args = positive_test_case
        check_level = args[0] if args else "surjective"
        dom_map = {var: tvm.ir.Range(0, ext) for var, ext in iter_extent.items()}
        assert_iter_sum_pattern(mapped_iterators, dom_map, check_level=check_level)
    def test_padding_error(self, negative_test_case):
        """Each negative case must be rejected at its requested check level."""
        iter_extent, mapped_iterators, *args = negative_test_case
        check_level = args[0] if args else "surjective"
        dom_map = {var: tvm.ir.Range(0, ext) for var, ext in iter_extent.items()}
        assert_iter_sum_failure(mapped_iterators, dom_map, check_level=check_level)
def test_overlapped_fuse():
    """Overlapped (non-bijective) fusion: surjective passes, bijective fails."""
    x = tvm.tir.Var("x", "int32")
    y = tvm.tir.Var("y", "int32")
    z = tvm.tir.Var("z", "int32")
    # NOTE(review): a and b reuse the name hints "x"/"y"; TIR Vars are
    # identity-based so behavior is unaffected, but confirm this is intended.
    a = tvm.tir.Var("x", "int32")
    b = tvm.tir.Var("y", "int32")
    # non-bijective fuse of two
    assert_iter_sum_pattern(
        {
            x * 7 + y: (22, 0, 1),
        },
        var_dom([(x, 3), (y, 8)]),
        check_level="surjective",
    )
    assert_iter_sum_failure([x * 7 + y], var_dom([(x, 3), (y, 8)]), check_level="bijective")
    # non-bijective fuse of three
    assert_iter_sum_pattern(
        {
            x * 18 + y * 7 + z: (40, 0, 1),
        },
        var_dom([(x, 2), (y, 3), (z, 8)]),
        check_level="surjective",
    )
    assert_iter_sum_failure([x * 7 + y], var_dom([(x, 2), (y, 3), (z, 8)]), check_level="bijective")
    # negative scale fusion is not allowed
    assert_iter_sum_failure([x * -7 + y], var_dom([(x, 3), (y, 8)]), check_level="surjective")
    assert_iter_sum_failure([x * 7 - y], var_dom([(x, 3), (y, 8)]), check_level="surjective")
    # with predicate
    assert_iter_sum_pattern(
        {
            a * 40 + b * 20 + x * 18 + y * 3 + z: (125, 6, 1),
        },
        var_dom([(a, 3), (b, 2), (x, 2), (y, 6), (z, 8)]),
        predicate=tvm.tir.all(z < 4, 1 < x * 6 + y, x * 6 + y < 10),
        check_level="surjective",
    )
    # stride=1 kernel
    assert_iter_sum_pattern(
        {x + a: (230, 0, 1)}, var_dom([(x, 224), (a, 7)]), check_level="surjective"
    )
    # do not allow both strided and overlapped
    assert_iter_sum_failure([5 * x + 2 * y], var_dom([(x, 4), (y, 3)]), check_level="surjective")
def test_iter_map_simplify_symbolic_case():
    """Iter-map simplify collapses split-then-fuse chains with symbolic extents."""
    x = tvm.tir.Var("x", "int64")
    y = tvm.tir.Var("y", "int64")
    z = x * 32 + y
    n = tvm.tir.SizeVar("n", "int64")
    def simple_fuse0(x):
        # (x // n) * n + x % n should simplify back to x
        return (x // n) * n + x % n
    assert_iter_map_simplify({simple_fuse0(x): x}, var_dom([(x, n * 32)]))
    assert_iter_map_simplify({simple_fuse0(z): z}, var_dom([(x, n), (y, 32)]))
    def fsymbolic_fuse0(x):
        return ((x // (n * n)) % 32) * (n * n) + ((x // n) % n) * n + x % n
    assert_iter_map_simplify({fsymbolic_fuse0(x): x}, var_dom([(x, n * n * 32)]))
    assert_iter_map_simplify({fsymbolic_fuse0(z): z}, var_dom([(x, n * n), (y, 32)]))
    def fsymbolic_fuse1(x):
        return ((x % (n * n * 32)) // (n * n) * n + (x % (n * n) // n)) * n + x % n
    assert_iter_map_simplify({fsymbolic_fuse1(x): x}, var_dom([(x, n * n * 32)]))
    assert_iter_map_simplify({fsymbolic_fuse1(z): z}, var_dom([(x, n * n), (y, 32)]))
    def fsymbolic_fuse2(i):
        return (i // (n * n) * n + i % (n * n) // n) * n + i % n
    assert_iter_map_simplify({fsymbolic_fuse2(x): x}, var_dom([(x, n * n * 32)]))
def test_iter_map_simplify_symbolic_predicate():
    """Iter-map simplify with symbolic extents constrained by a predicate."""
    x = tvm.tir.Var("x", "int64")
    y = tvm.tir.Var("y", "int64")
    n = tvm.tir.SizeVar("n", "int64")
    def simple_fuse0(x):
        return (x // n) * n + x % n
    z = x * 32 + y
    assert_iter_map_simplify(
        {simple_fuse0(z): z}, var_dom([(x, (n + 1) // 2), (y, 32)]), predicate=(z < n * 16)
    )
    def fsymbolic_fuse2(i):
        return (i // (n * n) * n + i % (n * n) // n) * n + i % n
    z = x * 64 + y
    assert_iter_map_simplify(
        {fsymbolic_fuse2(z): z},
        var_dom([(x, (n * n + 1) // 2), (y, 64)]),
        predicate=(z < n * n * 32),
    )
def test_iter_map_simplify_symbolic_reshape():
    """Reshape-style index expression with symbolic extent simplifies to the fused iter."""
    n = tvm.tir.Var("n", "int64")
    fused = tvm.tir.Var("fused", "int64")
    ax0 = (fused // 4096) // n
    ax1 = (fused // 4096) % n
    ax2 = fused % 4096
    rhs_index = ((ax2 // 4096 + ax0 * n + ax1) % n) * 4096 + ax2 % 4096
    assert_iter_map_simplify({rhs_index: fused}, var_dom([(fused, n * 4096)]))
def test_iter_map_simplify_unit_loop_order():
    """Simplification must keep the original ordering of unit-extent iterators."""
    x = tvm.tir.Var("x", "int64")
    y = tvm.tir.Var("y", "int64")
    z = tvm.tir.Var("z", "int64")
    # trivial iterators can be found at any when comparing via scale
    # ensure order unchanged
    assert_iter_map_simplify(
        {x + y + z: x + y + z}, var_dom([(x, 1), (y, 1), (z, 1)]), simplify_trivial_iterators=False
    )
    # Even with simplification, it should follow the original order
    assert_iter_map_simplify(
        {x + y + (z // 4) * 4 + z % 4: z + x + y},
        var_dom([(x, 1), (y, 1), (z, 32)]),
        simplify_trivial_iterators=False,
    )
    assert_iter_map_simplify(
        {y + 64 - x % 2 * 64: y + 64 - x % 2 * 64},
        var_dom([(x, 6), (y, 64)]),
        simplify_trivial_iterators=False,
    )
    # When we have iterators that have same scale but one of them come
    # with unit extent, we should prioritize unit extent
    assert_iter_map_simplify(
        {x // 128 + y + z: y + z},
        var_dom([(x, 128), (y, 128), (z, 1)]),
        simplify_trivial_iterators=False,
    )
def assert_normalize_to_iter_sum(index, input_iters, args, base):
    """Check ``normalize_to_iter_sum`` produces the expected split terms.

    ``args`` is an ordered list of ``(iter_expr, scale)`` pairs; each detected
    split must match the corresponding pair, and the residual ``base`` must
    match exactly.
    """
    res = tvm.arith.normalize_to_iter_sum(index, input_iters)
    assert isinstance(res, tvm.arith.IterSumExpr)
    assert len(res.args) == len(args)
    for split, item in zip(res.args, args):
        tvm.testing.assert_prim_expr_equal(split.scale, item[1])
        # normalized split should equal iter * scale
        tvm.testing.assert_prim_expr_equal(
            tvm.arith.normalize_iter_map_to_expr(split), item[0] * item[1]
        )
    tvm.testing.assert_prim_expr_equal(res.base, base)
def test_normalize_to_iter_sum():
    """Normalization of mixed index expressions into ordered iterator sums."""
    x = tvm.tir.Var("x", "int64")
    y = tvm.tir.Var("y", "int64")
    z = tvm.tir.Var("z", "int64")
    a = tvm.tir.Var("a", "int64")
    n = tvm.tir.Var("n", "int64")
    assert_normalize_to_iter_sum(
        z + ((y + x * 4 + 2) * n) + 3,
        var_dom([(x, 9), (y, 4), (z, 3)]),
        [(x, n * 4), (y, n), (z, 1)],
        2 * n + 3,
    )
    # max cannot be detected so it goes into base
    assert_normalize_to_iter_sum(
        tvm.tir.max(z, a) + ((y + x * 4 + 2) * n) + 3,
        var_dom([(x, 9), (y, 4), (z, 3)]),
        [(x, n * 4), (y, n)],
        tvm.tir.max(z, a) + 2 * n + 3,
    )
    # order by symbolic prod
    assert_normalize_to_iter_sum(
        z + ((y * 4 * a + x * 4 + 2) * n) + 3,
        var_dom([(y, a * n * 4), (x, n * 4), (z, a)]),
        [(y, a * n * 4), (x, n * 4), (z, 1)],
        2 * n + 3,
    )
    # order by cscale
    assert_normalize_to_iter_sum(
        z + 2 * y * 3 + 4 * x,
        var_dom([(y, a * n * 4), (x, n * 4), (z, a)]),
        [(y, 6), (x, 4), (z, 1)],
        0,
    )
    # split pattern
    assert_normalize_to_iter_sum(
        z + 2 * y * 3 + 4 * (x // 2),
        var_dom([(y, a * n * 4), (x, n * 4), (z, a)]),
        [(y, 6), (x // 2, 4), (z, 1)],
        0,
    )
    # iter simplify
    assert_normalize_to_iter_sum(
        z * 2 + 2 * y * 3 + 4 * (x // 4) + (x % 4),
        var_dom([(y, a * n * 4), (x, n * 4), (z, a)]),
        [(y, 6), (z, 2), (x, 1)],
        0,
    )
if __name__ == "__main__":
    # Discover and run every test in this file via TVM's pytest wrapper.
    tvm.testing.main()
| 43,878 | 33.014729 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_runtime_extension.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
@tvm.register_extension
class MyTensorView(object):
    """Minimal TVM extension type: wraps an array and forwards only its DLTensor handle."""

    # Tells the FFI layer to treat this object as a DLTensor handle argument.
    _tvm_tcode = tvm._ffi.runtime_ctypes.ArgTypeCode.DLTENSOR_HANDLE

    def __init__(self, arr):
        self.arr = arr

    @property
    def _tvm_handle(self):
        # Delegate to the wrapped array's handle; no data is copied.
        return self.arr._tvm_handle
def test_dltensor_compatible():
    """A DLTensor-compatible extension object can be passed to a built PrimFunc.

    Builds a simple prefix-increment kernel over a 1-D buffer, wraps an NDArray
    in ``MyTensorView`` (which only exposes ``_tvm_handle``), and checks the
    kernel runs correctly through the extension wrapper.
    """
    dtype = "int64"
    n = te.var("n")
    Ab = tvm.tir.decl_buffer((n,), dtype)
    ib = tvm.tir.ir_builder.create()
    A = ib.buffer_ptr(Ab)
    # NOTE: the original code bound an unused `i = te.var("i")` here; the loop
    # below introduces its own loop variable, so that binding has been removed.
    with ib.for_range(0, n - 1, "i") as i:
        A[i + 1] = A[i] + 1
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "arange"))
    f = tvm.build(mod, target="stackvm")
    a = tvm.nd.array(np.zeros(10, dtype=dtype))
    aview = MyTensorView(a)
    f(aview)
    # `a` was mutated in place through the DLTensor handle of the view.
    np.testing.assert_equal(a.numpy(), np.arange(a.shape[0]))
if __name__ == "__main__":
    test_dltensor_compatible()
| 1,731 | 30.490909 | 99 | py |
tvm | tvm-main/tests/python/unittest/test_tir_stmt_functor_substitute.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.script import tir as T
from tvm.tir.stmt_functor import substitute
class BaseCompare(tvm.testing.CompareBeforeAfter):
    def transform(self):
        """Return a pass that substitutes the first param of ``main`` with the literal 16."""

        def _substitute_first_param(mod):
            func = mod["main"]
            replaced_body = substitute(func.body, {func.params[0]: 16})
            new_func = tvm.tir.PrimFunc(params=[], body=replaced_body)
            return tvm.IRModule.from_expr(new_func)

        return _substitute_first_param
class TestBasicSubstitute(BaseCompare):
    # Substituting n -> 16 must rewrite the loop extent.
    def before(n: T.int32):
        for i in range(n):
            T.evaluate(i)

    def expected():
        for i in range(16):
            T.evaluate(i)
class TestSubstituteAllocate(BaseCompare):
    # Substitution must also rewrite the extent of an Allocate node.
    def before(n: T.int32):
        A_data = T.allocate([n], "float32")
        T.evaluate(A_data)

    def expected():
        A_data = T.allocate([16], "float32")
        T.evaluate(A_data)
class TestSubstituteBufferLoad(BaseCompare):
    # Substitution must propagate through Buffer shapes used by BufferLoad.
    def before(n: T.int32):
        A_data = T.allocate([n], "float32")
        A = T.Buffer(n, "float32", data=A_data)
        for i in range(n):
            T.evaluate(A[i])

    def expected():
        A_data = T.allocate([16], "float32")
        A = T.Buffer(16, "float32", data=A_data)
        for i in range(16):
            T.evaluate(A[i])
class TestSubstituteDeclBuffer(BaseCompare):
    # Substitution must propagate through DeclBuffer shapes as well.
    def before(n: T.int32):
        A_data = T.allocate([n], "float32")
        A = T.decl_buffer(n, "float32", data=A_data)
        T.evaluate(A.data)

    def expected():
        A_data = T.allocate([16], "float32")
        A = T.decl_buffer(16, "float32", data=A_data)
        T.evaluate(A.data)
if __name__ == "__main__":
    tvm.testing.main()
| 2,453 | 28.214286 | 84 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_sampling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import defaultdict
import sys
import numpy
import pytest
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable
@T.prim_func
def elementwise(a: T.handle, b: T.handle) -> None:
    # Extents chosen to exercise tiling: 128 = 2**7, 257 is prime, 1470 is composite.
    A = T.match_buffer(a, (128, 257, 1470))
    B = T.match_buffer(b, (128, 257, 1470))
    for i, j, k in T.grid(128, 257, 1470):
        with T.block("B"):
            vi, vj, vk = T.axis.remap("SSS", [i, j, k])
            B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def tiled_conv2d_with_padding(
    inputs: T.Buffer((1, 224, 224, 3), "float32"),
    weight: T.Buffer((7, 7, 3, 64), "float32"),
    conv2d_nhwc: T.Buffer((1, 112, 112, 64), "float32"),
) -> None:
    # Pad the input by 3 on each spatial side (224 -> 230) with zeros.
    PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
    for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
        with T.block("PadInput"):
            i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1])
            T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
            PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
                3 <= i1_1 and i1_1 < 227 and 3 <= i2_1 and i2_1 < 227,
                inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1],
                T.float32(0),
                dtype="float32",
            )
    # Pre-tiled conv2d loop nest; this shape gives sample_compute_location a
    # fixed set of candidate loops to choose from.
    for (
        i0_0,
        i1_0,
        i2_0,
        i3_0,
        i0_1_1,
        i1_1_1,
        i2_1_1,
        i3_1_1,
        i4_0,
        i5_0,
        i6_0,
        i0_2,
        i1_2,
        i2_2,
        i3_2,
        i4_1,
        i5_1,
        i6_1,
        i0_3,
        i1_3,
        i2_3,
        i3_3,
    ) in T.grid(1, 1, 4, 1, 1, 2, 4, 1, 7, 7, 1, 1, 1, 1, 1, 1, 1, 3, 1, 56, 7, 64):
        with T.block("conv2d_nhwc"):
            n = T.axis.spatial(1, 0)
            h = T.axis.spatial(112, i1_1_1 * 56 + i1_3)
            w = T.axis.spatial(112, i2_0 * 28 + i2_1_1 * 7 + i2_3)
            co, rh, rw, rc = T.axis.remap("SRRR", [i3_3, i4_0, i5_0, i6_1])
            T.reads(
                conv2d_nhwc[n, h, w, co],
                PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc],
                weight[rh, rw, rc, co],
            )
            T.writes(conv2d_nhwc[n, h, w, co])
            with T.init():
                conv2d_nhwc[n, h, w, co] = T.float32(0)
            conv2d_nhwc[n, h, w, co] = (
                conv2d_nhwc[n, h, w, co]
                + PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc] * weight[rh, rw, rc, co]
            )
# pylint: enable=no-member,invalid-name,unused-variable
def test_sample_categorical():
    """Test sample categorical sampling function"""
    n = 1000
    sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
    counter = defaultdict(int)
    candidates = [5, 2, 7, 1]
    probs = [0.15, 0.55, 0.05, 0.25]
    for _ in range(n):
        v = sch.get(sch.sample_categorical(candidates, probs))
        counter[v] += 1
    for i, prob in enumerate(probs):
        # Each candidate's observed frequency must be within +-7% of its probability.
        assert (prob - 0.07) * n <= counter[candidates[i]] <= (prob + 0.07) * n
    verify_trace_roundtrip(sch, mod=elementwise)
def test_sample_categorical_copy():
    """Check the random variable sampling results after schedule copy"""
    num_samples = 100
    sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
    candidates = [1, 2, 3, 4]
    probs = [0.1, 0.2, 0.3, 0.4]
    sampled = []
    for _ in range(num_samples):
        rv = sch.sample_categorical(candidates, probs)  # pylint: disable=invalid-name
        sampled.append((rv, sch.get(rv)))
    sch_copy = sch.copy()
    # Every decision recorded before the copy must be visible in the copy.
    for rv, decision in sampled:  # pylint: disable=invalid-name
        assert int(decision) == int(sch_copy.get(rv))
def test_sample_categorical_serialize():
    """Check the random variable sampling results after schedule serialization"""
    n = 100
    sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
    candidates = [5, 6, 7, 8]
    probs = [0.23, 0.19, 0.37, 0.21]
    decisions = []
    for _ in range(n):
        rv = sch.get(sch.sample_categorical(candidates, probs))  # pylint: disable=invalid-name
        decisions.append(rv)
    # Round-trip the trace and check every recorded decision survives serialization.
    new_sch = verify_trace_roundtrip(sch, mod=elementwise)
    for i, new_inst in enumerate(new_sch.trace.insts):
        assert decisions[i] == candidates[new_sch.trace.decisions[new_inst].value]
def test_sample_perfect_tile_power_of_two():
    """Sampled tile factors of the power-of-two extent (128) must multiply back to it."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    loop_i, _, _ = sch.get_loops(sch.get_block("B"))
    sampled = sch.sample_perfect_tile(loop_i, n=4)
    product = 1
    for rv in sampled:
        product *= sch.get(rv)
    assert product == 128
    verify_trace_roundtrip(sch, mod=elementwise)
def test_sample_perfect_tile_prime():
    """Sampled tile factors of the prime extent (257) must multiply back to it."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    _, loop_j, _ = sch.get_loops(sch.get_block("B"))
    sampled = sch.sample_perfect_tile(loop_j, n=4)
    product = 1
    for rv in sampled:
        product *= sch.get(rv)
    assert product == 257
    verify_trace_roundtrip(sch, mod=elementwise)
def test_sample_perfect_tile_composite():
    """Sampled tile factors of the composite extent (1470) must multiply back to it."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    _, _, loop_k = sch.get_loops(sch.get_block("B"))
    sampled = sch.sample_perfect_tile(loop_k, n=4)
    product = 1
    for rv in sampled:
        product *= sch.get(rv)
    assert product == 1470
    verify_trace_roundtrip(sch, mod=elementwise)
# Parametrize over passing the block as an object vs. by name (sugared form).
use_sugared_block = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_sample_compute_location(use_sugared_block):
    """sample_compute_location should pick among the candidate locations roughly uniformly."""
    num_samples = 100
    sch = tir.Schedule(tiled_conv2d_with_padding, seed=42, debug_mask="all")
    pad_input = "PadInput" if use_sugared_block else sch.get_block("PadInput")
    histogram = {}
    for _ in range(num_samples):
        _ = sch.sample_compute_location(pad_input)  # pylint: disable=invalid-name
        decision = sch.trace.decisions[sch.trace.insts[-1]]
        histogram[decision] = histogram.get(decision, 0) + 1
    n_candidates = 8
    expected_rate = 1.0 / n_candidates
    for cnt in histogram.values():
        numpy.testing.assert_allclose(expected_rate, cnt / num_samples, atol=0.04)
def test_sample_perfect_tile_after_copy():
    """Sampling on a copied schedule must not hang (requires ForkSeed on copy)."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    sch_copy = sch.copy()
    _, _, i = sch.get_loops(sch.get_block("B"))
    sch.sample_perfect_tile(i, n=4)
    _, _, i = sch_copy.get_loops(sch_copy.get_block("B"))
    # Hangs if ForkSeed is not invoked when copying a schedule
    sch_copy.sample_perfect_tile(i, n=4)
if __name__ == "__main__":
    tvm.testing.main()
| 7,584 | 33.953917 | 97 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_mutator_mutate_parallel.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
from typing import List
from tvm import meta_schedule as ms
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir import Schedule
# pylint: disable=invalid-name, no-member
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    # 512x512 matmul workload; note B is indexed [vj, vk], i.e. C = A @ B.T.
    A = T.match_buffer(a, [512, 512])
    B = T.match_buffer(b, [512, 512])
    C = T.match_buffer(c, [512, 512])
    for i, j, k in T.grid(512, 512, 512):  # type: ignore
        with T.block("C"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])  # type: ignore
            with T.init():
                C[vi, vj] = 0.0  # type: ignore
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# pylint: enable=invalid-name, no-member
def _sch(decisions: List[List[int]], ann_val: int) -> Schedule:
    """Build a tiled matmul schedule with fixed tiling decisions.

    Parameters
    ----------
    decisions : List[List[int]]
        Three perfect-tile decisions: 4-way splits for i and j, 2-way split for k.
    ann_val : int
        Value for the "meta_schedule.parallel" annotation on the root block,
        which is what MutateParallel will later mutate.
    """
    sch = Schedule(matmul, debug_mask="all")
    # pylint: disable=invalid-name
    d0, d1, d2 = decisions
    b0 = sch.get_block(name="C", func_name="main")
    root = sch.get_block(name="root", func_name="main")
    sch.get_consumers(block=b0)
    b1 = sch.cache_write(block=b0, write_buffer_index=0, storage_scope="global")
    l2, l3, l4 = sch.get_loops(block=b0)
    v5, v6, v7, v8 = sch.sample_perfect_tile(
        loop=l2,
        n=4,
        max_innermost_factor=64,
        decision=d0,
    )
    l9, l10, l11, l12 = sch.split(loop=l2, factors=[v5, v6, v7, v8])
    v13, v14, v15, v16 = sch.sample_perfect_tile(
        loop=l3,
        n=4,
        max_innermost_factor=64,
        decision=d1,
    )
    l17, l18, l19, l20 = sch.split(loop=l3, factors=[v13, v14, v15, v16])
    v21, v22 = sch.sample_perfect_tile(
        loop=l4,
        n=2,
        max_innermost_factor=64,
        decision=d2,
    )
    l23, l24 = sch.split(loop=l4, factors=[v21, v22])
    # SSRSR tiling order, then attach the cache-write block and annotate root.
    sch.reorder(l9, l17, l10, l18, l23, l11, l19, l24, l12, l20)
    sch.reverse_compute_at(block=b1, loop=l18, preserve_unit_loops=True)
    sch.annotate(block_or_loop=root, ann_key="meta_schedule.parallel", ann_val=ann_val)
    # pylint: enable=invalid-name
    return sch
def _make_mutator(target: Target, max_jobs_per_core: int) -> ms.Mutator:
    """Create a TuneContext whose only mutator is MutateParallel and return that mutator."""
    context = ms.TuneContext(
        mod=matmul,
        target=target,
        space_generator=ms.space_generator.PostOrderApply(
            sch_rules=[],
            postprocs=[],
            mutator_probs={ms.mutator.MutateParallel(max_jobs_per_core): 1.0},
        ),
    )
    # Exactly one mutator was registered above; unpack it from the prob map.
    (mutator,) = context.space_generator.mutator_probs
    return mutator
def test_mutate_parallel_matmul():
    """Repeated MutateParallel applications should produce all three legal values."""
    mutator = _make_mutator(
        target=Target("llvm --num-cores=16"),
        max_jobs_per_core=256,
    )
    sch = _sch(
        decisions=[
            [4, 32, 4, 1],
            [8, 4, 8, 2],
            [512, 1],
        ],
        ann_val=64,
    )
    seen = set()
    for _ in range(100):
        mutated = mutator.apply(sch.trace)
        # The last instruction of the trace is the parallel annotation.
        seen.add(int(mutated.insts[-1].inputs[1]))
        if len(seen) == 3:
            break
    assert seen == {4, 32, 4096}
if __name__ == """__main__""":
test_mutate_parallel_matmul()
| 3,960 | 32.008333 | 93 | py |
tvm | tvm-main/tests/python/unittest/test_runtime_vm_profiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm.runtime import profiler_vm
from tvm import relay
from tvm.relay.testing import mlp
@tvm.testing.parametrize_targets
def test_basic(dev, target):
    # Profile an MLP through a saved-and-reloaded VM executable.
    mod, params = mlp.get_workload(batch_size=1)
    if not profiler_vm.enabled():
        return
    exe = relay.vm.compile(mod, target, params=params)
    code, lib = exe.save()
    des_exe = tvm.runtime.vm.Executable.load_exec(code, lib)
    vm = profiler_vm.VirtualMachineProfiler(des_exe, dev)
    data = np.random.rand(1, 1, 28, 28).astype("float32")
    res = vm.profile(tvm.nd.array(data), func_name="main")
    # The MLP ends with a softmax, so it must appear in the profile report.
    assert "softmax" in str(res)
def test_vm_reshape_and_copy():
    """Profile a reshape-only network and check it lowers to ``reshape_tensor``."""
    target = "llvm"
    # The module is compiled for LLVM, so it must run on the CPU device; the
    # original used tvm.gpu(), which cannot execute llvm-compiled code.
    dev = tvm.cpu()
    x_np = np.random.uniform(size=(8, 16)).astype("float32")
    x = relay.var("x", shape=(8, 16), dtype="float32")
    y = relay.reshape(x, [-1, 4, 8])
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], y)
    with tvm.transform.PassContext(opt_level=3):
        # Use the `target` variable (the original ignored it and repeated the
        # literal "llvm"); also avoid shadowing the `exec` builtin.
        executable = relay.vm.compile(mod, target)
        assert "reshape_tensor" in executable.bytecode
        vm = profiler_vm.VirtualMachineProfiler(executable, dev)
        vm.profile(tvm.nd.array(x_np))
if __name__ == "__main__":
tvm.testing.main()
| 2,016 | 33.186441 | 62 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_narrow_datatype.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay, te
from tvm.driver.build_module import schedule_to_module
from tvm.script import tir as T
from tvm.tir import const
import tvm.testing
def lower_stmt(params, stmt, target_bits):
    """Wrap ``stmt`` in a PrimFunc, run NarrowDataType, and return the new body."""
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc(params, stmt))
    narrowed = tvm.tir.transform.NarrowDataType(target_bits)(mod)["main"]
    return narrowed.body
def lower_sch(sch, args, target_bits, extra_passes=None):
    """Lower a TE schedule, flatten storage, then apply NarrowDataType.

    Parameters
    ----------
    sch : te.Schedule
        The schedule to lower.
    args : list
        Function arguments; only ``te.tensor.Tensor`` is accepted here, and
        each tensor may appear at most once.
    target_bits : int
        Bit width passed to NarrowDataType.
    extra_passes : list, optional
        Extra passes applied after StorageFlatten, before NarrowDataType.

    Returns
    -------
    The body of the narrowed "main" PrimFunc.
    """
    # Validate arguments up front.  (The original also built an `arg_list`
    # that was never used; it has been removed.)
    binds = {}
    for x in args:
        if isinstance(x, te.tensor.Tensor):
            buf = tvm.tir.decl_buffer(x.shape, dtype=x.dtype, name=x.name)
            assert x not in binds
            binds[x] = buf
        else:
            raise ValueError("args must be Tensor, Buffer or Var")
    sch = sch.normalize()
    mod = schedule_to_module(sch, args)
    mod = tvm.tir.transform.StorageFlatten(64)(mod)
    if extra_passes:
        for p in extra_passes:
            mod = p(mod)
    return tvm.tir.transform.NarrowDataType(target_bits)(mod)["main"].body
def test_basic():
    """i*n+j style flat indices are narrowed to the smallest dtype that holds the extent."""

    def check(m, n, target_bits, target_dtype):
        ib = tvm.tir.ir_builder.create()
        Ab = tvm.tir.decl_buffer([m * n], name="A")
        A = ib.buffer_ptr(Ab)
        Bb = tvm.tir.decl_buffer([m * n], name="B")
        B = ib.buffer_ptr(Bb)
        with ib.for_range(0, m, name="i") as i:
            with ib.for_range(0, n, name="j") as j:
                B[i * n + j] = A[i * n + j] + 1
        stmt = ib.get()
        stmt = lower_stmt([Ab, Bb], stmt, target_bits)
        assert stmt.loop_var.dtype == target_dtype
        assert stmt.body.loop_var.dtype == target_dtype

    # const shape
    # i32 -> i32
    check(2, 2, 32, "int32")
    # i64 -> i32
    check(const(2, dtype="int64"), const(2, dtype="int64"), 32, "int32")
    check(const(2**16, dtype="int64"), const(2**16, dtype="int64"), 32, "int64")
    # i32 -> i16
    check(2, 2, 16, "int16")
    check(2**10, 2**10, 16, "int32")
    # symbolic shape
    check(te.size_var(name="m", dtype="int32"), te.size_var(name="n", dtype="int32"), 32, "int32")
    check(te.size_var(name="m", dtype="int64"), te.size_var(name="n", dtype="int64"), 32, "int64")
def test_thread_axis():
    """Thread-axis (blockIdx/threadIdx) variables are narrowed like loop variables."""

    def check(m, n, target_bits, target_dtype):
        ib = tvm.tir.ir_builder.create()
        Ab = tvm.tir.decl_buffer([m * n], name="A")
        A = ib.buffer_ptr(Ab)
        Bb = tvm.tir.decl_buffer([m * n], name="B")
        B = ib.buffer_ptr(Bb)
        bx = te.thread_axis("blockIdx.x")
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(bx, "thread_extent", m)
        ib.scope_attr(tx, "thread_extent", n)
        B[bx * n + tx] = A[bx * n + tx] + 1
        stmt = ib.get()
        stmt = lower_stmt([Ab, Bb], stmt, target_bits)
        assert stmt.node.var.dtype == target_dtype
        assert stmt.body.node.var.dtype == target_dtype

    # i32 -> i32
    check(2, 32, target_bits=32, target_dtype="int32")
    # i64 -> i32
    check(const(2, dtype="int64"), const(32, dtype="int64"), target_bits=32, target_dtype="int32")
    check(
        const(2**30, dtype="int64"),
        const(32, dtype="int64"),
        target_bits=32,
        target_dtype="int64",
    )
    # i32 -> i16
    check(2, 32, target_bits=16, target_dtype="int16")
    check(2**14, 32, target_bits=16, target_dtype="int32")
def test_thread_axis_2():
    """Lowering must succeed on a module mixing int64 extents with an int32 cast axis."""
    # fmt: off
    @tvm.script.ir_module
    class Before:
        @T.prim_func
        def main(T_reshape: T.Buffer((1, 12, 384, 384), "float32"), placeholder_1: T.Buffer((T.int64(1), T.int64(12), T.int64(384), 384), "bool"), T_where: T.Buffer((T.int64(1), T.int64(12), T.int64(384), 384), "float32")) -> None:
            # function attr dict
            T.func_attr({"global_symbol": "main", "tir.noalias": True})
            # body
            # with T.block("root")
            for i0_i1_i2_i3_fused_1 in T.thread_binding(T.int64(256), thread="blockIdx.x"):
                for i0_i1_i2_i3_fused_2 in T.thread_binding(T.int64(1024), thread="threadIdx.x"):
                    for i0_i1_i2_i3_fused_0 in T.serial(T.int64(7)):
                        with T.block("T_where"):
                            ax0 = T.axis.spatial(T.int64(1), T.int64(0))
                            ax1 = T.axis.spatial(T.int64(12), ((i0_i1_i2_i3_fused_0 * T.int64(256) + i0_i1_i2_i3_fused_1) * T.int64(1024) + i0_i1_i2_i3_fused_2) % T.int64(1769472) // T.int64(147456))
                            ax2 = T.axis.spatial(T.int64(384), ((i0_i1_i2_i3_fused_0 * T.int64(256) + i0_i1_i2_i3_fused_1) * T.int64(1024) + i0_i1_i2_i3_fused_2) % T.int64(147456) // T.int64(384))
                            ax3 = T.axis.spatial(384, T.cast(((i0_i1_i2_i3_fused_0 * T.int64(256) + i0_i1_i2_i3_fused_1) * T.int64(1024) + i0_i1_i2_i3_fused_2) % T.int64(384), "int32"))
                            T.where((i0_i1_i2_i3_fused_0 * T.int64(256) + i0_i1_i2_i3_fused_1) * T.int64(1024) + i0_i1_i2_i3_fused_2 < T.int64(1769472))
                            T.reads(placeholder_1[ax0, ax1, ax2, ax3], T_reshape[ax0, ax1, ax2, ax3])
                            T.writes(T_where[ax0, ax1, ax2, ax3])
                            T_where[ax0, ax1, ax2, ax3] = T.Select(T.cast(placeholder_1[ax0, ax1, ax2, ax3], "int32") != 0, T.float32(-1000000000), T_reshape[ax0, ax1, ax2, ax3])
    # fmt: on
    # TODO(@junrushao1994): make this test more "unit" after the new TVMScript printer/parser lands
    tvm.lower(Before)
def test_multilanes():
    """Narrowing works on loops over multi-lane (vector) buffers."""

    def check(m, lanes, target_bits, target_dtype):
        ib = tvm.tir.ir_builder.create()
        Ab = tvm.tir.decl_buffer((m,), dtype="float32x{}".format(lanes), name="A")
        A = ib.buffer_ptr(Ab)
        Bb = tvm.tir.decl_buffer((m,), dtype="float32x{}".format(lanes), name="B")
        B = ib.buffer_ptr(Bb)
        with ib.for_range(0, m, name="i", dtype=m.dtype) as i:
            B[i] = A[i] + 1
        A[0] = B[1]
        stmt = ib.get()
        stmt = lower_stmt([Ab, Bb], stmt, target_bits)
        assert stmt.seq[0].loop_var.dtype == target_dtype

    # i32 -> i32
    check(const(2**10, dtype="int32"), 2, target_bits=32, target_dtype="int32")
    # i64 -> i32
    check(const(2**10, dtype="int64"), 2, target_bits=32, target_dtype="int32")
    check(const(2**32, dtype="int64"), 2, target_bits=32, target_dtype="int64")
    # i32 -> i16
    check(const(2**10, dtype="int32"), 2, target_bits=16, target_dtype="int16")
    check(const(2**16, dtype="int32"), 2, target_bits=16, target_dtype="int32")
def test_reduce():
    """Reduction-axis loop variables are narrowed like spatial ones."""

    def check(m, target_bits, target_dtype):
        A = te.placeholder((m,), name="A", dtype="float32")
        k = te.reduce_axis((0, m), "k")
        B = te.compute((), lambda *idx: te.sum(A[k], axis=k), name="B")
        s = te.create_schedule(B.op)
        stmt = lower_sch(s, [A, B], target_bits)
        assert stmt[1].loop_var.dtype == target_dtype

    # i32 -> i32
    check(const(64, dtype="int32"), 32, "int32")
    # i64 -> i32
    check(const(64, dtype="int64"), 32, "int32")
    # i32 -> i16
    check(const(64, dtype="int32"), 16, "int16")
    check(const(2**16, dtype="int32"), 16, "int32")
    # symbolic
    check(te.var("n", dtype="int32"), 32, "int32")
    check(te.var("n", dtype="int64"), 32, "int64")
def test_slice():
    """Narrowing must consider the largest index across all buffers, not just one."""

    def check(m, n, target_bits, target_dtype):
        # The index may overflow in B, while not in A
        ib = tvm.tir.ir_builder.create()
        Ab = tvm.tir.decl_buffer([m * n], name="A")
        A = ib.buffer_ptr(Ab)
        Bb = tvm.tir.decl_buffer([m * n * 2], name="B")
        B = ib.buffer_ptr(Bb)
        with ib.for_range(0, m, name="i") as i:
            with ib.for_range(0, n, name="j") as j:
                A[i * n + j] = B[i * 2 * n + 2 * j] + 1
        stmt = ib.get()
        stmt = lower_stmt([Ab, Bb], stmt, target_bits)
        assert stmt.loop_var.dtype == target_dtype
        assert stmt.body.loop_var.dtype == target_dtype

    # The maximum index is (2**15 * 2**15 - 1) * 2 <= 2**31 - 1
    check(const(2**15, "int64"), const(2**15, "int64"), target_bits=32, target_dtype="int32")
    # The maximum index is (2**15 * 2**15 - 1 + 2**15) * 2 > 2**31 - 1
    check(
        const(2**15, "int64"), const((2**15 + 1), "int64"), target_bits=32, target_dtype="int64"
    )
def test_relay_basic():
    """End-to-end: loop variables of lowered Relay add are narrowed by index range."""
    engine = relay.backend.te_compiler.get()

    def check(shapex, shapey, target_bits, target_dtype):
        x = relay.var("x", shape=shapex)
        y = relay.var("y", shape=shapey)
        z = relay.add(x, y)
        func = relay.Function([x, y], z)
        mod = tvm.IRModule.from_expr(func)
        mod = relay.transform.InferType()(mod)
        func = mod["main"]
        z = engine.lower(func, "llvm")
        stmt = lower_sch(z.schedule, tuple(z.inputs) + tuple(z.outputs), 32)
        # outer loop
        assert stmt.loop_var.dtype == target_dtype
        # inner loop
        if len(shapex) > 1 or len(shapey) > 1:
            assert stmt.body.loop_var.dtype == target_dtype

    check(
        (const(2**16, "int64"), const(2**15 + 1, "int64")),
        (1, const(2**15 + 1, "int64")),
        target_bits=32,
        target_dtype="int64",
    )
    check(
        (const(2**16, "int64"), const(2**15, "int64")),
        (1, const(2**15, "int64")),
        target_bits=32,
        target_dtype="int32",
    )
    check(
        (const(2**31, "int64"),), (const(2**31, "int64"),), target_bits=32, target_dtype="int32"
    )
    check(
        (const(2**31 + 1, "int64"),),
        (const(2**31 + 1, "int64"),),
        target_bits=32,
        target_dtype="int64",
    )
def test_relay_take():
    """Indices of a lowered relay.take are narrowed according to their value range."""
    engine = relay.backend.te_compiler.get()

    def check(shape, index, target_bits, target_dtype):
        x = relay.var("x", shape=shape)
        y = relay.op.take(x, indices=index)
        func = relay.Function([x], y)
        mod = tvm.IRModule.from_expr(func)
        mod = relay.transform.InferType()(mod)
        func = mod["main"]
        z = engine.lower(func, "llvm")
        stmt = lower_sch(z.schedule, tuple(z.inputs) + tuple(z.outputs), 32)
        assert stmt.value.indices[0].dtype == target_dtype

    check(
        (const(2**16, "int64"), const(2**15 + 1, "int64")),
        relay.const(0, dtype="int64"),
        target_bits=32,
        target_dtype="int32",
    )
    check(
        (const(2**16, "int64"), const(2**15 + 1, "int64")),
        relay.const(2**31, dtype="int64"),
        target_bits=32,
        target_dtype="int64",
    )
def test_ramp_dtype_consistency():
    """
    for (i :int64, (int64)0, (int64)4) {
      A[ramp(i*(int64)2, (int64)1, 2)] = cast(int64, 2 ** 31 - 1) * i;
    }
    The infer result:
      base: int64 -> int64 (since i is involved in another int64 expr)
      stride: int64 -> int32

    Thus ramp should still use int64 for both stride and base after rewrite.
    """
    n = tvm.tir.IntImm("int64", 4)
    m = tvm.tir.IntImm("int64", 2)
    A = te.compute((n, m), lambda i, j: tvm.tir.Cast("int64", 2**31 - 1) * i, name="A")
    s = te.create_schedule(A.op)
    s[A].vectorize(A.op.axis[1])
    # Vectorize first so the Ramp node exists when NarrowDataType runs.
    lower_sch(s, [A], 32, extra_passes=[tvm.tir.transform.VectorizeLoop()])
def test_condition():
    """int64 conditions whose values fit in int32 are rewritten to int32."""

    @T.prim_func
    def before(A: T.Buffer((128,), "float32"), B: T.Buffer((130,), "float32")):
        for i, j in T.grid(T.int64(2), T.int64(65)):
            if i * T.int64(65) + j >= T.int64(0) and i * T.int64(65) + j < T.int64(128):
                A[i * T.int64(65) + j] = 0.0
        for i, j in T.grid(T.int64(2), T.int64(65)):
            B[i * T.int64(65) + j] = T.if_then_else(
                i * T.int64(65) + j >= T.int64(0) and i * T.int64(65) + j < T.int64(128),
                A[i * T.int64(65) + j],
                0.0,
                dtype="float32",
            )

    @T.prim_func
    def expected_after(A: T.Buffer(128, "float32"), B: T.Buffer(130, "float32")):
        for i, j in T.grid(2, 65):
            if i * 65 + j >= 0 and i * 65 + j < 128:
                A[i * 65 + j] = T.float32(0)
        for i, j in T.grid(2, 65):
            B[i * 65 + j] = T.if_then_else(
                i * 65 + j >= 0 and i * 65 + j < 128, A[i * 65 + j], T.float32(0), dtype="float32"
            )

    after = tvm.tir.transform.NarrowDataType(32)(tvm.IRModule.from_expr(before))["main"]
    tvm.ir.assert_structural_equal(after, expected_after)
def test_block():
    """Block iter domains and bindings are narrowed from int64 to int32."""

    @T.prim_func
    def before(A: T.Buffer((128,), "float32"), B: T.Buffer((128,), "float32")):
        for i in T.serial(0, T.int64(16)):
            for j in T.serial(0, T.int64(8)):
                with T.block():
                    vi = T.axis.spatial(T.int64(128), i * T.int64(8) + j)
                    B[vi] = A[vi] + T.float32(1)

    @T.prim_func
    def expected_after(A: T.Buffer((128,), "float32"), B: T.Buffer((128,), "float32")):
        for i in T.serial(0, T.int32(16)):
            for j in T.serial(0, T.int32(8)):
                with T.block():
                    vi = T.axis.spatial(T.int32(128), i * T.int32(8) + j)
                    B[vi] = A[vi] + T.float32(1)

    after = tvm.tir.transform.NarrowDataType(32)(tvm.IRModule.from_expr(before))["main"]
    tvm.ir.assert_structural_equal(after, expected_after)
def test_avg_pool2d():
    """An int64 division chain that fits in int32 is narrowed (casts fold away)."""

    @T.prim_func
    def before(PSUM: T.Buffer((313600,), "int32"), PAVG: T.Buffer((313600,), "int32")):
        for j in T.parallel(T.int64(0), T.int64(280)):
            for i in T.serial(T.int64(0), T.int64(35)):
                for vi in T.vectorized(T.int64(0), T.int64(32)):
                    PAVG[(((j * T.int64(1120)) + (i * T.int64(32))) + vi)] = T.cast(
                        T.Div(
                            T.cast(PSUM[(((j * T.int64(1120)) + (i * T.int64(32))) + vi)], "int64"),
                            T.max(
                                (
                                    (
                                        (
                                            T.min(
                                                T.int64(1),
                                                (T.int64(34) - T.floormod(j, T.int64(35))),
                                            )
                                            + T.int64(2)
                                        )
                                        - T.max(
                                            (T.int64(1) - T.floormod(j, T.int64(35))), T.int64(0)
                                        )
                                    )
                                    * (
                                        (T.min(T.int64(1), (T.int64(34) - i)) + T.int64(2))
                                        - T.max((T.int64(1) - i), T.int64(0))
                                    )
                                ),
                                T.int64(1),
                            ),
                        ),
                        "int32",
                    )

    @T.prim_func
    def expected_after(PSUM: T.Buffer((313600,), "int32"), PAVG: T.Buffer((313600,), "int32")):
        for j in T.parallel(T.int32(0), T.int32(280)):
            for i in T.serial(T.int32(0), T.int32(35)):
                for vi in T.vectorized(T.int32(0), T.int32(32)):
                    PAVG[(((j * T.int32(1120)) + (i * T.int32(32))) + vi)] = T.Div(
                        PSUM[(((j * T.int32(1120)) + (i * T.int32(32))) + vi)],
                        (
                            (
                                (
                                    T.min(T.int32(1), (T.int32(34) - T.floormod(j, T.int32(35))))
                                    + T.int32(2)
                                )
                                - T.max((T.int32(1) - T.floormod(j, T.int32(35))), T.int32(0))
                            )
                            * (
                                (T.min(T.int32(1), (T.int32(34) - i)) + T.int32(2))
                                - T.max((T.int32(1) - i), T.int32(0))
                            )
                        ),
                    )

    after = tvm.tir.transform.NarrowDataType(32)(tvm.IRModule.from_expr(before))
    # Simplify removes the now-redundant T.max(..., 1) guard and casts.
    after = tvm.tir.transform.Simplify()(after)
    tvm.ir.assert_structural_equal(after["main"], expected_after)
if __name__ == "__main__":
    tvm.testing.main()
| 17,019 | 40.31068 | 231 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_measure_callback.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import re
import tempfile
from typing import List
import pytest
import tvm
from tvm import meta_schedule as ms
from tvm.script import tir as T
from tvm.tir.schedule import Schedule
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument,
# fmt: off
# 1024x1024 fp32 matmul used as the workload for all measure-callback tests below.
@tvm.script.ir_module
class Matmul:
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.match_buffer(b, (1024, 1024), "float32")
        C = T.match_buffer(c, (1024, 1024), "float32")
        for i, j, k in T.grid(1024, 1024, 1024):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# fmt: on
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
def test_meta_schedule_measure_callback():
    """A Python-derived MeasureCallback receives the candidates and results unchanged."""

    @ms.derived_object
    class FancyMeasureCallback(ms.measure_callback.PyMeasureCallback):
        def apply(
            self,
            task_scheduler: ms.task_scheduler.TaskScheduler,
            task_id: int,
            measure_candidates: List[ms.MeasureCandidate],
            builder_results: List[ms.builder.BuilderResult],
            runner_results: List[ms.runner.RunnerResult],
        ) -> None:
            # The callback must see exactly the values passed to apply() below.
            assert len(measure_candidates) == 1
            tvm.ir.assert_structural_equal(measure_candidates[0].sch.mod, Matmul)
            assert (
                len(builder_results) == 1
                and builder_results[0].error_msg is None
                and builder_results[0].artifact_path == "test_build"
            )
            assert (
                len(runner_results) == 1
                and runner_results[0].error_msg is None
                and len(runner_results[0].run_secs) == 2
            )

    measure_callback = FancyMeasureCallback()
    measure_callback.apply(
        ms.task_scheduler.RoundRobin(),
        0,
        [ms.MeasureCandidate(Schedule(Matmul), None)],
        [ms.builder.BuilderResult("test_build", None)],
        [ms.runner.RunnerResult([1.0, 2.1], None)],
    )
def test_meta_schedule_measure_callback_fail():
    """An exception raised inside a Python callback propagates back to the caller."""

    @ms.derived_object
    class FailingMeasureCallback(ms.measure_callback.PyMeasureCallback):
        def apply(
            self,
            task_scheduler: ms.task_scheduler.TaskScheduler,
            task_id: int,
            measure_candidates: List[ms.MeasureCandidate],
            builder_results: List[ms.builder.BuilderResult],
            runner_results: List[ms.runner.RunnerResult],
        ) -> None:
            raise ValueError("test")

    measure_callback = FailingMeasureCallback()
    with pytest.raises(ValueError, match="test"):
        measure_callback.apply(
            ms.task_scheduler.RoundRobin(),
            0,
            [ms.MeasureCandidate(Schedule(Matmul), None)],
            [ms.builder.BuilderResult("test_build", None)],
            [ms.runner.RunnerResult([1.0, 2.1], None)],
        )
def test_meta_schedule_measure_callback_as_string():
    """repr of a derived callback looks like 'meta_schedule.<ClassName>(0x<addr>)'."""
    @ms.derived_object
    class NotSoFancyMeasureCallback(ms.measure_callback.PyMeasureCallback):
        def apply(
            self,
            task_scheduler: ms.task_scheduler.TaskScheduler,
            task_id: int,
            measure_candidates: List[ms.MeasureCandidate],
            builder_results: List[ms.builder.BuilderResult],
            runner_results: List[ms.runner.RunnerResult],
        ) -> None:
            pass
    rendered = str(NotSoFancyMeasureCallback())
    # re.match anchors at the start, same as the original compiled pattern.
    assert re.match(r"meta_schedule.NotSoFancyMeasureCallback\(0x[a-f|0-9]*\)", rendered)
def test_meta_schedule_measure_callback_update_cost_model_with_zero():
    """UpdateCostModel must tolerate runs whose measured times are all zero."""
    @ms.derived_object
    class AllZeroRunnerFuture(ms.runner.PyRunnerFuture):
        # A future that is always complete and reports all-zero run times.
        def done(self) -> bool:
            return True
        def result(self) -> ms.runner.RunnerResult:
            return ms.runner.RunnerResult([0.0, 0.0], None)
    @ms.derived_object
    class AllZeroRunner(ms.runner.PyRunner):
        # NOTE(review): return annotation corrected to RunnerFuture — the body
        # returns futures, not results.
        def run(self, runner_inputs: List[ms.runner.RunnerInput]) -> List[ms.runner.RunnerFuture]:
            return [AllZeroRunnerFuture() for _ in runner_inputs]
    with tempfile.TemporaryDirectory() as work_dir:
        # Tuning should complete without error despite the degenerate timings.
        ms.tune_tir(
            mod=Matmul,
            target="llvm -num-cores=1",
            work_dir=work_dir,
            max_trials_global=10,
            runner=AllZeroRunner(),
            measure_callbacks=[ms.measure_callback.UpdateCostModel()],
        )
def test_meta_schedule_measure_callback_update_cost_model_with_runtime_error():
    """UpdateCostModel must tolerate runs that failed with an error message."""
    @ms.derived_object
    class EmptyRunnerFuture(ms.runner.PyRunnerFuture):
        # A future that is always complete and reports a failed run.
        def done(self) -> bool:
            return True
        def result(self) -> ms.runner.RunnerResult:
            return ms.runner.RunnerResult(None, "error")
    @ms.derived_object
    class EmptyRunner(ms.runner.PyRunner):
        # NOTE(review): return annotation corrected to RunnerFuture — the body
        # returns futures, not results.
        def run(self, runner_inputs: List[ms.runner.RunnerInput]) -> List[ms.runner.RunnerFuture]:
            return [EmptyRunnerFuture() for _ in runner_inputs]
    with tempfile.TemporaryDirectory() as work_dir:
        # Tuning should complete without error despite every run failing.
        ms.tune_tir(
            mod=Matmul,
            target="llvm -num-cores=1",
            work_dir=work_dir,
            max_trials_global=10,
            runner=EmptyRunner(),
            measure_callbacks=[ms.measure_callback.UpdateCostModel()],
        )
if __name__ == "__main__":
    # Run every test in this file when invoked directly (without pytest).
    test_meta_schedule_measure_callback()
    test_meta_schedule_measure_callback_fail()
    test_meta_schedule_measure_callback_as_string()
    test_meta_schedule_measure_callback_update_cost_model_with_zero()
    test_meta_schedule_measure_callback_update_cost_model_with_runtime_error()
| 6,777 | 36.241758 | 98 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_cpu_dot_product.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import logging
import tempfile
from typing import Optional
import numpy as np # type: ignore
import tvm
import tvm.testing
from tvm import meta_schedule as ms
from tvm import relay
from tvm._ffi import register_func
from tvm.tir.schedule import BlockRV, Schedule
from tvm.tir.schedule.analysis import has_block
from tvm.tir.tensor_intrin.x86 import VNNI_DOT_16x4_INTRIN as VNNI_INTRIN
from tvm.tir.tensor_intrin.x86 import AVX512_DOT_16x4_INTRIN as AVX512_INTRIN
logging.basicConfig(
format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
def _schedule_dense(m: Optional[int], do_tune: bool, intrin=VNNI_INTRIN):
    """Manually schedule a dense block, created from TE compute op via CreatePrimFunc,
    using VNNI or AVX512 instructions.

    Parameters
    ----------
    m : Optional[int]
        Static M dimension, used to pick the y-tile size when not tuning.
        May be None when do_tune is True (tile sizes are sampled instead).
    do_tune : bool
        If True, sample perfect-tile factors; otherwise use fixed splits.
    intrin : str
        Tensor intrinsic (VNNI or AVX512 16x4 dot product) to tensorize with.

    Returns
    -------
    schedule_fn : Callable
        A function (sch, dense_block=None) -> bool applying the schedule;
        returns False when the task is not a dense workload.
    """
    def schedule_fn(sch, dense_block: Optional[BlockRV] = None) -> bool:
        # Only handle dense tasks; other tasks fall back to their defaults.
        if sch.mod.attrs is not None and "dense" not in sch.mod.attrs["task_name"]:
            return False
        if dense_block is None:
            assert has_block(sch, "compute")
            dense_block = sch.get_block("compute")
        assert "dense_int8" in sch.get(dense_block).annotations["schedule_rule"]
        post_blocks = sch.get_consumers(dense_block)
        if len(post_blocks) > 0:
            # Fuse all intermediate post ops into the last op.
            # This is equivalent to the traverse_inline function used in TE schedules.
            while True:
                next_post_blocks = []
                for post_block in post_blocks:
                    next_consumers = sch.get_consumers(post_block)
                    if len(next_consumers) > 0:
                        sch.compute_inline(post_block)
                    next_post_blocks += next_consumers
                if len(next_post_blocks) == 0:
                    assert len(post_blocks) == 1
                    outer_block = post_blocks[0]
                    a_y, a_x = sch.get_loops(outer_block)[-2:]
                    break
                post_blocks = next_post_blocks
        else:
            a_y, a_x, _ = sch.get_loops(dense_block)[-3:]
            outer_block = dense_block
        if do_tune:
            y_factors = sch.sample_perfect_tile(a_y, n=2, max_innermost_factor=128)
            a_yo, a_yi = sch.split(a_y, factors=y_factors)
        else:
            a_yo, a_yi = sch.split(a_y, factors=[None, min(m, 64)])
        # Inner x extent of 16 matches the 16x4 dot-product intrinsic.
        a_xo, a_xi = sch.split(a_x, factors=[None, 16])
        sch.reorder(a_yo, a_xo, a_yi, a_xi)
        fused = sch.fuse(a_yo, a_xo)
        if outer_block != dense_block:
            # Handle the case when dense is fused with post ops.
            sch.vectorize(a_xi)
            sch.compute_at(dense_block, a_yi)
        a_xi, a_k = sch.get_loops(dense_block)[-2:]
        # Inner k extent of 4 matches the 16x4 dot-product intrinsic.
        a_ko, a_ki = sch.split(a_k, factors=[None, 4])
        sch.reorder(a_ko, a_xi, a_ki)
        # We need to parallelize before decompose_reduction, otherwise the so-called "Compact dataflow"
        # condition is violated.
        sch.parallel(fused)
        dec = sch.decompose_reduction(dense_block, a_ko)
        init_loop = sch.get_loops(dec)[-1]
        sch.vectorize(init_loop)
        sch.tensorize(a_xi, intrin)
        return True
    return schedule_fn
def _relay_dense(m, n, k):
    """Build an int8 dense + bias + batch_matmul Relay workload.

    Returns the Relay module, random parameters, and a checker closure
    ``f_check(lib, dev)`` that compares the compiled graph-executor output
    against a Relay VM reference run.
    """
    data = relay.var("data", shape=(m, k), dtype="uint8")
    weight = relay.var("weight", shape=(n, k), dtype="int8")
    bias = relay.var("bias", shape=(n,), dtype="int32")
    # dense is tuned by the TIR schedule above, bmm is scheduled by TE (topi/x86/batch_matmul.py)
    dense = relay.nn.dense(data, weight, out_dtype="int32")
    bias_add = relay.nn.bias_add(dense, bias) + relay.const(1, dtype="int32")
    out = relay.nn.batch_matmul(
        relay.cast(relay.expand_dims(bias_add, 0), "uint8"),
        relay.cast(relay.expand_dims(bias_add, 0), "int8"),
        out_dtype="int32",
    )
    relay_mod = tvm.IRModule.from_expr(out)
    # Small random integer inputs keep int32 accumulation exact.
    data = np.random.randint(0, 5, size=(m, k), dtype="uint8")
    params = {
        "weight": np.random.randint(0, 5, size=(n, k), dtype="int8"),
        "bias": np.random.randint(0, 5, size=(n,), dtype="int32"),
    }
    def f_check(lib, dev):
        # Reference result from the Relay VM on plain LLVM.
        ref = (
            relay.create_executor(
                "vm",
                mod=relay_mod,
                device=dev,
                target="llvm",
            )
            .evaluate()(data, params["weight"], params["bias"])
            .numpy()
        )
        runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
        runtime.set_input("data", data)
        runtime.run()
        out = runtime.get_output(0).numpy()
        # Integer workload: outputs must match exactly.
        np.testing.assert_equal(out, ref)
    return relay_mod, params, f_check
def schedule_16x4_dense_fn_database(target, intrin, m=1024, n=1024, k=1024):
    """Compile the dense workload with the manual schedule served from a
    ScheduleFnDatabase and validate the numerical result on CPU."""
    dev = tvm.cpu(0)
    relay_mod, params, f_check = _relay_dense(m, n, k)
    with ms.database.ScheduleFnDatabase(
        _schedule_dense(
            m=m,
            do_tune=False,
            intrin=intrin,
        )
    ), tvm.transform.PassContext(
        opt_level=3,
        config={"relay.backend.use_meta_schedule": True},
    ):
        # pylint: disable=W0105
        """The log should say
        Warning: Cannot find workload: tvmgen_default_fused_expand_dims
        Warning: Cannot find workload: tvmgen_default_fused_cast
        Warning: Cannot find workload: tvmgen_default_fused_cast_1
        Warning: Cannot find workload: tvmgen_default_fused_nn_batch_matmul
        This means batch matmul and others are scheduled by TE, and dense (the one not warned)
        is found in the meta schedule tuning database during compilation
        """
        # pylint: enable=W0105
        lib = relay.build(relay_mod, target=target, params=params)
    f_check(lib, dev)
@tvm.testing.requires_cascadelake
def test_vnni_schedule_fn_database():
    # VNNI on Cascade Lake, with the default 1024x1024x1024 problem size.
    schedule_16x4_dense_fn_database(
        tvm.target.Target("llvm -keys=x86,cpu -mcpu=cascadelake -num-cores=4"),
        VNNI_INTRIN,
    )
@tvm.testing.requires_skylake_avx512
def test_avx512_schedule_fn_database():
    # AVX512 on Skylake; a small 16x16x16 problem keeps the test quick.
    schedule_16x4_dense_fn_database(
        tvm.target.Target("llvm -keys=x86,cpu -mcpu=skylake-avx512 -num-cores=4"),
        AVX512_INTRIN,
        16,
        16,
        16,
    )
def schedule_16x4_dense_fn_tune(target, intrin, m=1024, n=1024, k=1024):
    # pylint: disable=W0105
    """
    We can inject and apply a custom TIR scheduling to a TE compute of interest, using
    the "schedule_rule" annotation. For example, in topi/x86/dense.py we have the following
    declaration for int8 dense targeting the VNNI or AVX512 instructions.
    C = te.compute(
        ...
        attrs={"schedule_rule": "meta_schedule.x86.dense_int8"},
    )
    When the MetaSchedule encounters a TensorIR block with the "schedule_rule" annotation,
    it looks up the packed func registry for a function that is associated with the given schedule
    rule key ("meta_schedule.x86.dense_int8" in this example). The signature of such custom
    schedule functions must be
    (tir.schedule.Schedule, tir.schedule.BlockRV) -> [tir.schedule.Schedule].
    The BlockRV argument corresponds to the TE compute annotated with "schedule_rule".
    The relevant code is in `src/meta_schedule/space_generator/apply_custom_rule.cc`.
    """
    def schedule_rule_dense_16x4(sch: Schedule, dense_block: BlockRV):
        _schedule_dense(m=None, do_tune=True, intrin=intrin)(sch, dense_block)
        return [sch]
    # Register the custom rule so PostOrderApply can look it up by its key.
    register_func("meta_schedule.x86.dense_int8", schedule_rule_dense_16x4, override=True)
    dev = tvm.cpu(0)
    relay_mod, params, f_check = _relay_dense(m, n, k)
    extracted_tasks = ms.relay_integration.extract_tasks(relay_mod, target, params)
    with tempfile.TemporaryDirectory() as work_dir:
        # postprocs=lambda: [] is important to prevent default post processors from
        # tampering with the manual schedule.
        tasks, weights = ms.relay_integration.extracted_tasks_to_tune_contexts(
            list(
                filter(
                    lambda task: "dense" in task.task_name,
                    extracted_tasks,
                )
            ),
            work_dir=work_dir,
            space=ms.space_generator.PostOrderApply(
                f_block_filter=None,
                sch_rules="from-target",
                postprocs=[],
                mutator_probs="from-target",
            ),
        )
        database = ms.relay_integration.tune_tasks(
            tasks=tasks,
            task_weights=weights,
            work_dir=work_dir,
            max_trials_per_task=32,
            max_trials_global=20000,
        )
    with database, tvm.transform.PassContext(
        opt_level=3,
        config={"relay.backend.use_meta_schedule": True},
    ):
        # pylint: disable=W0105
        """The log should say
        Warning: Cannot find workload: tvmgen_default_fused_expand_dims
        Warning: Cannot find workload: tvmgen_default_fused_cast
        Warning: Cannot find workload: tvmgen_default_fused_cast_1
        Warning: Cannot find workload: tvmgen_default_fused_nn_batch_matmul
        This means batch matmul and others are scheduled by TE, and dense (the one not warned)
        is found in the meta schedule tuning database during compilation
        """
        # pylint: enable=W0105
        lib = relay.build(relay_mod, target=target, params=params)
    f_check(lib, dev)
@tvm.testing.requires_cascadelake
def test_vnni_schedule_fn_tune():
    # VNNI on Cascade Lake via the "schedule_rule" tuning path.
    schedule_16x4_dense_fn_tune(
        tvm.target.Target("llvm -keys=x86,cpu -mcpu=cascadelake -num-cores=4"),
        VNNI_INTRIN,
    )
@tvm.testing.requires_skylake_avx512
def test_avx512_schedule_fn_tune():
    # AVX512 on Skylake via the "schedule_rule" tuning path; 16^3 keeps it fast.
    schedule_16x4_dense_fn_tune(
        tvm.target.Target("llvm -keys=x86,cpu -mcpu=skylake-avx512 -num-cores=4"),
        AVX512_INTRIN,
        16,
        16,
        16,
    )
if __name__ == "__main__":  # was `"""__main__"""`: same value, non-idiomatic literal
    # Run all tests directly when executed as a script.
    test_vnni_schedule_fn_database()
    test_avx512_schedule_fn_database()
    test_vnni_schedule_fn_tune()
    test_avx512_schedule_fn_tune()
| 10,882 | 38.574545 | 103 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_reduce_branching_through_overcompute.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.script import tir as T
import pytest
class BaseBeforeAfter(tvm.testing.CompareBeforeAfter):
    """Shared harness: compare IR before/after ReduceBranchingThroughOvercompute."""
    # Subclasses flip this on to let the pass use dataflow-based proofs.
    use_dataflow_analysis = False
    def transform(self):
        """Return a callable applying the pass under this class's configuration."""
        def apply_pass(mod):
            pass_config = {
                "tir.ReduceBranchingThroughOvercompute": {
                    "use_dataflow_analysis": self.use_dataflow_analysis,
                }
            }
            with tvm.transform.PassContext(config=pass_config):
                return tvm.tir.transform.ReduceBranchingThroughOvercompute()(mod)
        return apply_pass
class TestIntroduceNoOp(BaseBeforeAfter):
    """Remove a conditional by introducing a no-op
    If the else_case can have a no-op added in order to be identical
    to the then_case, then the conditional can be removed.
    """
    def before(A: T.Buffer(16, "int32")):
        for i in T.serial(16):
            if i < 14:
                A[i] = 1
                T.evaluate(0)
            else:
                A[i] = 1
    def expected(A: T.Buffer(16, "int32")):
        # Both branches store 1; T.evaluate(0) has no effect, so the
        # conditional is dropped and the body runs on every iteration.
        for i in T.serial(16):
            A[i] = 1
            T.evaluate(0)
class TestIntroduceAdditionOfZero(BaseBeforeAfter):
    """Insert a conditionally no-op statement
    Overcompute doesn't need to explicitly be a no-op, and can be
    something that simplifies to a no-op. Here, when i==0, the
    expression simplifies to ``A[0] = A[0]``, which is a no-op.
    """
    use_dataflow_analysis = True
    def before(A: T.Buffer(1, "int32")):
        for i in T.serial(16):
            if i > 0:
                A[0] = A[0] + i * i
    def expected(A: T.Buffer(1, "int32")):
        # At i == 0 the update adds i*i == 0, so running it unconditionally
        # is a no-op for that iteration and the guard can go.
        for i in T.serial(16):
            A[0] = A[0] + i * i
class TestIntroduceAdditionOfKnownZeroInBuffer(BaseBeforeAfter):
    """Insert a conditionally no-op statement
    Proving that the overcompute is a no-op may use known values that
    are present in a buffer.
    """
    use_dataflow_analysis = True
    def before(A: T.Buffer(16, "int32"), B: T.Buffer(1, "int32")):
        # The assume tells the analyzer that A[i] == 0 whenever i >= 14.
        for i in T.serial(16):
            T.evaluate(T.assume(i < 14 or A[i] == 0))
        B[0] = 0
        for i in T.serial(16):
            if i < 14:
                B[0] = B[0] + A[i]
    def expected(A: T.Buffer(16, "int32"), B: T.Buffer(1, "int32")):
        # Adding A[14] and A[15] is provably adding zero, so the guard goes away.
        for i in T.serial(16):
            T.evaluate(T.assume(i < 14 or A[i] == 0))
        B[0] = 0
        for i in T.serial(16):
            B[0] = B[0] + A[i]
class TestIntroduceOverwrittenWrite(BaseBeforeAfter):
    """Insert a write that is later overwritten.
    Given two sequential writes to the same location without a read
    occurring in-between, the first is a no-op. Therefore, the
    conditional in the first loop can be removed, with any temporary
    values overwritten by the second loop.
    """
    use_dataflow_analysis = True
    def before(A: T.Buffer(16, "int32")):
        for i in T.serial(16):
            if i < 14:
                A[i] = 1
        for i in T.serial(16):
            if i >= 14:
                A[i] = 2
    def expected(A: T.Buffer(16, "int32")):
        # The first loop may now also write A[14]/A[15]: those values are dead
        # because the second loop overwrites them before any read.
        for i in T.serial(16):
            A[i] = 1
        for i in T.serial(16):
            if i >= 14:
                A[i] = 2
class TestMaintainValuesUsedLater(BaseBeforeAfter):
    """Do not insert writes that would be used later.
    As TestIntroduceOverwrittenWrite, except that the values stored at
    A[14] and A[15] are used by the second loop. Overwriting them in
    the first loop would change the result, so the overcompute would
    not be valid.
    """
    def before(A: T.Buffer(16, "int32")):
        for i in T.serial(16):
            if i < 14:
                A[i] = 1
        for i in T.serial(16):
            if i >= 14:
                A[i] = A[i] + 1
    # The pass must leave the function untouched.
    expected = before
class TestIdentifyOverwrittenWriteFromEquivalentExpressions(BaseBeforeAfter):
    """Insert a write that is later overwritten.
    As TestIntroduceOverwrittenWrite, but the conditionals used in the
    first and second loop have different structures while referring to
    the same elements.
    """
    use_dataflow_analysis = True
    def before(A: T.Buffer(16, "int32")):
        for i in T.serial(16):
            if i < 14:
                A[i] = 1
        # io==3 and ii>=2 covers flat indices 14 and 15, same as `i >= 14`.
        for io, ii in T.grid(4, 4):
            if io == 3 and ii >= 2:
                A[4 * io + ii] = 2
    def expected(A: T.Buffer(16, "int32")):
        for i in T.serial(16):
            A[i] = 1
        for io, ii in T.grid(4, 4):
            if io == 3 and ii >= 2:
                A[4 * io + ii] = 2
class TestIntroduceSupersetOverwrittenWrite(BaseBeforeAfter):
    """Insert a write that is later overwritten.
    As TestIntroduceOverwrittenWrite, but the elements written in the
    second loop are not distinct from the elements in the first loop.
    So long as the writes introduced by overcompute in the first loop
    are a subset of the writes present in the second loop, the
    overcompute can be introduced.
    """
    use_dataflow_analysis = True
    def before(A: T.Buffer(16, "int32")):
        for i in T.serial(16):
            if i < 14:
                A[i] = 1
        for i in T.serial(16):
            if i >= 14:
                A[i] = 2
    def expected(A: T.Buffer(16, "int32")):
        # The guard in the first loop is removed; the extra writes are
        # overwritten by the second loop before being observed.
        for i in T.serial(16):
            A[i] = 1
        for i in T.serial(16):
            if i >= 14:
                A[i] = 2
if __name__ == "__main__":
    # Discover and run all tests in this file via TVM's pytest wrapper.
    tvm.testing.main()
| 6,271 | 27.509091 | 80 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_search_strategy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test Meta Schedule SearchStrategy """
# pylint: disable=missing-function-docstring
from typing import List
import pytest
import tvm
import tvm.testing
from tvm import meta_schedule as ms
from tvm.meta_schedule.utils import derived_object
from tvm.meta_schedule.testing.dummy_object import DummyMutator
from tvm.script import tir as T
from tvm.tir.schedule import Schedule, Trace
MATMUL_M = 32
# pylint: disable=missing-class-docstring,invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument, unbalanced-tuple-unpacking
# fmt: off
# A 32x32x32 matmul IRModule used as the common workload for all strategy tests.
@tvm.script.ir_module
class Matmul:
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:  # type: ignore
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, (32, 32), "float32")
        B = T.match_buffer(b, (32, 32), "float32")
        C = T.match_buffer(c, (32, 32), "float32")
        # C = A @ B with the reduction accumulator initialized to zero.
        for i, j, k in T.grid(32, 32, 32):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0  # type: ignore
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# fmt: on
# pylint: enable=missing-class-docstring,invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
def _is_trace_equal(sch_1: Schedule, sch_2: Schedule, remove_decisions=True) -> bool:
    """Compare two schedules by the string form of their traces.

    With remove_decisions=True the sampled decisions are stripped first, so
    only the instruction sequences are compared.
    """
    if remove_decisions:
        lhs = Trace(sch_1.trace.insts, {})
        rhs = Trace(sch_2.trace.insts, {})
    else:
        lhs, rhs = sch_1.trace, sch_2.trace
    return str(lhs) == str(rhs)
def _schedule_matmul(sch: Schedule):
    """Tile the matmul block 4x4x2 with sampled perfect-tile factors."""
    matmul = sch.get_block("matmul")
    loop_i, loop_j, loop_k = sch.get_loops(block=matmul)
    i0, i1, i2, i3 = sch.split(loop_i, sch.sample_perfect_tile(loop_i, n=4))
    j0, j1, j2, j3 = sch.split(loop_j, sch.sample_perfect_tile(loop_j, n=4))
    k0, k1 = sch.split(loop_k, sch.sample_perfect_tile(loop_k, n=2))
    sch.reorder(i0, j0, i1, j1, k0, i2, j2, k1, i3, j3)
@pytest.mark.parametrize(
    "TestClass",
    [
        ms.search_strategy.ReplayFunc,
        ms.search_strategy.ReplayTrace,
    ],
)
def test_meta_schedule_replay_func(
    TestClass: ms.search_strategy.SearchStrategy,
):  # pylint: disable = invalid-name
    """Replay strategies emit candidates until the 20-trial budget is spent,
    in batches capped at 7 (hence 7 + 7 + 6)."""
    num_trials_per_iter = 7
    max_trials_per_task = 20
    context = ms.TuneContext(
        mod=Matmul,
        space_generator=ms.space_generator.ScheduleFn(sch_fn=_schedule_matmul, postprocs=[]),
        search_strategy=TestClass(),
    )
    strategy = context.search_strategy
    spaces = context.space_generator.generate_design_space(context.mod)
    strategy.pre_tuning(
        max_trials=max_trials_per_task,
        num_trials_per_iter=num_trials_per_iter,
        design_spaces=spaces,
    )
    # Reference schedule generated from the same schedule function.
    (correct_sch,) = ms.space_generator.ScheduleFn(sch_fn=_schedule_matmul).generate_design_space(
        Matmul
    )
    num_trials_each_iter: List[int] = []
    candidates = strategy.generate_measure_candidates()
    while candidates is not None:
        num_trials_each_iter.append(len(candidates))
        runner_results: List[ms.runner.RunnerResult] = []
        for candidate in candidates:
            _is_trace_equal(
                candidate.sch,
                correct_sch,
                remove_decisions=(isinstance(strategy, ms.search_strategy.ReplayTrace)),
            )
            # Feed back fabricated timings so the strategy advances.
            runner_results.append(
                ms.runner.RunnerResult(
                    run_secs=[0.11, 0.41, 0.54],
                    error_msg=None,
                )
            )
        strategy.notify_runner_results(candidates, runner_results)
        candidates = strategy.generate_measure_candidates()
    strategy.post_tuning()
    assert num_trials_each_iter == [7, 7, 6]
def test_meta_schedule_evolutionary_search():  # pylint: disable = invalid-name
    """Evolutionary search over a small design space spends exactly 25 trials,
    mostly in non-empty batches."""
    def _schedule_matmul_small(sch: Schedule):
        # A reduced tiling keeps the search space tiny.
        block = sch.get_block("matmul")
        _, j, k = sch.get_loops(block=block)
        _, _ = sch.split(j, sch.sample_perfect_tile(j, n=2))
        _, _ = sch.split(k, sch.sample_perfect_tile(k, n=2))
    num_trials_per_iter = 10
    max_trials_per_task = 2000
    (correct_sch,) = ms.space_generator.ScheduleFn(sch_fn=_schedule_matmul).generate_design_space(
        Matmul
    )
    context = ms.TuneContext(
        mod=Matmul,
        space_generator=ms.space_generator.ScheduleFn(
            sch_fn=_schedule_matmul_small,
            sch_rules=[],
            postprocs=[],
            mutator_probs={
                DummyMutator(): 1.0,
            },
        ),
        search_strategy=ms.search_strategy.EvolutionarySearch(
            population_size=5,
            init_measured_ratio=0.1,
            init_min_unmeasured=50,
            genetic_num_iters=3,
            genetic_mutate_prob=0.5,
            genetic_max_fail_count=10,
            eps_greedy=0.9,
        ),
        target=tvm.target.Target("llvm"),
        num_threads=1,  # because we are using a mutator from the python side
    )
    strategy = context.search_strategy
    strategy.pre_tuning(
        max_trials=max_trials_per_task,
        num_trials_per_iter=num_trials_per_iter,
        design_spaces=context.space_generator.generate_design_space(context.mod),
        database=ms.database.MemoryDatabase(),
        cost_model=ms.cost_model.RandomModel(),
    )
    num_trials_each_iter: List[int] = []
    candidates = strategy.generate_measure_candidates()
    while candidates is not None:
        num_trials_each_iter.append(len(candidates))
        runner_results: List[ms.runner.RunnerResult] = []
        for candidate in candidates:
            _is_trace_equal(
                candidate.sch,
                correct_sch,
                remove_decisions=(isinstance(strategy, ms.search_strategy.ReplayTrace)),
            )
            # Feed back fabricated timings so the strategy advances.
            runner_results.append(
                ms.runner.RunnerResult(
                    run_secs=[0.11, 0.41, 0.54],
                    error_msg=None,
                )
            )
        strategy.notify_runner_results(candidates, runner_results)
        candidates = strategy.generate_measure_candidates()
    strategy.post_tuning()
    assert sum(num_trials_each_iter) == 25
    assert num_trials_each_iter.count(0) < 5
def test_meta_schedule_evolutionary_search_early_stop():  # pylint: disable = invalid-name
    """With a trivial (single-point) design space, the search exhausts it and
    stops early: one candidate in the first batch, then empty batches."""
    def _schedule_matmul_empty(sch: Schedule):
        return sch
    (correct_sch,) = ms.space_generator.ScheduleFn(sch_fn=_schedule_matmul).generate_design_space(
        Matmul
    )
    num_trials_per_iter = 10
    max_trials_per_task = 100
    context = ms.TuneContext(
        mod=Matmul,
        search_strategy=ms.search_strategy.EvolutionarySearch(
            population_size=5,
            init_measured_ratio=0.1,
            init_min_unmeasured=50,
            genetic_num_iters=3,
            genetic_mutate_prob=0.5,
            genetic_max_fail_count=10,
            eps_greedy=0.9,
        ),
        space_generator=ms.space_generator.ScheduleFn(
            sch_fn=_schedule_matmul_empty,
            sch_rules=[],
            postprocs=[],
            mutator_probs={
                DummyMutator(): 1.0,
            },
        ),
        target=tvm.target.Target("llvm"),
        num_threads=1,
    )
    strategy = context.search_strategy
    strategy.pre_tuning(
        max_trials=max_trials_per_task,
        num_trials_per_iter=num_trials_per_iter,
        design_spaces=context.space_generator.generate_design_space(context.mod),
        database=ms.database.MemoryDatabase(),
        cost_model=ms.cost_model.RandomModel(),
    )
    num_trials_each_iter: List[int] = []
    candidates = strategy.generate_measure_candidates()
    while candidates is not None:
        num_trials_each_iter.append(len(candidates))
        runner_results: List[ms.runner.RunnerResult] = []
        for candidate in candidates:
            _is_trace_equal(
                candidate.sch,
                correct_sch,
                remove_decisions=(isinstance(strategy, ms.search_strategy.ReplayTrace)),
            )
            # Feed back fabricated timings so the strategy advances.
            runner_results.append(
                ms.runner.RunnerResult(
                    run_secs=[0.11, 0.41, 0.54],
                    error_msg=None,
                ),
            )
        strategy.notify_runner_results(candidates, runner_results)
        candidates = strategy.generate_measure_candidates()
    strategy.post_tuning()
    assert num_trials_each_iter == [1, 0, 0, 0, 0]
def test_meta_schedule_evolutionary_search_fail_init_population():  # pylint: disable = invalid-name
    """If every candidate fails postprocessing, no measure candidates appear."""
    @derived_object
    class AlwaysFailPostproc(ms.postproc.PyPostproc):
        """A postproc that always fails."""
        def _initialize_with_tune_context(self, context: ms.TuneContext) -> None:
            pass
        def apply(self, sch: Schedule) -> bool:
            return False
        def clone(self) -> "AlwaysFailPostproc":
            return AlwaysFailPostproc()
        def __str__(self) -> str:
            return "AlwaysFailPostproc"
    num_trials_per_iter = 10
    max_trials_per_task = 2000
    context = ms.TuneContext(
        mod=Matmul,
        space_generator=ms.space_generator.ScheduleFn(
            sch_fn=_schedule_matmul,
            sch_rules=[],
            postprocs=[AlwaysFailPostproc()],
            mutator_probs={
                DummyMutator(): 1.0,
            },
        ),
        search_strategy=ms.search_strategy.EvolutionarySearch(
            population_size=5,
            init_measured_ratio=0.1,
            init_min_unmeasured=50,
            genetic_num_iters=3,
            genetic_mutate_prob=0.5,
            genetic_max_fail_count=10,
            eps_greedy=0.9,
        ),
        target=tvm.target.Target("llvm"),
        num_threads=1,  # because we are using a mutator from the python side
    )
    strategy = context.search_strategy
    strategy.pre_tuning(
        max_trials=max_trials_per_task,
        num_trials_per_iter=num_trials_per_iter,
        design_spaces=context.space_generator.generate_design_space(context.mod),
        database=ms.database.MemoryDatabase(),
        cost_model=ms.cost_model.RandomModel(),
    )
    candidates = strategy.generate_measure_candidates()
    # The initial population could not be built, so no candidates are produced.
    assert candidates is None
if __name__ == "__main__":
    # Run each test directly (both parametrizations of the replay test).
    test_meta_schedule_replay_func(ms.search_strategy.ReplayFunc)
    test_meta_schedule_replay_func(ms.search_strategy.ReplayTrace)
    test_meta_schedule_evolutionary_search()
    test_meta_schedule_evolutionary_search_early_stop()
    test_meta_schedule_evolutionary_search_fail_init_population()
| 11,370 | 35.098413 | 146 | py |
tvm | tvm-main/tests/python/unittest/test_roofline.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import csv
import json
import os
import platform
from io import StringIO
import numpy as np
import pytest
import tvm.testing
import tvm.utils
from tvm import relay, rpc
from tvm.contrib import utils
from tvm.contrib.debugger import debug_executor
from tvm.relay.testing import mlp
from tvm.runtime import profiler_vm
from tvm.runtime.profiling import Report
from tvm.script import tir as T
@tvm.testing.requires_llvm
@pytest.mark.parametrize("dtype", ["float32", "int8", "int32"])
def test_estimate_peak_flops_cpu(dtype):
    """Peak FLOP/s estimation on CPU lands in a plausible range for each dtype.

    Bug fix: the parametrized ``dtype`` was ignored and "float32" was always
    passed, so the int8/int32 parametrizations never exercised their paths.
    """
    server = rpc.Server(key="roofline_flops_cpu")
    remote = rpc.connect("127.0.0.1", server.port, key="roofline_flops_cpu")
    target = tvm.target.Target("llvm -mattr=+fma,+avx2")
    dev = remote.device(str(target))
    # This test uses vectorized instructions so we need a target that supports them
    flops = tvm.utils.roofline.x86.estimate_peak_fma_vector_flops(target, dev, remote, dtype)
    # Assume we can achieve 1 GFLOP/s per thread, which is 1 FLOP per cycle on a 1GHz cpu.
    assert (
        flops > 10**9 and flops < 10**14
    ), f"FLOP/s should be between 10^9 and 10^14, but it is {flops}"
@tvm.testing.requires_cuda
def test_estimate_peak_flops_gpu():
    """Peak FLOP/s estimates on GPU (tensor cores and FMA) should be plausible."""
    server = rpc.Server(key="roofline_flops_gpu")
    remote = rpc.connect("127.0.0.1", server.port, key="roofline_flops_gpu")
    target = tvm.target.Target("cuda")
    dev = remote.device(str(target))
    # This test uses vectorized instructions so we need a target that supports them
    flops = tvm.utils.roofline.cuda.estimate_peak_flops_tensorcore(target, dev, remote)
    # should be able to hit a TFLOP/s with tensor cores
    assert (
        flops > 10**12 and flops < 10**14
    ), f"FLOP/s should be between 10^12 and 10^14, but it is {flops}"
    # this test should run on all gpus
    flops = tvm.utils.roofline.cuda.estimate_peak_flops_fma(target, dev, remote, "float32")
    # most gpus since 2016 should be able to hit a TFLOP/s with fma instructions
    assert (
        flops > 10**12 and flops < 10**14
    ), f"FLOP/s should be between 10^12 and 10^14, but it is {flops}"
@tvm.testing.skip_if_32bit(reason="Cannot allocate enough memory on i386")
@tvm.testing.requires_llvm
def test_estimate_peak_bandwidth_cpu():
    """Peak DRAM bandwidth estimation on CPU should land in a plausible range."""
    server = rpc.Server(key="roofline_bandwidth_cpu")
    remote = rpc.connect("127.0.0.1", server.port, key="roofline_bandwidth_cpu")
    target = tvm.target.Target("llvm -mattr=+fma,+avx2")
    dev = remote.device(str(target))
    # This test uses vectorized instructions so we need a target that supports them
    bandwidth = tvm.utils.roofline.x86.estimate_peak_bandwidth_dram(target, dev, remote)
    # Assume we can achieve 1 GB/s. DDR2 should transfer somewhere around 6
    # GB/s, so this should leave enough wiggle room.
    assert (
        bandwidth > 10**9 and bandwidth < 10**12
    ), f"Bandwidth should be between 10^9 and 10^12, but it is {bandwidth}"
@tvm.testing.requires_cuda
def test_estimate_peak_bandwidth_gpu():
    """Peak global-memory bandwidth estimation on GPU should be plausible.

    Bug fix: the assertion message claimed a 10^9..10^12 range while the
    condition checks 10^11..10^13; the message now matches the check.
    """
    server = rpc.Server(key="roofline_bandwidth_gpu")
    remote = rpc.connect("127.0.0.1", server.port, key="roofline_bandwidth_gpu")
    target = tvm.target.Target("cuda")
    dev = remote.device(str(target))
    # This test uses vectorized instructions so we need a target that supports them
    bandwidth = tvm.utils.roofline.cuda.estimate_peak_bandwidth_global_mem(target, dev, remote)
    # should be able to hit a 100 GB/s on a GPU. GTX 280 hits 140 GB/s and
    # it is really old.
    assert (
        bandwidth > 10**11 and bandwidth < 10**13
    ), f"Bandwidth should be between 10^11 and 10^13, but it is {bandwidth}"
@tvm.testing.skip_if_32bit(reason="Cannot allocate enough memory on i386")
@tvm.testing.parametrize_targets("llvm -mattr=+fma,+avx2", "cuda")
def test_roofline_analysis(target, dev):
    """End-to-end roofline report for a 512x512 dense: the report must contain
    the roofline columns and a sane 'Percent of Theoretical Optimal' value."""
    a = relay.var("a", relay.TensorType((512, 512), "float32"))
    b = relay.var("b", relay.TensorType((512, 512), "float32"))
    c = relay.nn.dense(a, b)
    mod = tvm.IRModule.from_expr(relay.Function([a, b], c))
    params = {}
    server = rpc.Server(key="roofline")
    remote = rpc.connect("127.0.0.1", server.port, key="roofline")
    # NOTE(review): the parametrized `dev` is replaced with the remote device
    # here, so the fixture-provided value is unused — presumably intentional.
    dev = remote.device(target)
    report = tvm.utils.roofline_analysis(mod, params, target, dev, remote=remote)
    print(report)
    assert "Bound" in report.table()
    assert "Percent of Theoretical Optimal" in report.table()
    for call in report.calls:
        if "Percent of Theoretical Optimal" in call:
            if target.startswith("llvm"):
                # Ideally we'd like a little tighter bound here, but it is hard to
                # know how well this dense will perform without tuning. And we
                # don't have an operator that uses a specific number of flops.
                assert call["Percent of Theoretical Optimal"].ratio >= 5.0
            elif target == "cuda":
                # The cuda gpu kernel is really poorly optimized
                assert 90 >= call["Percent of Theoretical Optimal"].ratio >= 0.01
if __name__ == "__main__":
    # Let tvm.testing discover and run the tests in this file.
    tvm.testing.main()
| 5,832 | 41.889706 | 97 | py |
tvm | tvm-main/tests/python/unittest/test_tir_renew_defs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import tvm
import tvm.testing
from tvm.script import tir as T
from tvm.tir.buffer import Buffer
from tvm.tir.function import PrimFunc
from tvm.tir.stmt import Block
def _check_func_signature_remap(lhs: PrimFunc, rhs: PrimFunc):
    """Assert ``rhs`` is a distinct function whose parameters and buffer-map
    entries were all replaced with fresh objects by ``renew_defs``."""
    assert lhs != rhs
    for lhs_param, rhs_param in zip(lhs.params, rhs.params):
        assert lhs_param != rhs_param
        assert lhs.buffer_map[lhs_param] != rhs.buffer_map[rhs_param]
def _check_buffer_decl(lhs: Buffer, rhs: Buffer):
    """Assert two buffer declarations are distinct objects with distinct data vars."""
    assert lhs != rhs and lhs.data != rhs.data
def _check_block_signature_remap(lhs: Block, rhs: Block):
    """Assert every definition carried by a block signature — iter vars,
    alloc buffers and match buffers — was remapped to a fresh object."""
    assert lhs != rhs
    for lhs_iter, rhs_iter in zip(lhs.iter_vars, rhs.iter_vars):
        assert lhs_iter != rhs_iter
        assert lhs_iter.var != rhs_iter.var
    for lhs_alloc, rhs_alloc in zip(lhs.alloc_buffers, rhs.alloc_buffers):
        _check_buffer_decl(lhs_alloc, rhs_alloc)
    for lhs_match, rhs_match in zip(lhs.match_buffers, rhs.match_buffers):
        assert lhs_match != rhs_match
        _check_buffer_decl(lhs_match.buffer, rhs_match.buffer)
def test_simple():
    """renew_defs must keep the function structurally equal while replacing
    every definition (params, alloc_buffers, loop vars, block iter vars)
    with a fresh object."""
    @T.prim_func
    # Buffer A should be remapped
    def elementwise(A: T.Buffer((128, 128), "float32")):
        # Buffer B should be remapped
        B = T.alloc_buffer((128, 128), "float32")
        # i, j should be remapped
        for i, j in T.grid(128, 128):
            with T.block("B"):
                # vi, vj should be remapped
                vi, vj = T.axis.remap("SS", [i, j])
                T.reads(A[vi, vj])
                T.writes(B[vi, vj])
                B[vi, vj] = A[vi, vj] * 2.0
    f1 = elementwise
    f2 = tvm.tir.stmt_functor.renew_defs(f1)
    tvm.ir.assert_structural_equal(f1, f2)
    _check_func_signature_remap(f1, f2)
    # check root block
    _check_block_signature_remap(f1.body.block, f2.body.block)
    # check remap of i
    assert f1.body.block.body.loop_var != f2.body.block.body.loop_var
    # check remap of j
    assert f1.body.block.body.body.loop_var != f2.body.block.body.body.loop_var
    # check inner block
    def _get_block(f):
        # root block -> loop i -> loop j -> BlockRealize -> block "B"
        return f.body.block.body.body.body.block
    _check_block_signature_remap(_get_block(f1), _get_block(f2))
def test_match_buffer():
    """Vars introduced by T.match_buffer (stride ``s``, elem_offset ``e``)
    must be remapped, and a var used twice must map to a single new var."""
    @T.prim_func
    # A and B should be remapped
    def func_match_buffer(A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32")):
        with T.block("root"):
            s = T.int32()
            e = T.int32()
            # A0 should be remapped
            A0 = T.match_buffer(
                A[0:128, 0:128],
                shape=(128, 128),
                dtype="float32",
                # s and e should be remapped
                strides=[s, s],
                elem_offset=e,
            )
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A0[vi, vj] * 2.0
    f1 = func_match_buffer
    f2 = tvm.tir.stmt_functor.renew_defs(f1)
    tvm.ir.assert_structural_equal(f1, f2)
    _check_func_signature_remap(f1, f2)
    _check_block_signature_remap(f1.body.block, f2.body.block)
    assert f1.body.block.body.loop_var != f2.body.block.body.loop_var
    def _get_block(f):
        return f.body.block
    block1 = _get_block(f1)
    block2 = _get_block(f2)
    _check_block_signature_remap(block1, block2)
    matched_buffer1 = block1.match_buffers[0].buffer
    matched_buffer2 = block2.match_buffers[0].buffer
    # Stride var s should be remapped
    assert matched_buffer1.strides[0] != matched_buffer2.strides[0]
    assert matched_buffer1.strides[1] != matched_buffer2.strides[1]
    # s should be only remapped once
    assert matched_buffer1.strides[0] == matched_buffer1.strides[1]
    assert matched_buffer2.strides[0] == matched_buffer2.strides[1]
    # Element-offset var e should be remapped
    assert matched_buffer1.elem_offset != matched_buffer2.elem_offset
def test_undefined_buffer():
    """A buffer declared over T.allocate data (not via match_buffer) must
    also have its backing var and buffer object remapped."""
    @T.prim_func
    def access_alloc():
        # Buffer A should be remapped
        A_data = T.allocate([128], "float16", "global")
        A = T.Buffer(shape=[128], dtype="float16", data=A_data)
        # check if buffer var also get remapped
        T.evaluate(A.data)
        for i in range(128):
            A[i] = A[i] + T.float16(1.0)
    f1 = access_alloc
    f2 = tvm.tir.stmt_functor.renew_defs(f1)
    tvm.ir.assert_structural_equal(f1, f2)
    assert f1.body.buffer_var != f2.body.buffer_var
    def _get_buffer_store_buffer(f):
        # Allocate -> SeqStmt[1] (the loop) -> BufferStore
        return f.body.body[1].body.buffer
    _check_buffer_decl(_get_buffer_store_buffer(f1), _get_buffer_store_buffer(f2))
def test_symbolic_func():
    """Symbolic shape vars — ``n`` as a parameter and ``m`` defined in the
    body — must survive remapping with structural equality preserved."""
    @T.prim_func
    def symbolic_func(a: T.handle, b: T.handle, n: T.int32):
        m = T.int32()
        A = T.match_buffer(a, (n, m))
        B = T.match_buffer(b, (n, m * 2))
        for i, j in T.grid(n, m):
            B[i, j * 2] = A[i, j]
            B[i, j * 2 + 1] = A[i, j]
    f1 = symbolic_func
    f2 = tvm.tir.stmt_functor.renew_defs(f1)
    tvm.ir.assert_structural_equal(f1, f2)
def test_buffer_map():
    """Shape expressions in the buffer map (``m * 2``) must be rebuilt on
    the fresh var, so the two functions' shape nodes are distinct objects."""
    @T.prim_func
    def main(a: T.handle, b: T.handle):
        m = T.int64()
        A = T.match_buffer(a, (m * 2,))
        B = T.match_buffer(b, (m, 2))
        for i, j in T.grid(m, 2):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = A[vi * 2 + vj]
    f1 = main
    f2 = tvm.tir.stmt_functor.renew_defs(main)
    tvm.ir.assert_structural_equal(f1, f2)
    # Same structure, but physically different expression objects.
    assert f1.buffer_map[f1.params[1]].shape[0] != f2.buffer_map[f2.params[1]].shape[0]
if __name__ == "__main__":
tvm.testing.main()
| 6,288 | 31.755208 | 98 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_postproc_rewrite_parallel_vectorize_unroll.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import tvm
import tvm.testing
from tvm.meta_schedule.postproc import RewriteParallelVectorizeUnroll
from tvm.script import tir as T
from tvm.tir.schedule import Schedule
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument,not-callable,misplaced-comparison-constant
# fmt: off
# Input fixture: a padded 3-D copy whose root block carries the
# meta_schedule.parallel / meta_schedule.vectorize annotations consumed by
# RewriteParallelVectorizeUnroll.
@tvm.script.ir_module
class Move_PUV:
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, [1024, 1024, 1024], dtype="float32")
        B = T.match_buffer(b, [1024, 1024, 1024], dtype="float32")
        # body
        with T.block("root"):
            T.block_attr({"meta_schedule.parallel":128, "meta_schedule.vectorize":32})
            for i0, j0, i1, j1, k0, i2, j2, k1 in T.grid(128, 64, 4, 4, 64, 4, 8, 32):
                with T.block("move"):
                    vi = T.axis.spatial(1024, i0 * 16 + i1 * 4 + i2)
                    vj = T.axis.spatial(1024, j0 * 32 + j1 * 8 + j2)
                    vk = T.axis.spatial(1024, k0 * 32 + k1)
                    T.where((i0 * 4 + i1) * 4 + i2 < 1024 and (j0 * 4 + j1) * 8 + j2 < 1024 and k0 * 32 + k1 < 1024)
                    T.reads([A[vi, vj, vk]])
                    T.writes([B[vi, vj, vk]])
                    B[vi, vj, vk] = A[vi, vj, vk]
# Expected result for Move_PUV: the outer i0/j0 loops fused and bound to
# T.parallel, the innermost k1 loop turned into T.vectorized, and the
# annotations removed from the root block.
@T.prim_func
def Move_PUV0(a: T.handle, b: T.handle) -> None:
    # function attr dict
    T.func_attr({"global_symbol": "main"})
    A = T.match_buffer(a, [1024, 1024, 1024], dtype="float32")
    B = T.match_buffer(b, [1024, 1024, 1024], dtype="float32")
    # body
    with T.block("root"):
        for i0_j0_fused in T.parallel(0, 8192):
            for i1, j1, k0, i2, j2 in T.grid(4, 4, 64, 4, 8):
                for k1_fused in T.vectorized(0, 32):
                    with T.block("move"):
                        vi = T.axis.spatial(1024, i0_j0_fused // 64 * 16 + i1 * 4 + i2)
                        vj = T.axis.spatial(1024, i0_j0_fused % 64 * 32 + j1 * 8 + j2)
                        vk = T.axis.spatial(1024, k0 * 32 + k1_fused)
                        T.where(
                            i0_j0_fused // 64 * 16 + i1 * 4 + i2 < 1024
                            and i0_j0_fused % 64 * 32 + j1 * 8 + j2 < 1024
                            and k0 * 32 + k1_fused < 1024
                        )
                        T.reads([A[vi, vj, vk]])
                        T.writes([B[vi, vj, vk]])
                        B[vi, vj, vk] = A[vi, vj, vk]
# Plain 64x768x768 NT matmul fixture (kept for reference by these tests).
@tvm.script.ir_module
class Fused_NN_Dense:
    @T.prim_func
    def main(placeholder: T.Buffer((64, 768), "float32"), placeholder_1: T.Buffer((768, 768), "float32"), T_matmul_NT: T.Buffer((64, 768), "float32")) -> None:
        for i0, i1, i2 in T.grid(64, 768, 768):
            with T.block("T_matmul_NT"):
                i, j, k = T.axis.remap("SSR", [i0, i1, i2])
                T.reads(placeholder[i, k], placeholder_1[j, k])
                T.writes(T_matmul_NT[i, j])
                with T.init():
                    T_matmul_NT[i, j] = T.float32(0)
                T_matmul_NT[i, j] = T_matmul_NT[i, j] + placeholder[i, k] * placeholder_1[j, k]
# Input fixture for test_vectorize_inner_loop: tiled matmul with a cache-write
# stage, root block annotated with meta_schedule.vectorize = 64.
@T.prim_func
def before_matmul_vectorize(
    placeholder: T.Buffer((64, 768), "float32"),
    placeholder_1: T.Buffer((768, 768), "float32"),
    T_matmul_NT: T.Buffer((64, 768), "float32"),
) -> None:
    with T.block("root"):
        T.reads()
        T.writes()
        T.block_attr({"meta_schedule.vectorize":64})
        T_matmul_NT_global = T.alloc_buffer([64, 768], dtype="float32")
        for i0_0, i1_0, i0_1, i1_1 in T.grid(1, 16, 1, 3):
            for i2_0, i0_2, i1_2, i2_1, i0_3, i1_3 in T.grid(48, 8, 1, 16, 8, 16):
                with T.block("T_matmul_NT"):
                    i = T.axis.spatial(64, i0_2 * 8 + i0_3)
                    j = T.axis.spatial(768, i1_0 * 48 + i1_1 * 16 + i1_3)
                    k = T.axis.reduce(768, i2_0 * 16 + i2_1)
                    T.reads(placeholder[i, k], placeholder_1[j, k])
                    T.writes(T_matmul_NT_global[i, j])
                    with T.init():
                        T_matmul_NT_global[i, j] = T.float32(0)
                    T_matmul_NT_global[i, j] = T_matmul_NT_global[i, j] + placeholder[i, k] * placeholder_1[j, k]
            for ax0, ax1 in T.grid(64, 16):
                with T.block("T_matmul_NT_global"):
                    v0 = T.axis.spatial(64, ax0)
                    v1 = T.axis.spatial(768, i1_0 * 48 + i1_1 * 16 + ax1)
                    T.reads(T_matmul_NT_global[v0, v1])
                    T.writes(T_matmul_NT[v0, v1])
                    T_matmul_NT[v0, v1] = T_matmul_NT_global[v0, v1]
# Expected result: the innermost loop of each stage (i1_3 / ax1) is rewritten
# into T.vectorized, and the vectorize annotation is gone from the root block.
@T.prim_func
def after_matmul_vectorize(
    placeholder: T.Buffer((64, 768), "float32"),
    placeholder_1: T.Buffer((768, 768), "float32"),
    T_matmul_NT: T.Buffer((64, 768), "float32"),
) -> None:
    T_matmul_NT_global = T.alloc_buffer([64, 768], dtype="float32")
    for i0_0, i1_0, i0_1, i1_1 in T.grid(1, 16, 1, 3):
        for i2_0, i0_2, i1_2, i2_1, i0_3 in T.grid(48, 8, 1, 16, 8):
            for i1_3_fused in T.vectorized(16):
                with T.block("T_matmul_NT"):
                    i = T.axis.spatial(64, i0_2 * 8 + i0_3)
                    j = T.axis.spatial(768, i1_0 * 48 + i1_1 * 16 + i1_3_fused)
                    k = T.axis.reduce(768, i2_0 * 16 + i2_1)
                    T.reads(placeholder[i, k], placeholder_1[j, k])
                    T.writes(T_matmul_NT_global[i, j])
                    with T.init():
                        T_matmul_NT_global[i, j] = T.float32(0)
                    T_matmul_NT_global[i, j] = T_matmul_NT_global[i, j] + placeholder[i, k] * placeholder_1[j, k]
        for ax0 in T.serial(64):
            for ax1_fused in T.vectorized(16):
                with T.block("T_matmul_NT_global"):
                    v0 = T.axis.spatial(64, ax0)
                    v1 = T.axis.spatial(768, i1_0 * 48 + i1_1 * 16 + ax1_fused)
                    T.reads(T_matmul_NT_global[v0, v1])
                    T.writes(T_matmul_NT[v0, v1])
                    T_matmul_NT[v0, v1] = T_matmul_NT_global[v0, v1]
# Input fixture for test_parallel_vectorize_add: elementwise add annotated
# with both parallel (64) and vectorize (128) limits on the root block.
@T.prim_func
def before_postproc_add(
    lhs: T.Buffer((1, 8, 56, 56, 32), "uint8"),
    rhs: T.Buffer((1, 8, 56, 56, 32), "uint8"),
    add_compute: T.Buffer((1, 8, 56, 56, 32), "uint8"),
) -> None:
    with T.block("root"):
        T.block_attr({"meta_schedule.parallel":64, "meta_schedule.vectorize":128})
        for n, c0, h, w, c1 in T.grid(1, 8, 56, 56, 32):
            with T.block("add_compute"):
                v0, v1, v2, v3, v4 = T.axis.remap("SSSSS", [n, c0, h, w, c1])
                T.reads(lhs[v0, v1, v2, v3, v4], rhs[v0, v1, v2, v3, v4])
                T.writes(add_compute[v0, v1, v2, v3, v4])
                add_compute[v0, v1, v2, v3, v4] = lhs[v0, v1, v2, v3, v4] + rhs[v0, v1, v2, v3, v4]
# Expected result: all five loops fused, split into an outer T.parallel loop
# of 6272 and an inner T.vectorized loop of 128 (6272 * 128 == 1*8*56*56*32).
@T.prim_func
def after_postproc_add(
    lhs: T.Buffer((1, 8, 56, 56, 32), "uint8"),
    rhs: T.Buffer((1, 8, 56, 56, 32), "uint8"),
    add_compute: T.Buffer((1, 8, 56, 56, 32), "uint8"),
) -> None:
    with T.block("root"):
        for n_c0_h_w_c1_fused_0 in T.parallel(0, 6272):
            for n_c0_h_w_c1_fused_1 in T.vectorized(0, 128):
                with T.block("add_compute"):
                    v0 = T.axis.spatial(1, 0)
                    v1 = T.axis.spatial(8, (n_c0_h_w_c1_fused_0 * 128 + n_c0_h_w_c1_fused_1) // 100352)
                    v2 = T.axis.spatial(56, (n_c0_h_w_c1_fused_0 * 128 + n_c0_h_w_c1_fused_1) % 100352 // 1792)
                    v3 = T.axis.spatial(56, (n_c0_h_w_c1_fused_0 * 128 + n_c0_h_w_c1_fused_1) % 1792 // 32)
                    v4 = T.axis.spatial(32, (n_c0_h_w_c1_fused_0 * 128 + n_c0_h_w_c1_fused_1) % 32)
                    T.reads(lhs[v0, v1, v2, v3, v4], rhs[v0, v1, v2, v3, v4])
                    T.writes(add_compute[v0, v1, v2, v3, v4])
                    add_compute[v0, v1, v2, v3, v4] = lhs[v0, v1, v2, v3, v4] + rhs[v0, v1, v2, v3, v4]
# fmt: on
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument,not-callable
def test_meta_schedule_postproc_rewrite_parallel_unroll_vectorize():
    """Applying the postproc to Move_PUV must yield Move_PUV0 after Simplify."""
    sch = Schedule(Move_PUV)
    assert RewriteParallelVectorizeUnroll().apply(sch)
    simplified = tvm.tir.transform.Simplify()(sch.mod)
    tvm.ir.assert_structural_equal(simplified["main"], Move_PUV0)
def test_vectorize_inner_loop():
    """The innermost loops of the tiled matmul must be rewritten to T.vectorized."""
    sch = Schedule(before_matmul_vectorize)
    postproc = RewriteParallelVectorizeUnroll()
    assert postproc.apply(sch)
    tvm.ir.assert_structural_equal(sch.mod["main"], after_matmul_vectorize)
def test_parallel_vectorize_add():
    """Both the parallel and vectorize annotations must be materialized on the add."""
    schedule = Schedule(before_postproc_add)
    assert RewriteParallelVectorizeUnroll().apply(schedule)
    tvm.ir.assert_structural_equal(schedule.mod["main"], after_postproc_add)
def test_no_unroll_for_spatial_block():
    """With meta_schedule.unroll_explicit on the root block, the unroll
    pragmas must land only on the reduction block's loop nest (``ax0`` of
    ``A_red_temp`` in ``expected``); the loops of the purely spatial
    ``T_layer_norm`` block receive no unroll annotations."""
    # fmt: off
    @T.prim_func
    def layer_norm(A: T.Buffer((1, 4, 4, 32), "float32"), B: T.Buffer((4, 4, 32), "float32"), C: T.Buffer((4, 4, 32), "float32"), T_layer_norm: T.Buffer((1, 4, 4, 32), "float32")):
        with T.block("root"):
            T.block_attr({"meta_schedule.unroll_explicit": 512})
            A_red_temp_v0 = T.alloc_buffer((1,))
            A_red_temp_v1 = T.alloc_buffer((1,))
            for ax0, k1, k2, k3 in T.grid(1, 4, 4, 32):
                with T.block("A_red_temp"):
                    v_ax0, v_k1, v_k2, v_k3 = T.axis.remap("SRRR", [ax0, k1, k2, k3])
                    T.reads(A[v_ax0, v_k1, v_k2, v_k3])
                    T.writes(A_red_temp_v0[v_ax0], A_red_temp_v1[v_ax0])
                    with T.init():
                        A_red_temp_v0[v_ax0] = T.float32(0)
                        A_red_temp_v1[v_ax0] = T.float32(0)
                    v_A_red_temp_v0: T.float32 = A_red_temp_v0[v_ax0] + A[v_ax0, v_k1, v_k2, v_k3]
                    v_A_red_temp_v1: T.float32 = A_red_temp_v1[v_ax0] + A[v_ax0, v_k1, v_k2, v_k3] * A[v_ax0, v_k1, v_k2, v_k3]
                    A_red_temp_v0[v_ax0] = v_A_red_temp_v0
                    A_red_temp_v1[v_ax0] = v_A_red_temp_v1
            for ax0, ax1, ax2, ax3 in T.grid(1, 4, 4, 32):
                with T.block("T_layer_norm"):
                    v_ax0, v_ax1, v_ax2, v_ax3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3])
                    T.reads(A[v_ax0, v_ax1, v_ax2, v_ax3], A_red_temp_v0[v_ax0], A_red_temp_v1[v_ax0], B[v_ax1, v_ax2, v_ax3], C[v_ax1, v_ax2, v_ax3])
                    T.writes(T_layer_norm[v_ax0, v_ax1, v_ax2, v_ax3])
                    T_layer_norm[v_ax0, v_ax1, v_ax2, v_ax3] = (A[v_ax0, v_ax1, v_ax2, v_ax3] - A_red_temp_v0[v_ax0] * T.float32(0.001953125)) * T.rsqrt(A_red_temp_v1[v_ax0] * T.float32(0.001953125) - A_red_temp_v0[v_ax0] * T.float32(0.001953125) * (A_red_temp_v0[v_ax0] * T.float32(0.001953125)) + T.float32(1.0000000000000001e-05)) * B[v_ax1, v_ax2, v_ax3] + C[v_ax1, v_ax2, v_ax3]
    @T.prim_func
    def expected(A: T.Buffer((1, 4, 4, 32), "float32"), B: T.Buffer((4, 4, 32), "float32"), C: T.Buffer((4, 4, 32), "float32"), T_layer_norm: T.Buffer((1, 4, 4, 32), "float32")):
        with T.block("root"):
            A_red_temp_v0 = T.alloc_buffer((1,))
            A_red_temp_v1 = T.alloc_buffer((1,))
            for ax0 in T.serial(1, annotations={"pragma_auto_unroll_max_step": 512, "pragma_unroll_explicit": 1}):
                for k1, k2, k3 in T.grid(4, 4, 32):
                    with T.block("A_red_temp"):
                        v_ax0 = T.axis.spatial(1, 0)
                        v_k1, v_k2, v_k3 = T.axis.remap("RRR", [k1, k2, k3])
                        T.reads(A[0, v_k1, v_k2, v_k3])
                        T.writes(A_red_temp_v0[0], A_red_temp_v1[0])
                        with T.init():
                            A_red_temp_v0[0] = T.float32(0)
                            A_red_temp_v1[0] = T.float32(0)
                        v_A_red_temp_v0: T.float32 = A_red_temp_v0[0] + A[0, v_k1, v_k2, v_k3]
                        v_A_red_temp_v1: T.float32 = A_red_temp_v1[0] + A[0, v_k1, v_k2, v_k3] * A[0, v_k1, v_k2, v_k3]
                        A_red_temp_v0[0] = v_A_red_temp_v0
                        A_red_temp_v1[0] = v_A_red_temp_v1
            for ax0, ax1, ax2, ax3 in T.grid(1, 4, 4, 32):
                with T.block("T_layer_norm"):
                    v_ax0 = T.axis.spatial(1, 0)
                    v_ax1, v_ax2, v_ax3 = T.axis.remap("SSS", [ax1, ax2, ax3])
                    T.reads(A[0, v_ax1, v_ax2, v_ax3], A_red_temp_v0[0], A_red_temp_v1[0], B[v_ax1, v_ax2, v_ax3], C[v_ax1, v_ax2, v_ax3])
                    T.writes(T_layer_norm[0, v_ax1, v_ax2, v_ax3])
                    T_layer_norm[0, v_ax1, v_ax2, v_ax3] = (A[0, v_ax1, v_ax2, v_ax3] - A_red_temp_v0[0] * T.float32(0.001953125)) * T.rsqrt(A_red_temp_v1[0] * T.float32(0.001953125) - A_red_temp_v0[0] * T.float32(0.001953125) * (A_red_temp_v0[0] * T.float32(0.001953125)) + T.float32(1.0000000000000001e-05)) * B[v_ax1, v_ax2, v_ax3] + C[v_ax1, v_ax2, v_ax3]
    # fmt: on
    postproc = RewriteParallelVectorizeUnroll()
    sch = Schedule(layer_norm)
    assert postproc.apply(sch)
    mod = tvm.tir.transform.Simplify()(sch.mod)
    tvm.ir.assert_structural_equal(mod["main"], expected)
if __name__ == "__main__":
tvm.testing.main()
| 14,123 | 50.926471 | 383 | py |
tvm | tvm-main/tests/python/unittest/test_runtime_module_load.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.contrib import cc, utils, popen_pool
import sys
import numpy as np
import subprocess
import tvm.testing
from tvm.relay.backend import Runtime
# Stand-alone script source, written to disk and executed in a fresh Python
# subprocess by test_dso_module_load.  It loads the exported shared library
# through the ctypes FFI with TVM_USE_RUNTIME_LIB=1, proving the compiled
# kernel works outside the process that built it.
runtime_py = """
import os
import sys
os.environ["TVM_USE_RUNTIME_LIB"] = "1"
os.environ["TVM_FFI"] = "ctypes"
import tvm
from tvm import te
import numpy as np
path_dso = sys.argv[1]
dtype = sys.argv[2]
ff = tvm.runtime.load_module(path_dso)
a = tvm.nd.array(np.zeros(10, dtype=dtype))
ff(a)
np.testing.assert_equal(a.numpy(), np.arange(a.shape[0]))
print("Finish runtime checking...")
"""
def test_dso_module_load():
    """Build a tiny TIR kernel, save it as .o/.ll/.bc, link the .o into a
    shared library, and verify all load paths (in-process and via a fresh
    subprocess running ``runtime_py``) produce the same results."""
    if not tvm.testing.device_enabled("llvm"):
        return
    dtype = "int64"
    temp = utils.tempdir()
    def save_object(names):
        # Kernel: A[i + 1] = A[i] + 1 for i in [0, n-1) — turns zeros into arange.
        n = te.size_var("n")
        Ab = tvm.tir.decl_buffer((n,), dtype)
        i = te.var("i")
        # for i in 0 to n-1:
        stmt = tvm.tir.For(
            i,
            0,
            n - 1,
            tvm.tir.ForKind.SERIAL,
            tvm.tir.BufferStore(Ab, tvm.tir.BufferLoad(Ab, [i]) + 1, [i + 1]),
        )
        mod = tvm.IRModule.from_expr(
            tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "main")
        )
        m = tvm.driver.build(mod, target="llvm")
        # Module.save picks the format from the file extension.
        for name in names:
            m.save(name)
    path_obj = temp.relpath("test.o")
    path_ll = temp.relpath("test.ll")
    path_bc = temp.relpath("test.bc")
    path_dso = temp.relpath("test.so")
    save_object([path_obj, path_ll, path_bc])
    cc.create_shared(path_dso, [path_obj])
    # Loading the shared object and the LLVM IR file must both work.
    f1 = tvm.runtime.load_module(path_dso)
    f2 = tvm.runtime.load_module(path_ll)
    a = tvm.nd.array(np.zeros(10, dtype=dtype))
    f1(a)
    np.testing.assert_equal(a.numpy(), np.arange(a.shape[0]))
    a = tvm.nd.array(np.zeros(10, dtype=dtype))
    f2(a)
    np.testing.assert_equal(a.numpy(), np.arange(a.shape[0]))
    # Re-run the check in a clean interpreter (runtime-only code path).
    path_runtime_py = temp.relpath("runtime.py")
    with open(path_runtime_py, "w") as fo:
        fo.write(runtime_py)
    proc = subprocess.run(
        [sys.executable, path_runtime_py, path_dso, dtype],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    assert proc.returncode == 0, f"{proc.args} exited with {proc.returncode}: {proc.stdout}"
@tvm.testing.requires_gpu
def test_device_module_dump():
    """Export a GPU add-one kernel to a shared library (and to stackvm) and
    check it still runs after being loaded back, including the system-lib
    path exercised in a separate process."""
    # graph
    n = tvm.runtime.convert(1024)
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
    s = te.create_schedule(B.op)
    # create iter var and assign them tags.
    num_thread = 8
    bx, tx = s[B].split(B.op.axis[0], factor=num_thread)
    s[B].bind(bx, te.thread_axis("blockIdx.x"))
    s[B].bind(tx, te.thread_axis("threadIdx.x"))
    def check_device(device):
        # Build for `device`, export as .so, reload and run in a subprocess.
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        temp = utils.tempdir()
        name = "myadd_%s" % device
        # system-lib packaging is only used on macOS/Linux here.
        if sys.platform == "darwin" or sys.platform.startswith("linux"):
            runtime = Runtime("cpp", {"system-lib": True})
            f = tvm.build(s, [A, B], device, "llvm", runtime=runtime, name=name)
        elif sys.platform == "win32":
            f = tvm.build(s, [A, B], device, "llvm", name=name)
        else:
            raise ValueError("Unsupported platform")
        path_dso = temp.relpath("dev_lib.so")
        # test cross compiler function
        f.export_library(path_dso, cc.cross_compiler("g++"))
        def popen_check():
            import tvm
            import sys
            f1 = tvm.runtime.load_module(path_dso)
            a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev)
            b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev)
            f1(a, b)
            np.testing.assert_equal(b.numpy(), a.numpy() + 1)
            if sys.platform != "win32":
                f2 = tvm.runtime.system_lib()
                f2[name](a, b)
                np.testing.assert_equal(b.numpy(), a.numpy() + 1)
        # system lib should be loaded in different process
        worker = popen_pool.PopenWorker()
        worker.send(popen_check)
        worker.recv()
    def check_stackvm(device):
        # Same kernel, but exported/reloaded through the stackvm format.
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        temp = utils.tempdir()
        name = "myadd_%s" % device
        f = tvm.build(s, [A, B], device, "stackvm", name=name)
        path_dso = temp.relpath("dev_lib.stackvm")
        f.export_library(path_dso)
        f1 = tvm.runtime.load_module(path_dso)
        a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev)
        f(a, b)
        np.testing.assert_equal(b.numpy(), a.numpy() + 1)
    for device in ["cuda", "vulkan", "opencl", "metal"]:
        check_device(device)
        check_stackvm(device)
def test_combine_module_llvm():
    """Test combine multiple module into one shared lib.

    Builds two LLVM modules, links their object files into a single .so,
    and verifies both entry points work — via load_module in-process and
    via the registered system library in a fresh process.
    """
    # graph
    nn = 12
    n = tvm.runtime.convert(nn)
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
    s = te.create_schedule(B.op)
    def check_llvm():
        # Two separately-built functions linked into one shared object.
        dev = tvm.cpu(0)
        if not tvm.testing.device_enabled("llvm"):
            print("Skip because llvm is not enabled")
            return
        temp = utils.tempdir()
        fadd1 = tvm.build(s, [A, B], "llvm", name="myadd1")
        fadd2 = tvm.build(s, [A, B], "llvm", name="myadd2")
        path1 = temp.relpath("myadd1.o")
        path2 = temp.relpath("myadd2.o")
        path_dso = temp.relpath("mylib.so")
        fadd1.save(path1)
        fadd2.save(path2)
        # create shared library with multiple functions
        cc.create_shared(path_dso, [path1, path2])
        m = tvm.runtime.load_module(path_dso)
        fadd1 = m["myadd1"]
        fadd2 = m["myadd2"]
        a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(nn, dtype=A.dtype), dev)
        fadd1(a, b)
        np.testing.assert_equal(b.numpy(), a.numpy() + 1)
        fadd2(a, b)
        np.testing.assert_equal(b.numpy(), a.numpy() + 1)
    def check_system_lib():
        # Same pair of functions, packaged as a system-lib and resolved
        # via tvm.runtime.system_lib() after a plain ctypes dlopen.
        dev = tvm.cpu(0)
        if not tvm.testing.device_enabled("llvm"):
            print("Skip because llvm is not enabled")
            return
        temp = utils.tempdir()
        runtime = Runtime("cpp", {"system-lib": True})
        fadd1 = tvm.build(s, [A, B], "llvm", runtime=runtime, name="myadd1")
        fadd2 = tvm.build(s, [A, B], "llvm", runtime=runtime, name="myadd2")
        path1 = temp.relpath("myadd1.o")
        path2 = temp.relpath("myadd2.o")
        path_dso = temp.relpath("mylib.so")
        fadd1.save(path1)
        fadd2.save(path2)
        cc.create_shared(path_dso, [path1, path2])
        def popen_check():
            import tvm.runtime
            import ctypes
            # Load dll, will trigger system library registration
            ctypes.CDLL(path_dso)
            # Load the system wide library
            mm = tvm.runtime.system_lib()
            a = tvm.nd.array(np.random.uniform(size=nn).astype(A.dtype), dev)
            b = tvm.nd.array(np.zeros(nn, dtype=A.dtype), dev)
            mm["myadd1"](a, b)
            np.testing.assert_equal(b.numpy(), a.numpy() + 1)
            mm["myadd2"](a, b)
            np.testing.assert_equal(b.numpy(), a.numpy() + 1)
        # system lib should be loaded in different process
        worker = popen_pool.PopenWorker()
        worker.send(popen_check)
        worker.recv()
    if sys.platform != "win32":
        check_system_lib()
    check_llvm()
if __name__ == "__main__":
test_combine_module_llvm()
test_device_module_dump()
test_dso_module_load()
| 8,635 | 33.682731 | 92 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_inject_virtual_thread.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
vthread_name = tvm.testing.parameter("vthread", "cthread")
def test_vthread(vthread_name):
    """InjectVirtualThread must widen B's shared allocation by the virtual
    thread count: for "vthread" only one factor of ``nthread`` is applied
    (extent m * nthread), while for "cthread" both axes contribute
    (extent m * nthread * nthread).

    Fix: removed the unused local ``dtype = "int64"`` (all buffers here are
    float32).
    """
    n = 100
    m = 4
    nthread = 2
    def get_vthread(name):
        # Build: for i { two vthread axes; B[i] = A[..]; extern "Run"(B); C[..] = B[i] + 1 }
        tx = te.thread_axis(name)
        ty = te.thread_axis(name)
        ib = tvm.tir.ir_builder.create()
        A = ib.pointer("float32", name="A")
        C = ib.pointer("float32", name="C")
        with ib.for_range(0, n) as i:
            ib.scope_attr(tx, "virtual_thread", nthread)
            ib.scope_attr(ty, "virtual_thread", nthread)
            B = ib.allocate("float32", m, name="B", scope="shared")
            B[i] = A[i * nthread + tx]
            bbuffer = B.asobject()
            # B is also consumed by an opaque extern call.
            ib.emit(
                tvm.tir.call_extern(
                    "int32",
                    "Run",
                    bbuffer.access_ptr("r"),
                    tvm.tir.call_intrin("int32", "tir.tvm_context_id"),
                )
            )
            C[i * nthread + tx] = B[i] + 1
        return ib.get()
    if vthread_name == "vthread":
        B_expected_alloc = m * nthread
    elif vthread_name == "cthread":
        B_expected_alloc = m * nthread * nthread
    stmt = tvm.tir.transform.InjectVirtualThread()(
        tvm.IRModule.from_expr(tvm.tir.PrimFunc([], get_vthread(vthread_name)))
    )["main"]
    # The rewritten allocation of B must have the expanded extent.
    assert list(stmt.body.body.extents) == [B_expected_alloc]
def test_vthread_extern(vthread_name):
    """With an opaque extern call consuming A (read), B (read) and C
    (read-write), InjectVirtualThread must expand the extents: A by one
    ``nthread`` factor for "vthread" (two for "cthread"), and C — written
    through the extern — by ``nthread * nthread`` in both modes.

    Fix: removed the unused local ``dtype = "int64"`` (all buffers here are
    float32).
    """
    n = 100
    m = 4
    nthread = 2
    def get_vthread(name):
        tx = te.thread_axis(name)
        ty = te.thread_axis(name)
        ib = tvm.tir.ir_builder.create()
        with ib.for_range(0, n) as i:
            ib.scope_attr(tx, "virtual_thread", nthread)
            ib.scope_attr(ty, "virtual_thread", nthread)
            A = ib.allocate("float32", m, name="A", scope="shared")
            B = ib.allocate("float32", m, name="B", scope="shared")
            C = ib.allocate("float32", m, name="C", scope="shared")
            abuffer = A.asobject()
            bbuffer = B.asobject()
            cbuffer = C.asobject()
            A[tx] = tx + 1.0
            B[ty] = ty + 1.0
            ib.emit(
                tvm.tir.call_extern(
                    "int32",
                    "Run",
                    abuffer.access_ptr("r"),
                    bbuffer.access_ptr("r"),
                    cbuffer.access_ptr("rw"),
                )
            )
        return ib.get()
    if vthread_name == "vthread":
        A_expected_alloc = m * nthread
    elif vthread_name == "cthread":
        A_expected_alloc = m * nthread * nthread
    # C is read-write from the extern call, so it is always fully expanded.
    C_expected_alloc = m * nthread * nthread
    stmt = tvm.tir.transform.InjectVirtualThread()(
        tvm.IRModule.from_expr(tvm.tir.PrimFunc([], get_vthread(vthread_name)))
    )["main"]
    assert list(stmt.body.body.extents) == [A_expected_alloc]
    assert list(stmt.body.body.body.body.extents) == [C_expected_alloc]
def test_vthread_if_then_else():
    """After vthread injection, the first if/else statement must keep its
    else branch while the second (else-less) if must still have none.

    Fix: compare against None with ``is`` / ``is not`` instead of
    ``==`` / ``!=`` (PEP 8 singleton comparison; ``__eq__`` need not mean
    identity for IR node objects).
    """
    nthread = 2
    tx = te.thread_axis("vthread")
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    with ib.for_range(0, 100) as i:
        ib.scope_attr(tx, "virtual_thread", nthread)
        B = ib.allocate("float32", 128, name="B", scope="shared")
        # First statement: if/else pair.
        with ib.if_scope(i == 0):
            B[i] = A[i * nthread + tx]
        with ib.else_scope():
            B[i] = A[i * nthread + tx] + 1
        # Second statement: if with no else branch.
        with ib.if_scope(i == 0):
            B[i] = A[i * nthread + tx] + 2
    stmt = ib.get()
    stmt = tvm.tir.transform.InjectVirtualThread()(
        tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    )["main"]
    assert stmt.body.body.body[0].else_case is not None
    assert stmt.body.body.body[1].else_case is None
def test_vthread_simplified():
    """Indices resulting from vthread injection should be simplified.
    This ensures that downstream passes that check for Ramp nodes do
    not need to each simplify the indices.
    """
    @T.prim_func
    def before_func():
        vthread = T.env_thread("vthread")
        T.launch_thread(vthread, 4)
        B_data = T.allocate([4], "int32", scope="shared")
        B = T.Buffer([4], "int32", data=B_data, scope="shared")
        B[0:4] = T.broadcast(vthread, 4)
    @T.prim_func
    def expected_func():
        B_data = T.allocate([16], "int32", scope="shared")
        B = T.Buffer([16], "int32", data=B_data, scope="shared")
        # The indices for B should each be a single Ramp node, and
        # should not be the sum of a Ramp and Broadcast node.
        B[T.Mul(0, 4) : T.Mul(0, 4) + 4] = T.broadcast(0, 4)
        B[T.Mul(1, 4) : T.Mul(1, 4) + 4] = T.broadcast(1, 4)
        B[T.Mul(2, 4) : T.Mul(2, 4) + 4] = T.broadcast(2, 4)
        B[T.Mul(3, 4) : T.Mul(3, 4) + 4] = T.broadcast(3, 4)
    before_mod = tvm.IRModule.from_expr(before_func)
    after_mod = tvm.tir.transform.InjectVirtualThread()(before_mod)
    after_func = after_mod["main"]
    tvm.ir.assert_structural_equal(after_func, expected_func)
def test_vthread_vectorized():
    """Use of vthread is compatible with vector allocations"""
    @T.prim_func
    def before_func():
        vthread = T.env_thread("vthread")
        T.launch_thread(vthread, 4)
        B_data = T.allocate([4], "int32", "shared")
        B = T.Buffer([4], "int32", data=B_data, scope="shared")
        B[0:4] = T.broadcast(vthread, 4)
    @T.prim_func
    def expected_func():
        # After StorageRewrite the buffer is re-typed to int32x4, so each
        # vthread copy becomes a single vector element.
        B_data = T.allocate([4], "int32x4", "shared")
        B = T.Buffer([4], "int32x4", data=B_data, scope="shared")
        B[T.Div(T.Mul(0, 4), 4)] = T.broadcast(0, 4)
        B[T.Div(T.Mul(1, 4), 4)] = T.broadcast(1, 4)
        B[T.Div(T.Mul(2, 4), 4)] = T.broadcast(2, 4)
        B[T.Div(T.Mul(3, 4), 4)] = T.broadcast(3, 4)
    before_mod = tvm.IRModule.from_expr(before_func)
    # InjectVirtualThread expands the allocation; StorageRewrite vectorizes it.
    intermediate_mod = tvm.tir.transform.InjectVirtualThread()(before_mod)
    after_mod = tvm.tir.transform.StorageRewrite()(intermediate_mod)
    after_func = after_mod["main"]
    tvm.ir.assert_structural_equal(after_func, expected_func)
if __name__ == "__main__":
tvm.testing.main()
| 6,933 | 33.67 | 79 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_space_cuda.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for MetaSchedule search space on CUDA"""
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
print_sketches,
)
from tvm.meta_schedule.testing.te_workload import create_te_workload
from tvm.script import tir as T
from tvm.target import Target
def _target():
    # RTX 2080 is an sm75 part — per the original note, chosen so the async
    # trace (software pipelining) is not part of the generated design space.
    return Target("nvidia/geforce-rtx-2080")  # disable async trace using sm75
def _design_space(mod):
    """Return every sketch the CUDA space generator proposes for ``mod``."""
    return generate_design_space(
        mod=mod,
        target=_target(),
        kind="cuda",
        types=ms.ScheduleRule,
    )
def test_cuda_c1d():
    """Check the CUDA design space for the C1D (1-D convolution) workload.

    ``c1d_0`` is the golden sketch: SSSRRSRS tiling, inputs and weights
    staged through shared memory, accumulation in a local buffer, then a
    final write-back block.
    """
    # fmt: off
    @T.prim_func
    def c1d_0(inputs: T.Buffer((1, 256, 64), "float32"), weight: T.Buffer((3, 64, 128), "float32"), conv1d_nlc: T.Buffer((1, 128, 128), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 16})
            conv1d_nlc_local = T.alloc_buffer((1, 128, 128), scope="local")
            PadInput_shared = T.alloc_buffer((1, 258, 64), scope="shared")
            weight_shared = T.alloc_buffer((3, 64, 128), scope="shared")
            for n_0_l_0_co_0_fused in T.thread_binding(4, thread="blockIdx.x"):
                for n_1_l_1_co_1_fused in T.thread_binding(16, thread="vthread.x"):
                    for n_2_l_2_co_2_fused in T.thread_binding(4, thread="threadIdx.x"):
                        for rl_0, rc_0 in T.grid(1, 16):
                            for ax0_ax1_ax2_fused in range(260):
                                with T.block("PadInput_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(258, n_0_l_0_co_0_fused * 64 + ax0_ax1_ax2_fused // 4)
                                    v2 = T.axis.spatial(64, rc_0 * 4 + ax0_ax1_ax2_fused % 4)
                                    T.reads(inputs[v0, v1 - 1, v2])
                                    T.writes(PadInput_shared[v0, v1, v2])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 4})
                                    PadInput_shared[v0, v1, v2] = T.if_then_else(1 <= v1 and v1 < 257, inputs[v0, v1 - 1, v2], T.float32(0))
                            for ax0_ax1_ax2_fused in range(1536):
                                with T.block("weight_shared"):
                                    v0 = T.axis.spatial(3, ax0_ax1_ax2_fused // 512)
                                    v1 = T.axis.spatial(64, rc_0 * 4 + ax0_ax1_ax2_fused % 512 // 128)
                                    v2 = T.axis.spatial(128, ax0_ax1_ax2_fused % 128)
                                    T.reads(weight[v0, v1, v2])
                                    T.writes(weight_shared[v0, v1, v2])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 3})
                                    weight_shared[v0, v1, v2] = weight[v0, v1, v2]
                            for rl_1, rc_1, n_3, l_3, co_3, rl_2, rc_2, n_4, l_4, co_4 in T.grid(1, 2, 1, 1, 2, 3, 2, 1, 4, 8):
                                with T.block("conv1d_nlc"):
                                    v_n = T.axis.spatial(1, n_3 + n_4)
                                    v_l = T.axis.spatial(128, n_0_l_0_co_0_fused * 32 + n_1_l_1_co_1_fused // 2 * 4 + l_3 * 4 + l_4)
                                    v_co = T.axis.spatial(128, n_1_l_1_co_1_fused % 2 * 64 + n_2_l_2_co_2_fused * 16 + co_3 * 8 + co_4)
                                    v_rl = T.axis.reduce(3, rl_0 * 3 + rl_1 * 3 + rl_2)
                                    v_rc = T.axis.reduce(64, rc_0 * 4 + rc_1 * 2 + rc_2)
                                    T.reads(PadInput_shared[v_n, v_l * 2 + v_rl, v_co // 128 * 64 + v_rc], weight_shared[v_rl, v_rc, v_co])
                                    T.writes(conv1d_nlc_local[v_n, v_l, v_co])
                                    T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"})
                                    with T.init():
                                        conv1d_nlc_local[v_n, v_l, v_co] = T.float32(0)
                                    conv1d_nlc_local[v_n, v_l, v_co] = conv1d_nlc_local[v_n, v_l, v_co] + PadInput_shared[v_n, v_l * 2 + v_rl, v_co // 128 * 64 + v_rc] * weight_shared[v_rl, v_rc, v_co]
                        for ax0, ax1, ax2 in T.grid(1, 4, 16):
                            with T.block("conv1d_nlc_local"):
                                v0 = T.axis.spatial(1, ax0)
                                v1 = T.axis.spatial(128, n_0_l_0_co_0_fused * 32 + n_1_l_1_co_1_fused // 2 * 4 + ax1)
                                v2 = T.axis.spatial(128, n_1_l_1_co_1_fused % 2 * 64 + n_2_l_2_co_2_fused * 16 + ax2)
                                T.reads(conv1d_nlc_local[v0, v1, v2])
                                T.writes(conv1d_nlc[v0, v1, v2])
                                conv1d_nlc[v0, v1, v2] = conv1d_nlc_local[v0, v1, v2]
    # fmt: on
    # Sampler decisions (tile splits + categorical picks) that make the
    # space generator deterministically reproduce the sketch above.
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1, 1]),
        ("SamplePerfectTile", [4, 8, 1, 1, 4]),
        ("SamplePerfectTile", [1, 2, 4, 2, 8]),
        ("SamplePerfectTile", [1, 1, 3]),
        ("SamplePerfectTile", [16, 2, 2]),
        ("SampleCategorical", 3),
        ("SampleCategorical", 2),
        ("SampleCategorical", 1),
    ]
    mod = create_te_workload("C1D", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[c1d_0],
        expected_decisions=[decision_0],
    )
def test_cuda_c2d():
    """Check the CUDA design space for the C2D (2-D convolution) workload.

    ``c2d_0`` is the golden sketch: padded input and weight cached in
    shared memory, SSSRRSRS-tiled compute into a local accumulator, and
    a trailing write-back block.
    """
    # fmt: off
    @T.prim_func
    def c2d_0(inputs: T.Buffer((1, 224, 224, 3), "float32"), weight: T.Buffer((7, 7, 3, 64), "float32"), conv2d_nhwc: T.Buffer((1, 112, 112, 64), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 16})
            conv2d_nhwc_local = T.alloc_buffer((1, 112, 112, 64), scope="local")
            PadInput_shared = T.alloc_buffer((1, 230, 230, 3), scope="shared")
            weight_shared = T.alloc_buffer((7, 7, 3, 64), scope="shared")
            for n_0_h_0_w_0_co_0_fused in T.thread_binding(16, thread="blockIdx.x"):
                for n_1_h_1_w_1_co_1_fused in T.thread_binding(56, thread="vthread.x"):
                    for n_2_h_2_w_2_co_2_fused in T.thread_binding(14, thread="threadIdx.x"):
                        for rh_0, rw_0, rc_0 in T.grid(1, 1, 1):
                            for ax0_ax1_ax2_ax3_fused in range(80379):
                                with T.block("PadInput_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(230, ax0_ax1_ax2_ax3_fused // 351)
                                    v2 = T.axis.spatial(230, n_0_h_0_w_0_co_0_fused // 8 * 112 + ax0_ax1_ax2_ax3_fused % 351 // 3)
                                    v3 = T.axis.spatial(3, ax0_ax1_ax2_ax3_fused % 3)
                                    T.reads(inputs[v0, v1 - 3, v2 - 3, v3])
                                    T.writes(PadInput_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 2})
                                    PadInput_shared[v0, v1, v2, v3] = T.if_then_else(3 <= v1 and v1 < 227 and 3 <= v2 and v2 < 227, inputs[v0, v1 - 3, v2 - 3, v3], T.float32(0))
                            for ax0_ax1_ax2_ax3_fused in range(1176):
                                with T.block("weight_shared"):
                                    v0 = T.axis.spatial(7, ax0_ax1_ax2_ax3_fused // 168)
                                    v1 = T.axis.spatial(7, ax0_ax1_ax2_ax3_fused % 168 // 24)
                                    v2 = T.axis.spatial(3, ax0_ax1_ax2_ax3_fused % 24 // 8)
                                    v3 = T.axis.spatial(64, n_0_h_0_w_0_co_0_fused % 8 * 8 + ax0_ax1_ax2_ax3_fused % 8)
                                    T.reads(weight[v0, v1, v2, v3])
                                    T.writes(weight_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 4})
                                    weight_shared[v0, v1, v2, v3] = weight[v0, v1, v2, v3]
                            for rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3, rh_2, rw_2, rc_2, n_4, h_4, w_4, co_4 in T.grid(1, 7, 1, 1, 8, 4, 1, 7, 1, 3, 1, 1, 1, 2):
                                with T.block("conv2d_nhwc"):
                                    v_n = T.axis.spatial(1, n_3 + n_4)
                                    v_h = T.axis.spatial(112, n_2_h_2_w_2_co_2_fused * 8 + h_3 + h_4)
                                    v_w = T.axis.spatial(112, n_0_h_0_w_0_co_0_fused // 8 * 56 + n_1_h_1_w_1_co_1_fused // 4 * 4 + w_3 + w_4)
                                    v_co = T.axis.spatial(64, n_0_h_0_w_0_co_0_fused % 8 * 8 + n_1_h_1_w_1_co_1_fused % 4 * 2 + co_3 * 2 + co_4)
                                    v_rh = T.axis.reduce(7, rh_0 * 7 + rh_1 * 7 + rh_2)
                                    v_rw = T.axis.reduce(7, rw_0 * 7 + rw_1 + rw_2)
                                    v_rc = T.axis.reduce(3, rc_0 * 3 + rc_1 * 3 + rc_2)
                                    T.reads(PadInput_shared[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc], weight_shared[v_rh, v_rw, v_rc, v_co])
                                    T.writes(conv2d_nhwc_local[v_n, v_h, v_w, v_co])
                                    T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"})
                                    with T.init():
                                        conv2d_nhwc_local[v_n, v_h, v_w, v_co] = T.float32(0)
                                    conv2d_nhwc_local[v_n, v_h, v_w, v_co] = conv2d_nhwc_local[v_n, v_h, v_w, v_co] + PadInput_shared[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc] * weight_shared[v_rh, v_rw, v_rc, v_co]
                        for ax0, ax1, ax2, ax3 in T.grid(1, 8, 4, 2):
                            with T.block("conv2d_nhwc_local"):
                                v0 = T.axis.spatial(1, ax0)
                                v1 = T.axis.spatial(112, n_2_h_2_w_2_co_2_fused * 8 + ax1)
                                v2 = T.axis.spatial(112, n_0_h_0_w_0_co_0_fused // 8 * 56 + n_1_h_1_w_1_co_1_fused // 4 * 4 + ax2)
                                v3 = T.axis.spatial(64, n_0_h_0_w_0_co_0_fused % 8 * 8 + n_1_h_1_w_1_co_1_fused % 4 * 2 + ax3)
                                T.reads(conv2d_nhwc_local[v0, v1, v2, v3])
                                T.writes(conv2d_nhwc[v0, v1, v2, v3])
                                conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_local[v0, v1, v2, v3]
    # fmt: on
    # Sampler decisions that deterministically reproduce the sketch above.
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1, 1]),
        ("SamplePerfectTile", [1, 1, 14, 8, 1]),
        ("SamplePerfectTile", [2, 14, 1, 4, 1]),
        ("SamplePerfectTile", [8, 4, 1, 1, 2]),
        ("SamplePerfectTile", [1, 1, 7]),
        ("SamplePerfectTile", [1, 7, 1]),
        ("SamplePerfectTile", [1, 1, 3]),
        ("SampleCategorical", 1),
        ("SampleCategorical", 3),
        ("SampleCategorical", 1),
    ]
    mod = create_te_workload("C2D", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[c2d_0],
        expected_decisions=[decision_0],
    )
def test_cuda_c3d():
    """Check the CUDA design space for the C3D (3-D convolution) workload.

    ``c3d_0`` is the golden sketch; structure mirrors the 2-D case with an
    extra depth dimension (shared-memory staging + local accumulation).
    """
    # fmt: off
    @T.prim_func
    def c3d_0(inputs: T.Buffer((1, 16, 224, 224, 3), "float32"), weight: T.Buffer((7, 7, 7, 3, 64), "float32"), conv3d_ndhwc: T.Buffer((1, 8, 112, 112, 64), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 16})
            conv3d_ndhwc_local = T.alloc_buffer((1, 8, 112, 112, 64), scope="local")
            PadInput_shared = T.alloc_buffer((1, 22, 230, 230, 3), scope="shared")
            weight_shared = T.alloc_buffer((7, 7, 7, 3, 64), scope="shared")
            for n_0_d_0_h_0_w_0_co_0_fused in T.thread_binding(2, thread="blockIdx.x"):
                for n_1_d_1_h_1_w_1_co_1_fused in T.thread_binding(8, thread="vthread.x"):
                    for n_2_d_2_h_2_w_2_co_2_fused in T.thread_binding(392, thread="threadIdx.x"):
                        for rd_0, rh_0, rw_0, rc_0 in T.grid(1, 1, 1, 1):
                            for ax0_ax1_ax2_ax3_ax4_fused in range(1687959):
                                with T.block("PadInput_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(22, ax0_ax1_ax2_ax3_ax4_fused // 80379)
                                    v2 = T.axis.spatial(230, ax0_ax1_ax2_ax3_ax4_fused % 80379 // 351)
                                    v3 = T.axis.spatial(230, n_0_d_0_h_0_w_0_co_0_fused * 112 + ax0_ax1_ax2_ax3_ax4_fused % 351 // 3)
                                    v4 = T.axis.spatial(3, ax0_ax1_ax2_ax3_ax4_fused % 3)
                                    T.reads(inputs[v0, v1 - 3, v2 - 3, v3 - 3, v4])
                                    T.writes(PadInput_shared[v0, v1, v2, v3, v4])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 4})
                                    PadInput_shared[v0, v1, v2, v3, v4] = T.if_then_else(3 <= v1 and v1 < 19 and 3 <= v2 and v2 < 227 and 3 <= v3 and v3 < 227, inputs[v0, v1 - 3, v2 - 3, v3 - 3, v4], T.float32(0))
                            for ax0_ax1_ax2_ax3_ax4_fused in range(65856):
                                with T.block("weight_shared"):
                                    v0 = T.axis.spatial(7, ax0_ax1_ax2_ax3_ax4_fused // 9408)
                                    v1 = T.axis.spatial(7, ax0_ax1_ax2_ax3_ax4_fused % 9408 // 1344)
                                    v2 = T.axis.spatial(7, ax0_ax1_ax2_ax3_ax4_fused % 1344 // 192)
                                    v3 = T.axis.spatial(3, ax0_ax1_ax2_ax3_ax4_fused % 192 // 64)
                                    v4 = T.axis.spatial(64, ax0_ax1_ax2_ax3_ax4_fused % 64)
                                    T.reads(weight[v0, v1, v2, v3, v4])
                                    T.writes(weight_shared[v0, v1, v2, v3, v4])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 3})
                                    weight_shared[v0, v1, v2, v3, v4] = weight[v0, v1, v2, v3, v4]
                            for rd_1, rh_1, rw_1, rc_1, n_3, d_3, h_3, w_3, co_3, rd_2, rh_2, rw_2, rc_2, n_4, d_4, h_4, w_4, co_4 in T.grid(7, 7, 1, 3, 1, 2, 2, 1, 32, 1, 1, 7, 1, 1, 1, 2, 4, 1):
                                with T.block("conv3d_ndhwc"):
                                    v_n = T.axis.spatial(1, n_3 + n_4)
                                    v_d = T.axis.spatial(8, n_2_d_2_h_2_w_2_co_2_fused // 98 * 2 + d_3 + d_4)
                                    v_h = T.axis.spatial(112, n_1_d_1_h_1_w_1_co_1_fused // 2 * 28 + n_2_d_2_h_2_w_2_co_2_fused % 98 // 14 * 4 + h_3 * 2 + h_4)
                                    v_w = T.axis.spatial(112, n_0_d_0_h_0_w_0_co_0_fused * 56 + n_1_d_1_h_1_w_1_co_1_fused % 2 * 28 + n_2_d_2_h_2_w_2_co_2_fused % 14 // 2 * 4 + w_3 * 4 + w_4)
                                    v_co = T.axis.spatial(64, n_2_d_2_h_2_w_2_co_2_fused % 2 * 32 + co_3 + co_4)
                                    v_rd = T.axis.reduce(7, rd_0 * 7 + rd_1 + rd_2)
                                    v_rh = T.axis.reduce(7, rh_0 * 7 + rh_1 + rh_2)
                                    v_rw = T.axis.reduce(7, rw_0 * 7 + rw_1 * 7 + rw_2)
                                    v_rc = T.axis.reduce(3, rc_0 * 3 + rc_1 + rc_2)
                                    T.reads(PadInput_shared[v_n, v_d * 2 + v_rd, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc], weight_shared[v_rd, v_rh, v_rw, v_rc, v_co])
                                    T.writes(conv3d_ndhwc_local[v_n, v_d, v_h, v_w, v_co])
                                    T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"})
                                    with T.init():
                                        conv3d_ndhwc_local[v_n, v_d, v_h, v_w, v_co] = T.float32(0)
                                    conv3d_ndhwc_local[v_n, v_d, v_h, v_w, v_co] = conv3d_ndhwc_local[v_n, v_d, v_h, v_w, v_co] + PadInput_shared[v_n, v_d * 2 + v_rd, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc] * weight_shared[v_rd, v_rh, v_rw, v_rc, v_co]
                        for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 2, 4, 4, 32):
                            with T.block("conv3d_ndhwc_local"):
                                v0 = T.axis.spatial(1, ax0)
                                v1 = T.axis.spatial(8, n_2_d_2_h_2_w_2_co_2_fused // 98 * 2 + ax1)
                                v2 = T.axis.spatial(112, n_1_d_1_h_1_w_1_co_1_fused // 2 * 28 + n_2_d_2_h_2_w_2_co_2_fused % 98 // 14 * 4 + ax2)
                                v3 = T.axis.spatial(112, n_0_d_0_h_0_w_0_co_0_fused * 56 + n_1_d_1_h_1_w_1_co_1_fused % 2 * 28 + n_2_d_2_h_2_w_2_co_2_fused % 14 // 2 * 4 + ax3)
                                v4 = T.axis.spatial(64, n_2_d_2_h_2_w_2_co_2_fused % 2 * 32 + ax4)
                                T.reads(conv3d_ndhwc_local[v0, v1, v2, v3, v4])
                                T.writes(conv3d_ndhwc[v0, v1, v2, v3, v4])
                                conv3d_ndhwc[v0, v1, v2, v3, v4] = conv3d_ndhwc_local[v0, v1, v2, v3, v4]
    # fmt: on
    # Sampler decisions that deterministically reproduce the sketch above.
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1, 1]),
        ("SamplePerfectTile", [1, 1, 4, 2, 1]),
        ("SamplePerfectTile", [1, 4, 7, 2, 2]),
        ("SamplePerfectTile", [2, 2, 7, 1, 4]),
        ("SamplePerfectTile", [1, 1, 2, 32, 1]),
        ("SamplePerfectTile", [1, 7, 1]),
        ("SamplePerfectTile", [1, 7, 1]),
        ("SamplePerfectTile", [1, 1, 7]),
        ("SamplePerfectTile", [1, 3, 1]),
        ("SampleCategorical", 3),
        ("SampleCategorical", 2),
        ("SampleCategorical", 1),
    ]
    mod = create_te_workload("C3D", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[c3d_0],
        expected_decisions=[decision_0],
    )
def test_cuda_cap():
    """Check the CUDA design space for the CAP (capsule conv2d) workload.

    ``cap_0`` is the golden sketch: 6-D capsule convolution with shared
    staging, local accumulation, and SSSRRSRS tiling.
    """
    # fmt: off
    @T.prim_func
    def cap_0(inputs: T.Buffer((1, 16, 16, 4, 4, 32), "float32"), weight: T.Buffer((3, 3, 4, 4, 32, 32), "float32"), conv2d_capsule_nhwijc: T.Buffer((1, 8, 8, 4, 4, 32), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 64})
            conv2d_capsule_nhwijc_local = T.alloc_buffer((1, 8, 8, 4, 4, 32), scope="local")
            PadInput_shared = T.alloc_buffer((1, 18, 18, 4, 4, 32), scope="shared")
            weight_shared = T.alloc_buffer((3, 3, 4, 4, 32, 32), scope="shared")
            for n_0_h_0_w_0_cap_i_0_cap_j_0_co_0_fused in T.thread_binding(256, thread="blockIdx.x"):
                for n_1_h_1_w_1_cap_i_1_cap_j_1_co_1_fused in T.thread_binding(1, thread="vthread.x"):
                    for n_2_h_2_w_2_cap_i_2_cap_j_2_co_2_fused in T.thread_binding(4, thread="threadIdx.x"):
                        for rh_0, rw_0, cap_k_0, rc_0 in T.grid(3, 3, 2, 8):
                            for ax0_ax1_ax2_ax3_ax4_ax5_fused in range(48):
                                with T.block("PadInput_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(18, n_0_h_0_w_0_cap_i_0_cap_j_0_co_0_fused // 64 * 4 + rh_0 + ax0_ax1_ax2_ax3_ax4_ax5_fused % 48 // 16)
                                    v2 = T.axis.spatial(18, T.Add(n_0_h_0_w_0_cap_i_0_cap_j_0_co_0_fused % 64 // 8 * 2 + rw_0, 0))
                                    v3 = T.axis.spatial(4, n_0_h_0_w_0_cap_i_0_cap_j_0_co_0_fused % 8 // 4 * 2 + ax0_ax1_ax2_ax3_ax4_ax5_fused % 16 // 8)
                                    v4 = T.axis.spatial(4, cap_k_0 * 2 + ax0_ax1_ax2_ax3_ax4_ax5_fused % 8 // 4)
                                    v5 = T.axis.spatial(32, rc_0 * 4 + ax0_ax1_ax2_ax3_ax4_ax5_fused % 4)
                                    T.reads(inputs[v0, v1 - 1, v2 - 1, v3, v4, v5])
                                    T.writes(PadInput_shared[v0, v1, v2, v3, v4, v5])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 2})
                                    PadInput_shared[v0, v1, v2, v3, v4, v5] = T.if_then_else(1 <= v1 and v1 < 17 and 1 <= v2 and v2 < 17, inputs[v0, v1 - 1, v2 - 1, v3, v4, v5], T.float32(0))
                            for ax0_ax1_ax2_ax3_ax4_ax5_fused in range(256):
                                with T.block("weight_shared"):
                                    v0, v1 = T.axis.remap("SS", [rh_0, rw_0])
                                    v2 = T.axis.spatial(4, cap_k_0 * 2 + ax0_ax1_ax2_ax3_ax4_ax5_fused // 128)
                                    v3 = T.axis.spatial(4, ax0_ax1_ax2_ax3_ax4_ax5_fused % 128 // 32)
                                    v4 = T.axis.spatial(32, rc_0 * 4 + ax0_ax1_ax2_ax3_ax4_ax5_fused % 32 // 8)
                                    v5 = T.axis.spatial(32, n_0_h_0_w_0_cap_i_0_cap_j_0_co_0_fused % 4 * 8 + ax0_ax1_ax2_ax3_ax4_ax5_fused % 8)
                                    T.reads(weight[v0, v1, v2, v3, v4, v5])
                                    T.writes(weight_shared[v0, v1, v2, v3, v4, v5])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 4})
                                    weight_shared[v0, v1, v2, v3, v4, v5] = weight[v0, v1, v2, v3, v4, v5]
                            for rh_1, rw_1, cap_k_1, rc_1, n_3, h_3, w_3, cap_i_3, cap_j_3, co_3, rh_2, rw_2, cap_k_2, rc_2, n_4, h_4, w_4, cap_i_4, cap_j_4, co_4 in T.grid(1, 1, 1, 4, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 8):
                                with T.block("conv2d_capsule_nhwijc"):
                                    v_n = T.axis.spatial(1, n_3 + n_4)
                                    v_h = T.axis.spatial(8, n_0_h_0_w_0_cap_i_0_cap_j_0_co_0_fused // 64 * 2 + h_3 + h_4)
                                    v_w = T.axis.spatial(8, n_0_h_0_w_0_cap_i_0_cap_j_0_co_0_fused % 64 // 8 + w_3 + w_4)
                                    v_cap_i = T.axis.spatial(4, n_0_h_0_w_0_cap_i_0_cap_j_0_co_0_fused % 8 // 4 * 2 + n_2_h_2_w_2_cap_i_2_cap_j_2_co_2_fused // 2 + cap_i_3 + cap_i_4)
                                    v_cap_j = T.axis.spatial(4, n_2_h_2_w_2_cap_i_2_cap_j_2_co_2_fused % 2 * 2 + cap_j_3 * 2 + cap_j_4)
                                    v_co = T.axis.spatial(32, n_0_h_0_w_0_cap_i_0_cap_j_0_co_0_fused % 4 * 8 + co_3 * 8 + co_4)
                                    v_rh = T.axis.reduce(3, rh_0 + rh_1 + rh_2)
                                    v_rw = T.axis.reduce(3, rw_0 + rw_1 + rw_2)
                                    v_cap_k = T.axis.reduce(4, cap_k_0 * 2 + cap_k_1 * 2 + cap_k_2)
                                    v_rc = T.axis.reduce(32, rc_0 * 4 + rc_1 + rc_2)
                                    T.reads(PadInput_shared[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_cap_i, v_cap_k, v_rc], weight_shared[v_rh, v_rw, v_cap_k, v_cap_j, v_rc, v_co])
                                    T.writes(conv2d_capsule_nhwijc_local[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co])
                                    T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"})
                                    with T.init():
                                        conv2d_capsule_nhwijc_local[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co] = T.float32(0)
                                    conv2d_capsule_nhwijc_local[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co] = conv2d_capsule_nhwijc_local[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co] + PadInput_shared[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_cap_i, v_cap_k, v_rc] * weight_shared[v_rh, v_rw, v_cap_k, v_cap_j, v_rc, v_co]
                        for ax0, ax1, ax2, ax3, ax4, ax5 in T.grid(1, 2, 1, 1, 2, 8):
                            with T.block("conv2d_capsule_nhwijc_local"):
                                v0 = T.axis.spatial(1, ax0)
                                v1 = T.axis.spatial(8, n_0_h_0_w_0_cap_i_0_cap_j_0_co_0_fused // 64 * 2 + ax1)
                                v2 = T.axis.spatial(8, n_0_h_0_w_0_cap_i_0_cap_j_0_co_0_fused % 64 // 8 + ax2)
                                v3 = T.axis.spatial(4, n_0_h_0_w_0_cap_i_0_cap_j_0_co_0_fused % 8 // 4 * 2 + n_2_h_2_w_2_cap_i_2_cap_j_2_co_2_fused // 2 + ax3)
                                v4 = T.axis.spatial(4, n_2_h_2_w_2_cap_i_2_cap_j_2_co_2_fused % 2 * 2 + ax4)
                                v5 = T.axis.spatial(32, n_0_h_0_w_0_cap_i_0_cap_j_0_co_0_fused % 4 * 8 + ax5)
                                T.reads(conv2d_capsule_nhwijc_local[v0, v1, v2, v3, v4, v5])
                                T.writes(conv2d_capsule_nhwijc[v0, v1, v2, v3, v4, v5])
                                conv2d_capsule_nhwijc[v0, v1, v2, v3, v4, v5] = conv2d_capsule_nhwijc_local[v0, v1, v2, v3, v4, v5]
    # fmt: on
    # Sampler decisions that deterministically reproduce the sketch above.
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1, 1]),
        ("SamplePerfectTile", [4, 1, 1, 2, 1]),
        ("SamplePerfectTile", [8, 1, 1, 1, 1]),
        ("SamplePerfectTile", [2, 1, 2, 1, 1]),
        ("SamplePerfectTile", [1, 1, 2, 1, 2]),
        ("SamplePerfectTile", [4, 1, 1, 1, 8]),
        ("SamplePerfectTile", [3, 1, 1]),
        ("SamplePerfectTile", [3, 1, 1]),
        ("SamplePerfectTile", [2, 1, 2]),
        ("SamplePerfectTile", [8, 4, 1]),
        ("SampleCategorical", 1),
        ("SampleCategorical", 3),
        ("SampleCategorical", 2),
    ]
    mod = create_te_workload("CAP", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[cap_0],
        expected_decisions=[decision_0],
    )
def test_cuda_dep():
    """Check the CUDA design space for the DEP (depthwise conv2d) workload.

    ``dep_0`` is the golden sketch: depthwise NHWC convolution with the
    padded input and filter staged in shared memory and a local
    accumulator written back at the end.
    """
    # fmt: off
    @T.prim_func
    def dep_0(placeholder: T.Buffer((1, 112, 112, 32), "float32"), placeholder_1: T.Buffer((1, 3, 3, 32), "float32"), depth_conv2d_nhwc: T.Buffer((1, 112, 112, 32), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 16})
            depth_conv2d_nhwc_local = T.alloc_buffer((1, 112, 112, 32), scope="local")
            PadInput_shared = T.alloc_buffer((1, 114, 114, 32), scope="shared")
            placeholder_shared = T.alloc_buffer((1, 3, 3, 32), scope="shared")
            for n_0_h_0_w_0_c_0_fused in T.thread_binding(1, thread="blockIdx.x"):
                for n_1_h_1_w_1_c_1_fused in T.thread_binding(8, thread="vthread.x"):
                    for n_2_h_2_w_2_c_2_fused in T.thread_binding(14, thread="threadIdx.x"):
                        for rh_0, rw_0 in T.grid(1, 1):
                            for ax0_ax1_ax2_ax3_fused in range(415872):
                                with T.block("PadInput_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(114, ax0_ax1_ax2_ax3_fused // 3648)
                                    v2 = T.axis.spatial(114, ax0_ax1_ax2_ax3_fused % 3648 // 32)
                                    v3 = T.axis.spatial(32, ax0_ax1_ax2_ax3_fused % 32)
                                    T.reads(placeholder[v0, v1 - 1, v2 - 1, v3])
                                    T.writes(PadInput_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 3})
                                    PadInput_shared[v0, v1, v2, v3] = T.if_then_else(1 <= v1 and v1 < 113 and 1 <= v2 and v2 < 113, placeholder[v0, v1 - 1, v2 - 1, v3], T.float32(0))
                            for ax0_ax1_ax2_ax3_fused in range(288):
                                with T.block("placeholder_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(3, ax0_ax1_ax2_ax3_fused // 96)
                                    v2 = T.axis.spatial(3, ax0_ax1_ax2_ax3_fused % 96 // 32)
                                    v3 = T.axis.spatial(32, ax0_ax1_ax2_ax3_fused % 32)
                                    T.reads(placeholder_1[v0, v1, v2, v3])
                                    T.writes(placeholder_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 3})
                                    placeholder_shared[v0, v1, v2, v3] = placeholder_1[v0, v1, v2, v3]
                            for rh_1, rw_1, n_3, h_3, w_3, c_3, rh_2, rw_2, n_4, h_4, w_4, c_4 in T.grid(3, 1, 1, 4, 16, 8, 1, 3, 1, 7, 1, 1):
                                with T.block("depth_conv2d_nhwc"):
                                    v_n = T.axis.spatial(1, n_3 + n_4)
                                    v_h = T.axis.spatial(112, n_1_h_1_w_1_c_1_fused // 2 * 28 + h_3 * 7 + h_4)
                                    v_w = T.axis.spatial(112, n_2_h_2_w_2_c_2_fused // 2 * 16 + w_3 + w_4)
                                    v_c = T.axis.spatial(32, n_1_h_1_w_1_c_1_fused % 2 * 16 + n_2_h_2_w_2_c_2_fused % 2 * 8 + c_3 + c_4)
                                    v_rh = T.axis.reduce(3, rh_0 * 3 + rh_1 + rh_2)
                                    v_rw = T.axis.reduce(3, rw_0 * 3 + rw_1 * 3 + rw_2)
                                    T.reads(PadInput_shared[v_n, v_h + v_rh, v_w + v_rw, v_c], placeholder_shared[0, v_rh, v_rw, v_c])
                                    T.writes(depth_conv2d_nhwc_local[v_n, v_h, v_w, v_c])
                                    T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"})
                                    with T.init():
                                        depth_conv2d_nhwc_local[v_n, v_h, v_w, v_c] = T.float32(0)
                                    depth_conv2d_nhwc_local[v_n, v_h, v_w, v_c] = depth_conv2d_nhwc_local[v_n, v_h, v_w, v_c] + PadInput_shared[v_n, v_h + v_rh, v_w + v_rw, v_c] * placeholder_shared[0, v_rh, v_rw, v_c]
                        for ax0, ax1, ax2, ax3 in T.grid(1, 28, 16, 8):
                            with T.block("depth_conv2d_nhwc_local"):
                                v0 = T.axis.spatial(1, ax0)
                                v1 = T.axis.spatial(112, n_1_h_1_w_1_c_1_fused // 2 * 28 + ax1)
                                v2 = T.axis.spatial(112, n_2_h_2_w_2_c_2_fused // 2 * 16 + ax2)
                                v3 = T.axis.spatial(32, n_1_h_1_w_1_c_1_fused % 2 * 16 + n_2_h_2_w_2_c_2_fused % 2 * 8 + ax3)
                                T.reads(depth_conv2d_nhwc_local[v0, v1, v2, v3])
                                T.writes(depth_conv2d_nhwc[v0, v1, v2, v3])
                                depth_conv2d_nhwc[v0, v1, v2, v3] = depth_conv2d_nhwc_local[v0, v1, v2, v3]
    # fmt: on
    # Sampler decisions that deterministically reproduce the sketch above.
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1, 1]),
        ("SamplePerfectTile", [1, 4, 1, 4, 7]),
        ("SamplePerfectTile", [1, 1, 7, 16, 1]),
        ("SamplePerfectTile", [1, 2, 2, 8, 1]),
        ("SamplePerfectTile", [1, 3, 1]),
        ("SamplePerfectTile", [1, 1, 3]),
        ("SampleCategorical", 2),
        ("SampleCategorical", 2),
        ("SampleCategorical", 1),
    ]
    mod = create_te_workload("DEP", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[dep_0],
        expected_decisions=[decision_0],
    )
def test_cuda_dil():
    """Check the CUDA design space for the DIL (dilated conv2d) workload.

    ``dil_0`` is the golden sketch; note the ``v_rh * 2`` / ``v_rw * 2``
    terms in the compute block, which encode the dilation factor of 2.
    """
    # fmt: off
    @T.prim_func
    def dil_0(inputs: T.Buffer((1, 224, 224, 3), "float32"), weight: T.Buffer((7, 7, 3, 64), "float32"), conv2d_nhwc: T.Buffer((1, 109, 109, 64), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 512})
            conv2d_nhwc_local = T.alloc_buffer((1, 109, 109, 64), scope="local")
            PadInput_shared = T.alloc_buffer((1, 230, 230, 3), scope="shared")
            weight_shared = T.alloc_buffer((7, 7, 3, 64), scope="shared")
            for n_0_h_0_w_0_co_0_fused in T.thread_binding(218, thread="blockIdx.x"):
                for n_1_h_1_w_1_co_1_fused in T.thread_binding(109, thread="vthread.x"):
                    for n_2_h_2_w_2_co_2_fused in T.thread_binding(1, thread="threadIdx.x"):
                        for rh_0, rw_0, rc_0 in T.grid(7, 7, 3):
                            for ax0_ax1_ax2_ax3_fused in range(217):
                                with T.block("PadInput_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(230, T.Add(n_0_h_0_w_0_co_0_fused // 2 * 2 + rh_0 * 2, 0))
                                    v2 = T.axis.spatial(230, rw_0 * 2 + ax0_ax1_ax2_ax3_fused % 217)
                                    v3 = T.axis.spatial(3, T.Add(rc_0, 0))
                                    T.reads(inputs[v0, v1 - 3, v2 - 3, v3])
                                    T.writes(PadInput_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 2})
                                    PadInput_shared[v0, v1, v2, v3] = T.if_then_else(3 <= v1 and v1 < 227 and 3 <= v2 and v2 < 227, inputs[v0, v1 - 3, v2 - 3, v3], T.float32(0))
                            for ax0_ax1_ax2_ax3_fused in range(32):
                                with T.block("weight_shared"):
                                    v0, v1, v2 = T.axis.remap("SSS", [rh_0, rw_0, rc_0])
                                    v3 = T.axis.spatial(64, n_0_h_0_w_0_co_0_fused % 2 * 32 + ax0_ax1_ax2_ax3_fused)
                                    T.reads(weight[v0, v1, v2, v3])
                                    T.writes(weight_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 4})
                                    weight_shared[v0, v1, v2, v3] = weight[v0, v1, v2, v3]
                            for rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3, rh_2, rw_2, rc_2, n_4, h_4, w_4, co_4 in T.grid(1, 1, 1, 1, 1, 1, 8, 1, 1, 1, 1, 1, 1, 4):
                                with T.block("conv2d_nhwc"):
                                    v_n = T.axis.spatial(1, n_3 + n_4)
                                    v_h = T.axis.spatial(109, n_0_h_0_w_0_co_0_fused // 2 + h_3 + h_4)
                                    v_w = T.axis.spatial(109, n_1_h_1_w_1_co_1_fused + w_3 + w_4)
                                    v_co = T.axis.spatial(64, n_0_h_0_w_0_co_0_fused % 2 * 32 + co_3 * 4 + co_4)
                                    v_rh = T.axis.reduce(7, rh_0 + rh_1 + rh_2)
                                    v_rw = T.axis.reduce(7, rw_0 + rw_1 + rw_2)
                                    v_rc = T.axis.reduce(3, rc_0 + rc_1 + rc_2)
                                    T.reads(PadInput_shared[v_n, v_h * 2 + v_rh * 2, v_w * 2 + v_rw * 2, v_co // 64 * 3 + v_rc], weight_shared[v_rh, v_rw, v_rc, v_co])
                                    T.writes(conv2d_nhwc_local[v_n, v_h, v_w, v_co])
                                    T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"})
                                    with T.init():
                                        conv2d_nhwc_local[v_n, v_h, v_w, v_co] = T.float32(0)
                                    conv2d_nhwc_local[v_n, v_h, v_w, v_co] = conv2d_nhwc_local[v_n, v_h, v_w, v_co] + PadInput_shared[v_n, v_h * 2 + v_rh * 2, v_w * 2 + v_rw * 2, v_co // 64 * 3 + v_rc] * weight_shared[v_rh, v_rw, v_rc, v_co]
                        for ax0, ax1, ax2, ax3 in T.grid(1, 1, 1, 32):
                            with T.block("conv2d_nhwc_local"):
                                v0 = T.axis.spatial(1, ax0)
                                v1 = T.axis.spatial(109, n_0_h_0_w_0_co_0_fused // 2 + ax1)
                                v2 = T.axis.spatial(109, n_1_h_1_w_1_co_1_fused + ax2)
                                v3 = T.axis.spatial(64, n_0_h_0_w_0_co_0_fused % 2 * 32 + ax3)
                                T.reads(conv2d_nhwc_local[v0, v1, v2, v3])
                                T.writes(conv2d_nhwc[v0, v1, v2, v3])
                                conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_local[v0, v1, v2, v3]
    # fmt: on
    # Sampler decisions that deterministically reproduce the sketch above.
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1, 1]),
        ("SamplePerfectTile", [109, 1, 1, 1, 1]),
        ("SamplePerfectTile", [1, 109, 1, 1, 1]),
        ("SamplePerfectTile", [2, 1, 1, 8, 4]),
        ("SamplePerfectTile", [7, 1, 1]),
        ("SamplePerfectTile", [7, 1, 1]),
        ("SamplePerfectTile", [3, 1, 1]),
        ("SampleCategorical", 1),
        ("SampleCategorical", 3),
        ("SampleCategorical", 3),
    ]
    mod = create_te_workload("DIL", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[dil_0],
        expected_decisions=[decision_0],
    )
def test_cuda_gmm():
    """Check the CUDA design space for the GMM (batched matmul) workload.

    ``gmm_0`` is the golden sketch: X and Y cached in shared memory, Z
    accumulated in a local buffer with the standard SSSRRSRS tiling.
    """
    # fmt: off
    @T.prim_func
    def gmm_0(X: T.Buffer((1, 128, 128), "float32"), Y: T.Buffer((1, 128, 128), "float32"), Z: T.Buffer((1, 128, 128), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 1024})
            Z_local = T.alloc_buffer((1, 128, 128), scope="local")
            X_shared = T.alloc_buffer((1, 128, 128), scope="shared")
            Y_shared = T.alloc_buffer((1, 128, 128), scope="shared")
            for b_0_i_0_j_0_fused in T.thread_binding(1, thread="blockIdx.x"):
                for b_1_i_1_j_1_fused in T.thread_binding(32, thread="vthread.x"):
                    for b_2_i_2_j_2_fused in T.thread_binding(2, thread="threadIdx.x"):
                        for k_0 in range(1):
                            for ax0_ax1_ax2_fused in range(16384):
                                with T.block("X_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(128, ax0_ax1_ax2_fused // 128)
                                    v2 = T.axis.spatial(128, ax0_ax1_ax2_fused % 128)
                                    T.reads(X[v0, v1, v2])
                                    T.writes(X_shared[v0, v1, v2])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 2})
                                    X_shared[v0, v1, v2] = X[v0, v1, v2]
                            for ax0_ax1_ax2_fused in range(16384):
                                with T.block("Y_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(128, ax0_ax1_ax2_fused // 128)
                                    v2 = T.axis.spatial(128, ax0_ax1_ax2_fused % 128)
                                    T.reads(Y[v0, v1, v2])
                                    T.writes(Y_shared[v0, v1, v2])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 1})
                                    Y_shared[v0, v1, v2] = Y[v0, v1, v2]
                            for k_1, b_3, i_3, j_3, k_2, b_4, i_4, j_4 in T.grid(32, 1, 2, 64, 4, 1, 2, 1):
                                with T.block("Z"):
                                    v_b = T.axis.spatial(1, b_3 + b_4)
                                    v_i = T.axis.spatial(128, b_1_i_1_j_1_fused * 4 + i_3 * 2 + i_4)
                                    v_j = T.axis.spatial(128, b_2_i_2_j_2_fused * 64 + j_3 + j_4)
                                    v_k = T.axis.reduce(128, k_0 * 128 + k_1 * 4 + k_2)
                                    T.reads(X_shared[v_b, v_i, v_k], Y_shared[v_b, v_k, v_j])
                                    T.writes(Z_local[v_b, v_i, v_j])
                                    T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"})
                                    with T.init():
                                        Z_local[v_b, v_i, v_j] = T.float32(0)
                                    Z_local[v_b, v_i, v_j] = Z_local[v_b, v_i, v_j] + X_shared[v_b, v_i, v_k] * Y_shared[v_b, v_k, v_j]
                        for ax0, ax1, ax2 in T.grid(1, 4, 64):
                            with T.block("Z_local"):
                                v0 = T.axis.spatial(1, ax0)
                                v1 = T.axis.spatial(128, b_1_i_1_j_1_fused * 4 + ax1)
                                v2 = T.axis.spatial(128, b_2_i_2_j_2_fused * 64 + ax2)
                                T.reads(Z_local[v0, v1, v2])
                                T.writes(Z[v0, v1, v2])
                                Z[v0, v1, v2] = Z_local[v0, v1, v2]
    # fmt: on
    # Sampler decisions that deterministically reproduce the sketch above.
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1, 1]),
        ("SamplePerfectTile", [1, 32, 1, 2, 2]),
        ("SamplePerfectTile", [1, 1, 2, 64, 1]),
        ("SamplePerfectTile", [1, 32, 4]),
        ("SampleCategorical", 1),
        ("SampleCategorical", 0),
        ("SampleCategorical", 4),
    ]
    mod = create_te_workload("GMM", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[gmm_0],
        expected_decisions=[decision_0],
    )
def test_cuda_grp():
    """Check the CUDA design space for the GRP (grouped conv2d) workload.

    ``grp_0`` is the golden sketch; the ``v_co // 32 * 16 + v_rc`` index
    into the padded input reflects the channel grouping.
    """
    # fmt: off
    @T.prim_func
    def grp_0(inputs: T.Buffer((1, 56, 56, 64), "float32"), weight: T.Buffer((3, 3, 16, 128), "float32"), conv2d_nhwc: T.Buffer((1, 28, 28, 128), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 16})
            conv2d_nhwc_local = T.alloc_buffer((1, 28, 28, 128), scope="local")
            PadInput_shared = T.alloc_buffer((1, 58, 58, 64), scope="shared")
            weight_shared = T.alloc_buffer((3, 3, 16, 128), scope="shared")
            for n_0_h_0_w_0_co_0_fused in T.thread_binding(2, thread="blockIdx.x"):
                for n_1_h_1_w_1_co_1_fused in T.thread_binding(1, thread="vthread.x"):
                    for n_2_h_2_w_2_co_2_fused in T.thread_binding(112, thread="threadIdx.x"):
                        for rh_0, rw_0, rc_0 in T.grid(3, 3, 1):
                            for ax0_ax1_ax2_ax3_fused in range(95040):
                                with T.block("PadInput_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(58, n_0_h_0_w_0_co_0_fused * 28 + rh_0 + ax0_ax1_ax2_ax3_fused % 95040 // 3520)
                                    v2 = T.axis.spatial(58, rw_0 + ax0_ax1_ax2_ax3_fused % 3520 // 64)
                                    v3 = T.axis.spatial(64, ax0_ax1_ax2_ax3_fused % 64)
                                    T.reads(inputs[v0, v1 - 1, v2 - 1, v3])
                                    T.writes(PadInput_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 2})
                                    PadInput_shared[v0, v1, v2, v3] = T.if_then_else(1 <= v1 and v1 < 57 and 1 <= v2 and v2 < 57, inputs[v0, v1 - 1, v2 - 1, v3], T.float32(0))
                            for ax0_ax1_ax2_ax3_fused in range(2048):
                                with T.block("weight_shared"):
                                    v0, v1 = T.axis.remap("SS", [rh_0, rw_0])
                                    v2 = T.axis.spatial(16, ax0_ax1_ax2_ax3_fused // 128)
                                    v3 = T.axis.spatial(128, ax0_ax1_ax2_ax3_fused % 128)
                                    T.reads(weight[v0, v1, v2, v3])
                                    T.writes(weight_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 1})
                                    weight_shared[v0, v1, v2, v3] = weight[v0, v1, v2, v3]
                            for rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3, rh_2, rw_2, rc_2, n_4, h_4, w_4, co_4 in T.grid(1, 1, 2, 1, 2, 1, 2, 1, 1, 8, 1, 7, 4, 4):
                                with T.block("conv2d_nhwc"):
                                    v_n = T.axis.spatial(1, n_3 + n_4)
                                    v_h = T.axis.spatial(28, n_0_h_0_w_0_co_0_fused * 14 + h_3 * 7 + h_4)
                                    v_w = T.axis.spatial(28, n_2_h_2_w_2_co_2_fused // 16 * 4 + w_3 * 4 + w_4)
                                    v_co = T.axis.spatial(128, n_2_h_2_w_2_co_2_fused % 16 * 8 + co_3 * 4 + co_4)
                                    v_rh = T.axis.reduce(3, rh_0 + rh_1 + rh_2)
                                    v_rw = T.axis.reduce(3, rw_0 + rw_1 + rw_2)
                                    v_rc = T.axis.reduce(16, rc_0 * 16 + rc_1 * 8 + rc_2)
                                    T.reads(PadInput_shared[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 32 * 16 + v_rc], weight_shared[v_rh, v_rw, v_rc, v_co])
                                    T.writes(conv2d_nhwc_local[v_n, v_h, v_w, v_co])
                                    T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"})
                                    with T.init():
                                        conv2d_nhwc_local[v_n, v_h, v_w, v_co] = T.float32(0)
                                    conv2d_nhwc_local[v_n, v_h, v_w, v_co] = conv2d_nhwc_local[v_n, v_h, v_w, v_co] + PadInput_shared[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 32 * 16 + v_rc] * weight_shared[v_rh, v_rw, v_rc, v_co]
                        for ax0, ax1, ax2, ax3 in T.grid(1, 14, 4, 8):
                            with T.block("conv2d_nhwc_local"):
                                v0 = T.axis.spatial(1, ax0)
                                v1 = T.axis.spatial(28, n_0_h_0_w_0_co_0_fused * 14 + ax1)
                                v2 = T.axis.spatial(28, n_2_h_2_w_2_co_2_fused // 16 * 4 + ax2)
                                v3 = T.axis.spatial(128, n_2_h_2_w_2_co_2_fused % 16 * 8 + ax3)
                                T.reads(conv2d_nhwc_local[v0, v1, v2, v3])
                                T.writes(conv2d_nhwc[v0, v1, v2, v3])
                                conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_local[v0, v1, v2, v3]
    # fmt: on
    # Sampler decisions that deterministically reproduce the sketch above.
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1, 1]),
        ("SamplePerfectTile", [2, 1, 1, 2, 7]),
        ("SamplePerfectTile", [1, 1, 7, 1, 4]),
        ("SamplePerfectTile", [1, 1, 16, 2, 4]),
        ("SamplePerfectTile", [3, 1, 1]),
        ("SamplePerfectTile", [3, 1, 1]),
        ("SamplePerfectTile", [1, 2, 8]),
        ("SampleCategorical", 1),
        ("SampleCategorical", 0),
        ("SampleCategorical", 1),
    ]
    mod = create_te_workload("GRP", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[grp_0],
        expected_decisions=[decision_0],
    )
def test_cuda_t2d():
    """CUDA sketch generation for transposed 2-D convolution (T2D workload).

    A single design space ``t2d_0`` is expected, produced with the sampling
    decisions in ``decision_0``.  ``debug_mask=0`` disables schedule debug
    verification in ``check_sketches``.
    """
    # fmt: off
    @T.prim_func
    def t2d_0(inputs: T.Buffer((1, 4, 4, 512), "float32"), weight: T.Buffer((4, 4, 512, 256), "float32"), conv2d_transpose_nhwc: T.Buffer((1, 8, 8, 256), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 64})
            conv2d_transpose_nhwc_local = T.alloc_buffer((1, 8, 8, 256), scope="local")
            PadInput_shared = T.alloc_buffer((1, 6, 6, 512), scope="shared")
            weight_shared = T.alloc_buffer((4, 4, 512, 256), scope="shared")
            for n_0_h_0_w_0_co_0_fused in T.thread_binding(256, thread="blockIdx.x"):
                for n_1_h_1_w_1_co_1_fused in T.thread_binding(2, thread="vthread.x"):
                    for n_2_h_2_w_2_co_2_fused in T.thread_binding(1, thread="threadIdx.x"):
                        for rh_0, rw_0, rc_0 in T.grid(4, 1, 16):
                            for ax0_ax1_ax2_ax3_fused in range(rh_0 % 2 * 96 + 96):
                                with T.block("PadInput_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(6, n_0_h_0_w_0_co_0_fused // 64 + rh_0 // 2 + ax0_ax1_ax2_ax3_fused % (96 * (rh_0 % 2 + 1)) // 96)
                                    v2 = T.axis.spatial(6, n_0_h_0_w_0_co_0_fused % 64 // 16 + ax0_ax1_ax2_ax3_fused % 96 // 32)
                                    v3 = T.axis.spatial(512, rc_0 * 32 + ax0_ax1_ax2_ax3_fused % 32)
                                    T.reads(inputs[v0, v1 - 1, v2 - 1, v3])
                                    T.writes(PadInput_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 2})
                                    PadInput_shared[v0, v1, v2, v3] = T.if_then_else(1 <= v1 and v1 < 5 and 1 <= v2 and v2 < 5, inputs[v0, v1 - 1, v2 - 1, v3], T.float32(0))
                            for ax0_ax1_ax2_ax3_fused in range(2048):
                                with T.block("weight_shared"):
                                    v0 = T.axis.spatial(4, rh_0 * -1 + 3)
                                    v1 = T.axis.spatial(4, ax0_ax1_ax2_ax3_fused // 512)
                                    v2 = T.axis.spatial(512, rc_0 * 32 + ax0_ax1_ax2_ax3_fused % 512 // 16)
                                    v3 = T.axis.spatial(256, n_0_h_0_w_0_co_0_fused % 16 * 16 + ax0_ax1_ax2_ax3_fused % 16)
                                    T.reads(weight[v0, v1, v2, v3])
                                    T.writes(weight_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 4})
                                    weight_shared[v0, v1, v2, v3] = weight[v0, v1, v2, v3]
                            for rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3, rh_2, rw_2, rc_2, n_4, h_4, w_4, co_4 in T.grid(1, 1, 4, 1, 2, 1, 8, 1, 4, 8, 1, 1, 2, 1):
                                with T.block("conv2d_transpose_nhwc"):
                                    v_n = T.axis.spatial(1, n_3 + n_4)
                                    v_h = T.axis.spatial(8, n_0_h_0_w_0_co_0_fused // 64 * 2 + h_3 + h_4)
                                    v_w = T.axis.spatial(8, n_0_h_0_w_0_co_0_fused % 64 // 16 * 2 + w_3 * 2 + w_4)
                                    v_co = T.axis.spatial(256, n_0_h_0_w_0_co_0_fused % 16 * 16 + n_1_h_1_w_1_co_1_fused * 8 + co_3 + co_4)
                                    v_rh = T.axis.reduce(4, rh_0 + rh_1 + rh_2)
                                    v_rw = T.axis.reduce(4, rw_0 * 4 + rw_1 * 4 + rw_2)
                                    v_rc = T.axis.reduce(512, rc_0 * 32 + rc_1 * 8 + rc_2)
                                    T.reads(PadInput_shared[v_n, (v_h + v_rh) // 2, (v_w + v_rw) // 2, v_rc], weight_shared[3 - v_rh, 3 - v_rw, v_rc, v_co])
                                    T.writes(conv2d_transpose_nhwc_local[v_n, v_h, v_w, v_co])
                                    T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"})
                                    with T.init():
                                        conv2d_transpose_nhwc_local[v_n, v_h, v_w, v_co] = T.float32(0)
                                    conv2d_transpose_nhwc_local[v_n, v_h, v_w, v_co] = conv2d_transpose_nhwc_local[v_n, v_h, v_w, v_co] + T.if_then_else((v_h + v_rh) % 2 == 0 and (v_w + v_rw) % 2 == 0, PadInput_shared[v_n, (v_h + v_rh) // 2, (v_w + v_rw) // 2, v_rc], T.float32(0)) * weight_shared[3 - v_rh, 3 - v_rw, v_rc, v_co]
                        for ax0, ax1, ax2, ax3 in T.grid(1, 2, 2, 8):
                            with T.block("conv2d_transpose_nhwc_local"):
                                v0 = T.axis.spatial(1, ax0)
                                v1 = T.axis.spatial(8, n_0_h_0_w_0_co_0_fused // 64 * 2 + ax1)
                                v2 = T.axis.spatial(8, n_0_h_0_w_0_co_0_fused % 64 // 16 * 2 + ax2)
                                v3 = T.axis.spatial(256, n_0_h_0_w_0_co_0_fused % 16 * 16 + n_1_h_1_w_1_co_1_fused * 8 + ax3)
                                T.reads(conv2d_transpose_nhwc_local[v0, v1, v2, v3])
                                T.writes(conv2d_transpose_nhwc[v0, v1, v2, v3])
                                conv2d_transpose_nhwc[v0, v1, v2, v3] = conv2d_transpose_nhwc_local[v0, v1, v2, v3]
    # fmt: on
    # Sampling decisions that must reproduce the sketch above.
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1, 1]),
        ("SamplePerfectTile", [4, 1, 1, 2, 1]),
        ("SamplePerfectTile", [4, 1, 1, 1, 2]),
        ("SamplePerfectTile", [16, 2, 1, 8, 1]),
        ("SamplePerfectTile", [4, 1, 1]),
        ("SamplePerfectTile", [1, 1, 4]),
        ("SamplePerfectTile", [16, 4, 8]),
        ("SampleCategorical", 1),
        ("SampleCategorical", 3),
        ("SampleCategorical", 2),
    ]
    mod = create_te_workload("T2D", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[t2d_0],
        expected_decisions=[decision_0],
        debug_mask=0,
    )
def test_cuda_nrm():
    """CUDA sketch generation for the vector-norm (NRM) reduction workload.

    Two design spaces are expected: ``nrm_0`` keeps the reduction on a single
    thread, while ``nrm_1`` performs a cross-thread reduction into a shared
    buffer followed by a predicated write-back.
    """
    # fmt: off
    @T.prim_func
    def nrm_0(A: T.Buffer((1, 256, 256), "float32"), D: T.Buffer(1, "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 512})
            C = T.alloc_buffer((1,))
            for b_fused_0 in T.thread_binding(1, thread="blockIdx.x"):
                for b_fused_1 in T.thread_binding(1, thread="threadIdx.x"):
                    for i, j in T.grid(256, 256):
                        with T.block("C"):
                            v_b = T.axis.spatial(1, 0)
                            v_i, v_j = T.axis.remap("RR", [i, j])
                            T.reads(A[v_b, v_i, v_j])
                            T.writes(C[v_b])
                            with T.init():
                                C[v_b] = T.float32(0)
                            C[v_b] = C[v_b] + A[v_b, v_i, v_j] * A[v_b, v_i, v_j]
            for b_fused_0 in T.thread_binding(1, thread="blockIdx.x"):
                for b_fused_1 in T.thread_binding(1, thread="threadIdx.x"):
                    with T.block("D"):
                        v_b = T.axis.spatial(1, 0)
                        T.reads(C[v_b])
                        T.writes(D[v_b])
                        D[v_b] = T.sqrt(C[v_b])
    @T.prim_func
    def nrm_1(A: T.Buffer((1, 256, 256), "float32"), D: T.Buffer(1, "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 1024})
            C_shared = T.alloc_buffer((1,), scope="shared")
            for b_0_fused in T.thread_binding(1, thread="blockIdx.x"):
                for ax0, ax1_ax2_fused_0 in T.grid(1, 512):
                    for ax1_ax2_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
                        with T.block("C"):
                            v_b = T.axis.spatial(1, ax0)
                            v_i = T.axis.reduce(256, (ax1_ax2_fused_0 * 128 + ax1_ax2_fused_1) // 256)
                            v_j = T.axis.reduce(256, (ax1_ax2_fused_0 * 128 + ax1_ax2_fused_1) % 256)
                            T.reads(A[v_b, v_i, v_j])
                            T.writes(C_shared[v_b])
                            with T.init():
                                C_shared[v_b] = T.float32(0)
                            C_shared[v_b] = C_shared[v_b] + A[v_b, v_i, v_j] * A[v_b, v_i, v_j]
                for b_1 in T.thread_binding(128, thread="threadIdx.x"):
                    with T.block("D"):
                        v_b = T.axis.spatial(1, b_1)
                        T.where(T.Mul(0, 128) + b_1 < 1)
                        T.reads(C_shared[v_b])
                        T.writes(D[v_b])
                        D[v_b] = T.sqrt(C_shared[v_b])
    # fmt: on
    decision_0 = [
        ("SampleCategorical", 3),
    ]
    decision_1 = [
        ("SampleCategorical", 5),
        ("SampleCategorical", 4),
    ]
    mod = create_te_workload("NRM", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[nrm_0, nrm_1],
        expected_decisions=[decision_0, decision_1],
    )
def test_cuda_sfm():
    """CUDA sketch generation for softmax (SFM).

    Four design spaces are expected, covering the combinations of whether the
    max-element and exp-sum reductions use cross-thread reduction into shared
    memory (``sfm_2``/``sfm_3``) or stay thread-local (``sfm_0``/``sfm_1``).
    """
    # fmt: off
    @T.prim_func
    def sfm_0(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 0})
            T_softmax_maxelem = T.alloc_buffer((256,))
            T_softmax_expsum = T.alloc_buffer((256,))
            for i0_fused_0 in T.thread_binding(2, thread="blockIdx.x"):
                for i0_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
                    for k in range(256):
                        with T.block("T_softmax_maxelem"):
                            v_i0 = T.axis.spatial(256, i0_fused_0 * 128 + i0_fused_1)
                            v_k = T.axis.reduce(256, k)
                            T.reads(A[v_i0, v_k])
                            T.writes(T_softmax_maxelem[v_i0])
                            with T.init():
                                T_softmax_maxelem[v_i0] = T.float32(-3.4028234663852886e+38)
                            T_softmax_maxelem[v_i0] = T.max(T_softmax_maxelem[v_i0], A[v_i0, v_k])
            for i0_fused_0 in T.thread_binding(1, thread="blockIdx.x"):
                for i0_fused_1 in T.thread_binding(256, thread="threadIdx.x"):
                    for k in range(256):
                        with T.block("T_softmax_expsum"):
                            v_i0 = T.axis.spatial(256, i0_fused_0 * 256 + i0_fused_1)
                            v_k = T.axis.reduce(256, k)
                            T.reads(A[v_i0, v_k], T_softmax_maxelem[v_i0])
                            T.writes(T_softmax_expsum[v_i0])
                            with T.init():
                                T_softmax_expsum[v_i0] = T.float32(0)
                            T_softmax_expsum[v_i0] = T_softmax_expsum[v_i0] + T.exp(A[v_i0, v_k] - T_softmax_maxelem[v_i0])
            for i0_i1_fused_0 in T.thread_binding(1024, thread="blockIdx.x"):
                for i0_i1_fused_1 in T.thread_binding(64, thread="threadIdx.x"):
                    with T.block("T_softmax_norm"):
                        v_i0 = T.axis.spatial(256, (i0_i1_fused_0 * 64 + i0_i1_fused_1) // 256)
                        v_i1 = T.axis.spatial(256, (i0_i1_fused_0 * 64 + i0_i1_fused_1) % 256)
                        T.reads(A[v_i0, v_i1], T_softmax_maxelem[v_i0], T_softmax_expsum[v_i0])
                        T.writes(T_softmax_norm[v_i0, v_i1])
                        T.block_attr({"axis": 1})
                        T_softmax_norm[v_i0, v_i1] = T.exp(A[v_i0, v_i1] - T_softmax_maxelem[v_i0]) / T_softmax_expsum[v_i0]
    @T.prim_func
    def sfm_1(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 16})
            T_softmax_maxelem = T.alloc_buffer((256,))
            T_softmax_expsum = T.alloc_buffer((256,))
            for i0_fused in T.thread_binding(256, thread="blockIdx.x"):
                for k_0 in range(64):
                    for k_1 in T.thread_binding(4, thread="threadIdx.x"):
                        with T.block("T_softmax_maxelem"):
                            v_i0 = T.axis.spatial(256, i0_fused)
                            v_k = T.axis.reduce(256, k_0 * 4 + k_1)
                            T.reads(A[v_i0, v_k])
                            T.writes(T_softmax_maxelem[v_i0])
                            with T.init():
                                T_softmax_maxelem[v_i0] = T.float32(-3.4028234663852886e+38)
                            T_softmax_maxelem[v_i0] = T.max(T_softmax_maxelem[v_i0], A[v_i0, v_k])
            for i0_fused_0 in T.thread_binding(4, thread="blockIdx.x"):
                for i0_fused_1 in T.thread_binding(64, thread="threadIdx.x"):
                    for k in range(256):
                        with T.block("T_softmax_expsum"):
                            v_i0 = T.axis.spatial(256, i0_fused_0 * 64 + i0_fused_1)
                            v_k = T.axis.reduce(256, k)
                            T.reads(A[v_i0, v_k], T_softmax_maxelem[v_i0])
                            T.writes(T_softmax_expsum[v_i0])
                            with T.init():
                                T_softmax_expsum[v_i0] = T.float32(0)
                            T_softmax_expsum[v_i0] = T_softmax_expsum[v_i0] + T.exp(A[v_i0, v_k] - T_softmax_maxelem[v_i0])
            for i0_i1_fused_0 in T.thread_binding(256, thread="blockIdx.x"):
                for i0_i1_fused_1 in T.thread_binding(256, thread="threadIdx.x"):
                    with T.block("T_softmax_norm"):
                        v_i0 = T.axis.spatial(256, (i0_i1_fused_0 * 256 + i0_i1_fused_1) // 256)
                        v_i1 = T.axis.spatial(256, (i0_i1_fused_0 * 256 + i0_i1_fused_1) % 256)
                        T.reads(A[v_i0, v_i1], T_softmax_maxelem[v_i0], T_softmax_expsum[v_i0])
                        T.writes(T_softmax_norm[v_i0, v_i1])
                        T.block_attr({"axis": 1})
                        T_softmax_norm[v_i0, v_i1] = T.exp(A[v_i0, v_i1] - T_softmax_maxelem[v_i0]) / T_softmax_expsum[v_i0]
    @T.prim_func
    def sfm_2(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 512})
            T_softmax_maxelem = T.alloc_buffer((256,))
            T_softmax_expsum_shared = T.alloc_buffer((256,), scope="shared")
            for i0_fused_0 in T.thread_binding(8, thread="blockIdx.x"):
                for i0_fused_1 in T.thread_binding(32, thread="threadIdx.x"):
                    for k in range(256):
                        with T.block("T_softmax_maxelem"):
                            v_i0 = T.axis.spatial(256, i0_fused_0 * 32 + i0_fused_1)
                            v_k = T.axis.reduce(256, k)
                            T.reads(A[v_i0, v_k])
                            T.writes(T_softmax_maxelem[v_i0])
                            with T.init():
                                T_softmax_maxelem[v_i0] = T.float32(-3.4028234663852886e+38)
                            T_softmax_maxelem[v_i0] = T.max(T_softmax_maxelem[v_i0], A[v_i0, v_k])
            for i0_fused in T.thread_binding(256, thread="blockIdx.x"):
                for ax0, ax1_0 in T.grid(1, 1):
                    for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
                        with T.block("T_softmax_expsum"):
                            v_i0 = T.axis.spatial(256, i0_fused + ax0)
                            v_k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
                            T.where(ax1_0 * 512 + ax1_1 < 256)
                            T.reads(A[v_i0, v_k], T_softmax_maxelem[v_i0])
                            T.writes(T_softmax_expsum_shared[v_i0])
                            with T.init():
                                T_softmax_expsum_shared[v_i0] = T.float32(0)
                            T_softmax_expsum_shared[v_i0] = T_softmax_expsum_shared[v_i0] + T.exp(A[v_i0, v_k] - T_softmax_maxelem[v_i0])
                for i1_0 in range(1):
                    for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
                        with T.block("T_softmax_norm"):
                            v_i0 = T.axis.spatial(256, i0_fused)
                            v_i1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
                            T.where(i1_0 * 512 + i1_1 < 256)
                            T.reads(A[v_i0, v_i1], T_softmax_maxelem[v_i0], T_softmax_expsum_shared[v_i0])
                            T.writes(T_softmax_norm[v_i0, v_i1])
                            T.block_attr({"axis": 1})
                            T_softmax_norm[v_i0, v_i1] = T.exp(A[v_i0, v_i1] - T_softmax_maxelem[v_i0]) / T_softmax_expsum_shared[v_i0]
    @T.prim_func
    def sfm_3(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 0})
            T_softmax_maxelem_shared = T.alloc_buffer((256,), scope="shared")
            T_softmax_expsum_shared = T.alloc_buffer((256,), scope="shared")
            for i0_fused in T.thread_binding(256, thread="blockIdx.x"):
                for ax0, ax1_0 in T.grid(1, 1):
                    for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
                        with T.block("T_softmax_maxelem"):
                            v_i0 = T.axis.spatial(256, i0_fused + ax0)
                            v_k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
                            T.where(ax1_0 * 512 + ax1_1 < 256)
                            T.reads(A[v_i0, v_k])
                            T.writes(T_softmax_maxelem_shared[v_i0])
                            with T.init():
                                T_softmax_maxelem_shared[v_i0] = T.float32(-3.4028234663852886e+38)
                            T_softmax_maxelem_shared[v_i0] = T.max(T_softmax_maxelem_shared[v_i0], A[v_i0, v_k])
                for ax0, ax1_0 in T.grid(1, 1):
                    for ax1_1 in T.thread_binding(512, thread="threadIdx.x"):
                        with T.block("T_softmax_expsum"):
                            v_i0 = T.axis.spatial(256, i0_fused + ax0)
                            v_k = T.axis.reduce(256, ax1_0 * 512 + ax1_1)
                            T.where(ax1_0 * 512 + ax1_1 < 256)
                            T.reads(A[v_i0, v_k], T_softmax_maxelem_shared[v_i0])
                            T.writes(T_softmax_expsum_shared[v_i0])
                            with T.init():
                                T_softmax_expsum_shared[v_i0] = T.float32(0)
                            T_softmax_expsum_shared[v_i0] = T_softmax_expsum_shared[v_i0] + T.exp(A[v_i0, v_k] - T_softmax_maxelem_shared[v_i0])
                for i1_0 in range(1):
                    for i1_1 in T.thread_binding(512, thread="threadIdx.x"):
                        with T.block("T_softmax_norm"):
                            v_i0 = T.axis.spatial(256, i0_fused)
                            v_i1 = T.axis.spatial(256, i1_0 * 512 + i1_1)
                            T.where(i1_0 * 512 + i1_1 < 256)
                            T.reads(A[v_i0, v_i1], T_softmax_maxelem_shared[v_i0], T_softmax_expsum_shared[v_i0])
                            T.writes(T_softmax_norm[v_i0, v_i1])
                            T.block_attr({"axis": 1})
                            T_softmax_norm[v_i0, v_i1] = T.exp(A[v_i0, v_i1] - T_softmax_maxelem_shared[v_i0]) / T_softmax_expsum_shared[v_i0]
    # fmt: on
    decision_0 = [
        ("SampleCategorical", 0),
        ("SampleCategorical", 1),
        ("SampleCategorical", 3),
        ("SampleCategorical", 2),
    ]
    decision_1 = [
        ("SampleCategorical", 0),
        ("SampleCategorical", 1),
        ("SampleCategorical", 3),
        ("SampleCategorical", 1),
    ]
    decision_2 = [
        ("SampleCategorical", 7),
        ("SampleCategorical", 3),
        ("SampleCategorical", 0),
    ]
    decision_3 = [
        ("SampleCategorical", 7),
        ("SampleCategorical", 0),
        ("SampleCategorical", 0),
    ]
    mod = create_te_workload("SFM", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[sfm_0, sfm_1, sfm_2, sfm_3],
        expected_decisions=[decision_0, decision_1, decision_2, decision_3],
    )
def test_cuda_cbr():
    """CUDA sketch generation for conv2d + bias + batch-norm + ReLU (CBR).

    A single design space ``cbr_0`` is expected, with the epilogue
    (bias/scale/offset/ReLU) fused into the local-buffer write-back block.
    """
    # fmt: off
    @T.prim_func
    def cbr_0(data: T.Buffer((1, 224, 224, 3), "float32"), kernel: T.Buffer((7, 7, 3, 64), "float32"), bias: T.Buffer(64, "float32"), bn_offset: T.Buffer(64, "float32"), bn_scale: T.Buffer(64, "float32"), compute: T.Buffer((1, 112, 112, 64), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 512})
            Conv2dOutput_local = T.alloc_buffer((1, 112, 112, 64), scope="local")
            PaddedInput_shared = T.alloc_buffer((1, 230, 230, 3), scope="shared")
            kernel_shared = T.alloc_buffer((7, 7, 3, 64), scope="shared")
            for nn_0_yy_0_xx_0_ff_0_fused in T.thread_binding(14, thread="blockIdx.x"):
                for nn_1_yy_1_xx_1_ff_1_fused in T.thread_binding(4, thread="vthread.x"):
                    for nn_2_yy_2_xx_2_ff_2_fused in T.thread_binding(128, thread="threadIdx.x"):
                        for ry_0, rx_0, rc_0 in T.grid(7, 1, 3):
                            for ax0_ax1_ax2_ax3_fused in range(8251):
                                with T.block("PaddedInput_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(230, ry_0 + ax0_ax1_ax2_ax3_fused // 37)
                                    v2 = T.axis.spatial(230, nn_0_yy_0_xx_0_ff_0_fused // 2 * 32 + ax0_ax1_ax2_ax3_fused % 37)
                                    v3 = T.axis.spatial(3, rc_0)
                                    T.reads(data[v0, v1 - 3, v2 - 3, v3])
                                    T.writes(PaddedInput_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 1})
                                    PaddedInput_shared[v0, v1, v2, v3] = T.if_then_else(3 <= v1 and v1 < 227 and 3 <= v2 and v2 < 227, data[v0, v1 - 3, v2 - 3, v3], T.float32(0))
                            for ax0_ax1_ax2_ax3_fused in range(224):
                                with T.block("kernel_shared"):
                                    v0 = T.axis.spatial(7, ry_0)
                                    v1 = T.axis.spatial(7, ax0_ax1_ax2_ax3_fused // 32)
                                    v2 = T.axis.spatial(3, rc_0)
                                    v3 = T.axis.spatial(64, nn_0_yy_0_xx_0_ff_0_fused % 2 * 32 + ax0_ax1_ax2_ax3_fused % 32)
                                    T.reads(kernel[v0, v1, v2, v3])
                                    T.writes(kernel_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 1})
                                    kernel_shared[v0, v1, v2, v3] = kernel[v0, v1, v2, v3]
                            for ry_1, rx_1, rc_1, nn_3, yy_3, xx_3, ff_3, ry_2, rx_2, rc_2, nn_4, yy_4, xx_4, ff_4 in T.grid(1, 1, 1, 1, 1, 1, 2, 1, 7, 1, 1, 7, 1, 8):
                                with T.block("Conv2dOutput"):
                                    v_nn = T.axis.spatial(1, nn_3 + nn_4)
                                    v_yy = T.axis.spatial(112, nn_1_yy_1_xx_1_ff_1_fused // 2 * 56 + nn_2_yy_2_xx_2_ff_2_fused // 16 * 7 + yy_3 * 7 + yy_4)
                                    v_xx = T.axis.spatial(112, nn_0_yy_0_xx_0_ff_0_fused // 2 * 16 + nn_2_yy_2_xx_2_ff_2_fused % 16 + xx_3 + xx_4)
                                    v_ff = T.axis.spatial(64, nn_0_yy_0_xx_0_ff_0_fused % 2 * 32 + nn_1_yy_1_xx_1_ff_1_fused % 2 * 16 + ff_3 * 8 + ff_4)
                                    v_ry = T.axis.reduce(7, ry_0 + ry_1 + ry_2)
                                    v_rx = T.axis.reduce(7, rx_0 * 7 + rx_1 * 7 + rx_2)
                                    v_rc = T.axis.reduce(3, rc_0 + rc_1 + rc_2)
                                    T.reads(PaddedInput_shared[v_nn, v_yy * 2 + v_ry, v_xx * 2 + v_rx, v_rc], kernel_shared[v_ry, v_rx, v_rc, v_ff])
                                    T.writes(Conv2dOutput_local[v_nn, v_yy, v_xx, v_ff])
                                    T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"})
                                    with T.init():
                                        Conv2dOutput_local[v_nn, v_yy, v_xx, v_ff] = T.float32(0)
                                    Conv2dOutput_local[v_nn, v_yy, v_xx, v_ff] = Conv2dOutput_local[v_nn, v_yy, v_xx, v_ff] + PaddedInput_shared[v_nn, v_yy * 2 + v_ry, v_xx * 2 + v_rx, v_rc] * kernel_shared[v_ry, v_rx, v_rc, v_ff]
                        for ax0, ax1, ax2, ax3 in T.grid(1, 7, 1, 16):
                            with T.block("Conv2dOutput_local"):
                                v0 = T.axis.spatial(1, ax0)
                                v1 = T.axis.spatial(112, nn_1_yy_1_xx_1_ff_1_fused // 2 * 56 + nn_2_yy_2_xx_2_ff_2_fused // 16 * 7 + ax1)
                                v2 = T.axis.spatial(112, nn_0_yy_0_xx_0_ff_0_fused // 2 * 16 + nn_2_yy_2_xx_2_ff_2_fused % 16 + ax2)
                                v3 = T.axis.spatial(64, nn_0_yy_0_xx_0_ff_0_fused % 2 * 32 + nn_1_yy_1_xx_1_ff_1_fused % 2 * 16 + ax3)
                                T.reads(Conv2dOutput_local[v0, v1, v2, v3], bias[v3], bn_scale[v3], bn_offset[v3])
                                T.writes(compute[v0, v1, v2, v3])
                                compute[v0, v1, v2, v3] = T.max((Conv2dOutput_local[v0, v1, v2, v3] + bias[v3]) * bn_scale[v3] + bn_offset[v3], T.float32(0))
    # fmt: on
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1, 1]),
        ("SamplePerfectTile", [1, 2, 8, 1, 7]),
        ("SamplePerfectTile", [7, 1, 16, 1, 1]),
        ("SamplePerfectTile", [2, 2, 1, 2, 8]),
        ("SamplePerfectTile", [7, 1, 1]),
        ("SamplePerfectTile", [1, 1, 7]),
        ("SamplePerfectTile", [3, 1, 1]),
        ("SampleCategorical", 0),
        ("SampleCategorical", 0),
        ("SampleCategorical", 3),
    ]
    mod = create_te_workload("CBR", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[cbr_0],
        expected_decisions=[decision_0],
    )
def test_cuda_tbg():
    """CUDA sketch generation for transpose-batch-matmul (TBG).

    A single design space ``tbg_0`` is expected: both operands are transposed
    into shared-memory caches and the batched matmul accumulates into a local
    buffer before write-back.
    """
    # fmt: off
    @T.prim_func
    def tbg_0(query: T.Buffer((1, 128, 12, 64), "float32"), value: T.Buffer((1, 128, 12, 64), "float32"), C: T.Buffer((1, 12, 128, 128), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit": 1024})
            C_local = T.alloc_buffer((1, 12, 128, 128), scope="local")
            query_T_shared = T.alloc_buffer((1, 12, 128, 64), scope="shared")
            value_T_shared = T.alloc_buffer((1, 12, 64, 128), scope="shared")
            for b_0_h_0_i_0_j_0_fused in T.thread_binding(4, thread="blockIdx.x"):
                for b_1_h_1_i_1_j_1_fused in T.thread_binding(192, thread="vthread.x"):
                    for b_2_h_2_i_2_j_2_fused in T.thread_binding(32, thread="threadIdx.x"):
                        for k_0 in range(8):
                            for ax0_ax1_ax2_ax3_fused in range(12288):
                                with T.block("query_T_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(12, ax0_ax1_ax2_ax3_fused // 1024)
                                    v2 = T.axis.spatial(128, ax0_ax1_ax2_ax3_fused % 1024 // 8)
                                    v3 = T.axis.spatial(64, k_0 * 8 + ax0_ax1_ax2_ax3_fused % 8)
                                    T.reads(query[v0, v2, v1, v3])
                                    T.writes(query_T_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 3})
                                    query_T_shared[v0, v1, v2, v3] = query[v0, v2, v1, v3]
                            for ax0_ax1_ax2_ax3_fused in range(3072):
                                with T.block("value_T_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(12, ax0_ax1_ax2_ax3_fused // 256)
                                    v2 = T.axis.spatial(64, k_0 * 8 + ax0_ax1_ax2_ax3_fused % 256 // 32)
                                    v3 = T.axis.spatial(128, b_0_h_0_i_0_j_0_fused * 32 + ax0_ax1_ax2_ax3_fused % 32)
                                    T.reads(value[v0, v3, v1, v2])
                                    T.writes(value_T_shared[v0, v1, v2, v3])
                                    T.block_attr({"meta_schedule.cooperative_fetch": 4})
                                    value_T_shared[v0, v1, v2, v3] = value[v0, v3, v1, v2]
                            for k_1, b_3, h_3, i_3, j_3, k_2, b_4, h_4, i_4, j_4 in T.grid(4, 1, 2, 1, 1, 2, 1, 1, 4, 1):
                                with T.block("C"):
                                    v_b = T.axis.spatial(1, b_3 + b_4)
                                    v_h = T.axis.spatial(12, b_1_h_1_i_1_j_1_fused // 32 * 2 + h_3 + h_4)
                                    v_i = T.axis.spatial(128, b_1_h_1_i_1_j_1_fused % 32 // 8 * 32 + b_2_h_2_i_2_j_2_fused // 4 * 4 + i_3 * 4 + i_4)
                                    v_j = T.axis.spatial(128, b_0_h_0_i_0_j_0_fused * 32 + b_1_h_1_i_1_j_1_fused % 8 * 4 + b_2_h_2_i_2_j_2_fused % 4 + j_3 + j_4)
                                    v_k = T.axis.reduce(64, k_0 * 8 + k_1 * 2 + k_2)
                                    T.reads(query_T_shared[v_b, v_h, v_i, v_k], value_T_shared[v_b, v_h, v_k, v_j])
                                    T.writes(C_local[v_b, v_h, v_i, v_j])
                                    T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"})
                                    with T.init():
                                        C_local[v_b, v_h, v_i, v_j] = T.float32(0)
                                    C_local[v_b, v_h, v_i, v_j] = C_local[v_b, v_h, v_i, v_j] + query_T_shared[v_b, v_h, v_i, v_k] * value_T_shared[v_b, v_h, v_k, v_j]
                        for ax0, ax1, ax2, ax3 in T.grid(1, 2, 4, 1):
                            with T.block("C_local"):
                                v0 = T.axis.spatial(1, ax0)
                                v1 = T.axis.spatial(12, b_1_h_1_i_1_j_1_fused // 32 * 2 + ax1)
                                v2 = T.axis.spatial(128, b_1_h_1_i_1_j_1_fused % 32 // 8 * 32 + b_2_h_2_i_2_j_2_fused // 4 * 4 + ax2)
                                v3 = T.axis.spatial(128, b_0_h_0_i_0_j_0_fused * 32 + b_1_h_1_i_1_j_1_fused % 8 * 4 + b_2_h_2_i_2_j_2_fused % 4 + ax3)
                                T.reads(C_local[v0, v1, v2, v3])
                                T.writes(C[v0, v1, v2, v3])
                                C[v0, v1, v2, v3] = C_local[v0, v1, v2, v3]
    # fmt: on
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1, 1]),
        ("SamplePerfectTile", [1, 6, 1, 2, 1]),
        ("SamplePerfectTile", [1, 4, 8, 1, 4]),
        ("SamplePerfectTile", [4, 8, 4, 1, 1]),
        ("SamplePerfectTile", [8, 4, 2]),
        ("SampleCategorical", 2),
        ("SampleCategorical", 3),
        ("SampleCategorical", 4),
    ]
    mod = create_te_workload("TBG", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[tbg_0],
        expected_decisions=[decision_0],
    )
if __name__ == "__main__":
    # Run every CUDA sketch-generation test in sequence when invoked as a script.
    test_cuda_c1d()
    test_cuda_c2d()
    test_cuda_c3d()
    test_cuda_cap()
    test_cuda_dep()
    test_cuda_dil()
    test_cuda_gmm()
    test_cuda_grp()
    test_cuda_t2d()
    test_cuda_nrm()
    test_cuda_sfm()
    test_cuda_cbr()
    test_cuda_tbg()
| 82,325 | 65.499192 | 329 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_postproc_verify_vtcm_limit.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import tvm
import tvm.testing
from tvm import meta_schedule as ms
from tvm import tir
from tvm.script import tir as T
def _create_context(mod, target) -> ms.TuneContext:
    """Build a TuneContext whose only postprocessor is ``VerifyVTCMLimit``.

    No schedule rules or mutators are attached, so applying the space
    generator's postprocs exercises the VTCM-limit check in isolation.
    """
    space = ms.space_generator.PostOrderApply(
        sch_rules=[],
        postprocs=[ms.postproc.VerifyVTCMLimit()],
        mutator_probs={},
    )
    return ms.TuneContext(
        mod=mod,
        target=target,
        space_generator=space,
        task_name="test",
    )
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument,not-callable,misplaced-comparison-constant
# fmt: off
# Fixture IRModule: int8 conv2d in NCHWc layout whose two inputs are staged
# into "global.vtcm" buffers; the total VTCM footprint is what the
# VerifyVTCMLimit postprocessor measures in the test below.
@tvm.script.ir_module
class Conv2dNCHWcVTCM:
    @T.prim_func
    def main(p0: T.Buffer((T.int64(1), T.int64(2), T.int64(56), T.int64(56), T.int64(32)), "uint8"), p1: T.Buffer((T.int64(2), T.int64(2), T.int64(3), T.int64(3), T.int64(8), T.int64(32), T.int64(4)), "uint8"), conv2d_NCHWc_int8: T.Buffer((T.int64(1), T.int64(2), T.int64(54), T.int64(54), T.int64(32)), "int32")):
        T.func_attr({"tir.noalias": True, "global_symbol": "main"})
        # VTCM-resident staging buffers for both conv inputs.
        p0_global_vtcm = T.alloc_buffer([T.int64(1), T.int64(2), T.int64(56), T.int64(56), T.int64(32)], dtype="uint8", scope="global.vtcm")
        p1_global_vtcm = T.alloc_buffer([T.int64(2), T.int64(2), T.int64(3), T.int64(3), T.int64(8), T.int64(32), T.int64(4)], dtype="uint8", scope="global.vtcm")
        for n_0 in T.serial(T.int64(1), annotations={"pragma_auto_unroll_max_step":16, "pragma_unroll_explicit":1}):
            for oc_chunk_0, oh_0, ow_0, oc_block_0_0 in T.grid(T.int64(2), T.int64(2), T.int64(2), T.int64(1)):
                for oc_chunk_1_init, oh_1_init, ow_1_init, oc_chunk_2_init, oh_2_init, ow_2_init in T.grid(T.int64(1), T.int64(27), T.int64(3), T.int64(1), T.int64(1), T.int64(9)):
                    with T.block("conv2d_NCHWc_int8_o_init"):
                        v_n = T.axis.spatial(T.int64(1), T.int64(0))
                        v_oc_chunk = T.axis.spatial(T.int64(2), oc_chunk_1_init + oc_chunk_2_init + oc_chunk_0)
                        v_oh = T.axis.spatial(T.int64(54), oh_2_init + oh_0 * T.int64(27) + oh_1_init)
                        v_ow = T.axis.spatial(T.int64(54), ow_0 * T.int64(27) + ow_1_init * T.int64(9) + ow_2_init)
                        v_oc_block_o = T.axis.spatial(T.int64(1), T.int64(0))
                        T.reads()
                        T.writes(conv2d_NCHWc_int8[v_n, v_oc_chunk, v_oh, v_ow, T.int64(0) : T.int64(32)])
                        for oc_block_1 in T.vectorized(T.int64(32)):
                            with T.block("conv2d_NCHWc_int8_init"):
                                v_oc_block_i_init = T.axis.spatial(T.int64(32), oc_block_1)
                                T.reads()
                                T.writes(conv2d_NCHWc_int8[v_n, v_oc_chunk, v_oh, v_ow, v_oc_block_i_init])
                                conv2d_NCHWc_int8[v_n, v_oc_chunk, v_oh, v_ow, v_oc_block_i_init] = 0
                # Software-pipelined copy-in (async stage 0) overlapped with compute (stage 1).
                for kh_0_kw_0_ic_outer_0_ic_f_inner_0_ic_s_inner_0_0_fused in T.serial(T.int64(2), annotations={"software_pipeline_async_stages":[0], "software_pipeline_order":[0, 1, 2], "software_pipeline_stage":[0, 0, 1]}):
                    for ax0_ax1_ax2_ax3_ax4_fused in T.serial(T.int64(26912)):
                        with T.block("p0_global.vtcm"):
                            v0 = T.axis.spatial(T.int64(1), T.int64(0))
                            v1 = T.axis.spatial(T.int64(2), ax0_ax1_ax2_ax3_ax4_fused // T.int64(13456))
                            v2 = T.axis.spatial(T.int64(56), oh_0 * T.int64(27) + ax0_ax1_ax2_ax3_ax4_fused % T.int64(13456) // T.int64(464))
                            v3 = T.axis.spatial(T.int64(56), ow_0 * T.int64(27) + ax0_ax1_ax2_ax3_ax4_fused % T.int64(464) // T.int64(16))
                            v4 = T.axis.spatial(T.int64(32), kh_0_kw_0_ic_outer_0_ic_f_inner_0_ic_s_inner_0_0_fused * T.int64(16) + ax0_ax1_ax2_ax3_ax4_fused % T.int64(16))
                            T.reads(p0[v0, v1, v2, v3, v4])
                            T.writes(p0_global_vtcm[v0, v1, v2, v3, v4])
                            p0_global_vtcm[v0, v1, v2, v3, v4] = p0[v0, v1, v2, v3, v4]
                    for ax0_ax1_ax2_ax3_ax4_ax5_ax6_fused in T.serial(T.int64(9216)):
                        with T.block("p1_global.vtcm"):
                            v0 = T.axis.spatial(T.int64(2), oc_chunk_0)
                            v1 = T.axis.spatial(T.int64(2), ax0_ax1_ax2_ax3_ax4_ax5_ax6_fused // T.int64(4608))
                            v2 = T.axis.spatial(T.int64(3), ax0_ax1_ax2_ax3_ax4_ax5_ax6_fused % T.int64(4608) // T.int64(1536))
                            v3 = T.axis.spatial(T.int64(3), ax0_ax1_ax2_ax3_ax4_ax5_ax6_fused % T.int64(1536) // T.int64(512))
                            v4 = T.axis.spatial(T.int64(8), kh_0_kw_0_ic_outer_0_ic_f_inner_0_ic_s_inner_0_0_fused * T.int64(4) + ax0_ax1_ax2_ax3_ax4_ax5_ax6_fused % T.int64(512) // T.int64(128))
                            v5 = T.axis.spatial(T.int64(32), ax0_ax1_ax2_ax3_ax4_ax5_ax6_fused % T.int64(128) // T.int64(4))
                            v6 = T.axis.spatial(T.int64(4), ax0_ax1_ax2_ax3_ax4_ax5_ax6_fused % T.int64(4))
                            T.reads(p1[v0, v1, v2, v3, v4, v5, v6])
                            T.writes(p1_global_vtcm[v0, v1, v2, v3, v4, v5, v6])
                            p1_global_vtcm[v0, v1, v2, v3, v4, v5, v6] = p1[v0, v1, v2, v3, v4, v5, v6]
                    for n_1, oc_chunk_1, oh_1, ow_1, oc_block_0_1, kh_1, kw_1, ic_outer_1, ic_f_inner_1, ic_s_inner_0_1, n_2, oc_chunk_2, oh_2, ow_2, oc_block_0_2 in T.grid(T.int64(1), T.int64(1), T.int64(27), T.int64(3), T.int64(1), T.int64(3), T.int64(3), T.int64(2), T.int64(4), T.int64(1), T.int64(1), T.int64(1), T.int64(1), T.int64(9), T.int64(1)):
                        with T.block("conv2d_NCHWc_int8_o_update"):
                            v_n = T.axis.spatial(T.int64(1), T.int64(0))
                            v_oc_chunk = T.axis.spatial(T.int64(2), oc_chunk_1 + oc_chunk_2 + oc_chunk_0)
                            v_oh = T.axis.spatial(T.int64(54), oh_2 + oh_0 * T.int64(27) + oh_1)
                            v_ow = T.axis.spatial(T.int64(54), ow_0 * T.int64(27) + ow_1 * T.int64(9) + ow_2)
                            v_oc_block_o = T.axis.spatial(T.int64(1), T.int64(0))
                            v_kh, v_kw, v_ic_outer = T.axis.remap("RRR", [kh_1, kw_1, ic_outer_1])
                            v_ic_f_inner = T.axis.reduce(T.int64(8), kh_0_kw_0_ic_outer_0_ic_f_inner_0_ic_s_inner_0_0_fused * T.int64(4) + ic_f_inner_1)
                            v_ic_s_inner_o = T.axis.reduce(T.int64(1), T.int64(0))
                            T.reads(conv2d_NCHWc_int8[v_n, v_oc_chunk, v_oh, v_ow, T.int64(0) : T.int64(32)], p0_global_vtcm[v_n, v_ic_outer, v_oh + v_kh, v_ow + v_kw, v_ic_f_inner * T.int64(4) : v_ic_f_inner * T.int64(4) + T.int64(4)], p1_global_vtcm[v_oc_chunk, v_ic_outer, v_kh, v_kw, v_ic_f_inner, T.int64(0) : T.int64(32), T.int64(0) : T.int64(4)])
                            T.writes(conv2d_NCHWc_int8[v_n, v_oc_chunk, v_oh, v_ow, T.int64(0) : T.int64(32)])
                            for oc_block_1, ic_s_inner_1 in T.grid(T.int64(32), T.int64(4)):
                                with T.block("conv2d_NCHWc_int8"):
                                    v_oc_block_i, v_ic_s_inner_i = T.axis.remap("SR", [oc_block_1, ic_s_inner_1])
                                    T.reads(conv2d_NCHWc_int8[v_n, v_oc_chunk, v_oh, v_ow, v_oc_block_i], p0_global_vtcm[v_n, v_ic_outer, v_oh + v_kh, v_ow + v_kw, v_ic_f_inner * T.int64(4) + v_ic_s_inner_i], p1_global_vtcm[v_oc_chunk, v_ic_outer, v_kh, v_kw, v_ic_f_inner, v_oc_block_i, v_ic_s_inner_i])
                                    T.writes(conv2d_NCHWc_int8[v_n, v_oc_chunk, v_oh, v_ow, v_oc_block_i])
                                    T.block_attr({"meta_schedule.tiling_structure":"SRSRS"})
                                    conv2d_NCHWc_int8[v_n, v_oc_chunk, v_oh, v_ow, v_oc_block_i] = conv2d_NCHWc_int8[v_n, v_oc_chunk, v_oh, v_ow, v_oc_block_i] + T.Cast("int32", p0_global_vtcm[v_n, v_ic_outer, v_oh + v_kh, v_ow + v_kw, v_ic_f_inner * T.int64(4) + v_ic_s_inner_i]) * T.Cast("int32", p1_global_vtcm[v_oc_chunk, v_ic_outer, v_kh, v_kw, v_ic_f_inner, v_oc_block_i, v_ic_s_inner_i])
# fmt: on
def test_conv2d_vtcm():
    """Check that VerifyVTCMLimit rejects schedules exceeding the VTCM budget.

    The fixture needs just over 70000 bytes of VTCM, so a 70000-byte target
    must fail the postprocessor while a 75000-byte target must pass.
    """

    def get_target(vtcm_cap):
        hexagon = tvm.target.hexagon("v68", vtcm_capacity=vtcm_cap)
        return tvm.target.Target(hexagon, host=hexagon)

    sch = tir.Schedule(Conv2dNCHWcVTCM, debug_mask="all")
    # Insufficient capacity: the postproc must reject the schedule.
    small_ctx = _create_context(Conv2dNCHWcVTCM, target=get_target(70000))
    assert not small_ctx.space_generator.postprocs[0].apply(sch)
    # Sufficient capacity: the postproc must accept it.
    large_ctx = _create_context(Conv2dNCHWcVTCM, target=get_target(75000))
    assert large_ctx.space_generator.postprocs[0].apply(sch)
if __name__ == "__main__":
    # Delegate test discovery and execution to TVM's pytest-based entry point.
    tvm.testing.main()
| 9,697 | 74.765625 | 394 | py |
tvm | tvm-main/tests/python/unittest/test_tir_structural_equal_hash.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
import pytest
from tvm import te
from tvm.runtime import ObjectPath
from tvm.script import tir as T, ir as I
def consistent_equal(x, y, map_free_vars=False):
    """Return structural equality of ``x`` and ``y`` while verifying that the
    result is commutative and consistent with structural hashing."""
    lhs_eq = tvm.ir.structural_equal(x, y, map_free_vars)
    rhs_eq = tvm.ir.structural_equal(y, x, map_free_vars)
    if lhs_eq != rhs_eq:
        raise ValueError(
            "Non-commutative {} vs {}, sequal0={}, sequal1={}".format(x, y, lhs_eq, rhs_eq)
        )
    # NOTE: hash collision can happen but should be rare;
    # we can confirm that it doesn't happen for our testcases.
    hash_x = tvm.ir.structural_hash(x, map_free_vars)
    hash_y = tvm.ir.structural_hash(y, map_free_vars)
    if lhs_eq != (hash_x == hash_y):
        raise ValueError(
            "Inconsistent {} vs {}, sequal={}, xhash={}, yhash={}".format(
                x, y, lhs_eq, hash_x, hash_y
            )
        )
    return lhs_eq
def get_sequal_mismatch(x, y, map_free_vars=False):
    """Return the first structural mismatch between ``x`` and ``y``, checking
    that the result is symmetric under swapping the two arguments."""
    fwd = tvm.ir.base.get_first_structural_mismatch(x, y, map_free_vars)
    rev = tvm.ir.base.get_first_structural_mismatch(y, x, map_free_vars)
    if fwd is None and rev is None:
        return None
    # Swapping the arguments must swap the two paths of the mismatch.
    symmetric = (
        fwd is not None and rev is not None and fwd[0] == rev[1] and fwd[1] == rev[0]
    )
    if not symmetric:
        raise ValueError(
            "Non-commutative {} vs {}, mismatch_0={}, mismatch_1={}".format(x, y, fwd, rev)
        )
    return fwd
def test_exprs():
    """Structural equality/hash over TIR expressions, with and without
    free-variable mapping."""
    x = tvm.tir.const(1, "int32")
    y = tvm.tir.const(10, "int32")
    vx = te.var("x")
    vy = te.var("y")
    vz = te.var("z")
    zx = vx + vx
    zy = vy + vy
    assert consistent_equal(zx * zx, (vx + vx) * (vx + vx), map_free_vars=False)
    # test assert trigger.
    with pytest.raises(ValueError):
        tvm.ir.assert_structural_equal(x, y)
    # Free vars match only when mapping is enabled.
    assert not consistent_equal(vx, vy)
    assert consistent_equal(vx, vy, map_free_vars=True)
    # corner case: lhs:vx == rhs:vy, but vx cannot also map to itself
    assert not consistent_equal(vx + vx, vy + vx, map_free_vars=True)
    # corner case: lhs:vx == rhs:vy, lhs:vy == rhs:vx
    assert consistent_equal(vx + vy, vy + vx, map_free_vars=True)
    # corner case 2: rolling remap.
    assert consistent_equal(vx + vy + vz, vy + vz + vx, map_free_vars=True)
    assert not consistent_equal(vx + 1, vy + 1, map_free_vars=False)
    # Definition remap: Let-bound vars map regardless of map_free_vars.
    assert consistent_equal(tvm.tir.Let(vx, 1, vx - 1), tvm.tir.Let(vy, 1, vy - 1))
    # Default same-address free var remap (vz maps to itself).
    assert consistent_equal(tvm.tir.Let(vx, 1, vx // vz), tvm.tir.Let(vy, 1, vy // vz))
    assert consistent_equal(zx * zx, zx * zx)
    assert consistent_equal(zx * zx, zy * zy, map_free_vars=True)
    assert not consistent_equal(zx * zx, zy * zy, map_free_vars=False)
def test_prim_func():
    """Structural equality of PrimFuncs: operand order matters, save/load
    round-trips are equal, and NDArray attrs compare by content."""
    x = te.var("x")
    y = te.var("y")
    # counter example of same equality: x + y vs y + x are different trees
    func0 = tvm.tir.PrimFunc([x, y], tvm.tir.Evaluate(x + y))
    func1 = tvm.tir.PrimFunc([x, y], tvm.tir.Evaluate(y + x))
    assert not consistent_equal(func0, func1)
    # new cases: a func with a buffer param and a LetStmt body
    b = tvm.tir.decl_buffer((x,), "float32")
    stmt = tvm.tir.LetStmt(x, 10, tvm.tir.Evaluate(x + 1))
    func0 = tvm.tir.PrimFunc([x, y, b], stmt)
    # easiest way to deep copy is via save/load
    func1 = tvm.ir.load_json(tvm.ir.save_json(func0))
    tvm.ir.assert_structural_equal(func0, func1)
    data0 = tvm.nd.array([1, 2, 3])
    data1 = tvm.nd.array([1, 2, 3])
    # attributes and ndarrays: distinct arrays with equal contents still match
    func0 = func0.with_attr("data", data0)
    func1 = func1.with_attr("data", data1)
    # IRModules wrapping equal funcs compare equal too
    mod0 = tvm.IRModule.from_expr(func0)
    mod1 = tvm.IRModule.from_expr(func1)
    tvm.ir.assert_structural_equal(mod0, mod1)
def test_prim_func_param_count_mismatch():
    """A missing parameter is reported as a mismatch on the params array."""
    x = te.var("x")
    y = te.var("y")
    z = te.var("z")
    short_func = tvm.tir.PrimFunc([x, y], tvm.tir.Evaluate(x))
    long_func = tvm.tir.PrimFunc([x, y, z], tvm.tir.Evaluate(x))
    lhs_path, rhs_path = get_sequal_mismatch(short_func, long_func)
    params_path = ObjectPath.root().attr("params")
    assert lhs_path == params_path.missing_array_element(2)
    assert rhs_path == params_path.array_index(2)
def test_prim_func_param_dtype_mismatch():
    """Parameters that differ only in dtype mismatch on the dtype attribute."""
    x = te.var("x")
    int_param = te.var("y", dtype="int32")
    float_param = te.var("z", dtype="float32")
    func_int = tvm.tir.PrimFunc([x, int_param], tvm.tir.Evaluate(x))
    func_float = tvm.tir.PrimFunc([x, float_param], tvm.tir.Evaluate(x))
    lhs_path, rhs_path = get_sequal_mismatch(func_int, func_float)
    expected = ObjectPath.root().attr("params").array_index(1).attr("dtype")
    assert lhs_path == expected
    assert rhs_path == expected
def test_prim_func_body_mismatch():
    """Differing function bodies are reported at the first diverging node."""
    x_lhs, y_lhs = te.var("x"), te.var("y")
    x_rhs, y_rhs = te.var("x"), te.var("y")
    # x + x vs x + y diverge in the right operand ("b") of the Add.
    func_lhs = tvm.tir.PrimFunc([x_lhs, y_lhs], tvm.tir.Evaluate(x_lhs + x_lhs))
    func_rhs = tvm.tir.PrimFunc([x_rhs, y_rhs], tvm.tir.Evaluate(x_rhs + y_rhs))
    lhs_path, rhs_path = get_sequal_mismatch(func_lhs, func_rhs)
    expected = ObjectPath.root().attr("body").attr("value").attr("b")
    assert lhs_path == expected
    assert rhs_path == expected
def test_array():
    """NDArrays compare structurally by contents and shape."""
    base = np.arange(10)
    flat_a = tvm.nd.array(base)
    flat_b = tvm.nd.array(base)
    reshaped = tvm.nd.array(base.reshape(2, 5))
    assert consistent_equal(flat_a, flat_b)
    assert not consistent_equal(flat_a, reshaped)
def test_env_func():
    """EnvFuncs retrieved under the same registered name compare equal."""

    @tvm.register_func("test.sequal.env_func")
    def test(x):
        return x + 1

    lookup_a = tvm.ir.EnvFunc.get("test.sequal.env_func")
    lookup_b = tvm.ir.EnvFunc.get("test.sequal.env_func")
    assert consistent_equal(lookup_b, lookup_a)
def test_attrs():
    """Attrs nodes and converted containers compare by content, not identity."""
    attrs_a = tvm.ir.make_node("attrs.TestAttrs", axis=1, name="xx")
    attrs_b = tvm.ir.make_node("attrs.TestAttrs", axis=1, name="xx")
    attrs_c = tvm.ir.make_node("attrs.TestAttrs", axis=2, name="xx")
    tvm.ir.assert_structural_equal(attrs_b, attrs_a)
    assert not consistent_equal(attrs_b, attrs_c)
    # Maps ignore insertion order but not contents.
    map_a = tvm.runtime.convert({"x": [1, 2, 3], "y": 2})
    map_b = tvm.runtime.convert({"y": 2, "x": [1, 2, 3]})
    map_c = tvm.runtime.convert({"y": 2, "x": [1, 2, 3, 4]})
    assert consistent_equal(map_b, map_a)
    assert not consistent_equal(map_b, map_c)
def test_stmt():
    """Two identical IRBuilder-generated statement trees compare equal.

    Cleaned up: the original declared several locals (x, y, A/B placeholders,
    ii, jj) that were never used; they have been removed.
    """
    n = 128
    Ab = tvm.tir.decl_buffer((n,), name="A")
    n = te.var("n")  # rebinds n: the loop extent below is symbolic

    def func2():
        ib = tvm.tir.ir_builder.create()
        A = ib.buffer_ptr(Ab)
        with ib.for_range(0, n, name="i") as i:
            A[i] = A[i] + 1
            with ib.for_range(0, 10, name="j") as j:
                A[j] = A[j] + 2
                A[j] = A[j] + 2
        return ib.get()

    assert consistent_equal(func2(), func2())
def test_buffer_storage_scope():
    """Buffers differ structurally only when storage scopes differ; an empty
    scope is treated the same as the default scope."""
    handle = te.var("x", dtype="handle")

    def make_func(buffer):
        return tvm.tir.PrimFunc([handle], tvm.tir.Evaluate(handle), buffer_map={handle: buffer})

    func_local_a = make_func(tvm.tir.decl_buffer((10, 10), "float32", scope="local"))
    func_local_b = make_func(tvm.tir.decl_buffer((10, 10), "float32", scope="local"))
    func_default = make_func(tvm.tir.decl_buffer((10, 10), "float32"))
    func_empty_scope = make_func(tvm.tir.decl_buffer((10, 10), "float32", scope=""))
    assert consistent_equal(func_local_a, func_local_b)
    assert consistent_equal(func_default, func_empty_scope)
    assert not consistent_equal(func_local_a, func_default)
def test_buffer_map_mismatch():
    """A buffer-shape mismatch inside the buffer map is reported down to the
    differing shape element."""
    param = te.var("x")

    def make_func(shape):
        return tvm.tir.PrimFunc(
            [param], tvm.tir.Evaluate(param), buffer_map={param: tvm.tir.decl_buffer(shape)}
        )

    func_a = make_func((10, 10))
    func_a_clone = make_func((10, 10))
    func_b = make_func((10, 20))
    lhs_path, rhs_path = get_sequal_mismatch(func_a, func_b)
    expected = (
        ObjectPath.root()
        .attr("buffer_map")
        .map_value(param)
        .attr("shape")
        .array_index(1)
        .attr("value")
    )
    assert lhs_path == expected
    assert rhs_path == expected
    assert get_sequal_mismatch(func_a, func_a_clone) is None
def test_buffer_map_length_mismatch():
    """An extra buffer-map entry shows up as a missing map entry on one side."""
    x = te.var("x")
    y = te.var("x")
    buf_x = tvm.tir.decl_buffer((10, 10))
    buf_y = tvm.tir.decl_buffer((10, 20))
    func_short = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buf_x})
    func_long = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buf_x, y: buf_y})
    lhs_path, rhs_path = get_sequal_mismatch(func_short, func_long)
    buffer_map_path = ObjectPath.root().attr("buffer_map")
    assert lhs_path == buffer_map_path.missing_map_entry()
    assert rhs_path == buffer_map_path.map_value(y)
def test_buffer_load_store():
    """BufferLoad/BufferStore equality depends on the accessed indices."""
    buf = tvm.tir.decl_buffer((10, 10), "float32")
    load_a = tvm.tir.BufferLoad(buf, [0, 1])
    load_b = tvm.tir.BufferLoad(buf, [0, 1])
    load_other = tvm.tir.BufferLoad(buf, [1, 2])
    assert consistent_equal(load_b, load_a)
    assert not consistent_equal(load_b, load_other)
    idx = tvm.tir.Var("x", "int32")
    store_a = tvm.tir.BufferStore(buf, 0.1, [0, idx])
    store_b = tvm.tir.BufferStore(buf, 0.1, [0, idx])
    store_other = tvm.tir.BufferStore(buf, 0.1, [1, idx])
    assert consistent_equal(store_b, store_a)
    assert not consistent_equal(store_b, store_other)
def test_while():
    """While loops over distinct free vars match only when free vars map."""
    var_a = tvm.tir.Var("x", "int32")
    var_b = tvm.tir.Var("y", "int32")
    loop_a = tvm.tir.While(var_a > 0, tvm.tir.Evaluate(var_a))
    loop_b = tvm.tir.While(var_b > 0, tvm.tir.Evaluate(var_b))
    assert not consistent_equal(loop_a, loop_b)
    assert consistent_equal(loop_a, loop_b, map_free_vars=True)
def test_while_condition_mismatch():
    """While loops differing only in their condition mismatch on it."""
    x = tvm.tir.Var("x", "int32")
    loop_gt = tvm.tir.While(x > 0, tvm.tir.Evaluate(x))
    loop_lt = tvm.tir.While(x < 0, tvm.tir.Evaluate(x))
    lhs_path, rhs_path = get_sequal_mismatch(loop_gt, loop_lt)
    expected = ObjectPath.root().attr("condition")
    assert lhs_path == expected
    assert rhs_path == expected
def test_while_body_mismatch():
    """While loops differing only in their body mismatch inside the body."""
    x = tvm.tir.Var("x", "int32")
    loop_plain = tvm.tir.While(x > 0, tvm.tir.Evaluate(x))
    loop_incremented = tvm.tir.While(x > 0, tvm.tir.Evaluate(x + 1))
    lhs_path, rhs_path = get_sequal_mismatch(loop_plain, loop_incremented)
    expected = ObjectPath.root().attr("body").attr("value")
    assert lhs_path == expected
    assert rhs_path == expected
def test_seq_mismatch():
    """A differing element inside a SeqStmt is reported at its exact path."""
    x = tvm.tir.Var("x", "int32")

    def make_seq(third_offset):
        # Four Evaluate statements; only the third varies between the two seqs.
        return tvm.tir.SeqStmt(
            [
                tvm.tir.Evaluate(x),
                tvm.tir.Evaluate(x + 1),
                tvm.tir.Evaluate(x + third_offset),
                tvm.tir.Evaluate(x + 3),
            ]
        )

    lhs_path, rhs_path = get_sequal_mismatch(make_seq(2), make_seq(99))
    expected = (
        ObjectPath.root().attr("seq").array_index(2).attr("value").attr("b").attr("value")
    )
    assert lhs_path == expected
    assert rhs_path == expected
def test_seq_mismatch_different_lengths():
    """An element-level difference is reported before a length difference."""
    x = tvm.tir.Var("x", "int32")
    seq_long = tvm.tir.SeqStmt(
        [
            tvm.tir.Evaluate(x),
            tvm.tir.Evaluate(x + 1),
            tvm.tir.Evaluate(x + 2),
            tvm.tir.Evaluate(x + 3),
        ]
    )
    seq_short = tvm.tir.SeqStmt(
        [tvm.tir.Evaluate(x), tvm.tir.Evaluate(x + 1), tvm.tir.Evaluate(x + 3)]
    )
    lhs_path, rhs_path = get_sequal_mismatch(seq_long, seq_short)
    # Element 2 (x + 2 vs x + 3) differs, so it wins over the length mismatch.
    expected = (
        ObjectPath.root().attr("seq").array_index(2).attr("value").attr("b").attr("value")
    )
    assert lhs_path == expected
    assert rhs_path == expected
def test_seq_length_mismatch():
    """When one SeqStmt is a strict prefix of the other, the mismatch is the
    missing trailing element."""
    x = tvm.tir.Var("x", "int32")
    prefix = [tvm.tir.Evaluate(x), tvm.tir.Evaluate(x + 1), tvm.tir.Evaluate(x + 2)]
    seq_long = tvm.tir.SeqStmt(prefix + [tvm.tir.Evaluate(x + 3)])
    seq_short = tvm.tir.SeqStmt(list(prefix))
    lhs_path, rhs_path = get_sequal_mismatch(seq_long, seq_short)
    assert lhs_path == ObjectPath.root().attr("seq").array_index(3)
    assert rhs_path == ObjectPath.root().attr("seq").missing_array_element(3)
def test_ir_module_equal():
    """IRModule equality maps GlobalVars across modules, and a mismatch is
    reported with the offending function's name in the path."""
    def generate(n: int):
        # Build a fresh IRModule whose single function loops n times.
        @I.ir_module
        class module:
            @T.prim_func
            def func(A: T.Buffer(1, "int32")):
                for i in range(n):
                    A[0] = A[0] + 1
        return module
    # Equivalent IRModules should compare as equivalent, even though
    # they have distinct GlobalVars, and GlobalVars usually compare by
    # reference equality.
    tvm.ir.assert_structural_equal(generate(16), generate(16))
    # When there is a difference, the location should include the
    # function name that caused the failure.
    with pytest.raises(ValueError) as err:
        tvm.ir.assert_structural_equal(generate(16), generate(32))
    assert '<root>.functions[I.GlobalVar("func")].body.extent.value' in err.value.args[0]
if __name__ == "__main__":
    # Allow running this file directly: discover and run all tests in it.
    tvm.testing.main()
| 14,640 | 33.53066 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_link_params.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
import ctypes
import json
import os
import re
from contextlib import redirect_stderr
from io import StringIO
import numpy as np
import tvm
import tvm.relay
import tvm.testing
from tvm import meta_schedule as ms
from tvm import relay
from tvm.contrib import utils
from tvm.relay.backend import Executor, Runtime
INPUT_SHAPE = (1, 3, 16, 16)
KERNEL_SHAPE = (3, 3, 3, 3)
# The data types that are linkable.
linkable_dtype = tvm.testing.parameter(
*(
[f"uint{b}" for b in (8, 16, 32, 64)]
+ [f"int{b}" for b in (8, 16, 32, 64)]
+ ["float32", "float64"]
)
)
def dtype_info(dtype):
    """Lookup numpy type info for the given string dtype (of linkable_dtype params above)."""
    np_dtype = getattr(np, dtype)
    return np.iinfo(np_dtype) if "int" in dtype else np.finfo(np_dtype)
# Note: for debugging, set this to an integer (i.e. 1.0). Then all "random" tensors will become
# predictable
RANDOM_TENSOR_START = None
def _make_random_tensor(dtype, shape):
    """Create a random test tensor with given shape and dtype.

    When RANDOM_TENSOR_START is set (debug mode), tensors are filled with
    consecutive integers instead, and the start value is advanced so that
    successive tensors are distinct but predictable.
    """
    global RANDOM_TENSOR_START
    if RANDOM_TENSOR_START is not None:
        to_return = np.arange(
            RANDOM_TENSOR_START, RANDOM_TENSOR_START + np.prod(shape), dtype=dtype
        ).reshape(shape)
        # Advance the start so the next tensor gets a fresh range.
        # BUG FIX: this previously incremented an undefined global RAND_SEED,
        # which raised NameError whenever the debug path was enabled.
        RANDOM_TENSOR_START += np.prod(shape)
        return to_return

    dinfo = dtype_info(dtype)
    if "int" in dtype:
        return np.random.randint(dinfo.min, dinfo.max, shape, dtype=dtype)
    else:
        to_return = np.random.uniform(0, dinfo.max, shape).astype(dtype)
        # Flip the sign of every other element so negatives are covered too.
        np.reshape(to_return, np.prod(shape))[::2] *= -1
        return to_return
def _lookup_sid(graph, name):
    """Lookup the storage id of a named parameter.

    Arguments
    ---------
    graph : dict
        Parsed JSON graph.

    name : str
        Name of the tensor parameter to lookup.

    Returns
    -------
    int :
        The storage_id of the parameter.

    Raises
    ------
    KeyError :
        If no node with the given name exists in the graph.
    """
    num_outputs_seen = 0
    for n in graph["nodes"]:
        if n["name"] == name:
            # storage_id is indexed by output slot, not node index.
            # (Removed a leftover debug print() that polluted test output.)
            return graph["attrs"]["storage_id"][1][num_outputs_seen]
        # Skip over all output slots of the preceding node.
        if "attrs" in n and "num_outputs" in n["attrs"]:
            num_outputs_seen += int(n["attrs"]["num_outputs"])
        else:
            num_outputs_seen += 1

    raise KeyError(f"no such param: {name}")
def _get_ctypes_dtype(dt):
    """Return a ctypes c_* datatype given a string data type."""
    if "int" in dt:
        return getattr(ctypes, f"c_{dt}")
    float_types = {"float32": ctypes.c_float, "float64": ctypes.c_double}
    assert dt in float_types, f"unknown dtype: {dt}"
    return float_types[dt]
def _verify_linked_param(dtype, lib, mod, graph, name):
    """Directly read memory from the linked library to verify the linked parameter is correct."""
    sid = _lookup_sid(graph, name)
    # NOTE: query_imports=True because when loading a module from disk (i.e. for C backend),
    # a GraphExecutorFactory module is created instead of the module itself.
    param_ptr = mod.get_function("_lookup_linked_param", True)(sid)
    gen_param = lib.params[name]
    # Reinterpret the raw linked memory at param_ptr as a C array, then wrap it
    # as an ndarray of the expected shape/dtype without copying.
    arr_data = (_get_ctypes_dtype(dtype) * np.prod(gen_param.shape)).from_address(param_ptr.value)
    arr = np.ndarray(shape=gen_param.shape, dtype=gen_param.dtype, buffer=arr_data, order="C")
    if "int" in gen_param.dtype:
        np.testing.assert_equal(gen_param.numpy(), arr)
    else:
        np.testing.assert_allclose(gen_param.numpy(), arr)
    # NOTE(review): the return value is only a dtype sanity flag; callers in
    # this file mostly ignore it.
    return dtype == gen_param.dtype
def _make_mod_and_params(dtype):
    """Create a Relay module and parameters to test the given datatype."""
    param_decls = collections.OrderedDict()
    param_init = {}
    def _add_decl(name, dtype):
        # Declare one kernel-shaped parameter and generate its initial value.
        param_decls[name] = f"%{name} : Tensor[{KERNEL_SHAPE}, {dtype}]"
        param_init[name] = _make_random_tensor(dtype, KERNEL_SHAPE)
    # Add several parameters so that the number of parameters is greater than one.
    _add_decl(f"{dtype}_a", dtype)
    _add_decl(f"{dtype}_b", dtype)
    mod_lines = [
        '#[version = "0.0.5"]',
        f"def @main(%rand_input : Tensor[{INPUT_SHAPE}, {dtype}], {{ ', '.join(param_decls.values()) }} ) {{".replace("{{ ", "{ ").replace(" }}", " }"),
        # This program ensures that GraphPlanMemory alternates between the same two storage IDs for a
        # while. In doing this, it ensures that param %{dtype}_b will be placed into the graph at an
        # index unequal to its storage_id. This ensures that GraphExecutorCodegen encodes the storage_id
        # and not the parameter index into the graph.
        (
            f' %0 = nn.conv2d(%rand_input, %{dtype}_a, data_layout="NCHW", kernel_layout="OIHW", '
            f'kernel_size=[3, 3], out_dtype="{dtype}");'
        ),
        (
            f' %1 = nn.conv2d(%0, %{dtype}_a, data_layout="NCHW", kernel_layout="OIHW", '
            f'kernel_size=[3, 3], out_dtype="{dtype}");'
        ),
        (
            f' %2 = nn.conv2d(%1, %{dtype}_a, data_layout="NCHW", kernel_layout="OIHW", '
            f'kernel_size=[3, 3], out_dtype="{dtype}");'
        ),
        (
            f' %3 = nn.conv2d(%2, %{dtype}_b, data_layout="NCHW", kernel_layout="OIHW", '
            f'kernel_size=[3, 3], out_dtype="{dtype}");'
        ),
        " %3",
        "}",
    ]
    mod = tvm.relay.fromtext("\n".join(mod_lines))
    return mod, param_init
@tvm.testing.requires_llvm
def test_llvm_link_params(linkable_dtype):
    """Build with link-params under the llvm target and check that the linked
    module produces the same output as a conventional (unlinked) build."""
    ir_mod, param_init = _make_mod_and_params(linkable_dtype)
    rand_input = _make_random_tensor(linkable_dtype, INPUT_SHAPE)
    target = "llvm"
    runtime = Runtime("crt", {"system-lib": True})
    executor = Executor("graph", {"link-params": True})
    with tvm.transform.PassContext(opt_level=3):
        lib = tvm.relay.build(ir_mod, target, runtime=runtime, executor=executor, params=param_init)
    # NOTE: Need to export_library() and load_library() to link all the Module(llvm, ...)
    # against one another.
    temp_dir = utils.TempDirectory()
    export_file = temp_dir / "lib.so"
    lib.lib.export_library(export_file)
    mod = tvm.runtime.load_module(export_file)
    assert len(lib.params.keys()) == 0  # NOTE: params became tir.constants
    assert mod.get_function("TVMSystemLibEntryPoint") is not None
    graph = json.loads(lib.graph_json)
    for p in lib.params:
        # BUG FIX: this call previously ended with `or found_one`, referencing
        # an undefined name (a NameError if lib.params were ever non-empty).
        _verify_linked_param(linkable_dtype, lib, mod, graph, p)
    # Wrap in function to explicitly deallocate the runtime.
    def _run_linked(lib, mod):
        graph_json, _, _ = lib
        graph_rt = tvm.contrib.graph_executor.create(graph_json, mod, tvm.cpu(0))
        graph_rt.set_input("rand_input", rand_input)  # NOTE: params not required.
        graph_rt.run()
        return graph_rt.get_output(0)

    linked_output = _run_linked(lib, mod)
    runtime = Runtime("cpp", {"system-lib": True})
    with tvm.transform.PassContext(opt_level=3):
        lib = tvm.relay.build(ir_mod, "llvm", runtime=runtime, params=param_init)

    def _run_unlinked(lib):
        graph_json, mod, lowered_params = lib
        graph_rt = tvm.contrib.graph_executor.create(graph_json, mod, tvm.cpu(0))
        graph_rt.set_input("rand_input", rand_input, **lowered_params)
        graph_rt.run()
        return graph_rt.get_output(0)

    unlinked_output = _run_unlinked(lib)
    if "int" in linkable_dtype:
        np.testing.assert_equal(unlinked_output.numpy(), linked_output.numpy())
    else:
        np.testing.assert_allclose(unlinked_output.numpy(), linked_output.numpy())
def _get_c_datatype(dtype):
    """Translate LINKABLE_DTYPES element to c datatype."""
    if "int" in dtype:
        return f"{dtype}_t"
    c_types = {"float32": "float", "float64": "double"}
    assert dtype in c_types, f"unknown dtype {dtype}"
    return c_types[dtype]
HEX_NUM_RE = re.compile(r"[+\-]?(?:(?:0x[0-9A-Fa-f.p+-]+)|(?:INFINITY)|(?:NAN))")
def test_c_link_params(linkable_dtype):
    """Build with link-params under the C backend, verify the parameter is
    embedded in the generated source, then check the linked module's output
    matches an unlinked build.

    Cleaned up: removed the unused `main_func` local and the dead `width`
    computation (it was calculated but never read by the hex-literal scan).
    """
    temp_dir = utils.tempdir()
    mod, param_init = _make_mod_and_params(linkable_dtype)
    rand_input = _make_random_tensor(linkable_dtype, INPUT_SHAPE)
    target = "c"
    executor = Executor("graph", {"link-params": True})
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        lib = tvm.relay.build(mod, target, executor=executor, params=param_init)
    assert len(lib.params.keys()) == 0  # NOTE: params became tir.constants
    src = lib.lib.get_source()
    lib.lib.save(temp_dir.relpath("test.c"), "c")
    c_dtype = _get_c_datatype(linkable_dtype)
    src_lines = src.split("\n")
    param = param_init[f"{linkable_dtype}_a"].reshape(np.prod(KERNEL_SHAPE))
    # The linked parameter must appear as a static const array in .rodata.tvm.
    param_def = rf"^static const {c_dtype} __attribute__\(\(section\(\".rodata.tvm\"\), aligned\(16\)\)\) [a-zA-Z_0-9]*constant_\d+\[{np.prod(param.shape)}\] = {{$"
    for i, line in enumerate(src_lines):
        if re.match(param_def, line):
            i += 1
            break
    else:
        assert False, f'did not find parameter definition "{param_def}":\n{src}'
    # Count hex literals until the closing brace; one per parameter element.
    cursor = 0
    while "};" not in src_lines[i]:
        cursor += len(HEX_NUM_RE.findall(src_lines[i]))
        i += 1
    assert cursor == np.prod(param.shape)
    # Need a unique name per library to avoid dlopen caching the lib load.
    lib_path = temp_dir.relpath(f"test-{linkable_dtype}-linked.so")
    lib["remove_params"]().export_library(lib_path)
    lib_mod = tvm.runtime.load_module(lib_path)
    graph = json.loads(lib.graph_json)
    for p in lib.params:
        _verify_linked_param(linkable_dtype, lib, lib_mod, graph, p)
    # Wrap in function to explicitly deallocate the runtime.
    def _run_linked(lib_mod):
        graph_rt = tvm.contrib.graph_executor.GraphModule(lib_mod["default"](tvm.cpu(0)))
        graph_rt.set_input("rand_input", rand_input)  # NOTE: params not required.
        graph_rt.run()
        return graph_rt.get_output(0)

    linked_output = _run_linked(lib_mod)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        lib = tvm.relay.build(mod, "c", params=param_init)
    _, _, params = lib
    # Need a unique name per library to avoid dlopen caching the lib load.
    lib_path = temp_dir.relpath(f"test-{linkable_dtype}-unlinked.so")
    lib.export_library(lib_path)
    lib_mod = tvm.runtime.load_module(lib_path)

    def _run_unlinked(lib_mod):
        graph_rt = tvm.contrib.graph_executor.GraphModule(lib_mod["default"](tvm.cpu(0)))
        graph_rt.set_input("rand_input", rand_input, **params)
        graph_rt.run()
        return graph_rt.get_output(0)

    unlinked_output = _run_unlinked(lib_mod)
    if "int" in linkable_dtype:
        np.testing.assert_equal(unlinked_output.numpy(), linked_output.numpy())
    else:
        np.testing.assert_allclose(unlinked_output.numpy(), linked_output.numpy())
@tvm.testing.requires_micro
def test_crt_link_params(linkable_dtype):
    """Build with link-params for the CRT/microTVM runtime and compare its
    output against an unlinked llvm build.

    Cleaned up: removed the unused `main_func` local.
    """
    from tvm import micro

    mod, param_init = _make_mod_and_params(linkable_dtype)
    rand_input = _make_random_tensor(linkable_dtype, INPUT_SHAPE)
    target = "c"
    runtime = Runtime("crt", {"system-lib": True})
    executor = Executor("graph", {"link-params": True})
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        factory = tvm.relay.build(
            mod, target, runtime=runtime, executor=executor, params=param_init
        )
    assert len(factory.get_params().keys()) == 0  # NOTE: params became tir.constants
    temp_dir = tvm.contrib.utils.tempdir()
    template_project_dir = tvm.micro.get_microtvm_template_projects("crt")
    project = tvm.micro.generate_project(
        template_project_dir, factory, temp_dir / "project", {"verbose": 1}
    )
    project.build()
    project.flash()
    with tvm.micro.Session(project.transport()) as sess:
        graph_rt = tvm.micro.session.create_local_graph_executor(
            factory.get_graph_json(), sess.get_system_lib(), sess.device
        )
        assert len(factory.params.keys()) == 0  # NOTE: params became tir.constants
        # NOTE: not setting params here.
        graph_rt.set_input("rand_input", rand_input)
        graph_rt.run()
        linked_output = graph_rt.get_output(0).numpy()
    runtime = Runtime("cpp", {"system-lib": True})
    with tvm.transform.PassContext(opt_level=3):
        lib = tvm.relay.build(mod, "llvm", runtime=runtime, params=param_init)

    def _run_unlinked(lib):
        graph_json, mod, lowered_params = lib
        graph_rt = tvm.contrib.graph_executor.create(graph_json, mod, tvm.cpu(0))
        graph_rt.set_input("rand_input", rand_input, **lowered_params)
        graph_rt.run()
        return graph_rt.get_output(0).numpy()

    unlinked_output = _run_unlinked(lib)
    if "int" in linkable_dtype:
        np.testing.assert_equal(unlinked_output, linked_output)
    else:
        np.testing.assert_allclose(unlinked_output, linked_output)
def test_tir_link_params():
    """link-params through the meta_schedule path: a dense built with a
    ScheduleFnDatabase must still find its workload and match numpy."""
    def get_dense(data_shape, weight_shape):
        # Relay function computing dense(data, weight) = data @ weight^T.
        data = relay.var("data", shape=data_shape, dtype="float32")
        weight = relay.var("weight", shape=weight_shape, dtype="float32")
        dense = relay.nn.dense(data, weight)
        return relay.Function([data, weight], dense)
    def get_ref_dense(data_np, weight_np):
        # numpy reference for relay.nn.dense (weight is transposed).
        return np.dot(data_np, np.transpose(weight_np))
    def schedule_dense(sch):
        # Only fetches the block/loops; applies no transforms, so the
        # default schedule is effectively used.
        dense = sch.get_block("T_matmul_NT")
        _y, _x, _k = sch.get_loops(dense)
    M, N, K = 128, 128, 128
    data_shape = (M, K)
    weight_shape = (N, K)
    relay_mod = tvm.IRModule.from_expr(get_dense(data_shape, weight_shape))
    relay_mod = relay.transform.InferType()(relay_mod)
    data_np = np.random.randn(*data_shape).astype("float32")
    weight_np = np.random.randn(*weight_shape).astype("float32")
    target = "llvm"
    params = {"weight": weight_np}
    def schedule_fn(sch):
        # Database callback: claim only the dense task.
        if "nn_dense" in sch.mod.attrs["task_name"]:
            schedule_dense(sch)
            return True
        return False
    # Capture stderr so we can detect "Cannot find workload" warnings.
    with StringIO() as stderr_buf, redirect_stderr(stderr_buf):
        with ms.database.ScheduleFnDatabase(schedule_fn), tvm.transform.PassContext(
            opt_level=3,
            config={"relay.backend.use_meta_schedule": True},
        ):
            executor = Executor("graph", {"link-params": True})
            lib = relay.build(relay_mod, target=target, executor=executor)
        # Workload look up should succeed. This does not work when the test is invoked from pytest.
        assert not "Cannot find workload" in stderr_buf.getvalue()
    dev = tvm.device(target, 0)
    runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
    runtime.set_input(**params)
    runtime.set_input("data", data_np)
    runtime.run()
    out = runtime.get_output(0).numpy()
    ref = get_ref_dense(data_np, weight_np)
    tvm.testing.assert_allclose(out, ref, atol=1e-4, rtol=1e-4)
if __name__ == "__main__":
    # Allow running this file directly: discover and run all tests in it.
    tvm.testing.main()
| 16,531 | 36.744292 | 168 | py |
tvm | tvm-main/tests/python/unittest/test_tir_data_layout.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test layout and bijective-layout node"""
import pytest
import tvm
import tvm.error
from tvm.topi.utils import get_const_tuple
def test_layout():
    """Basic Layout queries: factor_of, index_of, membership, and indexing."""
    layout = tvm.tir.layout("NCHW16c")
    assert layout is not None
    assert isinstance(layout, tvm.tir.Layout)
    # Subdimension factor: upper- and lower-case forms of an axis share it;
    # an unsplit axis reports -1.
    assert layout.factor_of("c") == 16
    assert layout.factor_of("C") == 16
    assert layout.factor_of("N") == -1
    # Axis positions within the layout; -1 for an absent axis.
    for axis, position in (("N", 0), ("C", 1), ("H", 2), ("W", 3), ("c", 4), ("O", -1)):
        assert layout.index_of(axis) == position
    # Membership tests.
    for axis in "NCHWc":
        assert axis in layout
    assert "O" not in layout
    # Positional indexing, including a negative index.
    for position, axis in enumerate("NCHWc"):
        assert layout[position] == axis
    assert layout[-1] == "c"
def test_layout_dtype():
    """Layout axis vars and ranges carry the requested index dtype; only
    integer dtypes are accepted."""
    layout_i32 = tvm.tir.layout("NCHW")
    for axis in (layout_i32.axes[0], layout_i32.axes[1]):
        assert axis.var.dtype == "int32"
        assert axis.dom.min.dtype == "int32"
        assert axis.dom.extent.dtype == "int32"
    layout_i64 = tvm.tir.layout("NCHW", dtype="int64")
    for axis in (layout_i64.axes[2], layout_i64.axes[3]):
        assert axis.var.dtype == "int64"
        assert axis.dom.min.dtype == "int64"
        assert axis.dom.extent.dtype == "int64"
    with pytest.raises(TypeError):
        tvm.tir.layout("NCHW", dtype="float32")
    with pytest.raises(TypeError):
        tvm.tir.layout("NCHW", dtype=None)
def test_bilayout_convertible():
    """bijective_layout returns None for every non-convertible layout pair."""
    non_convertible = [
        ("NCHW", "ABCD"),
        ("__undef__", "NCHW"),
        ("NCHW", "__undef__"),
        ("__undef__", "__undef__"),
        ("", "NCHW"),
        ("NCHW", ""),
        ("", ""),
    ]
    for src, dst in non_convertible:
        assert tvm.tir.bijective_layout(src, dst) is None
    # A valid split of C into 16-wide chunks is convertible.
    assert tvm.tir.bijective_layout("NCHW", "NCHW16c") is not None
def test_bilayout_shape():
    """Shape inference round-trips through the NCHW <-> NCHW16c mapping."""
    bilayout = tvm.tir.bijective_layout("NCHW", "NCHW16c")
    assert isinstance(bilayout, tvm.tir.BijectiveLayout)
    packed_shape = bilayout.forward_shape((1, 32, 7, 7))
    assert get_const_tuple(packed_shape) == (1, 2, 7, 7, 16)
    unpacked_shape = bilayout.backward_shape(packed_shape)
    assert get_const_tuple(unpacked_shape) == (1, 32, 7, 7)
def test_bilayout_index():
    """Index mapping round-trips through the NCHW <-> NCHW16c mapping."""
    bilayout = tvm.tir.bijective_layout("NCHW", "NCHW16c")
    packed_index = bilayout.forward_index([0, 18, 6, 6])
    assert get_const_tuple(packed_index) == (0, 1, 6, 6, 2)
    unpacked_index = bilayout.backward_index([0, 1, 6, 6, 2])
    assert get_const_tuple(unpacked_index) == (0, 18, 6, 6)
if __name__ == "__main__":
    # Use the shared test entry point (consistent with the other test files)
    # so that newly added tests are picked up automatically; the previous
    # explicit call list could silently go stale.
    tvm.testing.main()
| 4,058 | 33.109244 | 69 | py |
tvm | tvm-main/tests/python/unittest/test_tvmscript_complete.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.ir import Range
from tvm.script import tir as T
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    # 128x128 matmul with B indexed as [j, k] (i.e. C = A * B^T), written
    # without explicit T.reads/T.writes; test_complete_matmul checks that the
    # completer infers those regions.
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = T.float32(0)
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def matmul_original(a: T.handle, b: T.handle, c: T.handle) -> None:
    # Tiled variant of the matmul above: 32x32 outer grid over 4x4(x4)
    # sub-tiles, with a separate explicit "init" block instead of T.init().
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j in T.grid(32, 32):
        with T.block("init"):
            vi, vj = T.axis.remap("SS", [i, j])
            for ii, jj in T.grid(4, 4):
                C[vi * 4 + ii, vj * 4 + jj] = T.float32(0)
        for k in range(0, 32):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                for ii, jj, kk in T.grid(4, 4, 4):
                    C[vi * 4 + ii, vj * 4 + jj] = (
                        C[vi * 4 + ii, vj * 4 + jj]
                        + A[vi * 4 + ii, vk * 4 + kk] * B[vj * 4 + jj, vk * 4 + kk]
                    )
# Fixture: two chained element-wise stages (B = A + 1, C = B + 1) under an
# explicit, un-annotated root block.  Completion must fill in the per-block
# regions; verified by _check_elementwise.
@T.prim_func
def elementwise_with_root(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    with T.block():
        for i, j in T.grid(128, 128):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = A[vi, vj] + T.float32(1)
        for i, j in T.grid(128, 128):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = B[vi, vj] + T.float32(1)
# NOTE(review): unlike every sibling fixture here, this function carries no
# @T.prim_func decorator, so it is a plain Python function and is not
# referenced by any test in this file -- confirm whether the decorator was
# dropped intentionally or lost.
def func_with_opaque_block(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    with T.block():
        # Opaque inner block: no block iter vars, direct element access.
        with T.block():
            B[0, 0] = A[0, 0] + T.float32(1)
        for i, j in T.grid(128, 128):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = B[vi, vj] + T.float32(1)
# Fixture: same two-stage element-wise computation, but each block annotates
# only one side of its accesses (first block only T.reads, second only
# T.writes).  The completer must fill in the missing side; verified by
# _check_elementwise via test_complete_part_region.
@T.prim_func
def func_with_part_access_region(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    with T.block():
        for i, j in T.grid(128, 128):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                T.reads(A[vi, vj])
                B[vi, vj] = A[vi, vj] + T.float32(1)
        for i, j in T.grid(128, 128):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                T.writes(C[vi, vj])
                C[vi, vj] = B[vi, vj] + T.float32(1)
def test_complete_matmul():
    """The completer must infer the read/write regions of the matmul update block."""
    prim_func = matmul
    buf_a, buf_b, buf_c = (prim_func.buffer_map[param] for param in prim_func.params)
    # Navigate: root block -> i/j/k loop nest -> the "update" block.
    update_block = prim_func.body.block.body.body.body.body.block
    assert isinstance(update_block, tvm.tir.Block)
    vi, vj, vk = (iter_var.var for iter_var in update_block.iter_vars)

    def point_region(buf, *indices):
        # Single-element region of `buf` at the given indices.
        return tvm.tir.BufferRegion(buf, [Range.from_min_extent(idx, 1) for idx in indices])

    tvm.ir.assert_structural_equal(
        update_block.reads, [point_region(buf_a, vi, vk), point_region(buf_b, vj, vk)]
    )
    tvm.ir.assert_structural_equal(update_block.writes, [point_region(buf_c, vi, vj)])
def test_complete_matmul_original():
    """Check inferred regions of the hand-tiled matmul's init and update blocks."""
    func = matmul_original
    A, B, C = [func.buffer_map[x] for x in func.params]

    # "init" block: writes a 4x4 tile of C, reads nothing.
    block1 = func.body.block.body.body.body[0].block
    assert isinstance(block1, tvm.tir.Block)
    vi, vj = [x.var for x in block1.iter_vars]
    access_C = tvm.tir.BufferRegion(
        C, [Range.from_min_extent(vi * 4, 4), Range.from_min_extent(vj * 4, 4)]
    )
    tvm.ir.assert_structural_equal(block1.reads, [])
    tvm.ir.assert_structural_equal(block1.writes, [access_C])

    # "update" block: reads 4-wide tiles of C, A, B and accumulates into C.
    block2 = func.body.block.body.body.body[1].body.block
    assert isinstance(block2, tvm.tir.Block)
    vi, vj, vk = [x.var for x in block2.iter_vars]
    access_A = tvm.tir.BufferRegion(
        A, [Range.from_min_extent(vi * 4, 4), Range.from_min_extent(vk * 4, 4)]
    )
    access_B = tvm.tir.BufferRegion(
        B, [Range.from_min_extent(vj * 4, 4), Range.from_min_extent(vk * 4, 4)]
    )
    access_C = tvm.tir.BufferRegion(
        C, [Range.from_min_extent(vi * 4, 4), Range.from_min_extent(vj * 4, 4)]
    )
    tvm.ir.assert_structural_equal(block2.reads, [access_C, access_A, access_B])
    tvm.ir.assert_structural_equal(block2.writes, [access_C])
def _check_elementwise(func):
    """Verify completed read/write regions of a two-stage element-wise func.

    Expects `func` to hold a root block with two sequential blocks computing
    B from A and C from B, each accessing a single element at (vi, vj).
    """
    A, B, C = [func.buffer_map[param] for param in func.params]

    # The root block itself must carry no read/write annotations.
    root_block = func.body.block
    assert len(root_block.reads) == 0
    assert len(root_block.writes) == 0

    def point_region(buf, vi, vj):
        # One-element region of a 2-D buffer at (vi, vj).
        return tvm.tir.BufferRegion(
            buf, [Range.from_min_extent(vi, 1), Range.from_min_extent(vj, 1)]
        )

    # Stage 0 reads A and writes B; stage 1 reads B and writes C.
    for stage, (src, dst) in enumerate([(A, B), (B, C)]):
        block = func.body.block.body[stage].body.body.block
        assert isinstance(block, tvm.tir.Block)
        vi, vj = [iter_var.var for iter_var in block.iter_vars]
        tvm.ir.assert_structural_equal(block.reads, [point_region(src, vi, vj)])
        tvm.ir.assert_structural_equal(block.writes, [point_region(dst, vi, vj)])
def test_complete_with_root():
    # Completion when the func declares an explicit (bare) root block.
    _check_elementwise(elementwise_with_root)
def test_complete_part_region():
    # Completion when blocks annotate only one of T.reads / T.writes by hand.
    _check_elementwise(func_with_part_access_region)
# Fixture: data-dependent indexing -- the column index of data_buf is read
# from index_buf at runtime.  Completion must include index_buf itself in the
# block's read regions.
@T.prim_func
def func_with_bufferslice_indices(data: T.handle, index: T.handle) -> None:
    data_buf = T.match_buffer(data, (16, 16), "float32")
    index_buf = T.match_buffer(index, (1,), "int32")
    out_buf = T.alloc_buffer((16, 16), "float32")
    for i, j in T.grid(16, 16):
        with T.block():
            vi, vj = T.axis.remap("SS", [i, j])
            out_buf[vi, vj] = data_buf[vi, index_buf[0]]
# Expected fully-completed form of func_with_bufferslice_indices after a
# script() / from_source() round trip: root block, explicit reads/writes
# (including the index_buf[0] read), and default buffer parameters spelled out.
@T.prim_func
def expected_bufferslice_indices(data: T.handle, index: T.handle) -> None:
    index_buf = T.match_buffer(index, [1], dtype="int32", elem_offset=0, align=64, offset_factor=1)
    data_buf = T.match_buffer(data, [16, 16], elem_offset=0, align=64, offset_factor=1)
    with T.block("root"):
        T.reads([])
        T.writes([])
        out_buf = T.alloc_buffer([16, 16], elem_offset=0, align=64, offset_factor=1)
        for i0, i1 in T.grid(16, 16):
            with T.block():
                vi, vj = T.axis.remap("SS", [i0, i1])
                T.reads([data_buf[vi, index_buf[0]], index_buf[0]])
                T.writes([out_buf[vi, vj]])
                out_buf[vi, vj] = data_buf[vi, index_buf[0]]
# Fixture: recursive data-dependent indexing (index_buf indexed by its own
# contents).  Exercises region completion for nested buffer loads.
@T.prim_func
def func_with_recursive_bufferslice_indices(data: T.handle, index: T.handle) -> None:
    data_buf = T.match_buffer(data, (16, 16), "float32")
    index_buf = T.match_buffer(index, (1,), "int32")
    out_buf = T.alloc_buffer((16, 16), "float32")
    for i, j in T.grid(16, 16):
        with T.block():
            vi, vj = T.axis.remap("SS", [i, j])
            out_buf[vi, vj] = data_buf[index_buf[index_buf[0]], index_buf[0]]
# Expected completed form of the recursive fixture.  Note the conservative
# index_buf read region expressed as a min/max slice around index_buf[0].
@T.prim_func
def expected_recursive_bufferslice_indices(data: T.handle, index: T.handle) -> None:
    index_buf = T.match_buffer(index, [1], dtype="int32", elem_offset=0, align=64, offset_factor=1)
    data_buf = T.match_buffer(data, [16, 16], elem_offset=0, align=64, offset_factor=1)
    with T.block("root"):
        T.reads([])
        T.writes([])
        out_buf = T.alloc_buffer([16, 16], elem_offset=0, align=64, offset_factor=1)
        for i0, i1 in T.grid(16, 16):
            with T.block():
                vi, vj = T.axis.remap("SS", [i0, i1])
                T.reads(
                    [
                        data_buf[index_buf[index_buf[0]], index_buf[0]],
                        index_buf[T.min(index_buf[0], 0) : T.max(index_buf[0], 0) + 1],
                    ]
                )
                T.writes([out_buf[vi, vj]])
                out_buf[vi, vj] = data_buf[index_buf[index_buf[0]], index_buf[0]]
def test_complete_buffer_indices():
    """Round-trip each bufferslice fixture through print/parse and compare."""
    for original, expected in [
        (func_with_bufferslice_indices, expected_bufferslice_indices),
        (func_with_recursive_bufferslice_indices, expected_recursive_bufferslice_indices),
    ]:
        reparsed = tvm.script.from_source(original.script())
        tvm.ir.assert_structural_equal(reparsed, expected)
# Fixture: nested T.match_buffer over sub-regions (a row of A, then a single
# element of that row).  Completion must annotate each enclosing block with
# the matched region as a write.
@T.prim_func
def match_buffer_func(a: T.handle) -> None:
    A = T.match_buffer(a, (16, 16))
    for i in range(0, 16):
        with T.block():
            A0 = T.match_buffer(A[i, 0:16], (16))
            with T.block():
                for j in range(0, 16):
                    with T.block():
                        A1 = T.match_buffer(A0[j], ())
                        A1[()] = 1.0
# Expected completed form: every block gains explicit (empty) reads and the
# write region covering its matched sub-buffer.
@T.prim_func
def expected_match_buffer_func(a: T.handle) -> None:
    A = T.match_buffer(a, (16, 16))
    for i in range(0, 16):
        with T.block():
            T.reads([])
            T.writes(A[i, 0:16])
            A0 = T.match_buffer(A[i, 0:16], (16))
            with T.block():
                T.reads([])
                T.writes(A0[0:16])
                for j in range(0, 16):
                    with T.block():
                        T.reads([])
                        T.writes(A0[j])
                        A1 = T.match_buffer(A0[j], ())
                        A1[()] = 1.0
def test_complete_match_buffer():
    # The @T.prim_func decorator already runs completion on parse, so the
    # fixture should be structurally identical to the expected form.
    tvm.ir.assert_structural_equal(match_buffer_func, expected_match_buffer_func)
# Fixture: T.alloc_buffer at function scope with no explicit root block.
# Completion must wrap the body in a root block that owns the allocation.
@T.prim_func
def alloc_buffer_func(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [2, 2], dtype="float32")
    B = T.match_buffer(b, [2, 2], dtype="float32")
    C = T.alloc_buffer([2, 2], dtype="float32")
    A[(0, 0)] = T.float32(2)
    C[(0, 0)] = A[(0, 0)] + B[(0, 0)]
    B[(0, 0)] = C[(0, 0)]
# Expected completed form: explicit "root" block with empty reads/writes
# hosting the C allocation, and default buffer parameters spelled out.
@T.prim_func
def expect_alloc_buffer_func(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [2, 2], dtype="float32", elem_offset=0, align=64, offset_factor=1)
    B = T.match_buffer(b, [2, 2], dtype="float32", elem_offset=0, align=64, offset_factor=1)
    with T.block("root"):
        T.reads([])
        T.writes([])
        C = T.alloc_buffer([2, 2], dtype="float32", elem_offset=0, align=64, offset_factor=1)
        A[(0, 0)] = T.float32(2)
        C[(0, 0)] = A[(0, 0)] + B[(0, 0)]
        B[(0, 0)] = C[(0, 0)]
def test_complete_alloc_buffer():
    """Completion of a func using T.alloc_buffer adds a root block owning it.

    Also verifies the completed func survives a print/parse round trip.
    Previously `rt_func` was parsed but never asserted on, so the round trip
    was not actually checked.
    """
    rt_func = tvm.script.from_source(alloc_buffer_func.script())
    tvm.ir.assert_structural_equal(alloc_buffer_func, expect_alloc_buffer_func)
    # Fix: assert on the round-tripped function instead of discarding it.
    tvm.ir.assert_structural_equal(rt_func, expect_alloc_buffer_func)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_complete_matmul()
    test_complete_matmul_original()
    test_complete_with_root()
    test_complete_part_region()
    test_complete_buffer_indices()
    test_complete_match_buffer()
    test_complete_alloc_buffer()
| 12,188 | 35.494012 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_coproc_sync.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
# Register global symbols for the co-processor intrinsic ops so that the
# CoProcSync pass can emit them as extern calls in the tests below.
tvm.ir.register_op_attr("tir.cop.coproc_sync", "TGlobalSymbol", "coproc_sync")
tvm.ir.register_op_attr("tir.cop.coproc_read_barrier", "TGlobalSymbol", "coproc_readb")
tvm.ir.register_op_attr("tir.cop.coproc_write_barrier", "TGlobalSymbol", "coproc_writeb")
tvm.ir.register_op_attr("tir.cop.coproc_dep_push", "TGlobalSymbol", "coproc_dep_push")
tvm.ir.register_op_attr("tir.cop.coproc_dep_pop", "TGlobalSymbol", "coproc_dep_pop")
def test_coproc_sync():
    """CoProcSync must insert read/write barriers around the coproc region."""

    # Register memory info for the "global.cache" scope used by buffer A.
    @tvm.register_func("tvm.info.mem.global.cache")
    def meminfo_cache():
        return tvm.ir.make_node(
            "MemoryInfo",
            unit_bits=8,
            max_simd_bits=32,
            max_num_bits=128,
            head_address=tvm.tir.call_extern("handle", "global_cache"),
        )

    ib = tvm.tir.ir_builder.create()
    n = te.size_var("n")
    cp = te.thread_axis((0, 1), "cop")
    A = ib.allocate("float32", 128, name="A", scope="global.cache")
    with ib.for_range(0, n, name="i") as i:
        A[i] = A[i] + 1
        with ib.for_range(0, 8, name="k") as k:
            with ib.for_range(0, 10, name="j") as j:
                # Inner loops run on the co-processor scope.
                ib.scope_attr(cp, "coproc_scope", 1)
                A[j] = A[j + k * 10] + 2
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], stmt))
    stmt = tvm.tir.transform.CoProcSync()(mod)["main"].body
    body = stmt.body.body
    blist = tvm.tir.stmt_list(body)
    # A read barrier covering 80 elements precedes the coproc loops...
    assert blist[1].value.op.same_as(tvm.ir.Op.get("tir.cop.coproc_read_barrier"))
    assert blist[1].value.args[3].value == 80
    # ...and a sync plus a 10-element write barrier follow them.
    assert blist[-2].value.op.same_as(tvm.ir.Op.get("tir.cop.coproc_sync"))
    assert blist[-1].value.op.same_as(tvm.ir.Op.get("tir.cop.coproc_write_barrier"))
    assert blist[-1].value.args[3].value == 10
def test_coproc_sync2():
    """Smoke test: CoProcSync must handle virtual threads without crashing.

    No assertions are made on the result; the pass simply has to run on a
    body that mixes two coproc scopes under a virtual-thread annotation.
    """
    ib = tvm.tir.ir_builder.create()
    n = te.size_var("n")
    cp = te.thread_axis((0, 1), "cop")
    ty = te.thread_axis("cthread")
    A = ib.allocate("float32", 128, name="A")
    ib.scope_attr(ty, "virtual_thread", 2)
    with ib.new_scope():
        ib.scope_attr(cp, "coproc_scope", 2)
        A[ty] = 0.0
    with ib.for_range(0, n, name="i") as i:
        with ib.new_scope():
            ib.scope_attr(cp, "coproc_scope", 1)
            A[ty] = 1.0
        with ib.new_scope():
            ib.scope_attr(cp, "coproc_scope", 2)
            A[ty] = 1.0
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], stmt))
    stmt = tvm.tir.transform.CoProcSync()(mod)["main"].body
def test_coproc_sync3():
    """CoProcSync must insert matching dep_push/dep_pop pairs between scopes."""

    def __check_list(tvm_array, py_list):
        # Element-wise compare a TVM array of IntImms against a Python list.
        for ti, li in zip(tvm_array, py_list):
            if ti.value != li:
                return False
        return True

    ib = tvm.tir.ir_builder.create()
    n = te.size_var("n")
    cp = te.thread_axis((0, 1), "cop")
    A = ib.allocate("float32", 128, name="A", scope="global.cache")
    with ib.for_range(0, n, name="i") as i:
        with ib.for_range(0, n, name="i") as j:
            with ib.new_scope():
                ib.scope_attr(cp, "coproc_scope", 1)
                A[i] = 1.0
            with ib.new_scope():
                ib.scope_attr(cp, "coproc_scope", 2)
                A[i] = 1.0
    # Scope 3 touches A[0] after the loop nest, creating a cross-scope
    # dependency from scope 2 to scope 3.
    with ib.new_scope():
        ib.scope_attr(cp, "coproc_scope", 3)
        A[0] = 0.0
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], stmt))
    stmt = tvm.tir.transform.CoProcSync()(mod)["main"].body
    slist = tvm.tir.stmt_list(stmt[0].body)
    push_st = slist[2]
    slist = tvm.tir.stmt_list(slist[-1])
    pop_st = slist[0].body[0]
    # Push in the producer scope, pop in the consumer scope, with matching
    # (from, to) = (2, 3) arguments.
    assert push_st.value.op.same_as(tvm.ir.Op.get("tir.cop.coproc_dep_push"))
    assert __check_list(push_st.value.args, [2, 3])
    assert pop_st.value.op.same_as(tvm.ir.Op.get("tir.cop.coproc_dep_pop"))
    assert __check_list(pop_st.value.args, [2, 3])
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_coproc_sync()
    test_coproc_sync2()
    test_coproc_sync3()
| 4,717 | 35.292308 | 89 | py |
tvm | tvm-main/tests/python/unittest/test_tvmscript_ir_builder_base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unittests for tvm.script.ir_builder.base"""
import pytest
from tvm.script.ir_builder import IRBuilder
def test_ir_builder_scope():
    """Entering an IRBuilder context makes it the current builder."""
    with IRBuilder() as builder:
        assert IRBuilder.current() == builder
def test_ir_builder_multi_scope():
    """With nested scopes, the innermost builder is current until it exits."""
    with IRBuilder() as outer:
        with IRBuilder() as inner:
            assert IRBuilder.current() == inner
        assert IRBuilder.current() == outer
def test_ir_builder_no_scope():
    """Querying the current builder outside any scope raises ValueError."""
    with pytest.raises(ValueError):
        IRBuilder.current()
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_ir_builder_scope()
    test_ir_builder_multi_scope()
    test_ir_builder_no_scope()
| 1,491 | 33.697674 | 64 | py |
tvm | tvm-main/tests/python/unittest/test_tir_intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ctypes
import math

import numpy as np
import pytest

import tvm
import tvm.testing
from tvm import te, tir
from tvm import topi
from tvm.contrib import utils, clang
from tvm.script import tir as T
def test_nearbyint():
    """tir.nearbyint must round like libc nearbyint, i.e. match np.rint."""
    m = te.var("m")
    A = te.placeholder((m,), name="A")
    A_rounded = te.compute((m,), lambda *i: tvm.tir.nearbyint(A(*i)), name="A")
    schedule = te.create_schedule(A_rounded.op)
    func = tvm.build(schedule, [A, A_rounded], "llvm")
    dev = tvm.cpu(0)

    num_elems = 10
    arr_in = tvm.nd.array(np.random.uniform(high=100, size=num_elems).astype(A.dtype), dev)
    arr_out = tvm.nd.array(np.random.uniform(size=num_elems).astype(A_rounded.dtype), dev)
    func(arr_in, arr_out)
    # np.rint and the default libc rounding mode both round halfway cases to
    # even (1.5 -> 2, 2.5 -> 2), so the results should agree exactly.  If a
    # different rounding mode were set, the numpy result could differ.
    tvm.testing.assert_allclose(arr_out.numpy(), np.rint(arr_in.numpy()))
def test_round_intrinsics_on_int():
    """Rounding intrinsics must fold to identity on integer/bool constants."""
    i = tvm.te.var("i", "int32")
    for op in [tvm.tir.round, tvm.tir.trunc, tvm.tir.ceil, tvm.tir.floor, tvm.tir.nearbyint]:
        assert op(tvm.tir.const(10, "int32")).value == 10
        assert op(tvm.tir.const(True, "bool")).value == True
        # Non-constant integer expressions must pass through untouched.
        assert op(i).same_as(i)

    # Integers are never NaN.
    assert tvm.tir.isnan(tvm.tir.const(10, "int32")).value == False
def test_unary_intrin():
    """Each unary TIR intrinsic must match its numpy reference on [0.1, 0.5)."""
    test_funcs = [
        (tvm.tir.exp10, lambda x: np.power(10, x)),
        (tvm.tir.log2, lambda x: np.log2(x)),
        (tvm.tir.log10, lambda x: np.log10(x)),
        (tvm.tir.sinh, lambda x: np.sinh(x)),
        (tvm.tir.cosh, lambda x: np.cosh(x)),
        (tvm.tir.log1p, lambda x: np.log1p(x)),
        (tvm.tir.asin, lambda x: np.arcsin(x)),
        (tvm.tir.acos, lambda x: np.arccos(x)),
        (tvm.tir.atan, lambda x: np.arctan(x)),
        (tvm.tir.asinh, lambda x: np.arcsinh(x)),
        # NOTE(review): inputs below are drawn from [0.1, 0.5), outside the
        # domains of acosh (x >= 1); those lanes compare NaN against NaN,
        # which assert_allclose treats as equal by default -- confirm this is
        # the intended coverage.
        (tvm.tir.acosh, lambda x: np.arccosh(x)),
        (tvm.tir.atanh, lambda x: np.arctanh(x)),
    ]

    def run_test(tvm_intrin, np_func):
        # Build a 1-D elementwise kernel applying the intrinsic and compare
        # against the numpy reference on random data.
        m = te.var(
            "m",
        )
        A = te.placeholder((m,), name="A")
        B = te.compute((m,), lambda *i: tvm_intrin(A(*i)), name="B")
        s = te.create_schedule(B.op)
        f = tvm.build(s, [A, B], "llvm")
        dev = tvm.cpu(0)
        n = 10
        a = tvm.nd.array(np.random.uniform(0.1, 0.5, size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), np_func(a.numpy()), atol=1e-5, rtol=1e-5)

    for func in test_funcs:
        run_test(*func)
def test_binary_intrin():
    """Each binary TIR intrinsic must match its numpy reference on [0, 1)."""
    test_funcs = [
        (tvm.tir.atan2, lambda x1, x2: np.arctan2(x1, x2)),
        (tvm.tir.nextafter, lambda x1, x2: np.nextafter(x1, x2)),
        (tvm.tir.copysign, lambda x1, x2: np.copysign(x1, x2)),
        (tvm.tir.hypot, lambda x1, x2: np.hypot(x1, x2)),
    ]

    def run_test(tvm_intrin, np_func):
        # Build a 1-D elementwise kernel applying the binary intrinsic and
        # compare against the numpy reference on random data.
        m = te.var(
            "m",
        )
        A = te.placeholder((m,), name="A")
        B = te.placeholder((m,), name="B")
        C = te.compute((m,), lambda *i: tvm_intrin(A(*i), B(*i)), name="C")
        s = te.create_schedule(C.op)
        f = tvm.build(s, [A, B, C], "llvm")
        dev = tvm.cpu(0)
        n = 10
        a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(B.dtype), dev)
        c = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        f(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), np_func(a.numpy(), b.numpy()), atol=1e-5, rtol=1e-5)

    for func in test_funcs:
        run_test(*func)
def test_ldexp():
    """tir.ldexp(x, e) must compute x * 2**e, matching np.ldexp."""
    m = te.var("m")
    mantissa = te.placeholder((m,), name="A")
    exponent = te.placeholder((m,), name="B", dtype="int32")
    scaled = te.compute((m,), lambda *i: tvm.tir.ldexp(mantissa(*i), exponent(*i)), name="C")
    schedule = te.create_schedule(scaled.op)
    func = tvm.build(schedule, [mantissa, exponent, scaled], "llvm")
    dev = tvm.cpu(0)

    num_elems = 10
    a = tvm.nd.array(np.random.uniform(0, 1, size=num_elems).astype(mantissa.dtype), dev)
    b = tvm.nd.array(np.random.randint(0, 5, size=num_elems).astype(exponent.dtype), dev)
    c = tvm.nd.array(np.random.uniform(size=num_elems).astype(mantissa.dtype), dev)
    func(a, b, c)
    tvm.testing.assert_allclose(c.numpy(), np.ldexp(a.numpy(), b.numpy()), atol=1e-5, rtol=1e-5)
dtype = tvm.testing.parameter("int32", "int64")


@tvm.testing.parametrize_targets("llvm", "vulkan -from_device=0")
def test_clz(target, dev, dtype):
    """tir.clz must return the count of leading zero bits per element.

    Fix: this function uses ``pytest.xfail`` but the file never imported
    ``pytest``, so reaching the Vulkan-without-int64 branch raised a
    NameError instead of xfailing (import added at file scope).
    """
    target = tvm.target.Target(target)
    if (
        target.kind.name == "vulkan"
        and dtype == "int64"
        and not target.attrs.get("supports_int64", False)
    ):
        pytest.xfail("Vulkan target does not support Int64 types")

    def clz_np(x, dtype):
        # Reference: bits - ceil(log2(x)), corrected by one for exact powers
        # of two (where x & (x - 1) == 0).
        ceil_log2 = np.ceil(np.log2(x)).astype(dtype)
        bits = int(dtype[-2:])
        clz = bits - ceil_log2
        clz[np.bitwise_and(x, x - 1) == 0] -= 1
        return clz

    m = te.var("m")
    A = te.placeholder((m,), name="A", dtype=dtype)
    B = te.compute((m,), lambda *i: tvm.tir.clz(A(*i)), name="B")
    s = te.create_schedule(B.op)

    if target.kind.name == "vulkan":
        # GPU targets require the op to be bound to thread axes.
        bx, tx = s[B].split(B.op.axis[0], factor=64)
        s[B].bind(bx, te.thread_axis("blockIdx.x"))
        s[B].bind(tx, te.thread_axis("threadIdx.x"))

    f = tvm.build(s, [A, B], target)
    n = 10
    highs = [10, 100, 1000, 10000, 100000, 1000000]
    if dtype == "int64":
        highs.append((1 << 63) - 1)
    for high in highs:
        a_np = np.random.randint(1, high=high, size=(n,), dtype=dtype)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(np.zeros((n,)).astype("int32"), dev)
        f(a, b)
        ref = clz_np(a_np, dtype)
        np.testing.assert_equal(b.numpy(), ref)
# Module containing a hand-written FMA loop (d = a * b + c) over strided
# 1-D buffers, used to check that LowerIntrin emits the LLVM fma intrinsic.
@tvm.script.ir_module
class Module:
    @T.prim_func
    def test_tir_fma(A: T.handle, B: T.handle, C: T.handle, d: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "test_fma", "tir.noalias": True})
        n = T.int32()
        stride = T.int32()
        stride_1 = T.int32()
        stride_2 = T.int32()
        stride_3 = T.int32()
        A_1 = T.match_buffer(
            A,
            [n],
            strides=[stride],
            elem_offset=0,
            align=64,
            offset_factor=1,
            buffer_type="auto",
        )
        B_1 = T.match_buffer(
            B,
            [n],
            strides=[stride_1],
            elem_offset=0,
            align=64,
            offset_factor=1,
            buffer_type="auto",
        )
        C_1 = T.match_buffer(
            C,
            [n],
            strides=[stride_2],
            elem_offset=0,
            align=64,
            offset_factor=1,
            buffer_type="auto",
        )
        d_1 = T.match_buffer(
            d,
            [n],
            strides=[stride_3],
            elem_offset=0,
            align=64,
            offset_factor=1,
            buffer_type="auto",
        )
        # body: d = a * b + c, the canonical fused multiply-add pattern
        for i in T.serial(0, n):
            d_1[(i * stride_3)] = (A_1[(i * stride)] * B_1[(i * stride_1)]) + C_1[(i * stride_2)]
def test_fma():
    """LowerIntrin must rewrite the a*b+c pattern to an LLVM pure intrinsic."""
    opt = tvm.transform.Sequential(
        [
            tvm.tir.transform.Apply(lambda f: f.with_attr("target", tvm.target.Target("llvm"))),
            tvm.tir.transform.LowerIntrin(),
        ]
    )
    mod = opt(Module)
    assert mod["test_tir_fma"].body.body.value.op.name == "tir.call_llvm_pure_intrin"
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_nearbyint()
    test_unary_intrin()
    test_round_intrinsics_on_int()
    test_binary_intrin()
    test_ldexp()
    test_clz()
    test_fma()
| 8,536 | 31.708812 | 99 | py |
tvm | tvm-main/tests/python/unittest/test_tir_specialize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring, missing-module-docstring
import tvm
from tvm.script import tir as T
# Fixture: matmul with symbolic shapes (m internal, n a function parameter).
# The specialization tests bind n and/or the buffer of `a` and compare
# against the expected PrimFuncs below.
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle, n: T.int32) -> None:
    m = T.int32()
    A = T.match_buffer(a, [m, n])
    B = T.match_buffer(b, [m, n])
    C = T.match_buffer(c, [m, m])
    for i, j, k in T.grid(m, m, n):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Expected result of fully specializing matmul with a 128x128 buffer:
# both m and n become 128 and the n parameter disappears.
@T.prim_func
def matmul_128(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Expected result of partially specializing matmul with n = 128:
# m stays symbolic.
@T.prim_func
def matmul_m_128(a: T.handle, b: T.handle, c: T.handle) -> None:
    m = T.int32()
    A = T.match_buffer(a, [m, 128])
    B = T.match_buffer(b, [m, 128])
    C = T.match_buffer(c, [m, m])
    for i, j, k in T.grid(m, m, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Expected result of symbolically specializing matmul with n = x * 8:
# the new free variable x replaces n in all shapes and loop extents.
@T.prim_func
def matmul_m_8x(a: T.handle, b: T.handle, c: T.handle) -> None:
    x = T.int32()
    m = T.int32()
    A = T.match_buffer(a, [m, x * 8])
    B = T.match_buffer(b, [m, x * 8])
    C = T.match_buffer(c, [m, m])
    for i, j, k in T.grid(m, m, x * 8):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Fixture: two-stage element-wise func (B = A * 2, C = B + 1) with fully
# symbolic m x n shapes, specialized via its buffer parameters below.
@T.prim_func
def element_wise(a: T.handle, c: T.handle) -> None:
    m = T.int32()
    n = T.int32()
    A = T.match_buffer(a, (m, n), "float32")
    C = T.match_buffer(c, (m, n), "float32")
    B = T.alloc_buffer((m, n), "float32")
    for i, j in T.grid(m, n):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(m, n):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
# Expected result of binding buffer `a` to shape (128, 64): both dims fixed.
@T.prim_func
def element_wise_128_64(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 64), "float32")
    C = T.match_buffer(c, (128, 64), "float32")
    B = T.alloc_buffer((128, 64), "float32")
    for i, j in T.grid(128, 64):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 64):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
# Expected result of binding only the first dim of `c` to 128: n stays free.
@T.prim_func
def element_wise_128_n(a: T.handle, c: T.handle) -> None:
    n = T.int32()
    A = T.match_buffer(a, (128, n), "float32")
    C = T.match_buffer(c, (128, n), "float32")
    B = T.alloc_buffer((128, n), "float32")
    for i, j in T.grid(128, n):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, n):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
# Fixture: element-wise copy whose shape, strides, and elem_offset are all
# scalar parameters, so specialization can bind them individually.
@T.prim_func
def mem_copy(a: T.handle, b: T.handle, m: T.int32, n: T.int32, p: T.int32, q: T.int32) -> None:
    A = T.match_buffer(a, (m, n), "float32", strides=[p, 1], elem_offset=q)
    B = T.match_buffer(b, (m, n), "float32", strides=[p, 1], elem_offset=q)
    for i, j in T.grid(m, n):
        with T.block():
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj]
# Expected result of fully binding (m, n, p, q) = (16, 16, 8, 4), either via
# the buffer of `a` or via the scalar parameters directly.
@T.prim_func
def mem_copy_16_16_8_4(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (16, 16), "float32", strides=[8, 1], elem_offset=4)
    B = T.match_buffer(b, (16, 16), "float32", strides=[8, 1], elem_offset=4)
    for i, j in T.grid(16, 16):
        with T.block():
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj]
# Expected result of the symbolic binding q = n: the elem_offset aliases the
# second shape dimension while m, n, p stay free.
@T.prim_func
def mem_copy_m_n_p_n(a: T.handle, b: T.handle, m: T.int32, n: T.int32, p: T.int32) -> None:
    A = T.match_buffer(a, (m, n), "float32", strides=[p, 1], elem_offset=n)
    B = T.match_buffer(b, (m, n), "float32", strides=[p, 1], elem_offset=n)
    for i, j in T.grid(m, n):
        with T.block():
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj]
# Fixture: the symbolic var n appears inside arithmetic expressions (shape
# n // 8, extent n - 1, constant term (n + 1) * 42), so specialization must
# constant-fold those expressions when n is bound.
@T.prim_func
def param_in_arith_exprs(a: T.handle, b: T.handle) -> None:
    n = T.int32()
    A = T.match_buffer(a, [n // 8, 8], "int32")
    B = T.match_buffer(b, [n], "int32")
    for i in range(n - 1):
        with T.block():
            vi = T.axis.S(n - 1, i)
            B[vi] = A[vi // 8, vi % 8] + (n + 1) * 42
# Expected result of binding B to shape [16] (hence n = 16): shapes become
# [2, 8] / [16], the extent folds to 15, and (n + 1) * 42 folds to 714.
# NOTE(review): the `n = T.int32()` declaration below is left unused after
# folding -- presumably a residue of specialization; confirm it is intended.
@T.prim_func
def param_in_arith_exprs_n_16(a: T.handle, b: T.handle) -> None:
    n = T.int32()
    A = T.match_buffer(a, [2, 8], "int32")
    B = T.match_buffer(b, [16], "int32")
    for i in range(15):
        with T.block():
            vi = T.axis.S(15, i)
            B[vi] = A[vi // 8, vi % 8] + 714
def test_specialize_nothing():
    """Specializing with an empty map must be a no-op returning the same object."""
    func = matmul.specialize({})
    assert func.same_as(matmul)  # Pointer the same
def test_specialize_matmul():
    """Buffer, scalar, and symbolic specializations yield the expected funcs."""
    param_a, _, _, param_n = matmul.params
    cases = [
        # fully specialized: binding buffer `a` fixes every symbolic dim
        ({param_a: tvm.tir.decl_buffer((128, 128))}, matmul_128),
        # partially specialized: only n is bound, m stays symbolic
        ({param_n: 128}, matmul_m_128),
        # symbolic specialization: n bound to the expression x * 8
        ({param_n: tvm.tir.Var("x", "int32") * 8}, matmul_m_8x),
    ]
    for param_map, expected in cases:
        tvm.ir.assert_structural_equal(matmul.specialize(param_map), expected)
def test_specialize_elemwise():
    """Buffer-based specialization of the element-wise func, full and partial."""
    a, c = element_wise.params
    C = element_wise.buffer_map[c]
    # fully specialized
    func = element_wise.specialize({a: tvm.tir.decl_buffer((128, 64))})
    tvm.ir.assert_structural_equal(func, element_wise_128_64)
    # partially specialized: keep the symbolic second dim of C
    func = element_wise.specialize({c: tvm.tir.decl_buffer((128, C.shape[1]))})
    tvm.ir.assert_structural_equal(func, element_wise_128_n)
def test_specialize_mem_copy():
    """Specialize mem_copy via its buffer or its scalar params; both must agree."""
    a, _, m, n, p, q = mem_copy.params
    # fully specialized
    func = mem_copy.specialize({a: tvm.tir.decl_buffer((16, 16), strides=[8, 1], elem_offset=4)})
    tvm.ir.assert_structural_equal(func, mem_copy_16_16_8_4)
    # binding the scalar params directly must produce the same result
    func = mem_copy.specialize({n: 16, m: 16, p: 8, q: 4})
    tvm.ir.assert_structural_equal(func, mem_copy_16_16_8_4)
    # partially specialized: symbolic binding q = n
    func = mem_copy.specialize({q: n})
    tvm.ir.assert_structural_equal(func, mem_copy_m_n_p_n)
def test_specialize_recursive_load():
    # TODO(Siyuan): add recursive Load testcase, e.g. A[C[i]]
    pass
def test_specialize_with_const_folding():
    """Arithmetic over a bound var ((n + 1) * 42 with n = 16) must const-fold."""
    b = param_in_arith_exprs.params[1]
    func = param_in_arith_exprs.specialize({b: tvm.tir.decl_buffer([16])})
    tvm.ir.assert_structural_equal(func, param_in_arith_exprs_n_16)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_specialize_nothing()
    test_specialize_matmul()
    test_specialize_elemwise()
    test_specialize_mem_copy()
    test_specialize_recursive_load()
    test_specialize_with_const_folding()
| 8,154 | 31.361111 | 97 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_memhammer_lower_auto_copy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.script import tir as T
import sys
import pytest
# Fixture: transpose through a shared.dyn staging buffer.  Both copy blocks
# are tagged "auto_copy" for the LowerAutoCopy pass to rewrite.
@tvm.script.ir_module
class Transpose:
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, [1024, 1024])
        B = T.match_buffer(b, [1024, 1024])
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for ty in T.thread_binding(8, thread="threadIdx.y"):
                with T.block():
                    A_shared_dyn = T.alloc_buffer([16, 128], dtype="float32", scope="shared.dyn")
                    with T.block("A_shared"):
                        T.block_attr({"auto_copy": 1})
                        for ax0, ax1 in T.grid(128, 16):
                            A_shared_dyn[ax1, ax0] = A[ax0, ax1]
                    with T.block("B"):
                        T.block_attr({"auto_copy": 1})
                        for ax1, ax0 in T.grid(16, 128):
                            B[ax1, ax0] = A_shared_dyn[ax1, ax0]
# Fixture: tiled global -> shared.dyn copy with "auto_copy" and a requested
# vector width of 16 bytes; the consuming block "B" is left untagged.
@tvm.script.ir_module
class GlobalToShared:
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, [1024, 1024])
        B = T.match_buffer(b, [1024, 1024])
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block():
                            A_shared_dyn = T.alloc_buffer(
                                [128, 128], dtype="float32", scope="shared.dyn"
                            )
                            with T.block("A_shared"):
                                T.block_attr({"auto_copy": 1, "vector_bytes": 16})
                                for ax0, ax1 in T.grid(128, 128):
                                    A_shared_dyn[ax0, ax1] = A[bx * 128 + ax0, by * 128 + ax1]
                            with T.block("B"):
                                for ax0, ax1 in T.grid(128, 128):
                                    B[bx * 128 + ax0, by * 128 + ax1] = A_shared_dyn[ax0, ax1]
# Fixture: the reverse direction -- shared.dyn -> global write-back block "B"
# carries the "auto_copy" tag (with transposed inner loop order).
@tvm.script.ir_module
class SharedToGlobal:
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, [1024, 1024])
        B = T.match_buffer(b, [1024, 1024])
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block():
                            A_shared_dyn = T.alloc_buffer(
                                [128, 128], dtype="float32", scope="shared.dyn"
                            )
                            with T.block("A_shared"):
                                for ax0, ax1 in T.grid(128, 128):
                                    A_shared_dyn[ax1, ax0] = A[bx * 128 + ax0, by * 128 + ax1]
                            with T.block("B"):
                                T.block_attr({"auto_copy": 1, "vector_bytes": 16})
                                for ax1, ax0 in T.grid(128, 128):
                                    B[bx * 128 + ax0, by * 128 + ax1] = A_shared_dyn[ax1, ax0]
# Input fixture: like GlobalToShared but with "local_stage": True, asking the pass
# to stage the global -> shared copy through a per-thread local buffer.  Paired
# with TransformedGlobalToSharedWithLocalStage in test_local_stage.
@tvm.script.ir_module
class GlobalToSharedWithLocalStage:
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, [1024, 1024])
        B = T.match_buffer(b, [1024, 1024])
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block():
                            A_shared_dyn = T.alloc_buffer(
                                [128, 128], dtype="float32", scope="shared.dyn"
                            )
                            with T.block("A_shared"):
                                T.block_attr(
                                    {"auto_copy": 1, "vector_bytes": 16, "local_stage": True}
                                )
                                for ax0, ax1 in T.grid(128, 128):
                                    A_shared_dyn[ax0, ax1] = A[bx * 128 + ax0, by * 128 + ax1]
                            with T.block("B"):
                                for ax0, ax1 in T.grid(128, 128):
                                    B[bx * 128 + ax0, by * 128 + ax1] = A_shared_dyn[ax0, ax1]
# Input fixture: an "auto_copy" block loading float16 data from dynamic shared
# memory into a wmma.matrix_a fragment.  Paired with TransformedSharedToWmma in
# test_rewrite_shared_to_wmma (expected to lower to tvm_load_matrix_sync).
@tvm.script.ir_module
class SharedToWmma:
    @T.prim_func
    def main() -> None:
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block():
                            A_shared_dyn = T.alloc_buffer(
                                [128, 128], dtype="float16", scope="shared.dyn"
                            )
                            A_wmma = T.alloc_buffer(
                                [128, 128], dtype="float16", scope="wmma.matrix_a"
                            )
                            with T.block("A_wmma"):
                                T.block_attr({"auto_copy": 1})
                                for ax0, ax1 in T.grid(128, 128):
                                    A_wmma[ax0, ax1] = A_shared_dyn[ax0, ax1]
# Input fixture: an "auto_copy" block storing a wmma.accumulator fragment into
# dynamic shared memory.  Paired with TransformedWmmaToShared in
# test_rewrite_wmma_to_shared (expected to lower to tvm_store_matrix_sync).
@tvm.script.ir_module
class WmmaToShared:
    @T.prim_func
    def main() -> None:
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block():
                            C_accum = T.alloc_buffer(
                                [128, 128], dtype="float32", scope="wmma.accumulator"
                            )
                            C_shared = T.alloc_buffer(
                                [128, 128], dtype="float32", scope="shared.dyn"
                            )
                            with T.block("C_shared"):
                                T.block_attr({"auto_copy": 1})
                                for ax0, ax1 in T.grid(128, 128):
                                    C_shared[ax0, ax1] = C_accum[ax0, ax1]
# Input fixture: an "auto_copy" block writing a wmma.accumulator fragment straight
# to global memory.  Paired with TransformedWmmaToGlobal in
# test_rewrite_wmma_to_global (expected to stage through shared.dyn).
@tvm.script.ir_module
class WmmaToGlobal:
    @T.prim_func
    def main(c: T.handle) -> None:
        C = T.match_buffer(c, [1024, 1024])
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block():
                            C_accum = T.alloc_buffer(
                                [128, 128], dtype="float32", scope="wmma.accumulator"
                            )
                            with T.block("C_global"):
                                T.block_attr({"auto_copy": 1, "vector_bytes": 16})
                                for ax0, ax1 in T.grid(128, 128):
                                    C[bx * 128 + ax0, by * 128 + ax1] = C_accum[ax0, ax1]
# Input fixture: like WmmaToGlobal but the store fuses an elementwise add with a
# 1-D buffer A.  Paired with TransformedWmmaToGlobalWithFusion in
# test_rewrite_wmma_to_global_fusion.
@tvm.script.ir_module
class WmmaToGlobalWithFusion:
    @T.prim_func
    def main(a: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, [1024])
        C = T.match_buffer(c, [1024, 1024])
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block():
                            C_accum = T.alloc_buffer(
                                [128, 128], dtype="float32", scope="wmma.accumulator"
                            )
                            with T.block("C_global"):
                                T.block_attr({"auto_copy": 1, "vector_bytes": 16})
                                for ax0, ax1 in T.grid(128, 128):
                                    C[bx * 128 + ax0, by * 128 + ax1] = (
                                        C_accum[ax0, ax1] + A[bx * 128 + ax0]
                                    )
# Input fixture: an "auto_copy" block writing an m16n8k8.matrixC (MMA) fragment to
# global memory.  Paired with TransformedMmaToGlobal in test_rewrite_mma_to_global.
@tvm.script.ir_module
class MmaToGlobal:
    @T.prim_func
    def main(c: T.handle) -> None:
        C = T.match_buffer(c, [1024, 1024])
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block():
                            C_accum = T.alloc_buffer(
                                [128, 128], dtype="float32", scope="m16n8k8.matrixC"
                            )
                            with T.block("C_global"):
                                T.block_attr({"auto_copy": 1, "vector_bytes": 16})
                                for ax0, ax1 in T.grid(128, 128):
                                    C[bx * 128 + ax0, by * 128 + ax1] = C_accum[ax0, ax1]
# Expected output for GlobalToShared: the copy is fused, split over
# threadIdx.y/threadIdx.x and a 4-wide vectorized lane (16 bytes of float32).
@tvm.script.ir_module
class TransformedGlobalToShared:
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, [1024, 1024])
        B = T.match_buffer(b, [1024, 1024])
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block():
                            A_shared_dyn = T.alloc_buffer(
                                [128, 128], dtype="float32", strides=[128, 1], scope="shared.dyn"
                            )
                            with T.block("A_shared"):
                                T.block_attr({"auto_copy": 1, "vector_bytes": 16})
                                # 16 * 8 * 32 * 4 = 128 * 128 elements total.
                                for outer in T.serial(16):
                                    for ty_1 in T.thread_binding(8, thread="threadIdx.y"):
                                        for tx in T.thread_binding(32, thread="threadIdx.x"):
                                            for vec in T.vectorized(4):
                                                A_shared_dyn[
                                                    (((outer * 8 + ty_1) * 32 + tx) * 4 + vec)
                                                    // 128
                                                    % 128,
                                                    (((outer * 8 + ty_1) * 32 + tx) * 4 + vec)
                                                    % 128,
                                                ] = A[
                                                    bx * 128
                                                    + (((outer * 8 + ty_1) * 32 + tx) * 4 + vec)
                                                    // 128
                                                    % 128,
                                                    by * 128
                                                    + (((outer * 8 + ty_1) * 32 + tx) * 4 + vec)
                                                    % 128,
                                                ]
                            with T.block("B"):
                                for ax0, ax1 in T.grid(128, 128):
                                    B[bx * 128 + ax0, by * 128 + ax1] = A_shared_dyn[ax0, ax1]
# Expected output for SharedToGlobal: the shared buffer is padded (strides [129, 1])
# and the write-back is fused, thread-bound and vectorized; the transposed read from
# shared memory is preserved.
@tvm.script.ir_module
class TransformedSharedToGlobal:
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, [1024, 1024])
        B = T.match_buffer(b, [1024, 1024])
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block():
                            A_shared_dyn = T.alloc_buffer(
                                [128, 128], dtype="float32", strides=[129, 1], scope="shared.dyn"
                            )
                            with T.block("A_shared"):
                                T.reads(A[bx * 128 : bx * 128 + 128, by * 128 : by * 128 + 128])
                                T.writes(A_shared_dyn[0:128, 0:128])
                                for ax0, ax1 in T.grid(128, 128):
                                    A_shared_dyn[ax1, ax0] = A[bx * 128 + ax0, by * 128 + ax1]
                            with T.block("B"):
                                T.block_attr({"auto_copy": 1, "vector_bytes": 16})
                                for outer in T.serial(16):
                                    for ty_1 in T.thread_binding(8, thread="threadIdx.y"):
                                        for tx in T.thread_binding(32, thread="threadIdx.x"):
                                            for vec in T.vectorized(4):
                                                B[
                                                    bx * 128
                                                    + (((outer * 8 + ty_1) * 32 + tx) * 4 + vec)
                                                    // 128
                                                    % 128,
                                                    by * 128
                                                    + (((outer * 8 + ty_1) * 32 + tx) * 4 + vec)
                                                    % 128,
                                                ] = A_shared_dyn[
                                                    (((outer * 8 + ty_1) * 32 + tx) * 4 + vec)
                                                    % 128,
                                                    (((outer * 8 + ty_1) * 32 + tx) * 4 + vec)
                                                    // 128
                                                    % 128,
                                                ]
# Expected output for GlobalToSharedWithLocalStage: the global -> shared copy is
# staged through a per-thread "local" buffer (A_shared_dyn_local) before the
# vectorized store into shared memory.
@tvm.script.ir_module
class TransformedGlobalToSharedWithLocalStage:
    @T.prim_func
    def main(a: T.handle, b: T.handle):
        A = T.match_buffer(a, (1024, 1024))
        B = T.match_buffer(b, (1024, 1024))
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block(""):
                            T.reads(A[bx * 128 : bx * 128 + 128, by * 128 : by * 128 + 128])
                            T.writes(B[bx * 128 : bx * 128 + 128, by * 128 : by * 128 + 128])
                            A_shared_dyn = T.alloc_buffer(
                                (128, 128), strides=(128, 1), scope="shared.dyn"
                            )
                            with T.block("A_shared"):
                                T.reads(A[bx * 128 : bx * 128 + 128, by * 128 : by * 128 + 128])
                                T.writes(A_shared_dyn[0:128, 0:128])
                                T.block_attr(
                                    {"auto_copy": 1, "local_stage": True, "vector_bytes": 16}
                                )
                                A_shared_dyn_local = T.alloc_buffer((16, 4), scope="local")
                                for ax0_ax1_fused_1 in T.thread_binding(8, thread="threadIdx.y"):
                                    for ax0_ax1_fused_2 in T.thread_binding(
                                        32, thread="threadIdx.x"
                                    ):
                                        # Stage 1: global -> local cache.
                                        for ax0_ax1_fused_0_cache in range(16):
                                            for ax0_ax1_fused_3_cache in T.vectorized(4):
                                                A_shared_dyn_local[
                                                    ax0_ax1_fused_0_cache
                                                    * 8
                                                    * 32
                                                    * 4
                                                    // 128
                                                    % 128
                                                    // 8,
                                                    ax0_ax1_fused_3_cache % 128,
                                                ] = A[
                                                    bx * 128
                                                    + (
                                                        (
                                                            (
                                                                ax0_ax1_fused_0_cache * 8
                                                                + ax0_ax1_fused_1
                                                            )
                                                            * 32
                                                            + ax0_ax1_fused_2
                                                        )
                                                        * 4
                                                        + ax0_ax1_fused_3_cache
                                                    )
                                                    // 128
                                                    % 128,
                                                    by * 128
                                                    + (
                                                        (
                                                            (
                                                                ax0_ax1_fused_0_cache * 8
                                                                + ax0_ax1_fused_1
                                                            )
                                                            * 32
                                                            + ax0_ax1_fused_2
                                                        )
                                                        * 4
                                                        + ax0_ax1_fused_3_cache
                                                    )
                                                    % 128,
                                                ]
                                        # Stage 2: local cache -> shared memory.
                                        for ax0_ax1_fused_0 in range(16):
                                            for ax0_ax1_fused_3 in T.vectorized(4):
                                                A_shared_dyn[
                                                    (
                                                        (
                                                            (ax0_ax1_fused_0 * 8 + ax0_ax1_fused_1)
                                                            * 32
                                                            + ax0_ax1_fused_2
                                                        )
                                                        * 4
                                                        + ax0_ax1_fused_3
                                                    )
                                                    // 128
                                                    % 128,
                                                    (
                                                        (
                                                            (ax0_ax1_fused_0 * 8 + ax0_ax1_fused_1)
                                                            * 32
                                                            + ax0_ax1_fused_2
                                                        )
                                                        * 4
                                                        + ax0_ax1_fused_3
                                                    )
                                                    % 128,
                                                ] = A_shared_dyn_local[
                                                    ax0_ax1_fused_0 * 8 * 32 * 4 // 128 % 128 // 8,
                                                    ax0_ax1_fused_3 % 128,
                                                ]
                            with T.block("B"):
                                T.reads(A_shared_dyn[0:128, 0:128])
                                T.writes(B[bx * 128 : bx * 128 + 128, by * 128 : by * 128 + 128])
                                for ax0 in range(128):
                                    for ax1 in range(128):
                                        B[bx * 128 + ax0, by * 128 + ax1] = A_shared_dyn[ax0, ax1]
# Expected output for SharedToWmma: the copy is tiled into 16x16 fragments, the
# shared buffer is padded (strides [136, 1]) and each tile is loaded with
# tvm_load_matrix_sync via matched sub-buffers.
@tvm.script.ir_module
class TransformedSharedToWmma:
    @T.prim_func
    def main() -> None:
        s0 = T.int32()
        s1 = T.int32()
        # body
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block():
                            A_shared_dyn = T.alloc_buffer(
                                [128, 128], dtype="float16", strides=[136, 1], scope="shared.dyn"
                            )
                            A_wmma = T.alloc_buffer(
                                [128, 128], dtype="float16", scope="wmma.matrix_a"
                            )
                            with T.block("C_shared"):
                                T.reads(A_shared_dyn[0:128, 0:128])
                                T.writes(A_wmma[0:128, 0:128])
                                T.block_attr({"auto_copy": 1})
                                for ax00, ax10 in T.grid(8, 8):
                                    with T.block("wmma_load"):
                                        T.reads(
                                            A_shared_dyn[
                                                ax00 * 16 : ax00 * 16 + 16,
                                                ax10 * 16 : ax10 * 16 + 16,
                                            ]
                                        )
                                        T.writes(
                                            A_wmma[
                                                ax00 * 16 : ax00 * 16 + 16,
                                                ax10 * 16 : ax10 * 16 + 16,
                                            ]
                                        )
                                        src = T.match_buffer(
                                            A_shared_dyn[
                                                ax00 * 16 : ax00 * 16 + 16,
                                                ax10 * 16 : ax10 * 16 + 16,
                                            ],
                                            [16, 16],
                                            dtype="float16",
                                            strides=[s1, s0],
                                            scope="shared.dyn",
                                            offset_factor=16,
                                        )
                                        tgt = T.match_buffer(
                                            A_wmma[
                                                ax00 * 16 : ax00 * 16 + 16,
                                                ax10 * 16 : ax10 * 16 + 16,
                                            ],
                                            [16, 16],
                                            dtype="float16",
                                            scope="wmma.matrix_a",
                                            offset_factor=16,
                                        )
                                        T.evaluate(
                                            T.tvm_load_matrix_sync(
                                                tgt.data,
                                                16,
                                                16,
                                                16,
                                                tgt.elem_offset // 256
                                                + tgt.elem_offset % 256 // 16,
                                                T.tvm_access_ptr(
                                                    T.type_annotation(dtype="float16"),
                                                    src.data,
                                                    src.elem_offset,
                                                    s1 * 16,
                                                    1,
                                                    dtype="handle",
                                                ),
                                                s1,
                                                "row_major",
                                                dtype="handle",
                                            )
                                        )
# Expected output for WmmaToShared: each 16x16 accumulator fragment is written to
# the padded shared buffer (strides [136, 1]) with tvm_store_matrix_sync.
@tvm.script.ir_module
class TransformedWmmaToShared:
    @T.prim_func
    def main() -> None:
        s0 = T.int32()
        s1 = T.int32()
        # body
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block():
                            C_accum = T.alloc_buffer(
                                [128, 128], dtype="float32", scope="wmma.accumulator"
                            )
                            C_shared = T.alloc_buffer(
                                [128, 128], dtype="float32", strides=[136, 1], scope="shared.dyn"
                            )
                            with T.block("A_wmma"):
                                T.reads(C_accum[0:128, 0:128])
                                T.writes(C_shared[0:128, 0:128])
                                T.block_attr({"auto_copy": 1})
                                for ax00, ax10 in T.grid(8, 8):
                                    with T.block("wmma_store"):
                                        T.reads(
                                            C_accum[
                                                ax00 * 16 : ax00 * 16 + 16,
                                                ax10 * 16 : ax10 * 16 + 16,
                                            ]
                                        )
                                        T.writes(
                                            C_shared[
                                                ax00 * 16 : ax00 * 16 + 16,
                                                ax10 * 16 : ax10 * 16 + 16,
                                            ]
                                        )
                                        src = T.match_buffer(
                                            C_accum[
                                                ax00 * 16 : ax00 * 16 + 16,
                                                ax10 * 16 : ax10 * 16 + 16,
                                            ],
                                            [16, 16],
                                            dtype="float32",
                                            scope="wmma.accumulator",
                                            offset_factor=16,
                                        )
                                        tgt = T.match_buffer(
                                            C_shared[
                                                ax00 * 16 : ax00 * 16 + 16,
                                                ax10 * 16 : ax10 * 16 + 16,
                                            ],
                                            [16, 16],
                                            dtype="float32",
                                            strides=[s1, s0],
                                            scope="shared.dyn",
                                            offset_factor=16,
                                        )
                                        T.evaluate(
                                            T.tvm_store_matrix_sync(
                                                src.data,
                                                16,
                                                16,
                                                16,
                                                src.elem_offset // 256
                                                + src.elem_offset % 256 // 16,
                                                T.tvm_access_ptr(
                                                    T.type_annotation(dtype="float32"),
                                                    tgt.data,
                                                    tgt.elem_offset,
                                                    s1 * 16,
                                                    2,
                                                    dtype="handle",
                                                ),
                                                s1,
                                                "row_major",
                                                dtype="handle",
                                            )
                                        )
# Expected output for WmmaToGlobal: the accumulator is first stored into a staging
# shared.dyn buffer with tvm_store_matrix_sync, then copied to global memory by a
# fused, thread-bound, 4-wide vectorized loop nest.
@tvm.script.ir_module
class TransformedWmmaToGlobal:
    @T.prim_func
    def main(C: T.Buffer((1024, 1024), "float32")):
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block(""):
                            T.reads()
                            T.writes(C[bx * 128 : bx * 128 + 128, by * 128 : by * 128 + 128])
                            C_accum = T.alloc_buffer((128, 128), scope="wmma.accumulator")
                            with T.block("C_global"):
                                T.reads(C_accum[0:128, 0:128])
                                T.writes(C[bx * 128 : bx * 128 + 128, by * 128 : by * 128 + 128])
                                T.block_attr({"auto_copy": 1, "vector_bytes": 16})
                                C_accum_shared_dyn = T.alloc_buffer(
                                    (8, 8, 16, 16), strides=(2048, 256, 16, 1), scope="shared.dyn"
                                )
                                for ax0_0 in range(8):
                                    # Stage 1: accumulator fragments -> shared staging buffer.
                                    for ax1_0 in range(8):
                                        with T.block("wmma_store"):
                                            T.reads(
                                                C_accum[
                                                    ax0_0 * 16 : ax0_0 * 16 + 16,
                                                    ax1_0 * 16 : ax1_0 * 16 + 16,
                                                ]
                                            )
                                            T.writes(C_accum_shared_dyn[ty, ax1_0, 0:16, 0:16])
                                            src = T.match_buffer(
                                                C_accum[
                                                    ax0_0 * 16 : ax0_0 * 16 + 16,
                                                    ax1_0 * 16 : ax1_0 * 16 + 16,
                                                ],
                                                (16, 16),
                                                scope="wmma.accumulator",
                                                offset_factor=16,
                                            )
                                            s1 = T.int32()
                                            s0 = T.int32()
                                            tgt = T.match_buffer(
                                                C_accum_shared_dyn[ty, ax1_0, 0:16, 0:16],
                                                (16, 16),
                                                strides=(s1, s0),
                                                scope="shared.dyn",
                                                offset_factor=16,
                                            )
                                            T.tvm_store_matrix_sync(
                                                src.data,
                                                16,
                                                16,
                                                16,
                                                src.elem_offset // 256
                                                + src.elem_offset % 256 // 16,
                                                T.tvm_access_ptr(
                                                    T.type_annotation("float32"),
                                                    tgt.data,
                                                    tgt.elem_offset,
                                                    s1 * 16,
                                                    2,
                                                ),
                                                s1,
                                                "row_major",
                                            )
                                    # Stage 2: shared staging buffer -> global memory.
                                    for (
                                        ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                    ) in range(16):
                                        for (
                                            ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                        ) in T.thread_binding(8, thread="threadIdx.y"):
                                            for (
                                                ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                            ) in T.thread_binding(32, thread="threadIdx.x"):
                                                for ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3 in (
                                                    T.vectorized(4)
                                                ):
                                                    C[
                                                        bx * 128
                                                        + (
                                                            ax0_0 * 16
                                                            + (
                                                                (
                                                                    (
                                                                        ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                                                        * 8
                                                                        + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                                                    )
                                                                    * 32
                                                                    + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                                                )
                                                                * 4
                                                                + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3
                                                            )
                                                            // 16
                                                            % 16
                                                        ),
                                                        by * 128
                                                        + (
                                                            (
                                                                (
                                                                    (
                                                                        ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                                                        * 8
                                                                        + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                                                    )
                                                                    * 32
                                                                    + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                                                )
                                                                * 4
                                                                + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3
                                                            )
                                                            // 16
                                                            // 16
                                                            % 8
                                                            * 16
                                                            + (
                                                                (
                                                                    (
                                                                        ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                                                        * 8
                                                                        + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                                                    )
                                                                    * 32
                                                                    + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                                                )
                                                                * 4
                                                                + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3
                                                            )
                                                            % 16
                                                        ),
                                                    ] = C_accum_shared_dyn[
                                                        (
                                                            (
                                                                (
                                                                    ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                                                    * 8
                                                                    + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                                                )
                                                                * 32
                                                                + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                                            )
                                                            * 4
                                                            + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3
                                                        )
                                                        // 16
                                                        // 16
                                                        // 8
                                                        % 8,
                                                        (
                                                            (
                                                                (
                                                                    ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                                                    * 8
                                                                    + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                                                )
                                                                * 32
                                                                + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                                            )
                                                            * 4
                                                            + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3
                                                        )
                                                        // 16
                                                        // 16
                                                        % 8,
                                                        (
                                                            (
                                                                (
                                                                    ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                                                    * 8
                                                                    + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                                                )
                                                                * 32
                                                                + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                                            )
                                                            * 4
                                                            + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3
                                                        )
                                                        // 16
                                                        % 16,
                                                        (
                                                            (
                                                                (
                                                                    ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                                                    * 8
                                                                    + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                                                )
                                                                * 32
                                                                + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                                            )
                                                            * 4
                                                            + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3
                                                        )
                                                        % 16,
                                                    ]
# Expected output for WmmaToGlobalWithFusion: same shared-memory staging as
# TransformedWmmaToGlobal, with the elementwise add of A fused into the final
# global store.
@tvm.script.ir_module
class TransformedWmmaToGlobalWithFusion:
    @T.prim_func
    def main(A: T.Buffer((1024,), "float32"), C: T.Buffer((1024, 1024), "float32")) -> None:
        s0 = T.int32()
        s1 = T.int32()
        # body
        with T.block("root"):
            T.block_attr({"warp_execution": True})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block():
                            T.reads(A[bx * 128 : bx * 128 + 128])
                            T.writes(C[bx * 128 : bx * 128 + 128, by * 128 : by * 128 + 128])
                            C_accum = T.alloc_buffer(
                                [128, 128], dtype="float32", scope="wmma.accumulator"
                            )
                            with T.block("C_global"):
                                T.reads(C_accum[0:128, 0:128], A[bx * 128 : bx * 128 + 128])
                                T.writes(C[bx * 128 : bx * 128 + 128, by * 128 : by * 128 + 128])
                                T.block_attr({"auto_copy": 1, "vector_bytes": 16})
                                C_accum_shared_dyn = T.alloc_buffer(
                                    (8, 8, 16, 16), strides=(2048, 256, 16, 1), scope="shared.dyn"
                                )
                                for ax0_0 in range(8):
                                    # Stage 1: accumulator fragments -> shared staging buffer.
                                    for ax1_0 in range(8):
                                        with T.block("wmma_store"):
                                            T.reads(
                                                C_accum[
                                                    ax0_0 * 16 : ax0_0 * 16 + 16,
                                                    ax1_0 * 16 : ax1_0 * 16 + 16,
                                                ]
                                            )
                                            T.writes(C_accum_shared_dyn[ty, ax1_0, 0:16, 0:16])
                                            src = T.match_buffer(
                                                C_accum[
                                                    ax0_0 * 16 : ax0_0 * 16 + 16,
                                                    ax1_0 * 16 : ax1_0 * 16 + 16,
                                                ],
                                                (16, 16),
                                                scope="wmma.accumulator",
                                                offset_factor=16,
                                            )
                                            s1 = T.int32()
                                            s0 = T.int32()
                                            tgt = T.match_buffer(
                                                C_accum_shared_dyn[ty, ax1_0, 0:16, 0:16],
                                                (16, 16),
                                                strides=(s1, s0),
                                                scope="shared.dyn",
                                                offset_factor=16,
                                            )
                                            T.tvm_store_matrix_sync(
                                                src.data,
                                                16,
                                                16,
                                                16,
                                                src.elem_offset // 256
                                                + src.elem_offset % 256 // 16,
                                                T.tvm_access_ptr(
                                                    T.type_annotation("float32"),
                                                    tgt.data,
                                                    tgt.elem_offset,
                                                    s1 * 16,
                                                    2,
                                                ),
                                                s1,
                                                "row_major",
                                            )
                                    # Stage 2: shared staging buffer (+ fused add of A) -> global.
                                    for (
                                        ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                    ) in range(16):
                                        for (
                                            ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                        ) in T.thread_binding(8, thread="threadIdx.y"):
                                            for (
                                                ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                            ) in T.thread_binding(32, thread="threadIdx.x"):
                                                for ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3 in (
                                                    T.vectorized(4)
                                                ):
                                                    C[
                                                        bx * 128
                                                        + (
                                                            ax0_0 * 16
                                                            + (
                                                                (
                                                                    (
                                                                        ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                                                        * 8
                                                                        + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                                                    )
                                                                    * 32
                                                                    + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                                                )
                                                                * 4
                                                                + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3
                                                            )
                                                            // 16
                                                            % 16
                                                        ),
                                                        by * 128
                                                        + (
                                                            (
                                                                (
                                                                    (
                                                                        ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                                                        * 8
                                                                        + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                                                    )
                                                                    * 32
                                                                    + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                                                )
                                                                * 4
                                                                + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3
                                                            )
                                                            // 16
                                                            // 16
                                                            % 8
                                                            * 16
                                                            + (
                                                                (
                                                                    (
                                                                        ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                                                        * 8
                                                                        + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                                                    )
                                                                    * 32
                                                                    + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                                                )
                                                                * 4
                                                                + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3
                                                            )
                                                            % 16
                                                        ),
                                                    ] = (
                                                        C_accum_shared_dyn[
                                                            (
                                                                (
                                                                    (
                                                                        ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                                                        * 8
                                                                        + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                                                    )
                                                                    * 32
                                                                    + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                                                )
                                                                * 4
                                                                + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3
                                                            )
                                                            // 16
                                                            // 16
                                                            // 8
                                                            % 8,
                                                            (
                                                                (
                                                                    (
                                                                        ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                                                        * 8
                                                                        + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                                                    )
                                                                    * 32
                                                                    + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                                                )
                                                                * 4
                                                                + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3
                                                            )
                                                            // 16
                                                            // 16
                                                            % 8,
                                                            (
                                                                (
                                                                    (
                                                                        ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                                                        * 8
                                                                        + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                                                    )
                                                                    * 32
                                                                    + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                                                )
                                                                * 4
                                                                + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3
                                                            )
                                                            // 16
                                                            % 16,
                                                            (
                                                                (
                                                                    (
                                                                        ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                                                        * 8
                                                                        + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                                                    )
                                                                    * 32
                                                                    + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                                                )
                                                                * 4
                                                                + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3
                                                            )
                                                            % 16,
                                                        ]
                                                        + A[
                                                            bx * 128
                                                            + (
                                                                ax0_0 * 16
                                                                + (
                                                                    (
                                                                        (
                                                                            ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0
                                                                            * 8
                                                                            + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_1
                                                                        )
                                                                        * 32
                                                                        + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_2
                                                                    )
                                                                    * 4
                                                                    + ty_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_3
                                                                )
                                                                // 16
                                                                % 16
                                                            )
                                                        ]
                                                    )
# Expected output for MmaToGlobal: each 8x8 m16n8k8.matrixC fragment is written to
# a padded shared staging buffer (strides (1152, 72, 8, 1)) by a per-thread
# vectorized copy, then the staged data is flushed to global memory.
@tvm.script.ir_module
class TransformedMmaToGlobal:
    @T.prim_func
    def main(C: T.Buffer((1024, 1024), "float32")):
        with T.block("root"):
            T.block_attr({"warp_execution": T.bool(True)})
            for bx in T.thread_binding(8, thread="blockIdx.x"):
                for by in T.thread_binding(8, thread="blockIdx.y"):
                    for ty in T.thread_binding(8, thread="threadIdx.y"):
                        with T.block(""):
                            T.reads()
                            T.writes(C[bx * 128 : bx * 128 + 128, by * 128 : by * 128 + 128])
                            C_accum = T.alloc_buffer((128, 128), scope="m16n8k8.matrixC")
                            with T.block("C_global"):
                                T.reads(C_accum[0:128, 0:128])
                                T.writes(C[bx * 128 : bx * 128 + 128, by * 128 : by * 128 + 128])
                                T.block_attr({"auto_copy": 1, "vector_bytes": 16})
                                C_accum_shared_dyn = T.alloc_buffer(
                                    (8, 16, 8, 8), strides=(1152, 72, 8, 1), scope="shared.dyn"
                                )
                                for ax0_0 in range(16):
                                    # Stage 1: MMA fragments -> shared staging buffer.
                                    for ax1_0 in range(16):
                                        with T.block("mma_store"):
                                            T.reads(
                                                C_accum[
                                                    ax0_0 * 8 : ax0_0 * 8 + 8,
                                                    ax1_0 * 8 : ax1_0 * 8 + 8,
                                                ]
                                            )
                                            T.writes(C_accum_shared_dyn[ty, ax1_0, 0:8, 0:8])
                                            src = T.match_buffer(
                                                C_accum[
                                                    ax0_0 * 8 : ax0_0 * 8 + 8,
                                                    ax1_0 * 8 : ax1_0 * 8 + 8,
                                                ],
                                                (8, 8),
                                                scope="m16n8k8.matrixC",
                                                offset_factor=8,
                                            )
                                            tgt = T.match_buffer(
                                                C_accum_shared_dyn[ty, ax1_0, 0:8, 0:8],
                                                (8, 8),
                                                strides=("s1", "s0"),
                                                scope="shared.dyn",
                                                offset_factor=8,
                                            )
                                            tx = T.launch_thread("threadIdx.x", 32)
                                            for vec in T.vectorized(2):
                                                tgt[tx // 4, tx % 4 * 2 + vec] = src[
                                                    tx // 4, tx % 4 * 2 + vec
                                                ]
                                    # Stage 2: shared staging buffer -> global memory.
                                    for ax1_1 in range(8):
                                        for ty_0 in T.thread_binding(8, thread="threadIdx.y"):
                                            for tx_0 in T.thread_binding(32, thread="threadIdx.x"):
                                                for v in T.vectorized(4):
                                                    C[
                                                        bx * 128
                                                        + (
                                                            ax0_0 * 8
                                                            + (
                                                                ((ax1_1 * 8 + ty_0) * 32 + tx_0) * 4
                                                                + v
                                                            )
                                                            // 8
                                                            % 8
                                                        ),
                                                        by * 128
                                                        + (
                                                            (
                                                                ((ax1_1 * 8 + ty_0) * 32 + tx_0) * 4
                                                                + v
                                                            )
                                                            // 8
                                                            // 8
                                                            % 16
                                                            * 8
                                                            + (
                                                                ((ax1_1 * 8 + ty_0) * 32 + tx_0) * 4
                                                                + v
                                                            )
                                                            % 8
                                                        ),
                                                    ] = C_accum_shared_dyn[
                                                        (((ax1_1 * 8 + ty_0) * 32 + tx_0) * 4 + v)
                                                        // 8
                                                        // 8
                                                        // 16
                                                        % 8,
                                                        (((ax1_1 * 8 + ty_0) * 32 + tx_0) * 4 + v)
                                                        // 8
                                                        // 8
                                                        % 16,
                                                        (((ax1_1 * 8 + ty_0) * 32 + tx_0) * 4 + v)
                                                        // 8
                                                        % 8,
                                                        (((ax1_1 * 8 + ty_0) * 32 + tx_0) * 4 + v)
                                                        % 8,
                                                    ]
def _check(original, transformed):
    """Apply LowerAutoCopy to *original* and assert structural equality with *transformed*."""
    lowered = tvm.tir.transform.LowerAutoCopy()(original)
    tvm.ir.assert_structural_equal(lowered, transformed, True)
def test_coalesce_vectorize():
    # Global -> shared copy should become a coalesced, vectorized loop nest.
    _check(GlobalToShared, TransformedGlobalToShared)
def test_inverse():
    # Transposed shared -> global write-back (inverse layout) is handled.
    _check(SharedToGlobal, TransformedSharedToGlobal)
def test_local_stage():
    # "local_stage" attribute routes the copy through a per-thread local buffer.
    _check(GlobalToSharedWithLocalStage, TransformedGlobalToSharedWithLocalStage)
def test_rewrite_shared_to_wmma():
    # Shared -> wmma.matrix_a copy lowers to tvm_load_matrix_sync.
    _check(SharedToWmma, TransformedSharedToWmma)
def test_rewrite_wmma_to_shared():
    # wmma.accumulator -> shared copy lowers to tvm_store_matrix_sync.
    _check(WmmaToShared, TransformedWmmaToShared)
def test_rewrite_wmma_to_global():
    # wmma.accumulator -> global copy is staged through shared memory.
    _check(WmmaToGlobal, TransformedWmmaToGlobal)
def verify_single_allocation(stmt, alloc_size=None):
    """Assert that *stmt* contains exactly one block-level "shared.dyn" allocation.

    Parameters
    ----------
    stmt : tvm.tir.Stmt
        The statement to walk (post-order) looking for block allocations.
    alloc_size : int, optional
        When given, also assert that the flattened extent (product of the
        shape) of the single recorded allocation equals this value.
    """
    num_alloc = [0]
    alloc_extents = []

    def verify(n):
        # Count allocations only for blocks that own at least one
        # "shared.dyn" buffer; record their shapes for the size check.
        if (
            isinstance(n, tvm.tir.Block)
            and n.alloc_buffers is not None
            and any(buf.scope() == "shared.dyn" for buf in n.alloc_buffers)
        ):
            num_alloc[0] += len(n.alloc_buffers)
            for buf in n.alloc_buffers:
                alloc_extents.append(buf.shape)

    tvm.tir.stmt_functor.post_order_visit(stmt, verify)
    assert num_alloc[0] == 1
    if alloc_size:

        def prod(arr):
            # Shape elements are tvm PrimExprs; multiply them up directly.
            ret = 1
            for element in arr:
                ret *= element
            return ret

        assert prod(alloc_extents[0]) == alloc_size
def test_auto_padding():
    # The Transpose fixture (defined earlier in this file) should get a single
    # padded shared.dyn allocation of 16 * 130 elements after lowering.
    mod = tvm.tir.transform.LowerAutoCopy()(Transpose)
    mod = tvm.tir.transform.FlattenBuffer()(mod)
    verify_single_allocation(mod["main"].body, 16 * 130)
def test_rewrite_wmma_to_global_fusion():
    # Epilogue fusion (elementwise add) survives the wmma -> global rewrite.
    _check(WmmaToGlobalWithFusion, TransformedWmmaToGlobalWithFusion)
def test_rewrite_mma_to_global():
    # m16n8k8.matrixC -> global copy is staged through shared memory.
    _check(MmaToGlobal, TransformedMmaToGlobal)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_coalesce_vectorize()
    test_inverse()
    test_local_stage()
    test_rewrite_shared_to_wmma()
    test_rewrite_wmma_to_shared()
    test_rewrite_wmma_to_global()
    test_auto_padding()
    test_rewrite_wmma_to_global_fusion()
    test_rewrite_mma_to_global()
| 70,261 | 58.443316 | 130 | py |
tvm | tvm-main/tests/python/unittest/test_runtime_error.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test runtime error handling"""
import tvm
from tvm import te
import tvm.testing
def test_op_translation():
    """Check that FFI errors raised in C++ are translated to the right Python types."""
    # "OpNotImplemented: ..." should surface as tvm.error.OpNotImplemented,
    # which is also a NotImplementedError subclass.
    ferror = tvm.testing.test_raise_error_callback("OpNotImplemented: myop")
    try:
        ferror()
        assert False
    except tvm.error.OpNotImplemented as e:
        msg = str(e)
        assert isinstance(e, NotImplementedError)
        # The C++ source location should be preserved in the message.
        assert msg.find("ffi_testing.cc") != -1

    # "InternalError: ..." maps to tvm.error.InternalError.
    fchk_eq = tvm.testing.test_check_eq_callback("InternalError: myop")
    try:
        fchk_eq(0, 1)
        assert False
    except tvm.error.InternalError as e:
        msg = str(e)
        assert msg.find("ffi_testing.cc") != -1

    # Unprefixed errors fall back to a plain Python ValueError.
    try:
        tvm.testing.ErrorTest(0, 1)
        assert False
    except ValueError as e:
        msg = str(e)
        assert msg.find("ffi_testing.cc") != -1
def test_deep_callback():
    """Check traceback propagation through nested Python -> C++ -> Python callbacks.

    NOTE: the names ``flevel2``/``flevel3`` are significant — the assertions
    search for them in the propagated error message, so they must not be renamed.
    """

    def error_callback():
        raise ValueError("callback error")

    wrap1 = tvm.testing.test_wrap_callback(error_callback)

    def flevel2():
        wrap1()

    wrap2 = tvm.testing.test_wrap_callback(flevel2)

    def flevel3():
        wrap2()

    wrap3 = tvm.testing.test_wrap_callback(flevel3)

    try:
        wrap3()
        assert False
    except ValueError as e:
        msg = str(e)
        idx2 = msg.find("in flevel2")
        idx3 = msg.find("in flevel3")
        # Both frames appear, with the outer frame (flevel3) first.
        assert idx2 != -1 and idx3 != -1
        assert idx2 > idx3
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_op_translation()
    test_deep_callback()
| 2,238 | 27.341772 | 76 | py |
tvm | tvm-main/tests/python/unittest/test_runtime_heterogeneous.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-locals
"""Unit tests for heterogeneous runtime"""
import json
import numpy as np
import tvm
from tvm import te
from tvm.contrib import graph_executor, utils
from tvm import topi
def get_simplex_graph(host_dev_type, device_dev_type):
    r""" Return the hand-crafted json object where only one copy node is
    inserted. This node copies data from the target device to cpu.
    The network is constructed as following:
                 A    B
                  \  /
             elemwise_add  (gpu)
                     \
                     copy      C
                       \      /
                     elemwise_sub  (cpu)

    Parameters
    ----------
    host_dev_type : int
        The device type of the host processor, e.g. cpu.
    device_dev_type : int
        The device type of the device processor, e.g. gpu, opencl, etc.

    Returns
    -------
    json : json
        A json encoded object.
    """

    def tvm_op(name, func_name, num_inputs, flatten_data, inputs):
        # Build a "tvm_op" graph node; key order matters for the serialized JSON.
        return {
            "op": "tvm_op",
            "name": name,
            "attrs": {
                "flatten_data": flatten_data,
                "func_name": func_name,
                "num_inputs": num_inputs,
                "num_outputs": "1",
            },
            "inputs": inputs,
        }

    # Graph nodes in topological order: A, B -> add (device) -> copy -> sub (host).
    nodes = [
        {"op": "null", "name": "A", "inputs": []},
        {"op": "null", "name": "B", "inputs": []},
        tvm_op("elemwise_add", "elemwise_add", "2", "1", [[0, 0, 0], [1, 0, 0]]),
        tvm_op("__copy_add_to_sub", "__copy", "1", "0", [[2, 0, 0]]),
        {"op": "null", "name": "C", "inputs": []},
        tvm_op("elemwise_sub", "elemwise_sub", "2", "0", [[3, 0, 0], [4, 0, 0]]),
    ]

    shape = (4,)
    num_nodes = len(nodes)
    # A, add's intermediate and its copy live on the device; the copy target,
    # C and the subtraction result live on the host.
    device_index = [device_dev_type] * 3 + [host_dev_type] * 3
    attrs = {
        "storage_id": ["list_int", [3, 4, 0, 1, 5, 2]],
        "shape": ["list_shape", [shape] * num_nodes],
        "device_index": ["list_int", device_index],
        "dtype": ["list_int", [0] * num_nodes],
        "dltype": ["list_str", ["float32"] * num_nodes],
    }

    graph = {
        "nodes": nodes,
        "arg_nodes": [0, 1, 4],
        "node_row_ptr": list(range(num_nodes + 1)),
        "heads": [[5, 0, 0]],
        "attrs": attrs,
    }
    return json.dumps(graph)
def test_simplex_data_transferring():
    r"""
    Test the heterogeneous execution of a simple network where data
    transferring is from the target device to the host processor at runtime.
    The host processor is always assumed to be cpu, and the device varies.
    """
    host = "cpu"
    target_host = "llvm"
    host_dev = tvm.device(host)
    if not tvm.runtime.enabled(target_host):
        print("Skip test because llvm is not enabled.")
        return

    def check_device(device, target_device):
        if not tvm.runtime.enabled(target_device):
            print("Skip test because {} is not enabled.".format(target_device))
            return

        device_dev = tvm.device(device)
        graph = get_simplex_graph(host_dev.device_type, device_dev.device_type)
        shape = (4,)

        # Create module for add whose target is the device.
        tensor_a = te.placeholder(shape, name="A")
        tensor_b = te.placeholder(shape, name="B")
        elemwise_add = te.compute(
            shape, lambda *i: tensor_a(*i) + tensor_b(*i), name="elemwise_add"
        )
        target = topi.cpp.TEST_create_target(device)
        schedule_add = topi.cpp.cuda.schedule_injective(target, [elemwise_add])
        lower_add = tvm.lower(schedule_add, [tensor_a, tensor_b, elemwise_add], name="elemwise_add")

        # Insert copy. Neither compute nor schedule is required for the copy
        # node. The compute will be performed at runtime which is just data
        # copy from the input to the output.
        tensor_copy = te.placeholder(shape, name="__copy")

        # Create module for sub whose target is the host.
        tensor_c = te.placeholder(shape, name="C")
        elemwise_sub = te.compute(
            shape, lambda *i: tensor_copy(*i) - tensor_c(*i), name="elemwise_sub"
        )
        schedule_sub = te.create_schedule(elemwise_sub.op)
        lower_sub = tvm.lower(
            schedule_sub, [tensor_copy, tensor_c, elemwise_sub], name="elemwise_sub"
        )

        # Build one module containing both device and host functions.
        target_flist = {target_device: lower_add, target_host: lower_sub}
        target = tvm.target.Target(target, target_host)
        mhost = tvm.build(target_flist, target=target)
        dev = [host_dev, device_dev]
        mod = graph_executor.create(graph, mhost, dev)
        params = {}
        params["A"] = tensor_a = np.random.uniform(size=shape).astype(tensor_a.dtype)
        params["B"] = tensor_b = np.random.uniform(size=shape).astype(tensor_b.dtype)
        params["C"] = tensor_c = np.random.uniform(size=shape).astype(tensor_c.dtype)
        mod.set_input(**params)
        mod.run()
        out = mod.get_output(0, tvm.nd.empty(shape))
        # End-to-end check: (A + B) on the device, then - C on the host.
        np.testing.assert_equal(out.numpy(), (tensor_a + tensor_b) - tensor_c)

    dev_tar = {"cuda": "cuda", "opencl": "opencl"}
    for device, target in dev_tar.items():
        with tvm.target.Target(device):
            check_device(device, target)
def get_duplex_graph(host_dev_type, device_dev_type):
    r"""Build the hand-crafted JSON graph with two inserted copy nodes.

    Data is transferred back-and-forth between the target device and the CPU:

        A    B
         \  /
      elemwise_add (gpu)
           \
           copy    C
             \    /
          elemwise_sub (cpu)
               \
               copy   D
                 \   /
             elemwise_add (gpu)

    Parameters
    ----------
    host_dev_type : int
        The device type of the host processor, e.g. cpu.
    device_dev_type : int
        The device type of the device processor, e.g. gpu, opencl, etc.

    Returns
    -------
    json : json
        A json encoded object.
    """

    def make_var(name):
        # A "null" op is a graph input placeholder.
        return {"op": "null", "name": name, "inputs": []}

    def make_op(name, func_name, flatten_data, inputs):
        # A "tvm_op" node is backed by a compiled TVM function.
        return {
            "op": "tvm_op",
            "name": name,
            "attrs": {
                "flatten_data": flatten_data,
                "func_name": func_name,
                "num_inputs": str(len(inputs)),
                "num_outputs": "1",
            },
            "inputs": inputs,
        }

    # Nodes in topological order; entries of `inputs` are [node, output, version].
    nodes = [
        make_var("A"),
        make_var("B"),
        make_op("elemwise_add0", "elemwise_add0", "1", [[0, 0, 0], [1, 0, 0]]),
        make_op("__copy_add_to_sub", "__copy", "0", [[2, 0, 0]]),
        make_var("C"),
        make_op("elemwise_sub", "elemwise_sub", "0", [[3, 0, 0], [4, 0, 0]]),
        make_op("__copy_sub_to_add", "__copy", "0", [[5, 0, 0]]),
        make_var("D"),
        make_op("elemwise_add1", "elemwise_add1", "0", [[6, 0, 0], [7, 0, 0]]),
    ]
    num_nodes = len(nodes)
    shape = (4,)
    attrs = {
        "storage_id": ["list_int", [4, 5, 0, 1, 6, 2, 0, 7, 3]],
        "shape": ["list_shape", [shape] * num_nodes],
        # First add + its inputs live on the device, the sub section on the
        # host, and the final add back on the device.
        "device_index": [
            "list_int",
            [
                device_dev_type,
                device_dev_type,
                device_dev_type,
                host_dev_type,
                host_dev_type,
                host_dev_type,
                device_dev_type,
                device_dev_type,
                device_dev_type,
            ],
        ],
        "dtype": ["list_int", [0] * num_nodes],
        "dltype": ["list_str", ["float32"] * num_nodes],
    }
    graph = {
        "nodes": nodes,
        "arg_nodes": [0, 1, 4, 7],
        "node_row_ptr": list(range(num_nodes + 1)),
        "heads": [[8, 0, 0]],
        "attrs": attrs,
    }
    return json.dumps(graph)
def test_duplex_data_transferring():
    r"""
    Test the heterogeneous execution of a simple network where data
    transferring occurs back-and-forth between the target device and host
    processor.
    The host processor is always assumed to be cpu, and the target device
    varies.
    """
    host = "cpu"
    target_host = "llvm"
    host_dev = tvm.device(host)
    if not tvm.runtime.enabled(target_host):
        print("Skip test because llvm is not enabled.")
        return
    def check_device(device, target_device):
        # Builds the duplex graph for this device, compiles the add kernels for
        # the device and the sub kernel for the host, then runs the module.
        if not tvm.runtime.enabled(target_device):
            print("Skip test because {} is not enabled.".format(target_device))
            return
        device_dev = tvm.device(device)
        graph = get_duplex_graph(host_dev.device_type, device_dev.device_type)
        shape = (4,)
        # Insert copy nodes for data transferring between add and sub nodes.
        # Transfers data from gpu to cpu.
        copy_add_sub = te.placeholder(shape, name="__copy0")
        # Transfers data from cpu to gpu.
        copy_sub_add = te.placeholder(shape, name="__copy1")
        # Create a module containing adds on the device.
        tensor_a = te.placeholder(shape, name="A")
        tensor_b = te.placeholder(shape, name="B")
        tensor_d = te.placeholder(shape, name="D")
        elemwise_add0 = te.compute(
            shape, lambda *i: tensor_a(*i) + tensor_b(*i), name="elemwise_add0"
        )
        elemwise_add1 = te.compute(
            shape, lambda *i: copy_sub_add(*i) + tensor_d(*i), name="elemwise_add1"
        )
        target = topi.cpp.TEST_create_target(device)
        add_schedule0 = topi.cpp.cuda.schedule_injective(target, [elemwise_add0])
        lower_add0 = tvm.lower(
            add_schedule0, [tensor_a, tensor_b, elemwise_add0], name="elemwise_add0"
        )
        add_schedule1 = topi.cpp.cuda.schedule_injective(target, [elemwise_add1])
        lower_add1 = tvm.lower(
            add_schedule1, [tensor_d, copy_sub_add, elemwise_add1], name="elemwise_add1"
        )
        # Create module for sub whose target is the host.
        tensor_c = te.placeholder(shape, name="C")
        elemwise_sub = te.compute(
            shape, lambda *i: copy_add_sub(*i) - tensor_c(*i), name="elemwise_sub"
        )
        sub_schedule = te.create_schedule(elemwise_sub.op)
        lower_sub = tvm.lower(
            sub_schedule, [copy_add_sub, tensor_c, elemwise_sub], name="elemwise_sub"
        )
        # Merge both device-side adds into one IRModule so they are compiled
        # together under the device target.
        lower_add0.update(lower_add1)
        target_flist = {target_device: lower_add0, target_host: lower_sub}
        target = tvm.target.Target(target, target_host)
        mhost = tvm.build(target_flist, target=target)
        dev = [host_dev, device_dev]
        params = {}
        # NOTE: these assignments deliberately rebind the te.placeholder names
        # (tensor_a, ...) to numpy arrays, so the later expected-value
        # arithmetic operates on the random input data.
        params["A"] = tensor_a = np.random.uniform(size=shape).astype(tensor_a.dtype)
        params["B"] = tensor_b = np.random.uniform(size=shape).astype(tensor_b.dtype)
        params["C"] = tensor_c = np.random.uniform(size=shape).astype(tensor_c.dtype)
        params["D"] = tensor_d = np.random.uniform(size=shape).astype(tensor_d.dtype)
        def check_verify():
            # Run directly from the in-memory graph/module.
            mod = graph_executor.create(graph, mhost, dev)
            mod.set_input(**params)
            mod.run()
            out = mod.get_output(0, tvm.nd.empty(shape))
            np.testing.assert_equal(out.numpy(), tensor_a + tensor_b - tensor_c + tensor_d)
        def check_load_module():
            # Round-trip through an exported shared library + json file before
            # running, to exercise the deployment path.
            temp = utils.tempdir()
            path_lib = temp.relpath("deploy.so")
            mhost.export_library(path_lib)
            with open(temp.relpath("deploy.json"), "w") as out_file:
                out_file.write(graph)
            loaded_lib = tvm.runtime.load_module(path_lib)
            loaded_graph = open(temp.relpath("deploy.json")).read()
            mod = graph_executor.create(loaded_graph, loaded_lib, dev)
            mod.set_input(**params)
            mod.run()
            out = mod.get_output(0, tvm.nd.empty(shape))
            np.testing.assert_equal(out.numpy(), tensor_a + tensor_b - tensor_c + tensor_d)
        check_verify()
        check_load_module()
    dev_tar = {"cuda": "cuda", "opencl": "opencl"}
    for device, target in dev_tar.items():
        with tvm.target.Target(device):
            check_device(device, target)
if __name__ == "__main__":
    # Run the heterogeneous-execution tests directly as a script.
    test_simplex_data_transferring()
    test_duplex_data_transferring()
| 15,238 | 33.244944 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_feature_extractor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import re
from typing import List
import numpy as np
from tvm.meta_schedule import TuneContext
from tvm.meta_schedule.feature_extractor import PyFeatureExtractor
from tvm.meta_schedule.search_strategy import MeasureCandidate
from tvm.meta_schedule.utils import derived_object
from tvm.runtime.ndarray import array
def test_meta_schedule_feature_extractor():
    """A Python-derived feature extractor is invocable and returns its features."""

    @derived_object
    class FancyFeatureExtractor(PyFeatureExtractor):
        def extract_from(
            self,
            context: TuneContext,  # pylint: disable = unused-argument
            candidates: List[MeasureCandidate],  # pylint: disable = unused-argument
        ) -> List[np.ndarray]:
            # Emit a single random 4x5 feature matrix regardless of input.
            return [array(np.random.rand(4, 5))]

    result = FancyFeatureExtractor().extract_from(TuneContext(), [])
    assert len(result) == 1
    assert result[0].shape == (4, 5)
def test_meta_schedule_feature_extractor_as_string():
    """`str()` of a derived extractor follows the meta_schedule repr convention."""

    @derived_object
    class NotSoFancyFeatureExtractor(PyFeatureExtractor):
        def extract_from(
            self,
            context: TuneContext,  # pylint: disable = unused-argument
            candidates: List[MeasureCandidate],  # pylint: disable = unused-argument
        ) -> List[np.ndarray]:
            return []

    # The repr embeds the class name plus the object address in hex.
    expected = re.compile(r"meta_schedule.NotSoFancyFeatureExtractor\(0x[a-f|0-9]*\)")
    assert expected.match(str(NotSoFancyFeatureExtractor()))
if __name__ == "__main__":
    # Run the feature-extractor tests directly as a script.
    test_meta_schedule_feature_extractor()
    test_meta_schedule_feature_extractor_as_string()
| 2,461 | 38.079365 | 93 | py |
tvm | tvm-main/tests/python/unittest/test_auto_scheduler_search_task.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test search policy"""
import numpy as np
import tempfile
import tvm
import tvm.testing
from tvm import auto_scheduler
from tvm.auto_scheduler.utils import get_const_tuple
from tvm.testing.auto_scheduler import (
matmul_auto_scheduler_test,
zero_rank_compute_auto_scheduler_test,
zero_rank_reduce_auto_scheduler_test,
)
def test_search_task_add_task_input():
    """Registering task inputs records their names on the SearchTask in order."""
    auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()
    matmul_size = 64
    # Insertion order of this dict determines the recorded name order.
    extra_inputs = {
        "test_input_0": tvm.runtime.ndarray.empty((64, 64)),
        "test_input_1": tvm.runtime.ndarray.empty((10, 20)),
        "test_input_2": tvm.runtime.ndarray.empty((30, 40, 50)),
    }
    task = auto_scheduler.SearchTask(
        func="matmul_auto_scheduler_test",
        args=(matmul_size, matmul_size, matmul_size),
        target="llvm",
        task_inputs=extra_inputs,
        task_inputs_overwrite=True,
    )
    assert len(task.task_input_names) == 3
    for position, key in enumerate(["test_input_0", "test_input_1", "test_input_2"]):
        assert task.task_input_names[position] == key
def test_search_task_record():
    """SearchTask (de)serialization keeps workload, target and task inputs intact."""
    auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()
    N = 64
    target = "llvm"

    def assert_tasks_match(lhs, rhs):
        # TODO(jcf94): Check the compute dag & hardware parameter
        assert lhs.workload_key == rhs.workload_key
        assert str(lhs.target) == str(rhs.target)
        assert str(lhs.target.host) == str(rhs.target.host)
        assert lhs.layout_rewrite_option == rhs.layout_rewrite_option

    def round_trip(original):
        # Serialize to the FFI record format and read it back.
        record = auto_scheduler._ffi_api.SerializeSearchTask(original)
        return auto_scheduler._ffi_api.DeserializeSearchTask(record)

    # Log with no task input
    task = auto_scheduler.SearchTask(
        func="matmul_auto_scheduler_test", args=(N, N, N), target=target
    )
    assert_tasks_match(task, round_trip(task))

    # Log with 1 task input
    test_input_0 = tvm.runtime.ndarray.empty((64, 64))
    task = auto_scheduler.SearchTask(
        func="matmul_auto_scheduler_test",
        args=(N, N, N),
        target=target,
        task_inputs={"test_input_0": test_input_0},
        task_inputs_overwrite=True,
    )
    new_task = round_trip(task)
    assert_tasks_match(task, new_task)
    assert len(new_task.task_input_names) == 1
    assert new_task.task_input_names[0] == "test_input_0"

    # Log with multiple task inputs
    test_input_1 = tvm.runtime.ndarray.empty((64, 64))
    task = auto_scheduler.SearchTask(
        func="matmul_auto_scheduler_test",
        args=(N, N, N),
        target=target,
        task_inputs={
            "test_input_0": test_input_0,
            "test_input_1": test_input_1,
        },
        task_inputs_overwrite=True,
    )
    new_task = round_trip(task)
    assert_tasks_match(task, new_task)
    assert len(new_task.task_input_names) == 2
    assert new_task.task_input_names[0] == "test_input_0"
    assert new_task.task_input_names[1] == "test_input_1"

    # Log with version 0.5 (older record layout without task inputs)
    v5_log = """["[\\\"matmul_auto_scheduler_test\\\", 64, 64, 64]", "llvm -keys=cpu", [6, 64, 64, 0, 0, 0, 0, 0], "", 1]"""
    new_task = auto_scheduler._ffi_api.DeserializeSearchTask(v5_log)
    assert_tasks_match(task, new_task)
    assert len(new_task.task_input_names) == 0
def test_recover_measure_input_with_task_input():
    """Measure records preserve the SearchTask, including any task inputs."""
    auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()

    def assert_tasks_match(lhs, rhs):
        # Since this file is tests for search_task, we only check the search_task here
        assert lhs.workload_key == rhs.workload_key
        assert str(lhs.target) == str(rhs.target)
        assert str(lhs.target.host) == str(rhs.target.host)
        assert lhs.layout_rewrite_option == rhs.layout_rewrite_option

    def round_trip_task(original):
        # Dump a MeasureInput/MeasureResult pair to a record string, load it
        # back, and return the recovered task.
        inp = auto_scheduler.measure.MeasureInput(original, original.compute_dag.init_state)
        res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
        record = auto_scheduler.measure_record.dump_record_to_string(inp, res)
        return auto_scheduler.measure_record.load_record_from_string(record)[0].task

    # Log with no task input
    task = auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
    )
    assert_tasks_match(task, round_trip_task(task))

    # Log with 1 task input
    test_input_0 = tvm.runtime.ndarray.empty((64, 64))
    task = auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test,
        args=(512, 512, 512),
        target="llvm",
        task_inputs={
            "test_input_0": test_input_0,
        },
        task_inputs_overwrite=True,
    )
    new_task = round_trip_task(task)
    assert_tasks_match(task, new_task)
    assert len(new_task.task_input_names) == 1
    assert new_task.task_input_names[0] == "test_input_0"

    # Log with multiple task inputs
    test_input_1 = tvm.runtime.ndarray.empty((64, 64))
    task = auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test,
        args=(512, 512, 512),
        target="llvm",
        task_inputs={
            "test_input_0": test_input_0,
            "test_input_1": test_input_1,
        },
        task_inputs_overwrite=True,
    )
    new_task = round_trip_task(task)
    assert_tasks_match(task, new_task)
    assert len(new_task.task_input_names) == 2
    assert new_task.task_input_names[0] == "test_input_0"
    assert new_task.task_input_names[1] == "test_input_1"

    # Log with version 0.5 (older record layout without task inputs)
    v5_log = """{"i": [["[\\\"matmul_auto_scheduler_test\\\", 512, 512, 512]", "llvm -keys=cpu", [6, 64, 64, 0, 0, 0, 0, 0], "", 1], [[], []]], "r": [[0.1], 0, 0.2, 1], "v": "v0.6"}"""
    measure_log = auto_scheduler.measure_record.load_record_from_string(v5_log)
    new_task = measure_log[0].task
    assert_tasks_match(task, new_task)
    assert len(new_task.task_input_names) == 0
if __name__ == "__main__":
    # Run the SearchTask serialization tests directly as a script.
    test_search_task_add_task_input()
    test_search_task_record()
    test_recover_measure_input_with_task_input()
| 8,788 | 41.254808 | 184 | py |
tvm | tvm-main/tests/python/unittest/test_target_codegen_arm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import re
import os
import ctypes
def test_popcount():
    """Check that vectorized popcount on ARM NEON lowers to vcnt/vpaddl.

    For each element dtype and vector width, counts the `vpaddl` (pairwise
    add-long, used to widen the per-byte counts) and `vcnt` (byte-wise
    popcount) instructions in the generated assembly.
    """
    target = "llvm -mtriple=armv7l-none-linux-gnueabihf -mcpu=cortex-a53 -mattr=+neon"

    def check_correct_assembly(dtype, elements, counts):
        # `dtype` instead of `type`: avoid shadowing the builtin `type`.
        n = tvm.runtime.convert(elements)
        A = te.placeholder(n, dtype=dtype, name="A")
        B = te.compute(A.shape, lambda i: tvm.tir.popcount(A[i]), name="B")
        s = te.create_schedule(B.op)
        s[B].vectorize(s[B].op.axis[0])
        f = tvm.build(s, [A, B], target)
        # Verify we see the correct number of vpaddl and vcnt instructions in the assembly
        assembly = f.get_source("asm")
        matches = re.findall("vpaddl", assembly)
        assert len(matches) == counts
        matches = re.findall("vcnt", assembly)
        assert len(matches) == 1

    check_correct_assembly("uint16", 8, 1)
    check_correct_assembly("uint16", 4, 1)
    check_correct_assembly("uint32", 4, 2)
    check_correct_assembly("uint32", 2, 2)
    check_correct_assembly("uint64", 2, 3)
def test_vmlal_s16():
    """int8 dot-product reductions on ARM NEON should lower to vmlal.s16.

    Each group of four vectorized output lanes is expected to map onto one
    widening multiply-accumulate, so the assembly must contain N // 4
    occurrences of vmlal.s16.
    """
    target = "llvm -mtriple=armv7l-none-linux-gnueabihf -mcpu=cortex-a53 -mattr=+neon"

    def check_correct_assembly(N):
        # Elementwise product of two (K, N) int8 tensors, reduced over K.
        K = te.size_var("K")
        A = te.placeholder((K, N), dtype="int8", name="A")
        B = te.placeholder((K, N), dtype="int8", name="B")
        k = te.reduce_axis((0, K))
        C = te.compute(
            (N,),
            lambda n: te.sum(A[k, n].astype("int32") * B[k, n].astype("int32"), axis=[k]),
            name="C",
        )
        sched = te.create_schedule(C.op)
        sched[C].vectorize(sched[C].op.axis[0])
        built = tvm.build(sched, [A, B, C], target)
        # Count the vmlal.s16 instructions in the generated assembly.
        asm_code = built.get_source("asm")
        hits = re.findall("vmlal.s16", asm_code)
        assert len(hits) == N // 4

    for width in (8, 16, 32, 64):
        check_correct_assembly(width)

    def check_broadcast_correct_assembly(N):
        # Same reduction, but B is a (K,) vector broadcast across N lanes.
        K = te.size_var("K")
        A = te.placeholder((K, N), dtype="int8", name="A")
        B = te.placeholder((K,), dtype="int8", name="B")
        k = te.reduce_axis((0, K))
        C = te.compute(
            (N,),
            lambda n: te.sum(A[k, n].astype("int32") * B[k].astype("int32"), axis=[k]),
            name="C",
        )
        sched = te.create_schedule(C.op)
        sched[C].vectorize(sched[C].op.axis[0])
        built = tvm.build(sched, [A, B, C], target)
        # Count the vmlal.s16 instructions in the generated assembly.
        asm_code = built.get_source("asm")
        hits = re.findall("vmlal.s16", asm_code)
        assert len(hits) == N // 4

    for width in (8, 16, 32, 64):
        check_broadcast_correct_assembly(width)
if __name__ == "__main__":
    # Run the ARM codegen checks directly as a script.
    test_popcount()
    test_vmlal_s16()
| 3,698 | 34.567308 | 90 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_lower_thread_all_reduce.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.script import tir as T
class BaseCompare(tvm.testing.CompareBeforeAfter):
    """Fixture base: run LowerThreadAllreduce on `before`, compare to `expected`."""

    transform = tvm.tir.transform.LowerThreadAllreduce()
class BaseFailure(BaseCompare):
    """Fixture base for cases where the transform is expected to raise ValueError."""

    expected = ValueError
class TestBasic(BaseCompare):
    """Lower a 32-thread allreduce to a warp-level shuffle-down reduction.

    With a full warp (threadIdx.x extent 32) the cross-thread sum is computed
    in registers: five shuffle-down/add steps over strides 16, 8, 4, 2, 1,
    followed by a shuffle from lane 0 so all threads see the result.
    """

    def before(A: T.Buffer((128, 32), "float32"), B: T.Buffer(128, "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        A_flat = T.Buffer(4096, data=A.data)
        for i in range(128):
            threadIdx_x = T.launch_thread("threadIdx.x", 32)
            reduce_data = T.allocate([1], "float32", "local")
            reduce = T.Buffer(1, data=reduce_data, scope="local")
            with T.attr(
                T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                "reduce_scope",
                T.reinterpret("handle", T.uint64(0)),
            ):
                T.tvm_thread_allreduce(
                    T.uint32(1),
                    A_flat[0],
                    T.bool(True),
                    reduce[0],
                    threadIdx_x,
                )
            if threadIdx_x == 0:
                B[i] = reduce[0]

    def expected(A: T.Buffer((128, 32), "float32"), B: T.Buffer(128, "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        A_flat = T.Buffer(4096, data=A.data)
        for i in range(128):
            threadIdx_x = T.launch_thread("threadIdx.x", 32)
            reduce_data = T.allocate([1], "float32", "local")
            reduce = T.Buffer(1, data=reduce_data, scope="local")
            with T.attr(
                T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                "reduce_scope",
                T.reinterpret("handle", T.uint64(0)),
            ):
                mask_data = T.allocate([1], "uint32", "local")
                mask = T.Buffer(1, "uint32", data=mask_data, scope="local")
                t0_data = T.allocate([1], "float32", "local")
                t0 = T.Buffer(1, data=t0_data, scope="local")
                reduce[0] = A_flat[0]
                mask[0] = T.tvm_warp_activemask()
                # Shuffle-down reduction tree: strides 16 -> 8 -> 4 -> 2 -> 1.
                t0[0] = T.tvm_warp_shuffle_down(mask[0], reduce[0], 16, 32, 32)
                reduce[0] = reduce[0] + t0[0]
                t0[0] = T.tvm_warp_shuffle_down(mask[0], reduce[0], 8, 32, 32)
                reduce[0] = reduce[0] + t0[0]
                t0[0] = T.tvm_warp_shuffle_down(mask[0], reduce[0], 4, 32, 32)
                reduce[0] = reduce[0] + t0[0]
                t0[0] = T.tvm_warp_shuffle_down(mask[0], reduce[0], 2, 32, 32)
                reduce[0] = reduce[0] + t0[0]
                t0[0] = T.tvm_warp_shuffle_down(mask[0], reduce[0], 1, 32, 32)
                reduce[0] = reduce[0] + t0[0]
                # Broadcast lane 0's total to every thread in the warp.
                reduce[0] = T.tvm_warp_shuffle(mask[0], reduce[0], 0, 32, 32)
            if threadIdx_x == 0:
                B[i] = reduce[0]
class TestBasicWithDeclBuffer(BaseCompare):
    """Same warp-shuffle lowering, but the reduction buffer is a T.decl_buffer.

    The declared buffer must survive the transform unchanged while the
    allreduce call is still expanded into the shuffle-down sequence.
    """

    def before(A: T.Buffer((128, 32), "float32"), B: T.Buffer(128, "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        A_flat = T.Buffer(4096, data=A.data)
        for i in range(128):
            threadIdx_x = T.launch_thread("threadIdx.x", 32)
            reduce = T.decl_buffer(1, dtype="float32", scope="local")
            with T.attr(
                T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                "reduce_scope",
                T.reinterpret("handle", T.uint64(0)),
            ):
                T.tvm_thread_allreduce(
                    T.uint32(1),
                    A_flat[0],
                    T.bool(True),
                    reduce[0],
                    threadIdx_x,
                )
            if threadIdx_x == 0:
                B[i] = reduce[0]

    def expected(A: T.Buffer((128, 32), "float32"), B: T.Buffer(128, "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        A_flat = T.Buffer(4096, data=A.data)
        for i in range(128):
            threadIdx_x = T.launch_thread("threadIdx.x", 32)
            reduce = T.decl_buffer(1, dtype="float32", scope="local")
            with T.attr(
                T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                "reduce_scope",
                T.reinterpret("handle", T.uint64(0)),
            ):
                mask_data = T.allocate([1], "uint32", "local")
                mask = T.Buffer(1, "uint32", data=mask_data, scope="local")
                t0_data = T.allocate([1], "float32", "local")
                t0 = T.Buffer(1, data=t0_data, scope="local")
                reduce[0] = A_flat[0]
                mask[0] = T.tvm_warp_activemask()
                # Shuffle-down reduction tree: strides 16 -> 8 -> 4 -> 2 -> 1.
                t0[0] = T.tvm_warp_shuffle_down(mask[0], reduce[0], 16, 32, 32)
                reduce[0] = reduce[0] + t0[0]
                t0[0] = T.tvm_warp_shuffle_down(mask[0], reduce[0], 8, 32, 32)
                reduce[0] = reduce[0] + t0[0]
                t0[0] = T.tvm_warp_shuffle_down(mask[0], reduce[0], 4, 32, 32)
                reduce[0] = reduce[0] + t0[0]
                t0[0] = T.tvm_warp_shuffle_down(mask[0], reduce[0], 2, 32, 32)
                reduce[0] = reduce[0] + t0[0]
                t0[0] = T.tvm_warp_shuffle_down(mask[0], reduce[0], 1, 32, 32)
                reduce[0] = reduce[0] + t0[0]
                # Broadcast lane 0's total to every thread in the warp.
                reduce[0] = T.tvm_warp_shuffle(mask[0], reduce[0], 0, 32, 32)
            if threadIdx_x == 0:
                B[i] = reduce[0]
class TestReduceSummation(BaseCompare):
    """Allreduce preceded by a sequential partial sum per thread.

    Each thread first accumulates 4 strided elements into `normal_reduce`,
    then the cross-thread allreduce of the partial sums is lowered to the
    warp shuffle-down sequence.
    """

    def before(A: T.Buffer((128, 128), "float32"), B: T.Buffer(128, "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        A_flat = T.Buffer((16384,), data=A.data)
        for i in range(128):
            threadIdx_x = T.launch_thread("threadIdx.x", 32)
            normal_reduce_data = T.allocate([1], "float32", "local")
            normal_reduce = T.Buffer(1, data=normal_reduce_data, scope="local")
            reduce_data = T.allocate([1], "float32", "local")
            reduce = T.Buffer(1, data=reduce_data, scope="local")
            normal_reduce[0] = T.float32(0)
            for ko in range(4):
                normal_reduce[0] = normal_reduce[0] + A_flat[i * 128 + ko * 32 + threadIdx_x]
            with T.attr(
                T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                "reduce_scope",
                T.reinterpret("handle", T.uint64(0)),
            ):
                T.tvm_thread_allreduce(
                    T.uint32(1),
                    normal_reduce[0],
                    T.bool(True),
                    reduce[0],
                    threadIdx_x,
                )
            if threadIdx_x == 0:
                B[i] = reduce[0]

    def expected(A: T.Buffer((128, 128), "float32"), B: T.Buffer(128, "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        A_flat = T.Buffer(16384, data=A.data)
        for i in range(128):
            threadIdx_x = T.launch_thread("threadIdx.x", 32)
            normal_reduce_data = T.allocate([1], "float32", "local")
            normal_reduce = T.Buffer(1, data=normal_reduce_data, scope="local")
            reduce_data = T.allocate([1], "float32", "local")
            reduce = T.Buffer(1, data=reduce_data, scope="local")
            normal_reduce[0] = T.float32(0)
            for ko in range(4):
                normal_reduce[0] = normal_reduce[0] + A_flat[i * 128 + ko * 32 + threadIdx_x]
            with T.attr(
                T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                "reduce_scope",
                T.reinterpret("handle", T.uint64(0)),
            ):
                mask_data = T.allocate([1], "uint32", "local")
                mask = T.Buffer(1, "uint32", data=mask_data, scope="local")
                t0_data = T.allocate([1], "float32", "local")
                t0 = T.Buffer(1, data=t0_data, scope="local")
                reduce[0] = normal_reduce[0]
                mask[0] = T.tvm_warp_activemask()
                # Shuffle-down reduction tree: strides 16 -> 8 -> 4 -> 2 -> 1.
                t0[0] = T.tvm_warp_shuffle_down(mask[0], reduce[0], 16, 32, 32)
                reduce[0] = reduce[0] + t0[0]
                t0[0] = T.tvm_warp_shuffle_down(mask[0], reduce[0], 8, 32, 32)
                reduce[0] = reduce[0] + t0[0]
                t0[0] = T.tvm_warp_shuffle_down(mask[0], reduce[0], 4, 32, 32)
                reduce[0] = reduce[0] + t0[0]
                t0[0] = T.tvm_warp_shuffle_down(mask[0], reduce[0], 2, 32, 32)
                reduce[0] = reduce[0] + t0[0]
                t0[0] = T.tvm_warp_shuffle_down(mask[0], reduce[0], 1, 32, 32)
                reduce[0] = reduce[0] + t0[0]
                # Broadcast lane 0's total to every thread in the warp.
                reduce[0] = T.tvm_warp_shuffle(mask[0], reduce[0], 0, 32, 32)
            if threadIdx_x == 0:
                B[i] = reduce[0]
class TestMultiGroupReduction(BaseCompare):
    """Independent 32-thread reductions for each threadIdx.y group.

    Each (threadIdx.y) group reduces its own row over threadIdx.x; the final
    shuffle broadcasts from lane `32 * threadIdx_y` rather than lane 0.
    """

    @T.prim_func
    def before(A: T.Buffer((32, 32), "float32"), B: T.Buffer((32,), "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        threadIdx_y = T.launch_thread("threadIdx.y", 32)
        cross_thread_B = T.allocate([1], "float32", "local")
        threadIdx_x = T.launch_thread("threadIdx.x", 32)
        cross_thread_B_1 = T.Buffer((1,), data=cross_thread_B, scope="local")
        with T.attr(
            T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]),
            "reduce_scope",
            T.reinterpret("handle", T.uint64(0)),
        ):
            A_1 = T.Buffer((1024,), data=A.data)
            T.tvm_thread_allreduce(
                T.uint32(1),
                A_1[threadIdx_y * 32 + threadIdx_x],
                T.bool(True),
                cross_thread_B_1[0],
                threadIdx_x,
            )
        if threadIdx_x == 0:
            B_1 = T.Buffer((32,), data=B.data)
            B_1[threadIdx_y] = cross_thread_B_1[0]

    @T.prim_func
    def expected(A: T.Buffer((32, 32), "float32"), B: T.Buffer((32,), "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        threadIdx_y = T.launch_thread("threadIdx.y", 32)
        red_buf0 = T.allocate([1], "float32", "local")
        threadIdx_x = T.launch_thread("threadIdx.x", 32)
        red_buf0_1 = T.Buffer((1,), data=red_buf0, scope="local")
        with T.attr(
            T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]),
            "reduce_scope",
            T.reinterpret("handle", T.uint64(0)),
        ):
            mask = T.allocate([1], "uint32", "local")
            t0 = T.allocate([1], "float32", "local")
            A_1 = T.Buffer((1024,), data=A.data)
            red_buf0_1[0] = A_1[threadIdx_y * 32 + threadIdx_x]
            mask_1 = T.Buffer((1,), "uint32", data=mask, scope="local")
            mask_1[0] = T.tvm_warp_activemask()
            t0_1 = T.Buffer((1,), data=t0, scope="local")
            # Shuffle-down reduction tree: strides 16 -> 8 -> 4 -> 2 -> 1.
            t0_1[0] = T.tvm_warp_shuffle_down(mask_1[0], red_buf0_1[0], 16, 32, 32)
            red_buf0_1[0] = red_buf0_1[0] + t0_1[0]
            t0_1[0] = T.tvm_warp_shuffle_down(mask_1[0], red_buf0_1[0], 8, 32, 32)
            red_buf0_1[0] = red_buf0_1[0] + t0_1[0]
            t0_1[0] = T.tvm_warp_shuffle_down(mask_1[0], red_buf0_1[0], 4, 32, 32)
            red_buf0_1[0] = red_buf0_1[0] + t0_1[0]
            t0_1[0] = T.tvm_warp_shuffle_down(mask_1[0], red_buf0_1[0], 2, 32, 32)
            red_buf0_1[0] = red_buf0_1[0] + t0_1[0]
            t0_1[0] = T.tvm_warp_shuffle_down(mask_1[0], red_buf0_1[0], 1, 32, 32)
            red_buf0_1[0] = red_buf0_1[0] + t0_1[0]
            # Broadcast lane is offset by the group index (threadIdx.y).
            red_buf0_1[0] = T.tvm_warp_shuffle(mask_1[0], red_buf0_1[0], 32 * threadIdx_y, 32, 32)
        if threadIdx_x == 0:
            B_1 = T.Buffer((32,), data=B.data)
            B_1[threadIdx_y] = red_buf0_1[0]
class TestMultiGroupMask1(BaseCompare):
    """Sub-warp groups: 8-thread reductions packed inside one 32-lane warp.

    With threadIdx.x extent 8, each threadIdx.y group occupies 8 lanes, so
    the shuffle mask is restricted to that group's byte of the active mask
    (`0xFF << (8 * threadIdx_y)`), and only strides 4, 2, 1 are needed.
    """

    @T.prim_func
    def before(A: T.Buffer((32, 8), "float32"), B: T.Buffer((32,), "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        threadIdx_y = T.launch_thread("threadIdx.y", 32)
        cross_thread_B = T.allocate([1], "float32", "local")
        threadIdx_x = T.launch_thread("threadIdx.x", 8)
        cross_thread_B_1 = T.Buffer((1,), data=cross_thread_B, scope="local")
        with T.attr(
            T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]),
            "reduce_scope",
            T.reinterpret("handle", T.uint64(0)),
        ):
            A_1 = T.Buffer((256,), data=A.data)
            T.tvm_thread_allreduce(
                T.uint32(1),
                A_1[threadIdx_y * 8 + threadIdx_x],
                T.bool(True),
                cross_thread_B_1[0],
                threadIdx_x,
            )
        if threadIdx_x == 0:
            B_1 = T.Buffer((32,), data=B.data)
            B_1[threadIdx_y] = cross_thread_B_1[0]

    @T.prim_func
    def expected(A: T.Buffer((32, 8), "float32"), B: T.Buffer((32,), "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        threadIdx_y = T.launch_thread("threadIdx.y", 32)
        red_buf0 = T.allocate([1], "float32", "local")
        threadIdx_x = T.launch_thread("threadIdx.x", 8)
        red_buf0_1 = T.Buffer((1,), data=red_buf0, scope="local")
        with T.attr(
            T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]),
            "reduce_scope",
            T.reinterpret("handle", T.uint64(0)),
        ):
            mask = T.allocate([1], "uint32", "local")
            t0 = T.allocate([1], "float32", "local")
            A_1 = T.Buffer((256,), data=A.data)
            red_buf0_1[0] = A_1[threadIdx_y * 8 + threadIdx_x]
            mask_1 = T.Buffer((1,), "uint32", data=mask, scope="local")
            # Restrict the mask to this group's 8 lanes within the warp.
            mask_1[0] = T.bitwise_and(
                T.tvm_warp_activemask(),
                T.shift_left(T.uint32(255), T.uint32(8) * T.Cast("uint32", threadIdx_y)),
            )
            t0_1 = T.Buffer((1,), data=t0, scope="local")
            # Shuffle-down reduction tree over the 8-lane group: 4 -> 2 -> 1.
            t0_1[0] = T.tvm_warp_shuffle_down(mask_1[0], red_buf0_1[0], 4, 32, 32)
            red_buf0_1[0] = red_buf0_1[0] + t0_1[0]
            t0_1[0] = T.tvm_warp_shuffle_down(mask_1[0], red_buf0_1[0], 2, 32, 32)
            red_buf0_1[0] = red_buf0_1[0] + t0_1[0]
            t0_1[0] = T.tvm_warp_shuffle_down(mask_1[0], red_buf0_1[0], 1, 32, 32)
            red_buf0_1[0] = red_buf0_1[0] + t0_1[0]
            # Broadcast from the group's first lane (8 * threadIdx_y).
            red_buf0_1[0] = T.tvm_warp_shuffle(mask_1[0], red_buf0_1[0], 8 * threadIdx_y, 32, 32)
        if threadIdx_x == 0:
            B_1 = T.Buffer((32,), data=B.data)
            B_1[threadIdx_y] = red_buf0_1[0]
class TestMultiWarpReduce1(BaseCompare):
    """128-lane (4-warp) allreduce lowered to a two-stage shuffle reduction.

    Stage 1 reduces each warp with shuffles, stage 2 collects the 4 per-warp
    partials through a shared-memory staging buffer and reduces them with a
    second round of shuffles.

    NOTE(review): ``BaseCompare`` is defined earlier in this file; presumably
    it runs the lowering pass on ``before`` and structurally compares the
    result with ``expected``.
    """

    @T.prim_func
    def before(A: T.Buffer((128, 128), "float32"), B: T.Buffer((128,), "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        for i in range(128):
            threadIdx_x = T.launch_thread("threadIdx.x", 128)
            cross_thread_B = T.allocate([1], "float32", "local")
            cross_thread_B_1 = T.Buffer((1,), data=cross_thread_B, scope="local")
            with T.attr(
                T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]),
                "reduce_scope",
                T.reinterpret("handle", T.uint64(0)),
            ):
                A_1 = T.Buffer((16384,), data=A.data)
                T.tvm_thread_allreduce(
                    T.uint32(1),
                    A_1[i * 128 + threadIdx_x],
                    T.bool(True),
                    cross_thread_B_1[0],
                    threadIdx_x,
                )
            if threadIdx_x == 0:
                B_1 = T.Buffer((128,), data=B.data)
                B_1[i] = cross_thread_B_1[0]

    @T.prim_func
    def expected(A: T.Buffer((128, 128), "float32"), B: T.Buffer((128,), "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        for i in range(128):
            threadIdx_x = T.launch_thread("threadIdx.x", 128)
            red_buf0 = T.allocate([1], "float32", "local")
            red_buf0_3 = T.Buffer((1,), data=red_buf0, scope="local")
            with T.attr(
                T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]),
                "reduce_scope",
                T.reinterpret("handle", T.uint64(0)),
            ):
                mask = T.allocate([1], "uint32", "local")
                t0 = T.allocate([1], "float32", "local")
                red_buf0_1 = T.allocate([1], "float32", "local")
                mask_1 = T.allocate([1], "uint32", "local")
                t0_1 = T.allocate([1], "float32", "local")
                red_buf_staging = T.allocate([4], "float32", "shared")
                red_buf0_2 = T.Buffer((1,), data=red_buf0_1, scope="local")
                A_1 = T.Buffer((16384,), data=A.data)
                red_buf0_2[0] = A_1[i * 128 + threadIdx_x]
                # Stage 1: full-warp shuffle reduction (offsets 16..1).
                mask_2 = T.Buffer((1,), "uint32", data=mask_1, scope="local")
                mask_2[0] = T.tvm_warp_activemask()
                t0_2 = T.Buffer((1,), data=t0_1, scope="local")
                t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 16, 32, 32)
                red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
                t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 8, 32, 32)
                red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
                t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 4, 32, 32)
                red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
                t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 2, 32, 32)
                red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
                t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 1, 32, 32)
                red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
                red_buf0_2[0] = T.tvm_warp_shuffle(mask_2[0], red_buf0_2[0], 0, 32, 32)
                # Stage 2: one partial per warp staged through shared memory.
                red_buf_staging_1 = T.Buffer((4,), data=red_buf_staging, scope="shared")
                if threadIdx_x % 32 == 0:
                    red_buf_staging_1[threadIdx_x // 32] = red_buf0_2[0]
                T.tvm_storage_sync("shared")
                if threadIdx_x < 4:
                    red_buf0_3[0] = red_buf_staging_1[threadIdx_x]
                    mask_3 = T.Buffer((1,), "uint32", data=mask, scope="local")
                    mask_3[0] = T.bitwise_and(T.tvm_warp_activemask(), T.uint32(15))
                    t0_3 = T.Buffer((1,), data=t0, scope="local")
                    t0_3[0] = T.tvm_warp_shuffle_down(mask_3[0], red_buf0_3[0], 2, 32, 32)
                    red_buf0_3[0] = red_buf0_3[0] + t0_3[0]
                    t0_3[0] = T.tvm_warp_shuffle_down(mask_3[0], red_buf0_3[0], 1, 32, 32)
                    red_buf0_3[0] = red_buf0_3[0] + t0_3[0]
                    red_buf0_3[0] = T.tvm_warp_shuffle(mask_3[0], red_buf0_3[0], 0, 32, 32)
            if threadIdx_x == 0:
                B_1 = T.Buffer((128,), data=B.data)
                B_1[i] = red_buf0_3[0]
class TestMultiWarpReduce2(BaseCompare):
    """1024-lane (32-warp) allreduce: maximum staging-buffer size.

    Same two-stage lowering as TestMultiWarpReduce1, but with 32 per-warp
    partials the second stage is itself a full-warp reduction, so no mask
    narrowing is needed.
    """

    @T.prim_func
    def before(A: T.Buffer((1, 1024), "float32"), B: T.Buffer((1,), "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        threadIdx_x = T.launch_thread("threadIdx.x", 1024)
        cross_thread_B = T.allocate([1], "float32", "local")
        cross_thread_B_1 = T.Buffer((1,), data=cross_thread_B, scope="local")
        with T.attr(
            T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]),
            "reduce_scope",
            T.reinterpret("handle", T.uint64(0)),
        ):
            A_1 = T.Buffer((1024,), data=A.data)
            T.tvm_thread_allreduce(
                T.uint32(1), A_1[threadIdx_x], T.bool(True), cross_thread_B_1[0], threadIdx_x
            )
        if threadIdx_x == 0:
            B_1 = T.Buffer((1,), data=B.data)
            B_1[0] = cross_thread_B_1[0]

    @T.prim_func
    def expected(A: T.Buffer((1, 1024), "float32"), B: T.Buffer((1,), "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        threadIdx_x = T.launch_thread("threadIdx.x", 1024)
        red_buf0 = T.allocate([1], "float32", "local")
        red_buf0_3 = T.Buffer((1,), data=red_buf0, scope="local")
        with T.attr(
            T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]),
            "reduce_scope",
            T.reinterpret("handle", T.uint64(0)),
        ):
            mask = T.allocate([1], "uint32", "local")
            t0 = T.allocate([1], "float32", "local")
            red_buf0_1 = T.allocate([1], "float32", "local")
            mask_1 = T.allocate([1], "uint32", "local")
            t0_1 = T.allocate([1], "float32", "local")
            red_buf_staging = T.allocate([32], "float32", "shared")
            red_buf0_2 = T.Buffer((1,), data=red_buf0_1, scope="local")
            A_1 = T.Buffer((1024,), data=A.data)
            red_buf0_2[0] = A_1[threadIdx_x]
            # Stage 1: intra-warp shuffle reduction.
            mask_2 = T.Buffer((1,), "uint32", data=mask_1, scope="local")
            mask_2[0] = T.tvm_warp_activemask()
            t0_2 = T.Buffer((1,), data=t0_1, scope="local")
            t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 16, 32, 32)
            red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
            t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 8, 32, 32)
            red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
            t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 4, 32, 32)
            red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
            t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 2, 32, 32)
            red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
            t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 1, 32, 32)
            red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
            red_buf0_2[0] = T.tvm_warp_shuffle(mask_2[0], red_buf0_2[0], 0, 32, 32)
            # Stage 2: 32 per-warp partials reduced by a second full-warp pass.
            red_buf_staging_1 = T.Buffer((32,), data=red_buf_staging, scope="shared")
            if threadIdx_x % 32 == 0:
                red_buf_staging_1[threadIdx_x // 32] = red_buf0_2[0]
            T.tvm_storage_sync("shared")
            if threadIdx_x < 32:
                red_buf0_3[0] = red_buf_staging_1[threadIdx_x]
                mask_3 = T.Buffer((1,), "uint32", data=mask, scope="local")
                mask_3[0] = T.tvm_warp_activemask()
                t0_3 = T.Buffer((1,), data=t0, scope="local")
                t0_3[0] = T.tvm_warp_shuffle_down(mask_3[0], red_buf0_3[0], 16, 32, 32)
                red_buf0_3[0] = red_buf0_3[0] + t0_3[0]
                t0_3[0] = T.tvm_warp_shuffle_down(mask_3[0], red_buf0_3[0], 8, 32, 32)
                red_buf0_3[0] = red_buf0_3[0] + t0_3[0]
                t0_3[0] = T.tvm_warp_shuffle_down(mask_3[0], red_buf0_3[0], 4, 32, 32)
                red_buf0_3[0] = red_buf0_3[0] + t0_3[0]
                t0_3[0] = T.tvm_warp_shuffle_down(mask_3[0], red_buf0_3[0], 2, 32, 32)
                red_buf0_3[0] = red_buf0_3[0] + t0_3[0]
                t0_3[0] = T.tvm_warp_shuffle_down(mask_3[0], red_buf0_3[0], 1, 32, 32)
                red_buf0_3[0] = red_buf0_3[0] + t0_3[0]
                red_buf0_3[0] = T.tvm_warp_shuffle(mask_3[0], red_buf0_3[0], 0, 32, 32)
        if threadIdx_x == 0:
            B_1 = T.Buffer((1,), data=B.data)
            B_1[0] = red_buf0_3[0]
class TestMultiGroupMultiWarpReduction(BaseCompare):
    """Multiple reduction groups (threadIdx.y = 4), each spanning 4 warps.

    The staged second pass operates on a per-group slice of the staging
    buffer, so the stage-2 shuffle mask and source lane are offset by the
    group index.
    """

    @T.prim_func
    def before(A: T.Buffer((4, 128), "float32"), B: T.Buffer((4,), "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        threadIdx_y = T.launch_thread("threadIdx.y", 4)
        cross_thread_B = T.allocate([1], "float32", "local")
        threadIdx_x = T.launch_thread("threadIdx.x", 128)
        cross_thread_B_1 = T.Buffer((1,), data=cross_thread_B, scope="local")
        with T.attr(
            T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]),
            "reduce_scope",
            T.reinterpret("handle", T.uint64(0)),
        ):
            A_1 = T.Buffer((512,), data=A.data)
            T.tvm_thread_allreduce(
                T.uint32(1),
                A_1[threadIdx_y * 128 + threadIdx_x],
                T.bool(True),
                cross_thread_B_1[0],
                threadIdx_x,
            )
        if threadIdx_x == 0:
            B_1 = T.Buffer((4,), data=B.data)
            B_1[threadIdx_y] = cross_thread_B_1[0]

    @T.prim_func
    def expected(A: T.Buffer((4, 128), "float32"), B: T.Buffer((4,), "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        threadIdx_y = T.launch_thread("threadIdx.y", 4)
        red_buf0 = T.allocate([1], "float32", "local")
        threadIdx_x = T.launch_thread("threadIdx.x", 128)
        red_buf0_3 = T.Buffer((1,), data=red_buf0, scope="local")
        with T.attr(
            T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]),
            "reduce_scope",
            T.reinterpret("handle", T.uint64(0)),
        ):
            mask = T.allocate([1], "uint32", "local")
            t0 = T.allocate([1], "float32", "local")
            red_buf0_1 = T.allocate([1], "float32", "local")
            mask_1 = T.allocate([1], "uint32", "local")
            t0_1 = T.allocate([1], "float32", "local")
            red_buf_staging = T.allocate([16], "float32", "shared")
            red_buf0_2 = T.Buffer((1,), data=red_buf0_1, scope="local")
            A_1 = T.Buffer((512,), data=A.data)
            red_buf0_2[0] = A_1[threadIdx_y * 128 + threadIdx_x]
            # Stage 1: intra-warp reduction; broadcast from lane 32*threadIdx_y.
            mask_2 = T.Buffer((1,), "uint32", data=mask_1, scope="local")
            mask_2[0] = T.tvm_warp_activemask()
            t0_2 = T.Buffer((1,), data=t0_1, scope="local")
            t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 16, 32, 32)
            red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
            t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 8, 32, 32)
            red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
            t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 4, 32, 32)
            red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
            t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 2, 32, 32)
            red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
            t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 1, 32, 32)
            red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
            red_buf0_2[0] = T.tvm_warp_shuffle(mask_2[0], red_buf0_2[0], 32 * threadIdx_y, 32, 32)
            # Stage 2: 4 partials per group, staged at group offset threadIdx_y*4.
            red_buf_staging_1 = T.Buffer((16,), data=red_buf_staging, scope="shared")
            if threadIdx_x % 32 == 0:
                red_buf_staging_1[threadIdx_y * 4 + threadIdx_x // 32] = red_buf0_2[0]
            T.tvm_storage_sync("shared")
            if threadIdx_x < 16:
                red_buf0_3[0] = red_buf_staging_1[threadIdx_x]
                mask_3 = T.Buffer((1,), "uint32", data=mask, scope="local")
                mask_3[0] = T.bitwise_and(
                    T.tvm_warp_activemask(), T.Cast("uint32", T.shift_left(15, threadIdx_y))
                )
                t0_3 = T.Buffer((1,), data=t0, scope="local")
                t0_3[0] = T.tvm_warp_shuffle_down(mask_3[0], red_buf0_3[0], 2, 32, 32)
                red_buf0_3[0] = red_buf0_3[0] + t0_3[0]
                t0_3[0] = T.tvm_warp_shuffle_down(mask_3[0], red_buf0_3[0], 1, 32, 32)
                red_buf0_3[0] = red_buf0_3[0] + t0_3[0]
                red_buf0_3[0] = T.tvm_warp_shuffle(mask_3[0], red_buf0_3[0], 4 * threadIdx_y, 32, 32)
        if threadIdx_x == 0:
            B_1 = T.Buffer((4,), data=B.data)
            B_1[threadIdx_y] = red_buf0_3[0]
class TestMultiGroupMultiWarpPredicatedReduction(BaseCompare):
    """Predicated reduction (only 70 of 512 lanes contribute) across 2 groups.

    The predicate is handled before the allreduce by accumulating into a local
    ``in_thread_B`` guarded by ``threadIdx_x < 70``; inactive lanes feed the
    reducer identity (0.0), so the shuffle lowering itself stays unpredicated.
    """

    @T.prim_func
    def before(A: T.Buffer((2, 70), "float32"), B: T.Buffer((2,), "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        threadIdx_y = T.launch_thread("threadIdx.y", 2)
        in_thread_B = T.allocate([1], "float32", "local")
        cross_thread_B = T.allocate([1], "float32", "local")
        threadIdx_x = T.launch_thread("threadIdx.x", 512)
        in_thread_B_1 = T.Buffer((1,), data=in_thread_B, scope="local")
        in_thread_B_1[0] = T.float32(0)
        if threadIdx_x < 70:
            A_1 = T.Buffer((140,), data=A.data)
            in_thread_B_1[0] = in_thread_B_1[0] + A_1[threadIdx_y * 70 + threadIdx_x]
        cross_thread_B_1 = T.Buffer((1,), data=cross_thread_B, scope="local")
        with T.attr(
            T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]),
            "reduce_scope",
            T.reinterpret("handle", T.uint64(0)),
        ):
            T.tvm_thread_allreduce(
                T.uint32(1), in_thread_B_1[0], T.bool(True), cross_thread_B_1[0], threadIdx_x
            )
        if threadIdx_x == 0:
            B_1 = T.Buffer((2,), data=B.data)
            B_1[threadIdx_y] = cross_thread_B_1[0]

    @T.prim_func
    def expected(A: T.Buffer((2, 70), "float32"), B: T.Buffer((2,), "float32")):
        T.func_attr({"target": T.target("cuda", host="llvm")})
        threadIdx_y = T.launch_thread("threadIdx.y", 2)
        in_thread_B = T.allocate([1], "float32", "local")
        red_buf0 = T.allocate([1], "float32", "local")
        threadIdx_x = T.launch_thread("threadIdx.x", 512)
        in_thread_B_1 = T.Buffer((1,), data=in_thread_B, scope="local")
        in_thread_B_1[0] = T.float32(0)
        if threadIdx_x < 70:
            A_1 = T.Buffer((140,), data=A.data)
            in_thread_B_1[0] = in_thread_B_1[0] + A_1[threadIdx_y * 70 + threadIdx_x]
        red_buf0_3 = T.Buffer((1,), data=red_buf0, scope="local")
        with T.attr(
            T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]),
            "reduce_scope",
            T.reinterpret("handle", T.uint64(0)),
        ):
            mask = T.allocate([1], "uint32", "local")
            t0 = T.allocate([1], "float32", "local")
            red_buf0_1 = T.allocate([1], "float32", "local")
            mask_1 = T.allocate([1], "uint32", "local")
            t0_1 = T.allocate([1], "float32", "local")
            red_buf_staging = T.allocate([32], "float32", "shared")
            red_buf0_2 = T.Buffer((1,), data=red_buf0_1, scope="local")
            red_buf0_2[0] = in_thread_B_1[0]
            # Stage 1: intra-warp reduction; broadcast from lane 32*threadIdx_y.
            mask_2 = T.Buffer((1,), "uint32", data=mask_1, scope="local")
            mask_2[0] = T.tvm_warp_activemask()
            t0_2 = T.Buffer((1,), data=t0_1, scope="local")
            t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 16, 32, 32)
            red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
            t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 8, 32, 32)
            red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
            t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 4, 32, 32)
            red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
            t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 2, 32, 32)
            red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
            t0_2[0] = T.tvm_warp_shuffle_down(mask_2[0], red_buf0_2[0], 1, 32, 32)
            red_buf0_2[0] = red_buf0_2[0] + t0_2[0]
            red_buf0_2[0] = T.tvm_warp_shuffle(mask_2[0], red_buf0_2[0], 32 * threadIdx_y, 32, 32)
            # Stage 2: 16 partials per group staged at group offset threadIdx_y*16.
            red_buf_staging_1 = T.Buffer((32,), data=red_buf_staging, scope="shared")
            if threadIdx_x % 32 == 0:
                red_buf_staging_1[threadIdx_y * 16 + threadIdx_x // 32] = red_buf0_2[0]
            T.tvm_storage_sync("shared")
            if threadIdx_x < 32:
                red_buf0_3[0] = red_buf_staging_1[threadIdx_x]
                mask_3 = T.Buffer((1,), "uint32", data=mask, scope="local")
                mask_3[0] = T.bitwise_and(
                    T.tvm_warp_activemask(), T.Cast("uint32", T.shift_left(65535, threadIdx_y))
                )
                t0_3 = T.Buffer((1,), data=t0, scope="local")
                t0_3[0] = T.tvm_warp_shuffle_down(mask_3[0], red_buf0_3[0], 8, 32, 32)
                red_buf0_3[0] = red_buf0_3[0] + t0_3[0]
                t0_3[0] = T.tvm_warp_shuffle_down(mask_3[0], red_buf0_3[0], 4, 32, 32)
                red_buf0_3[0] = red_buf0_3[0] + t0_3[0]
                t0_3[0] = T.tvm_warp_shuffle_down(mask_3[0], red_buf0_3[0], 2, 32, 32)
                red_buf0_3[0] = red_buf0_3[0] + t0_3[0]
                t0_3[0] = T.tvm_warp_shuffle_down(mask_3[0], red_buf0_3[0], 1, 32, 32)
                red_buf0_3[0] = red_buf0_3[0] + t0_3[0]
                red_buf0_3[0] = T.tvm_warp_shuffle(mask_3[0], red_buf0_3[0], 16 * threadIdx_y, 32, 32)
        if threadIdx_x == 0:
            B_1 = T.Buffer((2,), data=B.data)
            B_1[threadIdx_y] = red_buf0_3[0]
if __name__ == "__main__":
    # Allow running this test file directly; tvm.testing.main collects the tests.
    tvm.testing.main()
| 32,995 | 46.751085 | 98 | py |
tvm | tvm-main/tests/python/unittest/test_autotvm_dispatch_context.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test dispatcher.
The dispatcher can choose which template to use according
to the parameters of workload"""
from tvm import autotvm
import tvm
@autotvm.template("testing/dispatch_fallback")
def simple_template(a, b):
    # With no tuning records registered for this workload, get_config() must
    # return a fallback configuration.
    cfg = autotvm.get_config()
    assert cfg.is_fallback
def test_fallback():
    # Instantiating the template triggers the is_fallback assertion inside
    # simple_template.
    simple_template(2, 3)
def test_tophub_kinds_match():
    """Tophub records for ARM targets must contain an ``arm_cpu`` target key."""

    def _check_has_arm_cpu(target_str):
        records = autotvm.tophub.context(target_str).best_by_targetkey
        assert len(records)
        # At least one recorded target key should mention arm_cpu.
        assert any("arm_cpu" in key for key, _ in records)

    _check_has_arm_cpu(
        "llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+neon,+v8.2a,+dotprod"
    )
    _check_has_arm_cpu("llvm -model=snapdragon835 -mtriple=arm64-linux-android -mattr=+neon")
if __name__ == "__main__":
    # Run the network-free dispatch test when invoked as a script.
    test_fallback()
| 1,722 | 32.134615 | 98 | py |
tvm | tvm-main/tests/python/unittest/test_tir_unsafe_hide_buffer_access.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
@T.prim_func
def indirect_mem_access(a: T.handle, idx_a: T.handle, b: T.handle, idx_b: T.handle) -> None:
    """Gather/scatter through index buffers: B[IB[i]] = A[IA[i]] for i in [0, 10)."""
    A = T.match_buffer(a, [128], dtype="float32")
    IA = T.match_buffer(idx_a, [10], dtype="int32")
    B = T.match_buffer(b, [128], dtype="float32")
    IB = T.match_buffer(idx_b, [10], dtype="int32")
    for i in range(10):
        with T.block("B"):
            vi = T.axis.spatial(10, i)
            # Both the data buffers and the index buffers appear in the regions.
            T.reads(A[IA[vi]], IA[vi])
            T.writes(B[IB[vi]], IB[vi])
            B[IB[vi]] = A[IA[vi]]
@T.prim_func
def indirect_mem_access_hide_ia(a: T.handle, idx_a: T.handle, b: T.handle, idx_b: T.handle) -> None:
    """Expected result of hiding read region #1: IA is dropped from T.reads."""
    A = T.match_buffer(a, [128], dtype="float32")
    IA = T.match_buffer(idx_a, [10], dtype="int32")
    B = T.match_buffer(b, [128], dtype="float32")
    IB = T.match_buffer(idx_b, [10], dtype="int32")
    for i in range(10):
        with T.block("B"):
            vi = T.axis.spatial(10, i)
            T.reads(A[IA[vi]])
            T.writes(B[IB[vi]], IB[vi])
            B[IB[vi]] = A[IA[vi]]
@T.prim_func
def indirect_mem_access_hide_ib(a: T.handle, idx_a: T.handle, b: T.handle, idx_b: T.handle) -> None:
    """Expected result of hiding write region #1: IB is dropped from T.writes."""
    A = T.match_buffer(a, [128], dtype="float32")
    IA = T.match_buffer(idx_a, [10], dtype="int32")
    B = T.match_buffer(b, [128], dtype="float32")
    IB = T.match_buffer(idx_b, [10], dtype="int32")
    for i in range(10):
        with T.block("B"):
            vi = T.axis.spatial(10, i)
            T.reads(A[IA[vi]], IA[vi])
            T.writes(B[IB[vi]])
            B[IB[vi]] = A[IA[vi]]
def test_hide_buffer_access_read():
    """Hiding read region #1 (the IA index buffer) must drop it from T.reads."""
    schedule = tir.Schedule(indirect_mem_access, debug_mask="all")
    blk = schedule.get_block("B")
    schedule.unsafe_hide_buffer_access(blk, "read", [1])
    tvm.ir.assert_structural_equal(indirect_mem_access_hide_ia, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=indirect_mem_access)
def test_hide_buffer_access_write():
    """Hiding write region #1 (the IB index buffer) must drop it from T.writes."""
    schedule = tir.Schedule(indirect_mem_access, debug_mask="all")
    blk = schedule.get_block("B")
    schedule.unsafe_hide_buffer_access(blk, "write", [1])
    tvm.ir.assert_structural_equal(indirect_mem_access_hide_ib, schedule.mod["main"])
    verify_trace_roundtrip(sch=schedule, mod=indirect_mem_access)
def test_hide_buffer_access_fail_buffer_type():
    """An unrecognized buffer-type string must be rejected with a TVMError."""
    schedule = tir.Schedule(indirect_mem_access, debug_mask="all")
    blk = schedule.get_block("B")
    with pytest.raises(tvm.error.TVMError):
        schedule.unsafe_hide_buffer_access(blk, "opaque", [0])
def test_hide_buffer_access_fail_buffer_index():
    """An out-of-range buffer index (only regions 0 and 1 exist) must fail."""
    schedule = tir.Schedule(indirect_mem_access, debug_mask="all")
    blk = schedule.get_block("B")
    with pytest.raises(tvm.error.TVMError):
        schedule.unsafe_hide_buffer_access(blk, "read", [2])
if __name__ == "__main__":
    # Allow running this test file directly.
    tvm.testing.main()
| 3,776 | 35.669903 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_mutator_mutate_thread_binding.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
from tvm import meta_schedule as ms
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir import Schedule
# pylint: disable=invalid-name, no-member
@T.prim_func
def element_wise(var_A: T.handle, var_B: T.handle) -> None:
    """Workload mutated by the tests below: B = A + 1 over a 512x512 buffer."""
    A = T.match_buffer(var_A, [512, 512], dtype="float32")
    B = T.match_buffer(var_B, [512, 512], dtype="float32")
    for i, j in T.grid(512, 512):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] + 1.0
# pylint: enable=invalid-name, no-member
def _sch() -> Schedule:
    """Build a schedule whose trace contains a sample_categorical decision.

    The fused 512*512 loop is split by a sampled tile size and bound to
    blockIdx.x / threadIdx.x; MutateThreadBinding mutates the sampled decision.
    """
    sch = Schedule(element_wise, debug_mask="all")
    # pylint: disable=invalid-name
    b0 = sch.get_block(name="C", func_name="main")
    l1, l2 = sch.get_loops(block=b0)
    l3 = sch.fuse(l1, l2)
    # decision=3 selects an index into `candidates` (presumably 256 here —
    # verify against sample_categorical semantics).
    v4 = sch.sample_categorical(
        candidates=[32, 64, 128, 256, 512, 1024],
        probs=[
            0.16666666666666666,
            0.16666666666666666,
            0.16666666666666666,
            0.16666666666666666,
            0.16666666666666666,
            0.16666666666666666,
        ],
        decision=3,
    )
    l5, l6 = sch.split(loop=l3, factors=[None, v4])
    sch.bind(loop=l5, thread_axis="blockIdx.x")
    sch.bind(loop=l6, thread_axis="threadIdx.x")
    # pylint: enable=invalid-name
    return sch
def _make_mutator(target: Target) -> ms.Mutator:
    """Create a TuneContext wired with a single MutateThreadBinding mutator
    and return that mutator instance."""
    space_gen = ms.space_generator.PostOrderApply(
        sch_rules=[],
        postprocs=[],
        mutator_probs={ms.mutator.MutateThreadBinding(): 1.0},
    )
    tune_ctx = ms.TuneContext(mod=element_wise, target=target, space_generator=space_gen)
    # Exactly one mutator was registered above; hand back its context-owned copy.
    return next(iter(tune_ctx.space_generator.mutator_probs))
def test_mutate_thread_binding():
    """MutateThreadBinding must eventually propose every alternative
    sample_categorical decision (all indices except the original 3)."""
    mutator = _make_mutator(target=Target("cuda"))
    sch = _sch()
    seen = set()
    for _ in range(100):
        mutated = mutator.apply(sch.trace)
        # The sample_categorical instruction sits 4 instructions from the end.
        seen.add(mutated.decisions[mutated.insts[-4]])
        if len(seen) == 5:
            break
    assert len(seen) == 5
    assert seen == {0, 1, 2, 4, 5}
if __name__ == "__main__":
    # Allow running this test file directly.
    test_mutate_thread_binding()
| 3,023 | 31.516129 | 93 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_block_scope.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule import DepKind
from tvm.tir.stmt_functor import post_order_visit
# pylint: disable=no-member,invalid-name,unused-variable
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
    """Producer/consumer pair: B = A * 2, then C = B + 1 (a single RAW dep)."""
    A = T.match_buffer(a, (128, 128), "float32")
    C = T.match_buffer(c, (128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    """Matmul with separate init and update blocks (RAW + WAW deps on C)."""
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j in T.grid(128, 128):
        with T.block("init"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = T.float32(0)
        for k in range(0, 128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def war_dependency(a: T.handle, b: T.handle, c: T.handle) -> None:
    """Block C reads B before block B writes it, producing a WAR dependency."""
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128, 128))
    C = T.match_buffer(c, (128, 128))
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
# pylint: enable=no-member,invalid-name,unused-variable
# pylint: disable=invalid-name
def _get_block(s: tir.ScheduleState, name_hint: str) -> tir.StmtSRef:
    """Locate the block named *name_hint* in ``s.mod["main"]`` and return its sref."""
    matches = []

    def _collect(node):
        if isinstance(node, tvm.tir.Block) and node.name_hint == name_hint:
            matches.append(node)

    post_order_visit(s.mod["main"].body, _collect)
    # Keep the last match, mirroring the post-order traversal order.
    assert matches and isinstance(matches[-1], tvm.tir.Block)
    return s.get_sref(matches[-1])
def test_elementwise_dependency():
    """B -> C is a single read-after-write dependency, visible in both directions."""
    state = tir.ScheduleState(elementwise, debug_mask="all")
    root = _get_block(state, "root")
    producer = _get_block(state, "B")
    consumer = _get_block(state, "C")
    # Forward lookup: dependencies whose source is the producer.
    (dep,) = state.get_block_scope(root).get_deps_by_src(producer)
    assert dep.src.same_as(producer)
    assert dep.dst.same_as(consumer)
    assert dep.kind == DepKind.RAW
    # Backward lookup: dependencies whose destination is the consumer.
    (dep,) = state.get_block_scope(root).get_deps_by_dst(consumer)
    assert dep.src.same_as(producer)
    assert dep.dst.same_as(consumer)
    assert dep.kind == DepKind.RAW
def test_matmul_dependency():
    """init -> update carries both a RAW and a WAW dependency on C."""
    state = tir.ScheduleState(matmul, debug_mask="all")
    root = _get_block(state, "root")
    init = _get_block(state, "init")
    update = _get_block(state, "update")
    # Both lookup directions must report the same pair of edges.
    for deps in (
        state.get_block_scope(root).get_deps_by_src(init),
        state.get_block_scope(root).get_deps_by_dst(update),
    ):
        first, second = deps
        assert first.src.same_as(init)
        assert first.dst.same_as(update)
        assert second.src.same_as(init)
        assert second.dst.same_as(update)
        # One edge is RAW and the other WAW, in either order.
        assert {first.kind, second.kind} == {DepKind.RAW, DepKind.WAW}
def test_war_dependency():
    """C (reads B) -> B (writes B) yields a write-after-read dependency."""
    state = tir.ScheduleState(war_dependency, debug_mask="all")
    root = _get_block(state, "root")
    reader = _get_block(state, "C")
    writer = _get_block(state, "B")
    # Forward lookup from the reading block.
    (dep,) = state.get_block_scope(root).get_deps_by_src(reader)
    assert dep.src.same_as(reader)
    assert dep.dst.same_as(writer)
    assert dep.kind == DepKind.WAR
    # Backward lookup from the writing block.
    (dep,) = state.get_block_scope(root).get_deps_by_dst(writer)
    assert dep.src.same_as(reader)
    assert dep.dst.same_as(writer)
    assert dep.kind == DepKind.WAR
if __name__ == "__main__":
    # Allow running this test file directly; tvm.testing.main collects the tests.
    tvm.testing.main()
| 5,211 | 32.410256 | 75 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_lift_attr_scope.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_coproc_lift():
    """LiftAttrScope should hoist a ``coproc_uop_scope`` attribute shared by
    sibling statements up to their common parent scope."""
    ib = tvm.tir.ir_builder.create()
    n = te.var("n")
    cp = te.thread_axis((0, 1), "cop")
    value = tvm.tir.StringImm("xxx")

    A = ib.allocate("float32", n, name="A", scope="global")
    with ib.for_range(0, n, name="i") as i:
        with ib.for_range(0, 10, name="j") as j:
            ib.scope_attr(cp, "coproc_uop_scope", value)
            A[i] = A[i] + 1
        with ib.if_scope(i.equal(0)):
            with ib.for_range(0, 10, name="j") as j:
                ib.scope_attr(cp, "coproc_uop_scope", value)
                A[j] = A[j] + 2
                A[j] = A[j] + 3
                A[j] = A[j] + 3
    body = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    body = tvm.tir.transform.LiftAttrScope("coproc_uop_scope")(mod)["main"]
    # Every branch carries the same attr, so it is lifted all the way out.
    assert body.body.body.node == cp

    # only able to lift to the common pattern of the last two fors.
    ib = tvm.tir.ir_builder.create()
    A = ib.allocate("float32", n, name="A", scope="global")
    with ib.for_range(0, n, name="i") as i:
        with ib.for_range(0, 10, name="j") as j:
            A[j] = A[j] + 1
        with ib.for_range(0, 10, name="j") as j:
            ib.scope_attr(cp, "coproc_uop_scope", value)
            A[i] = A[i] + 1
        with ib.for_range(0, 10, name="j") as j:
            ib.scope_attr(cp, "coproc_uop_scope", value)
            A[i] = A[i] + 2
    body = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], body))
    body = tvm.tir.transform.LiftAttrScope("coproc_uop_scope")(mod)["main"]
    # The first loop has no attr, so the seq collapses to [plain_for, attr(fors)].
    assert body.body.body.body[1].node == cp
    assert len(body.body.body.body) == 2
if __name__ == "__main__":
    # Allow running this test file directly.
    test_coproc_lift()
| 2,512 | 35.42029 | 75 | py |
tvm | tvm-main/tests/python/unittest/test_tvmscript_printer_highlight.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.script import tir as T
from tvm.script.highlight import cprint
def test_highlight_script():
    """Smoke-test ``show()`` on an IRModule / PrimFunc with every style option."""

    @tvm.script.ir_module
    class Module:
        @T.prim_func
        def main(  # type: ignore
            a: T.handle,
            b: T.handle,
            c: T.handle,
        ) -> None:  # pylint: disable=no-self-argument
            T.func_attr({"global_symbol": "main", "tir.noalias": True})
            A = T.match_buffer(a, [16, 128, 128])
            B = T.match_buffer(b, [16, 128, 128])
            C = T.match_buffer(c, [16, 128, 128])
            for n, i, j, k in T.grid(16, 128, 128, 128):
                with T.block("matmul"):
                    vn, vi, vj, vk = T.axis.remap("SSSR", [n, i, j, k])
                    with T.init():
                        C[vn, vi, vj] = 0.0  # type: ignore
                    C[vn, vi, vj] = C[vn, vi, vj] + A[vn, vi, vk] * B[vn, vj, vk]

    # Exercise the default, named-style, and ANSI rendering paths.
    Module.show()
    Module["main"].show()
    Module["main"].show(style="light")
    Module["main"].show(style="dark")
    Module["main"].show(style="ansi")
def test_cprint():
    """Check cprint accepts strings and scriptable nodes, and rejects the rest."""
    # Print string
    cprint("a + 1")

    # Print nodes with `script` method, e.g. PrimExpr
    cprint(tvm.tir.Var("v", "int32") + 1)

    # Cannot print non-Python-style codes if black installed
    # (black is optional, so skip this branch when it is absent).
    try:
        import black

        with pytest.raises(ValueError):
            cprint("if (a == 1) { a +=1; }")
    except ImportError:
        pass

    # Cannot print unsupported nodes (nodes without `script` method)
    with pytest.raises(TypeError):
        cprint(relay.const(1))
if __name__ == "__main__":
    # Allow running this test file directly; tvm.testing.main collects the tests.
    tvm.testing.main()
| 2,473 | 31.12987 | 81 | py |
tvm | tvm-main/tests/python/unittest/test_tir_analysis_get_block_access_region.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import tir
from tvm.script import tir as T
from tvm.ir import Range
# Fixture: nested blocks whose read/write regions are annotated by hand.
# Buffer D is only referenced through its data pointer (see the final
# T.evaluate), so the detector should report it as an opaque access.
@T.prim_func
def func() -> None:
    A = T.alloc_buffer((128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    C = T.alloc_buffer((128, 128), "float32")
    D = T.alloc_buffer((128, 128), "float32")
    with T.block():
        # Need add read/write region manually to avoid triggering block access region detector
        T.reads([B[0, 0], C[0:16, 0:16], A[4:12, 4:12]])
        T.writes([A[0:12, 0:12]])
        for i, j in T.grid(8, 8):
            A[i, j] = B[0, 0] + C[0, 0]
        for i, j in T.grid(2, 2):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                T.reads([A[vi * 4 + 4 : vi * 4 + 8, vj * 4 + 4 : vj * 4 + 8], C[12:16, 12:16]])
                T.writes([A[vi * 4 + 4 : vi * 4 + 8, vj * 4 + 4 : vj * 4 + 8]])
                for i, j in T.grid(4, 4):
                    A[vi * 4 + 4 + i, vj * 4 + 4 + j] += C[i + 12, j + 12]
        # Touches only D.data (no element access): an opaque access to D.
        T.evaluate(D.data)
# Fixture: accesses routed through T.match_buffer aliases (AA/B0/B1/AAA).
# B0.data / B1.data are touched via pointers, so B shows up opaquely.
@T.prim_func
def match_buffer_func() -> None:
    with T.block("root"):
        A = T.alloc_buffer((128, 128), "float32")
        B = T.alloc_buffer((128, 128), "float32")
        T.reads([])
        T.writes([])
        # Need add read/write region manually to avoid triggering block access region detector
        for i, j in T.grid(8, 8):
            with T.block("block"):
                vi, vj = T.axis.remap("SS", [i, j])
                T.reads(B[vi * 16 + 2 : vi * 16 + 12, vj * 16 + 2 : vj * 16 + 16])
                T.writes(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
                AA = T.match_buffer(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16], (16, 16))
                B0 = T.match_buffer(B[vi * 16 + 2 : vi * 16 + 6, vj * 16 + 2 : vj * 16 + 6], (4, 4))
                B1 = T.match_buffer(
                    B[vi * 16 + 8 : vi * 16 + 12, vj * 16 + 8 : vj * 16 + 16], (4, 8)
                )
                for ii, jj in T.grid(16, 16):
                    with T.block("AAA"):
                        vii, vjj = T.axis.remap("SS", [ii, jj])
                        T.reads([])
                        T.writes(AA[vii, vjj])
                        # Scalar (rank-0) match_buffer over a single element of AA.
                        AAA = T.match_buffer(AA[vii, vjj], ())
                        AAA[()] = 1.0
                # Pointer-only uses of the B aliases -> opaque accesses to B.
                T.evaluate(B0.data)
                T.evaluate(B1.data)
# Fixture: two levels of opaque (unnamed, axis-free) nested blocks with
# hand-annotated per-row and per-element regions.
@T.prim_func
def opaque_block_func() -> None:
    with T.block("root"):
        A = T.alloc_buffer((16, 16), "float32")
        B = T.alloc_buffer((16, 16), "float32")
        T.reads([])
        T.writes([])
        # Need add read/write region manually to avoid triggering block access region detector
        for i in range(0, 16):
            with T.block():
                T.reads(A[i, 0:16])
                T.writes([B[i, 0:16]])
                for j in range(0, 16):
                    with T.block():
                        T.reads(A[i, j])
                        T.writes(B[i, j])
                        B[i, j] = A[i, j] + 1.0
# Fixture: the extern call receives raw data pointers (A.data / B.data),
# so both buffers are accessed opaquely despite the annotated regions.
@T.prim_func
def opaque_access_func() -> None:
    A = T.alloc_buffer([1024])
    B = T.alloc_buffer([1024])
    for i in T.serial(0, 8):
        with T.block():
            v = T.axis.S(8, i)
            T.reads([A[v * 128 : v * 128 + 128]])
            T.writes([B[v * 128 : v * 128 + 128]])
            T.evaluate(
                T.call_extern("test", B.data, v * 128, 128, A.data, v * 128, 128, dtype="float32")
            )
# Fixture: buffers accessed only via access_ptr with "r"/"w"/"rw" masks; the
# annotations mirror what the read/write analysis should infer from the masks.
@T.prim_func
def opaque_access_with_tvm_access_ptr_func() -> None:
    A = T.alloc_buffer([1024])
    B = T.alloc_buffer([1024])
    C = T.alloc_buffer([1024])
    with T.block("opaque"):
        T.reads(A[0:1024], C[0:1024])
        T.writes(B[0:1024], C[0:1024])
        T.evaluate(A.access_ptr("r"))
        T.evaluate(B.access_ptr("w"))
        T.evaluate(C.access_ptr("rw"))
# Fixture: A is read only under the i < 5 branch of if_then_else, so the
# annotated read region is A[0:5] while all of B[0:8] is written.
@T.prim_func
def access_in_if_then_else_func() -> None:
    A = T.alloc_buffer([8])
    B = T.alloc_buffer([8])
    with T.block():
        T.reads([A[0:5]])
        T.writes([B[0:8]])
        for i in T.serial(0, 8):
            B[i] = T.if_then_else(i < 5, A[i], 0.0, dtype="float32")
# Fixture: the two branches together read A[0:5] and A[4:7], hence the
# annotated union A[0:7]; B[0:8] is written unconditionally.
@T.prim_func
def access_in_branch_func() -> None:
    A = T.alloc_buffer([8])
    B = T.alloc_buffer([8])
    with T.block():
        T.reads([A[0:7]])
        T.writes([B[0:8]])
        for i in T.serial(0, 8):
            if i < 5:
                B[i] = A[i] + 1.0
            else:
                B[i] = A[i - 1]
# Fixture: 16x16x16 matmul as a single reduction block (with T.init), used to
# check region detection on reduction blocks.
@T.prim_func
def gemm() -> None:
    A = T.alloc_buffer([16, 16], "float32")
    B = T.alloc_buffer([16, 16], "float32")
    C = T.alloc_buffer([16, 16], "float32")
    for i, j, k, ii, jj in T.grid(4, 4, 16, 4, 4):
        with T.block("update"):
            vi = T.axis.S(16, i * 4 + ii)
            vj = T.axis.S(16, j * 4 + jj)
            vk = T.axis.R(16, k)
            T.reads(A[vi, vk], B[vj, vk])
            T.writes(C[vi, vj])
            with T.init():
                C[vi, vj] = 0
            C[vi, vj] += A[vi, vk] * B[vj, vk]
# Fixture: the same matmul with the reduction decomposed into an explicit
# "init" block followed by an "update" block.
@T.prim_func
def decomposed_gemm() -> None:
    A = T.alloc_buffer([16, 16], "float32")
    B = T.alloc_buffer([16, 16], "float32")
    C = T.alloc_buffer([16, 16], "float32")
    for i, j in T.grid(4, 4):
        for ii, jj in T.grid(4, 4):
            with T.block("init"):
                vi = T.axis.S(16, i * 4 + ii)
                vj = T.axis.S(16, j * 4 + jj)
                T.reads([])
                T.writes(C[vi, vj])
                C[vi, vj] = 0
        for k, ii, jj in T.grid(16, 4, 4):
            with T.block("update"):
                vi = T.axis.S(16, i * 4 + ii)
                vj = T.axis.S(16, j * 4 + jj)
                vk = T.axis.R(16, k)
                T.reads(C[vi, vj], A[vi, vk], B[vj, vk])
                T.writes(C[vi, vj])
                C[vi, vj] += A[vi, vk] * B[vj, vk]
# Fixture: padding/unpadding blocks whose annotated regions use shifted
# indices (vi - 2) that are only valid under the guarding conditions.
@T.prim_func
def access_of_padding_pattern() -> None:
    X = T.alloc_buffer([28, 28])
    X_pad = T.alloc_buffer([32, 32])
    Y = T.alloc_buffer([28, 28])
    for i, j in T.grid(32, 32):
        with T.block("padding"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads([X[vi - 2, vj - 2]])
            T.writes([X_pad[vi, vj]])
            X_pad[vi, vj] = T.if_then_else(
                2 <= vi and vi < 30 and 2 <= vj and vj < 30, X[vi - 2, vj - 2], 0.0, dtype="float32"
            )
        with T.block("padding_reverse"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads([X_pad[vi, vj]])
            T.writes([Y[vi - 2, vj - 2]])
            if 2 <= vi and vi < 30 and 2 <= vj and vj < 30:
                Y[vi - 2, vj - 2] = X_pad[vi, vj]
def test_block_access_region_detector():
    """Detected read/write/opaque regions of `func` match its annotations."""
    root = func.body.block
    target_block = root.body.block
    var_to_buffer = {buffer.data: buffer for buffer in root.alloc_buffers}
    reads, writes, opaques = tir.analysis.get_block_access_region(target_block, var_to_buffer)
    tvm.ir.assert_structural_equal(target_block.reads, reads)
    tvm.ir.assert_structural_equal(target_block.writes, writes)
    # D is only touched through its data pointer, so the whole buffer must be
    # reported as an opaque access.
    opaque_buffer = root.alloc_buffers[-1]
    expected_opaque = [tvm.tir.BufferRegion(opaque_buffer, [Range(0, 128), Range(0, 128)])]
    tvm.ir.assert_structural_equal(expected_opaque, opaques)
def test_opaque_block():
    """Nested opaque blocks report exactly their annotated regions."""
    root = opaque_block_func.body.block
    var_to_buffer = {buffer.data: buffer for buffer in root.alloc_buffers}
    outer = root.body.body.block
    inner = outer.body.body.block
    # Same check for the per-row block and the per-element block inside it.
    for blk in (outer, inner):
        detected = tir.analysis.get_block_access_region(blk, var_to_buffer)
        tvm.ir.assert_structural_equal(blk.reads, detected[0])
        tvm.ir.assert_structural_equal(blk.writes, detected[1])
def test_opaque_access():
    """Pointer-based extern accesses make the two region analyses disagree."""
    root = opaque_access_func.body.block
    block = root.body.body.block
    var_to_buffer = {buffer.data: buffer for buffer in root.alloc_buffers}
    rw_regions = tir.analysis.get_block_read_write_region(block, var_to_buffer)
    access_regions = tir.analysis.get_block_access_region(block, var_to_buffer)
    # Reads and writes must each differ between the two analyses.
    for relaxed, strict in zip(rw_regions[:2], access_regions[:2]):
        with pytest.raises(ValueError):
            tvm.ir.assert_structural_equal(relaxed, strict)
def test_opaque_access_with_tvm_access_ptr():
    """access_ptr masks are understood by the read/write analysis only."""
    root = opaque_access_with_tvm_access_ptr_func.body.block
    block = root.body.block
    var_to_buffer = {buffer.data: buffer for buffer in root.alloc_buffers}
    rw_regions = tir.analysis.get_block_read_write_region(block, var_to_buffer)
    access_regions = tir.analysis.get_block_access_region(block, var_to_buffer)
    # The read/write analysis reproduces the annotated regions exactly...
    tvm.ir.assert_structural_equal(block.reads, rw_regions[0])
    tvm.ir.assert_structural_equal(block.writes, rw_regions[1])
    # ...while the plain access analysis reports something different.
    for relaxed, strict in zip(rw_regions, access_regions):
        with pytest.raises(ValueError):
            tvm.ir.assert_structural_equal(relaxed, strict)
def test_match_buffer():
    """Regions through T.match_buffer resolve once the aliases are registered."""
    root = match_buffer_func.body.block
    outer = root.body.body.body.block
    inner = outer.body[0].body.body.block
    var_to_buffer = {buffer.data: buffer for buffer in root.alloc_buffers}
    # Outer block: writes are tracked normally, while B is reached only through
    # opaque pointer uses, so its annotated reads land in the opaque slot.
    _, writes, opaques = tir.analysis.get_block_access_region(outer, var_to_buffer)
    tvm.ir.assert_structural_equal(outer.writes, writes)
    tvm.ir.assert_structural_equal(outer.reads, opaques)
    # Inner block AAA before registering aliases: AA is unknown, so its region
    # is simply not collected.
    detected = tir.analysis.get_block_access_region(inner, var_to_buffer)
    tvm.ir.assert_structural_equal([], detected[1])
    # Register every match_buffer alias of the outer block, then the inner
    # regions resolve to the annotated ones.
    for alias in outer.match_buffers:
        var_to_buffer[alias.buffer.data] = alias.buffer
    detected = tir.analysis.get_block_access_region(inner, var_to_buffer)
    tvm.ir.assert_structural_equal(inner.reads, detected[0])
    tvm.ir.assert_structural_equal(inner.writes, detected[1])
def test_access_in_if_then_else_func():
    """Both analyses agree on regions narrowed by if_then_else."""
    root = access_in_if_then_else_func.body.block
    block = root.body.block
    var_to_buffer = {buffer.data: buffer for buffer in root.alloc_buffers}
    rw_regions = tir.analysis.get_block_read_write_region(block, var_to_buffer)
    access_regions = tir.analysis.get_block_access_region(block, var_to_buffer)
    tvm.ir.assert_structural_equal(rw_regions[0], access_regions[0])
    tvm.ir.assert_structural_equal(rw_regions[1], access_regions[1])
def test_access_in_branch_func():
    """Both analyses agree on regions coming from if/else branches."""
    root = access_in_branch_func.body.block
    block = root.body.block
    var_to_buffer = {buffer.data: buffer for buffer in root.alloc_buffers}
    rw_regions = tir.analysis.get_block_read_write_region(block, var_to_buffer)
    access_regions = tir.analysis.get_block_access_region(block, var_to_buffer)
    tvm.ir.assert_structural_equal(rw_regions[0], access_regions[0])
    tvm.ir.assert_structural_equal(rw_regions[1], access_regions[1])
def test_access_of_padding_pattern():
    """Detected regions of the padding blocks match their annotations."""
    sch = tvm.tir.schedule.Schedule(access_of_padding_pattern)
    root_stmt = sch.get_sref(sch.get_block("root")).stmt
    var_to_buffer = {buffer.data: buffer for buffer in root_stmt.alloc_buffers}

    def _assert_region_equal(observed, expected):
        # Buffers must be identical; ranges only need to be provably equal.
        assert observed.buffer == expected.buffer
        analyzer = tvm.arith.Analyzer()
        for got, want in zip(observed.region, expected.region):
            analyzer.can_prove_equal(got.min, want.min)
            analyzer.can_prove_equal(got.extent, want.extent)

    def _check_block(name):
        block = sch.get_sref(sch.get_block(name)).stmt
        detected = tir.analysis.get_block_access_region(block, var_to_buffer)
        for idx, got in enumerate(detected[0]):
            _assert_region_equal(got, block.reads[idx])
        for idx, got in enumerate(detected[1]):
            _assert_region_equal(got, block.writes[idx])

    _check_block("padding")
    _check_block("padding_reverse")
def test_access_of_reduction():
    """A reduction block (with T.init) reports its annotated regions."""
    root = gemm.body.block
    # Five nested grid loops sit between the root block and the update block.
    update_block = root.body.body.body.body.body.body.block
    var_to_buffer = {buffer.data: buffer for buffer in root.alloc_buffers}
    detected = tir.analysis.get_block_access_region(update_block, var_to_buffer)
    tvm.ir.assert_structural_equal(update_block.reads, detected[0])
    tvm.ir.assert_structural_equal(update_block.writes, detected[1])
def test_access_of_decompose_reduction():
    """Both the init and update blocks report their annotated regions."""
    root = decomposed_gemm.body.block
    seq = root.body.body.body
    init_block = seq[0].body.body.block
    update_block = seq[1].body.body.body.block
    var_to_buffer = {buffer.data: buffer for buffer in root.alloc_buffers}
    for blk in (init_block, update_block):
        detected = tir.analysis.get_block_access_region(blk, var_to_buffer)
        tvm.ir.assert_structural_equal(blk.reads, detected[0])
        tvm.ir.assert_structural_equal(blk.writes, detected[1])
if __name__ == "__main__":
    # Run every test in definition order when invoked as a script.
    for test_func in (
        test_block_access_region_detector,
        test_opaque_block,
        test_opaque_access,
        test_opaque_access_with_tvm_access_ptr,
        test_match_buffer,
        test_access_in_if_then_else_func,
        test_access_in_branch_func,
        test_access_of_padding_pattern,
        test_access_of_reduction,
        test_access_of_decompose_reduction,
    ):
        test_func()
| 14,335 | 37.850949 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import pytest
from tvm import tir
from tvm._ffi.base import TVMError
from tvm.ir.transform import PassContext
import itertools
import pytest
def build_tir_func(func):
    """Attach the standard attributes to *func* and compile it into a module."""
    func = func.with_attr("global_symbol", "main")
    # Honour the pass-context default of marking PrimFuncs noalias.
    if PassContext.current().config.get("tir.noalias", True):
        func = func.with_attr("tir.noalias", True)
    return tvm.build(tvm.IRModule({"main": func}))
def test_scalar_add():
    """Scalar addition across mixed dtypes upcasts and still sums correctly."""
    # All these types should be interchangeable: float16 + float32 upconverts
    # the float16 to float32, and an int mixed with a float is cast to the
    # float type.
    lhs_types = ["float32", "float16", "int32", "int64"]
    rhs_types = ["float32", "float16"]
    for lhs_type, rhs_type in itertools.product(lhs_types, rhs_types):
        # Inputs are float32 vars; the casts probe upcasting between types.
        lhs_input = tir.Var("lhs", "float32")
        rhs_input = tir.Var("rhs", "float32")
        total = tir.Cast(lhs_type, lhs_input) + tir.Cast(rhs_type, rhs_input)
        body = tir.Evaluate(tir.ret(total))
        compiled = build_tir_func(tir.PrimFunc([lhs_input, rhs_input], body))
        assert compiled(1.0, 2.0) == 3.0
def assignment_helper(store_dtype, value_dtype):
    """Build a Let binding a *value_dtype* var into a *store_dtype* var.

    TIR raises during construction if the implicit conversion is rejected.
    """
    target = tir.Var("store", dtype=store_dtype)
    source = tir.Var("value", dtype=value_dtype)
    tir.Let(target, source, body=target)
def test_fail_implicit_downcasts_same_type():
    """Binding a wider value to a narrower var of the same base type fails."""
    # `bits` must stay sorted from narrowest to widest so each adjacent pair
    # (narrow, wide) is an implicit downcast.
    bits = [8, 16, 32, 64]
    # Renamed from `type`, which shadowed the builtin.
    for base_type in ["float", "int", "uint"]:
        for narrow, wide in zip(bits, bits[1:]):
            with pytest.raises(TVMError):
                assignment_helper(
                    store_dtype=f"{base_type}{narrow}", value_dtype=f"{base_type}{wide}"
                )
def test_cast_between_types():
    """Only assignments between identical dtypes are accepted."""
    bits = [16, 32]
    type_names = ["float", "int", "uint"]
    combos = itertools.product(type_names, bits, type_names, bits)
    for store_type, store_bits, value_type, value_bits in combos:
        store_dtype = f"{store_type}{store_bits}"
        value_dtype = f"{value_type}{value_bits}"
        if store_dtype != value_dtype:
            # TODO: we might want to allow casts between uint and int types
            with pytest.raises(TVMError):
                assignment_helper(store_dtype, value_dtype)
        else:
            assignment_helper(store_dtype, value_dtype)
def test_ret_const():
    """A function that just returns a constant evaluates to that constant."""
    body = tir.Evaluate(tir.ret(tir.const(0)))
    compiled = build_tir_func(tir.PrimFunc([], body))
    assert compiled() == 0
def test_control_flow_jump():
    """`ret` inside a taken branch returns immediately, skipping later code."""
    builder = tvm.tir.ir_builder.create()
    a = tir.Var("a", "float32")
    b = tir.Var("b", "float32")
    with builder.if_scope(True):
        builder.emit(tir.Evaluate(tir.ret(a)))
    builder.emit(tir.Evaluate(tir.ret(b)))
    compiled = build_tir_func(tir.PrimFunc([a, b], builder.get()))
    # The condition is always true, so the first `ret` wins.
    assert compiled(1.0, 2.0) == 1.0
def test_exception():
    """Constructing a Var with a non-string name raises TVMError."""
    with pytest.raises(tvm.TVMError):
        tir.Var(name=1, dtype="int")
def test_eq_ops():
    """IntImm comparisons with None raise; StringImm compares cleanly."""
    int_imm = tir.IntImm("int8", 1)
    with pytest.raises(ValueError):
        assert int_imm != None
    with pytest.raises(ValueError):
        assert not int_imm == None
    str_imm = tir.StringImm("abc")
    assert str_imm != None
    assert not str_imm == None
if __name__ == "__main__":
    test_scalar_add()
    # The next two tests were defined but previously never invoked when this
    # file was run as a script; they now run here too.
    test_fail_implicit_downcasts_same_type()
    test_cast_between_types()
    test_ret_const()
    test_control_flow_jump()
    test_exception()
    test_eq_ops()
| 4,443 | 31.202899 | 87 | py |
tvm | tvm-main/tests/python/unittest/test_tir_usmp_transform_create_io_allocates.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from typing import NamedTuple, List
import tvm
from tvm.script import tir as T
# fmt: off
# Fixture module: one input tensor ("input") and one output tensor ("output");
# a single operator call reads the input plus linked parameter "p0".
@tvm.script.ir_module
class SingleInputSingleOutput:
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])
    @T.prim_func
    def __tvm_main__(input: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "__tvm_main__", "runner_function": True})
        input_buffer_var = T.match_buffer(input, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        output_buffer_var = T.match_buffer(output, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input_buffer_var.data, T.lookup_param("p0", dtype="handle"), output_buffer_var.data, dtype="int32"))
# fmt: on
# fmt: off
# Fixture module: two input tensors ("input1", "input2") feed one operator
# call that produces a single output tensor ("output").
@tvm.script.ir_module
class TwoInputSingleOutput:
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])
    @T.prim_func
    def __tvm_main__(input1: T.handle, input2: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "__tvm_main__", "runner_function": True})
        input1_buffer_var = T.match_buffer(input1, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        input2_buffer_var = T.match_buffer(input2, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        output_buffer_var = T.match_buffer(output, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input1_buffer_var.data, input2_buffer_var.data, output_buffer_var.data, dtype="int32"))
# fmt: on
# fmt: off
# Fixture module: two independent operator calls, each pairing one input
# ("input1"/"input2") with one output ("output1"/"output2") and a linked
# parameter ("p0"/"p1").
@tvm.script.ir_module
class TwoInputTwoOutput:
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])
    @T.prim_func
    def __tvm_main__(input1: T.handle, input2: T.handle, output1: T.handle, output2: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "__tvm_main__", "runner_function": True})
        input1_buffer_var = T.match_buffer(input1, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        input2_buffer_var = T.match_buffer(input2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        output1_buffer_var = T.match_buffer(output1, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        output2_buffer_var = T.match_buffer(output2, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input1_buffer_var.data, T.lookup_param("p0", dtype="handle"), output1_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input2_buffer_var.data, T.lookup_param("p1", dtype="handle"), output2_buffer_var.data, dtype="int32"))
# fmt: on
# fmt: off
# Fixture module: one input tensor ("input") consumed by two operator calls
# producing two output tensors ("output1", "output2").
@tvm.script.ir_module
class SingleInputTwoOutput:
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])
    @T.prim_func
    def __tvm_main__(input: T.handle, output1: T.handle, output2: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "__tvm_main__", "runner_function": True})
        input_buffer_var = T.match_buffer(input, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        output1_buffer_var = T.match_buffer(output1, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        output2_buffer_var = T.match_buffer(output2, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input_buffer_var.data, T.lookup_param("p0", dtype="handle"), output1_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input_buffer_var.data, T.lookup_param("p1", dtype="handle"), output2_buffer_var.data, dtype="int32"))
# fmt: on
# Expected properties of one I/O tensor: annotated name, allocation extents,
# and element dtype. Functional NamedTuple form; fields are positional in the
# order (name, shape, dtype).
IOInfo = NamedTuple("IOInfo", [("name", str), ("shape", list), ("dtype", str)])
IOInfo.__doc__ = """A data structure to hold test outputs per I/O tensor"""
def check_io_allocations(mod: tvm.IRModule, inputs: List[IOInfo], outputs: List[IOInfo]):
    """This function checks whether outer most allocates correspond to I/O tensors

    Asserts that the leading Allocate nodes of ``__tvm_main__`` carry
    ``input_tensor``/``output_tensor`` annotations matching *inputs* and
    *outputs* (name, extents, dtype), and that every expected tensor is seen
    exactly once.
    """
    # Set to True at the first visited node that is not an Allocate; from then
    # on further Allocates are ignored, so only the outermost run is checked.
    found_non_io_allocate_node = False
    # Remaining (not yet matched) expected tensors, keyed by name; entries are
    # deleted as they are matched and both dicts must end up empty.
    input_name_to_info = {}
    for input in inputs:
        input_name_to_info[input.name] = input
    output_name_to_info = {}
    for output in outputs:
        output_name_to_info[output.name] = output
    def _visit(stmt):
        nonlocal found_non_io_allocate_node
        if isinstance(stmt, tvm.tir.Allocate) and not found_non_io_allocate_node:
            allocate = stmt
            # An Allocate may carry an input annotation, an output annotation,
            # or neither.
            if dict(allocate.annotations).get("input_tensor"):
                input_tensor_name = str(dict(allocate.annotations).get("input_tensor"))
                assert input_tensor_name in input_name_to_info.keys()
                assert input_name_to_info[input_tensor_name].shape == list(allocate.extents)
                assert input_name_to_info[input_tensor_name].dtype == str(allocate.dtype)
                del input_name_to_info[input_tensor_name]
            if dict(allocate.annotations).get("output_tensor"):
                output_tensor_name = str(dict(allocate.annotations).get("output_tensor"))
                assert output_tensor_name in output_name_to_info.keys()
                assert output_name_to_info[output_tensor_name].shape == list(allocate.extents)
                assert output_name_to_info[output_tensor_name].dtype == str(allocate.dtype)
                del output_name_to_info[output_tensor_name]
        else:
            found_non_io_allocate_node = True
    main = mod["__tvm_main__"]
    # NOTE(review): only tir.Allocate and tir.Call nodes are visited by this
    # transform, so the "first non-Allocate" cutoff fires on the first Call.
    tvm.tir.stmt_functor.ir_transform(main.body, _visit, None, ["tir.Allocate", "tir.Call"])
    # Every expected I/O tensor must have been matched and removed.
    assert len(input_name_to_info) == 0
    assert len(output_name_to_info) == 0
# Each case: (fixture module, expected input tensors, expected output tensors).
@pytest.mark.parametrize(
    "test_mod, input_names, output_names",
    [
        (
            SingleInputSingleOutput,
            [IOInfo("input", [150528], "uint8")],
            [IOInfo("output", [452], "int16")],
        ),
        (
            SingleInputTwoOutput,
            [IOInfo("input", [150528], "uint8")],
            [IOInfo("output1", [452], "int16"), IOInfo("output2", [452], "int16")],
        ),
        (
            TwoInputSingleOutput,
            [IOInfo("input1", [150528], "uint8"), IOInfo("input2", [1], "int16")],
            [IOInfo("output", [452], "int16")],
        ),
        (
            TwoInputTwoOutput,
            [IOInfo("input1", [150528], "uint8"), IOInfo("input2", [150528], "uint8")],
            [IOInfo("output1", [452], "int16"), IOInfo("output2", [452], "int16")],
        ),
    ],
)
def test_mobilenet_subgraph(test_mod, input_names, output_names):
    """CreateAllocatesForIO must add one annotated Allocate per I/O tensor."""
    CreateAllocatesForIO = tvm.get_global_func("tir.usmp.transform.CreateAllocatesForIO")
    test_mod = CreateAllocatesForIO()(test_mod)
    check_io_allocations(test_mod, input_names, output_names)
| 11,249 | 53.347826 | 188 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.